max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
contrib/buildrefactor/tests/python/pants_test/contrib/buildrefactor/test_meta_rename_integration.py
|
revl/pants
| 1
|
6626151
|
<reponame>revl/pants<filename>contrib/buildrefactor/tests/python/pants_test/contrib/buildrefactor/test_meta_rename_integration.py
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
class MetaRenameIntegrationTest(PantsRunIntegrationTest):
def test_meta_rename(self):
pre_dependees_run = self.run_pants(
["dependees", "testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:X"]
)
self.run_pants(
[
"meta-rename",
"--from=testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:X",
"--to=testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:Y",
"testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:X",
]
)
post_dependees_run = self.run_pants(
["dependees", "testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:Y"]
)
self.run_pants(
[
"meta-rename",
"--from=testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:Y",
"--to=testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:X",
"testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:Y",
]
)
self.assertEqual(pre_dependees_run.stdout_data, post_dependees_run.stdout_data)
|
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
class MetaRenameIntegrationTest(PantsRunIntegrationTest):
def test_meta_rename(self):
pre_dependees_run = self.run_pants(
["dependees", "testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:X"]
)
self.run_pants(
[
"meta-rename",
"--from=testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:X",
"--to=testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:Y",
"testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:X",
]
)
post_dependees_run = self.run_pants(
["dependees", "testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:Y"]
)
self.run_pants(
[
"meta-rename",
"--from=testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:Y",
"--to=testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:X",
"testprojects/tests/java/org/pantsbuild/testproject/buildrefactor:Y",
]
)
self.assertEqual(pre_dependees_run.stdout_data, post_dependees_run.stdout_data)
|
en
| 0.528453
|
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE).
| 2.034334
| 2
|
setup.py
|
didorothy/coverage-badge
| 0
|
6626152
|
<filename>setup.py
from setuptools import setup
readme = open('README.rst').read()
setup(name='coverage-badge',
version='1.0.2',
description='Generate coverage badges for Coverage.py.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/dbrgn/coverage-badge',
install_requires=['coverage==5.*'],
packages=['coverage_badge'],
zip_safe=True,
include_package_data=True,
license='MIT',
keywords='coverage badge shield',
long_description=readme,
entry_points={
'console_scripts': [
'coverage-badge = coverage_badge.__main__:main',
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Testing',
],
)
|
<filename>setup.py
from setuptools import setup
readme = open('README.rst').read()
setup(name='coverage-badge',
version='1.0.2',
description='Generate coverage badges for Coverage.py.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/dbrgn/coverage-badge',
install_requires=['coverage==5.*'],
packages=['coverage_badge'],
zip_safe=True,
include_package_data=True,
license='MIT',
keywords='coverage badge shield',
long_description=readme,
entry_points={
'console_scripts': [
'coverage-badge = coverage_badge.__main__:main',
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Testing',
],
)
|
none
| 1
| 1.099869
| 1
|
|
src/corpus2csv.py
|
tiefenauer/ip9
| 4
|
6626153
|
import argparse
import random
from datetime import timedelta
from operator import getitem
from os import listdir, makedirs, remove
from os.path import join, exists, getsize
import h5py
import librosa
import numpy as np
import pandas as pd
import soundfile as sf
from python_speech_features import mfcc
from scipy.io import wavfile
from tqdm import tqdm
from corpus.corpus import DeepSpeechCorpus
from util.audio_util import distort_audio
from util.corpus_util import get_corpus
from util.log_util import create_args_str
parser = argparse.ArgumentParser(description="""Export speech segments of corpus to CSV files and synthesize data""")
parser.add_argument('-id', type=str, required=True,
help='target-ID for processed files')
parser.add_argument('-s', '--source_dir', type=str, required=True,
help='id of corpus or path to corpus to export')
parser.add_argument('-t', '--target_dir', type=str, required=True,
help='target directory to save results')
parser.add_argument('-l', '--language', type=str, required=True,
help='language to use')
parser.add_argument('-f', '--force', action='store_true',
help='(optional) force override existing files. Default: False')
parser.add_argument('-x', '--synthesize', action='store_true',
help='whether to create synthesized data')
parser.add_argument('-num', '--include_numeric', action='store_true', default=False,
help='(optional) whether to include transcripts with numeric chars (default: False)')
parser.add_argument('-min', '--min_duration', nargs='?', type=int, default=0,
help='(optional) maximum number of speech segments minutes to process (default: all)')
parser.add_argument('-max', '--max_duration', nargs='?', type=int, default=0,
help='(optional) maximum number of speech segments minutes to process (default: all)')
parser.add_argument('-p', '--precompute_features', action='store_true',
help='(optional) precompute MFCC features in HDF5 format. Default: False')
args = parser.parse_args()
def main(args):
print(create_args_str(args))
target_dir, corpus_id, force, synthesize, min_dur, max_dur, precompute_features = setup(args)
corpus = get_corpus(args.source_dir, args.language)
corpus.summary()
print(f'processing {corpus.name} corpus and saving split segments in {target_dir}')
csv_train, csv_dev, csv_test = extract_segments(target_dir, corpus_id, corpus, synthesize, min_dur, max_dur, force)
print(f'done! All files are in {target_dir}')
corpus = DeepSpeechCorpus(args.language, csv_train, csv_dev, csv_test)
corpus.summary()
if precompute_features:
print(f'pre-computing features')
compute_features(csv_train, csv_dev, csv_test, target_dir, force)
def setup(args):
target_dir = join(args.target_dir, args.id)
if not exists(target_dir):
print(f'target directory {target_dir} does not exist. Creating...')
makedirs(target_dir)
force = args.force
if not force and listdir(target_dir):
inp = input(f"""
WARNING: target directory {target_dir} already exists. Override?
(this will overwrite all existing files in {target_dir} with the same names!!!) (Y/n)
""")
force = inp.lower() in ['', 'y']
return target_dir, args.id, force, args.synthesize, args.min_duration, args.max_duration, args.precompute_features
def extract_segments(target_dir, corpus_id, corpus, synthesize=False, min_dur=0, max_dur=0, force=False):
train_set = corpus.train_set(numeric=args.include_numeric)
dev_set = corpus.dev_set(numeric=args.include_numeric)
test_set = corpus.test_set(numeric=args.include_numeric)
print(f'training length is: {timedelta(seconds=sum(seg.duration for seg in train_set))}')
print(f'dev length is: {timedelta(seconds=sum(seg.duration for seg in dev_set))}')
print(f'test length is: {timedelta(seconds=sum(seg.duration for seg in test_set))}')
print(f'processing training segments')
csv_train = process_subset('train', train_set, synthesize, corpus_id, target_dir, min_dur, max_dur, force)
print(f'processing validation segments (data is only synthesized for training set)')
csv_dev = process_subset('dev', dev_set, False, corpus_id, target_dir, min_dur, max_dur, force)
print(f'processing validation segments (data is only synthesized for training set)')
csv_test = process_subset('test', test_set, False, corpus_id, target_dir, min_dur, max_dur, force)
return csv_train, csv_dev, csv_test
def process_subset(subset_id, subset, synthesize, corpus_id, target_dir, min_dur, max_dur, force):
df = split_speech_segments(subset, corpus_id, subset_id, target_dir, synthesize, min_dur, max_dur, force)
csv_path = join(target_dir, f'{corpus_id}-{subset_id}.csv')
print(f'saving metadata in {csv_path}')
df.to_csv(csv_path, index=False)
return csv_path
def split_speech_segments(subset, corpus_id, subset_id, target_dir, synthesize, min_dur, max_dur, force):
total = len(subset)
if max_dur:
print(f'trying to cap numer of speech segments to a total length of {max_dur} minutes. '
f'Speech segements will be sorted by length before capping.')
tot_duration = sum(s.duration for s in subset) / 60
if tot_duration < max_dur:
print(f'WARNING: maximum length of corpus was set to {max_dur} minutes, but total length of all '
f'speech segments is only {tot_duration} minutes! '
f'-> using all entries from corpus ({total} speech segments)')
else:
for i, s in enumerate(sorted(subset, key=lambda s: s.duration)):
if sum(s.duration for s in subset[:i]) > max_dur * 60:
break
print(f'total length of corpus will be capped at {max_dur} minutes ({i} speech segments)')
total = i
subset = subset[:i]
segments = []
files = []
sum_duration = 0
progress = tqdm(subset, total=total, unit=' speech segments')
for i, segment in enumerate(progress):
segment_id = f'{corpus_id}-{subset_id}-{i:0=4d}'
wav_path = f'{segment_id}.wav'
wav_path_absolute = join(target_dir, wav_path)
if not exists(wav_path_absolute) or not getsize(wav_path_absolute) or force:
sf.write(wav_path_absolute, segment.audio, segment.rate, subtype='PCM_16')
segments.append((segment_id, segment.audio, segment.rate, segment.transcript))
files.append((wav_path, getsize(wav_path_absolute), segment.duration, segment.transcript))
sum_duration += segment.duration
if synthesize:
audio, rate = librosa.load(wav_path_absolute, sr=16000, mono=True)
wav_shift = f'{segment_id}-shift.wav'
wav_echo = f'{segment_id}-echo.wav'
wav_high = f'{segment_id}-high.wav'
wav_low = f'{segment_id}-low.wav'
wav_fast = f'{segment_id}-fast.wav'
wav_slow = f'{segment_id}-slow.wav'
wav_loud = f'{segment_id}-loud.wav'
wav_quiet = f'{segment_id}-quiet.wav'
shift = random.uniform(0.5, 1.5)
wav_shift_path = join(target_dir, wav_shift)
wav_shift_len = synthesize_and_write(audio, rate, wav_shift_path, shift=shift, force=force)
files.append((wav_shift, getsize(wav_shift_path), wav_shift_len, segment.transcript))
echo = random.randint(30, 100)
wav_echo_path = join(target_dir, wav_echo)
wav_echo_len = synthesize_and_write(audio, rate, wav_echo_path, echo=echo, force=force)
files.append((wav_echo, getsize(wav_echo_path), wav_echo_len, segment.transcript))
higher = random.uniform(1.5, 5)
wav_high_path = join(target_dir, wav_high)
wav_high_len = synthesize_and_write(audio, rate, wav_high_path, pitch=higher, force=force)
files.append((wav_high, getsize(wav_high_path), wav_high_len, segment.transcript))
lower = random.uniform(-5, -1.5)
wav_low_path = join(target_dir, wav_low)
wav_low_len = synthesize_and_write(audio, rate, wav_low_path, pitch=lower, force=force)
files.append((wav_low, getsize(wav_low_path), wav_low_len, segment.transcript))
faster = random.uniform(1.2, 1.6)
wav_fast_path = join(target_dir, wav_fast)
wav_fast_len = synthesize_and_write(audio, rate, wav_fast_path, tempo=faster, force=force)
files.append((wav_fast, getsize(wav_fast_path), wav_fast_len, segment.transcript))
slower = random.uniform(0.6, 0.8)
wav_slow_path = join(target_dir, wav_slow)
wav_slow_len = synthesize_and_write(audio, rate, wav_slow_path, tempo=slower, force=force)
files.append((wav_slow, getsize(wav_slow_path), wav_slow_len, segment.transcript))
louder = random.randint(5, 15)
wav_loud_path = join(target_dir, wav_loud)
wav_loud_len = synthesize_and_write(audio, rate, wav_loud_path, volume=louder, force=force)
files.append((wav_loud, getsize(wav_loud_path), wav_loud_len, segment.transcript))
quieter = random.randint(-15, 5)
wav_quiet_path = join(target_dir, wav_quiet)
wav_quiet_len = synthesize_and_write(audio, rate, wav_quiet_path, volume=quieter, force=force)
files.append((wav_quiet, getsize(wav_quiet_path), wav_quiet_len, segment.transcript))
description = wav_path
if max_dur:
description += f' {timedelta(seconds=sum_duration)}'
progress.set_description(description)
if max_dur and sum_duration > max_dur * 60:
break
sum_duration = sum(getitem(t, 2) for t in files)
if synthesize or min_dur and sum_duration < min_dur * 60 or max_dur and sum_duration < max_dur * 60:
print(f'total length: {timedelta(seconds=sum_duration)}')
print(f'filling up with distorted data until {timedelta(minutes=1000)} is reached')
i = 0
while sum_duration < 1000 * 60:
i += 1
for segment_id, audio, rate, transcript in tqdm(segments, unit=' segments'):
shift = random.uniform(0.5, 1.5)
pitch = random.uniform(-5, 5)
tempo = random.uniform(0.6, 1.6)
volume = random.randint(-15, 15)
echo = random.randint(30, 100)
wav_distort = f'{segment_id}-distorted-{i}.wav'
wav_distort_path = join(target_dir, wav_distort)
wav_distort_len = synthesize_and_write(audio, rate, wav_distort_path, shift=shift, pitch=pitch,
tempo=tempo, volume=volume, echo=echo, force=force)
files.append((wav_distort, getsize(wav_distort_path), wav_distort_len, transcript))
sum_duration += wav_distort_len
if sum_duration > 1000 * 60:
break
print(f'total length: {timedelta(seconds=sum_duration)}')
return pd.DataFrame(data=files, columns=['wav_filename', 'wav_filesize', 'wav_length', 'transcript']).sort_values(
'wav_length')
def synthesize_and_write(audio, rate, wav_path, shift=0, pitch=0, tempo=1, volume=0, echo=0, force=False):
audio_synth = distort_audio(audio, rate,
shift_s=shift,
pitch_factor=pitch,
tempo_factor=tempo,
volume=volume,
echo=echo)
if not exists(wav_path) or not getsize(wav_path) or force:
sf.write(wav_path, audio_synth, rate, subtype='PCM_16')
return len(audio_synth) / rate
def compute_features(csv_train, csv_valid, csv_test, target_dir, force):
df_train = pd.read_csv(csv_train)
df_dev = pd.read_csv(csv_valid)
df_test = pd.read_csv(csv_test)
h5_file_path = join(target_dir, 'features_mfcc.h5')
if exists(h5_file_path) and force:
remove(h5_file_path)
if not exists(h5_file_path):
with h5py.File(h5_file_path) as h5_file:
create_subset(h5_file, 'train', df_train)
create_subset(h5_file, 'test', df_dev)
create_subset(h5_file, 'valid', df_test)
def create_subset(h5_file, name, df):
h5_file.create_dataset(f'{name}/features', shape=(0,), maxshape=(None,), dtype=h5py.special_dtype(vlen=np.float32))
h5_file.create_dataset(f'{name}/labels', shape=(0,), maxshape=(None,), dtype=h5py.special_dtype(vlen=str))
h5_file.create_dataset(f'{name}/durations', shape=(0,), maxshape=(None,))
progress = tqdm(zip(df['wav_filename'], df['wav_filesize'], df['transcript']), total=len(df.index))
for wav_file_path, wav_file_size, transcript in progress:
progress.set_description(f'{name}: {wav_file_path}')
inputs = h5_file[name]['features']
labels = h5_file[name]['labels']
durations = h5_file[name]['durations']
rate, audio = wavfile.read(wav_file_path)
inp = mfcc(audio, samplerate=rate, numcep=26) # (num_timesteps x num_features)
inputs.resize(inputs.shape[0] + 1, axis=0)
inputs[inputs.shape[0] - 1] = inp.flatten().astype(np.float32)
labels.resize(labels.shape[0] + 1, axis=0)
labels[labels.shape[0] - 1] = transcript
durations.resize(durations.shape[0] + 1, axis=0)
durations[durations.shape[0] - 1] = wav_file_size
if __name__ == '__main__':
main(args)
|
import argparse
import random
from datetime import timedelta
from operator import getitem
from os import listdir, makedirs, remove
from os.path import join, exists, getsize
import h5py
import librosa
import numpy as np
import pandas as pd
import soundfile as sf
from python_speech_features import mfcc
from scipy.io import wavfile
from tqdm import tqdm
from corpus.corpus import DeepSpeechCorpus
from util.audio_util import distort_audio
from util.corpus_util import get_corpus
from util.log_util import create_args_str
parser = argparse.ArgumentParser(description="""Export speech segments of corpus to CSV files and synthesize data""")
parser.add_argument('-id', type=str, required=True,
help='target-ID for processed files')
parser.add_argument('-s', '--source_dir', type=str, required=True,
help='id of corpus or path to corpus to export')
parser.add_argument('-t', '--target_dir', type=str, required=True,
help='target directory to save results')
parser.add_argument('-l', '--language', type=str, required=True,
help='language to use')
parser.add_argument('-f', '--force', action='store_true',
help='(optional) force override existing files. Default: False')
parser.add_argument('-x', '--synthesize', action='store_true',
help='whether to create synthesized data')
parser.add_argument('-num', '--include_numeric', action='store_true', default=False,
help='(optional) whether to include transcripts with numeric chars (default: False)')
parser.add_argument('-min', '--min_duration', nargs='?', type=int, default=0,
help='(optional) maximum number of speech segments minutes to process (default: all)')
parser.add_argument('-max', '--max_duration', nargs='?', type=int, default=0,
help='(optional) maximum number of speech segments minutes to process (default: all)')
parser.add_argument('-p', '--precompute_features', action='store_true',
help='(optional) precompute MFCC features in HDF5 format. Default: False')
args = parser.parse_args()
def main(args):
print(create_args_str(args))
target_dir, corpus_id, force, synthesize, min_dur, max_dur, precompute_features = setup(args)
corpus = get_corpus(args.source_dir, args.language)
corpus.summary()
print(f'processing {corpus.name} corpus and saving split segments in {target_dir}')
csv_train, csv_dev, csv_test = extract_segments(target_dir, corpus_id, corpus, synthesize, min_dur, max_dur, force)
print(f'done! All files are in {target_dir}')
corpus = DeepSpeechCorpus(args.language, csv_train, csv_dev, csv_test)
corpus.summary()
if precompute_features:
print(f'pre-computing features')
compute_features(csv_train, csv_dev, csv_test, target_dir, force)
def setup(args):
target_dir = join(args.target_dir, args.id)
if not exists(target_dir):
print(f'target directory {target_dir} does not exist. Creating...')
makedirs(target_dir)
force = args.force
if not force and listdir(target_dir):
inp = input(f"""
WARNING: target directory {target_dir} already exists. Override?
(this will overwrite all existing files in {target_dir} with the same names!!!) (Y/n)
""")
force = inp.lower() in ['', 'y']
return target_dir, args.id, force, args.synthesize, args.min_duration, args.max_duration, args.precompute_features
def extract_segments(target_dir, corpus_id, corpus, synthesize=False, min_dur=0, max_dur=0, force=False):
train_set = corpus.train_set(numeric=args.include_numeric)
dev_set = corpus.dev_set(numeric=args.include_numeric)
test_set = corpus.test_set(numeric=args.include_numeric)
print(f'training length is: {timedelta(seconds=sum(seg.duration for seg in train_set))}')
print(f'dev length is: {timedelta(seconds=sum(seg.duration for seg in dev_set))}')
print(f'test length is: {timedelta(seconds=sum(seg.duration for seg in test_set))}')
print(f'processing training segments')
csv_train = process_subset('train', train_set, synthesize, corpus_id, target_dir, min_dur, max_dur, force)
print(f'processing validation segments (data is only synthesized for training set)')
csv_dev = process_subset('dev', dev_set, False, corpus_id, target_dir, min_dur, max_dur, force)
print(f'processing validation segments (data is only synthesized for training set)')
csv_test = process_subset('test', test_set, False, corpus_id, target_dir, min_dur, max_dur, force)
return csv_train, csv_dev, csv_test
def process_subset(subset_id, subset, synthesize, corpus_id, target_dir, min_dur, max_dur, force):
df = split_speech_segments(subset, corpus_id, subset_id, target_dir, synthesize, min_dur, max_dur, force)
csv_path = join(target_dir, f'{corpus_id}-{subset_id}.csv')
print(f'saving metadata in {csv_path}')
df.to_csv(csv_path, index=False)
return csv_path
def split_speech_segments(subset, corpus_id, subset_id, target_dir, synthesize, min_dur, max_dur, force):
total = len(subset)
if max_dur:
print(f'trying to cap numer of speech segments to a total length of {max_dur} minutes. '
f'Speech segements will be sorted by length before capping.')
tot_duration = sum(s.duration for s in subset) / 60
if tot_duration < max_dur:
print(f'WARNING: maximum length of corpus was set to {max_dur} minutes, but total length of all '
f'speech segments is only {tot_duration} minutes! '
f'-> using all entries from corpus ({total} speech segments)')
else:
for i, s in enumerate(sorted(subset, key=lambda s: s.duration)):
if sum(s.duration for s in subset[:i]) > max_dur * 60:
break
print(f'total length of corpus will be capped at {max_dur} minutes ({i} speech segments)')
total = i
subset = subset[:i]
segments = []
files = []
sum_duration = 0
progress = tqdm(subset, total=total, unit=' speech segments')
for i, segment in enumerate(progress):
segment_id = f'{corpus_id}-{subset_id}-{i:0=4d}'
wav_path = f'{segment_id}.wav'
wav_path_absolute = join(target_dir, wav_path)
if not exists(wav_path_absolute) or not getsize(wav_path_absolute) or force:
sf.write(wav_path_absolute, segment.audio, segment.rate, subtype='PCM_16')
segments.append((segment_id, segment.audio, segment.rate, segment.transcript))
files.append((wav_path, getsize(wav_path_absolute), segment.duration, segment.transcript))
sum_duration += segment.duration
if synthesize:
audio, rate = librosa.load(wav_path_absolute, sr=16000, mono=True)
wav_shift = f'{segment_id}-shift.wav'
wav_echo = f'{segment_id}-echo.wav'
wav_high = f'{segment_id}-high.wav'
wav_low = f'{segment_id}-low.wav'
wav_fast = f'{segment_id}-fast.wav'
wav_slow = f'{segment_id}-slow.wav'
wav_loud = f'{segment_id}-loud.wav'
wav_quiet = f'{segment_id}-quiet.wav'
shift = random.uniform(0.5, 1.5)
wav_shift_path = join(target_dir, wav_shift)
wav_shift_len = synthesize_and_write(audio, rate, wav_shift_path, shift=shift, force=force)
files.append((wav_shift, getsize(wav_shift_path), wav_shift_len, segment.transcript))
echo = random.randint(30, 100)
wav_echo_path = join(target_dir, wav_echo)
wav_echo_len = synthesize_and_write(audio, rate, wav_echo_path, echo=echo, force=force)
files.append((wav_echo, getsize(wav_echo_path), wav_echo_len, segment.transcript))
higher = random.uniform(1.5, 5)
wav_high_path = join(target_dir, wav_high)
wav_high_len = synthesize_and_write(audio, rate, wav_high_path, pitch=higher, force=force)
files.append((wav_high, getsize(wav_high_path), wav_high_len, segment.transcript))
lower = random.uniform(-5, -1.5)
wav_low_path = join(target_dir, wav_low)
wav_low_len = synthesize_and_write(audio, rate, wav_low_path, pitch=lower, force=force)
files.append((wav_low, getsize(wav_low_path), wav_low_len, segment.transcript))
faster = random.uniform(1.2, 1.6)
wav_fast_path = join(target_dir, wav_fast)
wav_fast_len = synthesize_and_write(audio, rate, wav_fast_path, tempo=faster, force=force)
files.append((wav_fast, getsize(wav_fast_path), wav_fast_len, segment.transcript))
slower = random.uniform(0.6, 0.8)
wav_slow_path = join(target_dir, wav_slow)
wav_slow_len = synthesize_and_write(audio, rate, wav_slow_path, tempo=slower, force=force)
files.append((wav_slow, getsize(wav_slow_path), wav_slow_len, segment.transcript))
louder = random.randint(5, 15)
wav_loud_path = join(target_dir, wav_loud)
wav_loud_len = synthesize_and_write(audio, rate, wav_loud_path, volume=louder, force=force)
files.append((wav_loud, getsize(wav_loud_path), wav_loud_len, segment.transcript))
quieter = random.randint(-15, 5)
wav_quiet_path = join(target_dir, wav_quiet)
wav_quiet_len = synthesize_and_write(audio, rate, wav_quiet_path, volume=quieter, force=force)
files.append((wav_quiet, getsize(wav_quiet_path), wav_quiet_len, segment.transcript))
description = wav_path
if max_dur:
description += f' {timedelta(seconds=sum_duration)}'
progress.set_description(description)
if max_dur and sum_duration > max_dur * 60:
break
sum_duration = sum(getitem(t, 2) for t in files)
if synthesize or min_dur and sum_duration < min_dur * 60 or max_dur and sum_duration < max_dur * 60:
print(f'total length: {timedelta(seconds=sum_duration)}')
print(f'filling up with distorted data until {timedelta(minutes=1000)} is reached')
i = 0
while sum_duration < 1000 * 60:
i += 1
for segment_id, audio, rate, transcript in tqdm(segments, unit=' segments'):
shift = random.uniform(0.5, 1.5)
pitch = random.uniform(-5, 5)
tempo = random.uniform(0.6, 1.6)
volume = random.randint(-15, 15)
echo = random.randint(30, 100)
wav_distort = f'{segment_id}-distorted-{i}.wav'
wav_distort_path = join(target_dir, wav_distort)
wav_distort_len = synthesize_and_write(audio, rate, wav_distort_path, shift=shift, pitch=pitch,
tempo=tempo, volume=volume, echo=echo, force=force)
files.append((wav_distort, getsize(wav_distort_path), wav_distort_len, transcript))
sum_duration += wav_distort_len
if sum_duration > 1000 * 60:
break
print(f'total length: {timedelta(seconds=sum_duration)}')
return pd.DataFrame(data=files, columns=['wav_filename', 'wav_filesize', 'wav_length', 'transcript']).sort_values(
'wav_length')
def synthesize_and_write(audio, rate, wav_path, shift=0, pitch=0, tempo=1, volume=0, echo=0, force=False):
audio_synth = distort_audio(audio, rate,
shift_s=shift,
pitch_factor=pitch,
tempo_factor=tempo,
volume=volume,
echo=echo)
if not exists(wav_path) or not getsize(wav_path) or force:
sf.write(wav_path, audio_synth, rate, subtype='PCM_16')
return len(audio_synth) / rate
def compute_features(csv_train, csv_valid, csv_test, target_dir, force):
df_train = pd.read_csv(csv_train)
df_dev = pd.read_csv(csv_valid)
df_test = pd.read_csv(csv_test)
h5_file_path = join(target_dir, 'features_mfcc.h5')
if exists(h5_file_path) and force:
remove(h5_file_path)
if not exists(h5_file_path):
with h5py.File(h5_file_path) as h5_file:
create_subset(h5_file, 'train', df_train)
create_subset(h5_file, 'test', df_dev)
create_subset(h5_file, 'valid', df_test)
def create_subset(h5_file, name, df):
h5_file.create_dataset(f'{name}/features', shape=(0,), maxshape=(None,), dtype=h5py.special_dtype(vlen=np.float32))
h5_file.create_dataset(f'{name}/labels', shape=(0,), maxshape=(None,), dtype=h5py.special_dtype(vlen=str))
h5_file.create_dataset(f'{name}/durations', shape=(0,), maxshape=(None,))
progress = tqdm(zip(df['wav_filename'], df['wav_filesize'], df['transcript']), total=len(df.index))
for wav_file_path, wav_file_size, transcript in progress:
progress.set_description(f'{name}: {wav_file_path}')
inputs = h5_file[name]['features']
labels = h5_file[name]['labels']
durations = h5_file[name]['durations']
rate, audio = wavfile.read(wav_file_path)
inp = mfcc(audio, samplerate=rate, numcep=26) # (num_timesteps x num_features)
inputs.resize(inputs.shape[0] + 1, axis=0)
inputs[inputs.shape[0] - 1] = inp.flatten().astype(np.float32)
labels.resize(labels.shape[0] + 1, axis=0)
labels[labels.shape[0] - 1] = transcript
durations.resize(durations.shape[0] + 1, axis=0)
durations[durations.shape[0] - 1] = wav_file_size
if __name__ == '__main__':
main(args)
|
en
| 0.660585
|
Export speech segments of corpus to CSV files and synthesize data WARNING: target directory {target_dir} already exists. Override? (this will overwrite all existing files in {target_dir} with the same names!!!) (Y/n) # (num_timesteps x num_features)
| 2.161662
| 2
|
chj/pyserver/flask_app.py
|
kestreltechnology/CodeHawk-Java
| 0
|
6626154
|
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: <NAME>
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import json
import os
import traceback
import xml.etree.ElementTree as ET
from flask import Flask, render_template, render_template_string, jsonify, request, Response, Markup
import chj.util.fileutil as UF
import chj.util.xmlutil as UX
import chj.index.AppAccess as AP
import chj.util.dotutil as UD
import chj.util.svgutil as UG
import chj.util.analysisutil as UA
from chj.index.TaintGraph import TaintGraph
from chj.reporting.BytecodeReport import BytecodeReport
from chj.reporting.BranchConditions import BranchConditions
from chj.reporting.CostSummary import CostSummary
from chj.reporting.ExceptionHandlers import ExceptionHandlers
from chj.reporting.LoopSummary import LoopSummary
from chj.reporting.Recursion import Recursion
from chj.reporting.StaticFields import StaticFields
from chj.reporting.TaintOrigins import TaintOrigins
from typing import Any, Dict, List, Tuple, Union, TYPE_CHECKING
if TYPE_CHECKING:
from chj.app.JavaClass import JavaClass
from chj.app.JavaMethod import JavaMethod
# ======================================================================
# Rest API
# ======================================================================
app = Flask(__name__)
@app.route('/')
def index() -> str:
return render_template('index.html')
@app.route('/loadprojects')
def loadprojects() -> Response:
result: Dict[str, Dict[str, Any]] = {}
result['meta'] = {}
try:
projects: Dict[str, List[str]] = {}
appfile = UF.get_engagements_data_file()
if not appfile is None:
for e in sorted(appfile):
if not e in projects: projects[e] = []
for app in sorted(appfile[e]['apps']):
projects[e].append(app)
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = projects
return jsonify(result)
@app.route('/branches/<engagement>/<project>')
def loadbranches(engagement: str, project: str) -> Response:
    """Return the branch-conditions report for one project as JSON."""
    response: Dict[str, Dict[str, Any]] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        summary = BranchConditions(appaccess).as_dictionary()
    except Exception as e:
        # report the failure to the client; keep the traceback on the server log
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = summary
    return jsonify(response)
@app.route('/costs/<engagement>/<project>')
def loadcosts(engagement: str, project: str) -> Response:
    """Return the cost-model summary for one project as JSON."""
    response: Dict[str, Dict[str, Any]] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        summary = CostSummary(appaccess).as_dictionary()
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = summary
    return jsonify(response)
@app.route('/exceptions/<engagement>/<project>')
def loadexceptions(engagement: str, project: str) -> Response:
    """Return the exception-handlers report for one project as JSON."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        summary = ExceptionHandlers(appaccess).as_dictionary()
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = summary
    return jsonify(response)
@app.route('/loops/<engagement>/<project>')
def loadloops(engagement: str, project: str) -> Response:
    """Return the loop summary for one project as JSON."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        summary = LoopSummary(appaccess).as_dictionary()
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = summary
    return jsonify(response)
@app.route('/project/<engagement>/<project>')
def loadproject(engagement: str, project: str) -> Response:
    """Return a mapping of class name -> class-name index (cnix) as JSON."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        classes: Dict[str, str] = {}
        def collect(javaclass: "JavaClass") -> None:
            # cnix is the numeric class-name index; stringified for JSON
            classes[javaclass.get_name()] = str(javaclass.cnix)
        appaccess.iter_classes(collect)
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = classes
    return jsonify(response)
@app.route('/strings/<engagement>/<project>')
def loadstrings(engagement: str, project: str) -> Response:
    """Return, per method, the string constants loaded by its bytecode.

    Content layout: {cmsix: {'name': qualified method name,
                             'pcs': {pc: instruction}}}.
    """
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        summary = {}
        for (cmsix, methodresults) in sorted(appaccess.get_loaded_strings()):
            if not methodresults:
                continue  # skip methods that load no strings
            summary[cmsix] = {
                'name': str(appaccess.jd.get_cms(cmsix).get_aqname()),
                'pcs': {pc: instr for (pc, instr) in sorted(methodresults)}}
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = summary
    return jsonify(response)
@app.route('/recursive/<engagement>/<project>')
def loadrecursive(engagement: str, project: str) -> Response:
    """Return the recursion report for one project as JSON."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        summary = Recursion(appaccess).as_dictionary()
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = summary
    return jsonify(response)
@app.route('/reflective/<engagement>/<project>')
def loadreflective(engagement: str, project: str) -> Response:
    """Report call sites of java.lang.Class reflection methods, grouped by caller.

    Content layout: {cmsix: {'name': caller name, 'pcs': {pc: call instruction}}}.
    """
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        # NOTE(review): 'getDeclaredClassed' looks like a typo for
        # 'getDeclaredClasses' — confirm before changing; kept as-is here.
        reflective_names = [
            "forName",
            "getDeclaredClassed",
            "getDeclaredConstructors",
            "getDeclaredField",
            "getDeclaredFields",
            "getDeclaredMethod",
            "getDeclaredMethods",
            "getField",
            "getFields",
            "getMethod",
            "getMethods"
            ]
        callsites: List[Any] = []
        def collect(cmsix: int, jmethod: "JavaMethod") -> None:
            for rname in reflective_names:
                callsites.append((cmsix, jmethod.get_named_method_calls(rname)))
        appaccess.iter_methods(collect)
        summary: Dict[int, Any] = {}
        for (cmsix, calls) in callsites:
            if len(calls) > 0:
                pcs = {pc: str(instr) for (pc, instr) in calls}
                if cmsix in summary:
                    # same caller seen for another reflective name: merge pcs
                    summary[cmsix]['pcs'].update(pcs)
                else:
                    summary[cmsix] = {
                        'name': appaccess.jd.get_cms(cmsix).get_aqname(),
                        'pcs': pcs}
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = summary
    return jsonify(response)
@app.route('/staticfieldinits/<engagement>/<project>')
def loadstaticfieldinits(engagement: str, project: str) -> Response:
    """Return the static-field initializer report for one project as JSON."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        summary = StaticFields(appaccess).as_dictionary()
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = summary
    return jsonify(response)
@app.route('/taintorigins/<engagement>/<project>')
def loadtaintorigins(engagement: str, project: str) -> Response:
    """Return the taint-origins report for one project as JSON."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        summary = TaintOrigins(appaccess).as_dictionary()
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = summary
    return jsonify(response)
@app.route('/taint/<engagement>/<project>/<index>', methods=['GET', 'POST'])
def loadtaintgraph(engagement: str, project: str, index: str) -> Union[str, Dict[str, Any]]:
    """Render (GET) or refresh (POST) the taint-propagation graph for origin <index>.

    GET returns the rendered 'taint.html' page with the SVG inlined; POST
    returns a dict (serialized by Flask) with the regenerated SVG under
    content/svg. On any failure a dict with meta/status == 'fail' is returned.
    """
    result: Dict[str, Any] = {}
    result['meta'] = {}
    loops = False
    sink = None
    try:
        title = engagement + ":" + project + ":" + index
        app = load_engagement_app(engagement, project)
        if app.jd.ttd is None:
            # ttd holds the taint dictionary; absent when the analysis never ran
            raise UF.CHJError('Taint analysis results do not exist! Please create them first')
        name = str(app.jd.ttd.get_taint_origin(int(index)))
        # re-run taint propagation for this origin; keep the original app
        # access object if no refreshed one is produced
        new_app = UA.analyze_taint_propagation(project, int(index))
        if new_app is not None:
            app = new_app
        if request.method == 'POST':
            # optional form fields: 'loops' toggles loop nodes; 'sinkid'
            # restricts the graph to paths reaching that sink
            req = request.form
            loops = True if 'loops' in req else False
            sink = req['sinkid'] if 'sinkid' in req else None
        taintgraph = TaintGraph(app, project, index, loops=loops, sink=sink)
        dotgraph = taintgraph.as_dot(index)
        svggraph = UG.get_svg(app.path, dotgraph)
        svg = ET.tostring(svggraph.getroot(), encoding='unicode', method='html')
        if request.method == 'GET':
            template = render_template('taint.html', title=title, body=Markup(svg), name=name,
                eng=engagement, proj=project, index=index)
    except Exception as e:
        result['meta']['status'] = 'fail'
        result['meta']['reason'] = str(e)
        traceback.print_exc()
        return result
    else:
        if request.method == 'GET':
            return template
        elif request.method == 'POST':
            result['meta']['status'] = 'ok'
            result['content'] = {}
            result['content']['svg'] = Markup(svg)
            return result
        else:
            # defensive: the route only registers GET and POST
            raise UF.CHJError("Unknown Methods Parameter. Options are \"Get\" and \"Post\"")
def load_engagement_app(engagement: str, project: str) -> AP.AppAccess:
    """Open an AppAccess for the given project's analysis directory.

    'engagement' is accepted for call-site symmetry but the lookup is keyed
    on project alone. The unused jar list from the lookup is discarded.
    Raises (via UF.check_analysisdir) when the analysis directory is missing.
    """
    (path, _jars) = UF.get_engagement_app_data(project)
    UF.check_analysisdir(path)
    return AP.AppAccess(path)
def get_method_body(engagement: str, project: str, cmsix: int) -> Tuple[str, str]:
    """Return (qualified method name, HTML bytecode table) for one method."""
    appaccess = load_engagement_app(engagement, project)
    methodix = int(cmsix)
    mname = appaccess.get_method(methodix).get_qname()
    report = BytecodeReport(appaccess, methodix).as_list()
    html = ET.tostring(mk_method_code_table(report),
        encoding='unicode', method='html')
    return (mname, html)
@app.route('/method/<engagement>/<project>/<cmsix>')
def load_method(engagement: str, project: str, cmsix: str) -> Union[str, Response]:
    """Render the bytecode page for one method, or a JSON error on failure."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        (mname, body) = get_method_body(engagement, project, int(cmsix))
        title = engagement + ":" + project + ":" + cmsix
        page = render_template('method.html', title=title, body=Markup(body), name=mname,
            eng=engagement, proj=project, index=cmsix)
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
        return jsonify(response)
    else:
        return page
@app.route('/class/<engagement>/<project>/<cnix>')
def load_bytecode(engagement: str, project: str, cnix: str) -> Union[str, Response]:
    """Render the bytecode page for a whole class, or a JSON error on failure."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        cname = appaccess.get_class(int(cnix)).get_qname()
        bytecode = appaccess.get_class(int(cnix)).as_dictionary()
        body = Markup(ET.tostring(mk_class_code_table(bytecode, engagement, project),
            encoding='unicode', method='html'))
        title = engagement + ":" + project + ":" + cnix
        page = render_template('class.html', title=title, body=body, name=cname,
            eng=engagement, proj=project, index=cnix)
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
        return jsonify(response)
    else:
        return page
@app.route('/methodcg/<engagement>/<project>/<cmsix>')
def load_method_cg(engagement: str, project: str, cmsix: str) -> Response:
    """Return the method's call graph as inline SVG wrapped in JSON."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        (nodes, dotgraph) = appaccess.get_callgraph().as_dot(int(cmsix))
        svggraph = UG.get_svg(appaccess.path, dotgraph)
        # tag the svg nodes with their cmsix so the UI can link them
        UG.append_cmsixs(svggraph, nodes)
        svg = ET.tostring(svggraph.getroot(), encoding='unicode', method='html')
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = {'svg': svg}
    return jsonify(response)
@app.route('/methodrevcg/<engagement>/<project>/<cmsix>')
def load_method_rev_cg(engagement: str, project: str, cmsix: str) -> Response:
    """Return the method's reverse call graph (callers) as inline SVG in JSON."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        dotgraph = appaccess.get_callgraph().as_rev_dot(int(cmsix))
        svggraph = UG.get_svg(appaccess.path, dotgraph)
        svg = ET.tostring(svggraph.getroot(), encoding='unicode', method='html')
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = {'svg': svg}
    return jsonify(response)
@app.route('/methodcfg/<engagement>/<project>/<cmsix>')
def load_method_cfg(engagement: str, project: str, cmsix: str) -> Response:
    """Return the method's control-flow graph as inline SVG wrapped in JSON."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        cfg = appaccess.get_method(int(cmsix)).get_cfg()
        (nodes, dotgraph) = cfg.as_dot()
        svggraph = UG.get_svg(appaccess.path, dotgraph)
        # decorate the svg with pc anchors and loop-nesting counts for the UI
        UG.append_pcs(svggraph, nodes)
        UG.append_loop_levels(svggraph, cfg.get_loop_level_counts())
        svg = ET.tostring(svggraph.getroot(), encoding='unicode', method='html')
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = {'svg': svg}
    return jsonify(response)
@app.route('/methodcfgcost/<engagement>/<project>/<cmsix>')
def load_method_cfg_cost(engagement: str, project: str, cmsix: str) -> Response:
    """Return the method's CFG annotated with block costs, as SVG in JSON."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        cfg = appaccess.get_method(int(cmsix)).get_cfg()
        methodcost = appaccess.get_costmodel().get_method_cost(int(cmsix))
        (nodes, dotgraph) = cfg.as_dot(methodcost=methodcost)
        svggraph = UG.get_svg(appaccess.path, dotgraph)
        # decorate the svg with pc anchors and loop-nesting counts for the UI
        UG.append_pcs(svggraph, nodes)
        UG.append_loop_levels(svggraph, cfg.get_loop_level_counts())
        svg = ET.tostring(svggraph.getroot(), encoding='unicode', method='html')
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = {'svg': svg}
    return jsonify(response)
@app.route('/methodsimplecfgcost/<engagement>/<project>/<cmsix>')
def load_method_simple_cfg_cost(engagement: str, project: str, cmsix: str) -> Response:
    """Return the method's CFG with simplified cost annotations, as SVG in JSON."""
    response: Dict[str, Any] = {'meta': {}}
    try:
        appaccess = load_engagement_app(engagement, project)
        cfg = appaccess.get_method(int(cmsix)).get_cfg()
        methodcost = appaccess.get_costmodel().get_method_cost(int(cmsix))
        (nodes, dotgraph) = cfg.as_dot(methodcost=methodcost, simplecost=True)
        svggraph = UG.get_svg(appaccess.path, dotgraph)
        # decorate the svg with pc anchors and loop-nesting counts for the UI
        UG.append_pcs(svggraph, nodes)
        UG.append_loop_levels(svggraph, cfg.get_loop_level_counts())
        svg = ET.tostring(svggraph.getroot(), encoding='unicode', method='html')
    except Exception as e:
        response['meta']['status'] = 'fail'
        response['meta']['reason'] = str(e)
        traceback.print_exc()
    else:
        response['meta']['status'] = 'ok'
        response['content'] = {'svg': svg}
    return jsonify(response)
#@app.route('/', defaults={'path': ''})
#@app.route('/<path:path>')
#def catch_all(path):
# result = {}
# result['meta'] = {}
# result['meta']['status'] = 'fail'
# result['meta']
def mk_class_code_table(f: Dict[str, Dict[str, Any]],
        engagement: str,
        project: str) -> ET.Element:
    """Build the HTML code table for a class: one section per method.

    f maps cmsix -> {'methodstring': header text,
                     'result': [(pc, opcode), ...] as string pairs}.
    engagement/project are currently unused (kept for a future per-method
    link from the class page).
    """
    container = ET.Element('div')
    container.set('id', 'codetable')
    tbl = ET.Element('table')
    tbl.set('class', 'methodtable balanced')
    tbl.append(mk_header(['pc', 'instruction']))
    for cmsix in f:
        namecell = ET.Element('td')
        namecell.text = f[cmsix]['methodstring']
        if len(f[cmsix]['result']) != 0:
            # tag methods that have bytecode so the front end can hook them up
            namecell.set('cmsix', cmsix)
            namecell.set('name', 'method')
        namecell.set('colSpan', '2')
        namecell.set('style', 'border-style:none;')
        namerow = ET.Element('tr')
        namerow.append(namecell)
        tbl.append(namerow)
        for instr in f[cmsix]['result']:
            row = ET.Element('tr')
            pccell = ET.Element('td')
            pccell.text = instr[0]
            opcell = ET.Element('td')
            opcell.text = instr[1]
            row.extend([pccell, opcell])
            tbl.append(row)
        # non-breaking-space spacer row between methods
        spacer = ET.Element('td')
        spacer.text = u'\xa0'
        spacer.set('colSpan', '2')
        spacer.set('style', 'border-style:none;')
        spacerrow = ET.Element('tr')
        spacerrow.append(spacer)
        tbl.append(spacerrow)
    container.append(tbl)
    return container
def mk_header(labels: List[str]) -> ET.Element:
    """Return a <tr> of <th><button>label</button></th> cells, one per label."""
    row = ET.Element('tr')
    for label in labels:
        cell = ET.Element('th')
        btn = ET.Element('button')
        btn.text = label
        cell.append(btn)
        row.append(cell)
    return row
def mk_method_code_table(f: List[List[str]]) -> ET.Element:
    """Build the HTML bytecode table for a single method.

    f is a list of rows whose first two entries are [pc, instruction] strings.
    """
    container = ET.Element('div')
    container.set('id', 'codetable')
    tbl = ET.Element('table')
    tbl.set('class', 'methodtable balanced')
    tbl.append(mk_header(['pc', 'instruction']))
    for row in f:
        tr = ET.Element('tr')
        pccell = ET.Element('td')
        pccell.text = row[0]
        instrcell = ET.Element('td')
        instrcell.text = row[1]
        tr.extend([pccell, instrcell])
        tbl.append(tr)
    container.append(tbl)
    return container
"""
def mk_display_body(mk_table, report):
body = ET.Element('body')
mainpage = ET.Element('div')
mainpage.set('id','mainpage')
header = ET.Element('header')
header.text = 'CodeHawk Java Analyzer'
nav = ET.Element('nav')
navdiv = ET.Element('div')
navul = ET.Element('ul')
navlihome = ET.Element('li')
navlihome.text = 'HOME'
navul.append(navlihome)
navdiv.append(navul)
nav.append(navdiv)
codetable = mk_table(report)
footer = ET.Element('footer')
footer.text = '© 2019-2020, Kestrel Technology, LLC, Palo Alto, CA 94304'
mainpage.extend([ header, nav, codetable, footer ])
body.append(mainpage)
return body
"""
# NOTE(review): a stray '|' separator stood here; everything below this point
# is a byte-identical duplicate of the module above (file-concatenation
# artifact) and should be removed — duplicate @app.route registrations will
# fail at import time.
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: <NAME>
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import json
import os
import traceback
import xml.etree.ElementTree as ET
from flask import Flask, render_template, render_template_string, jsonify, request, Response, Markup
import chj.util.fileutil as UF
import chj.util.xmlutil as UX
import chj.index.AppAccess as AP
import chj.util.dotutil as UD
import chj.util.svgutil as UG
import chj.util.analysisutil as UA
from chj.index.TaintGraph import TaintGraph
from chj.reporting.BytecodeReport import BytecodeReport
from chj.reporting.BranchConditions import BranchConditions
from chj.reporting.CostSummary import CostSummary
from chj.reporting.ExceptionHandlers import ExceptionHandlers
from chj.reporting.LoopSummary import LoopSummary
from chj.reporting.Recursion import Recursion
from chj.reporting.StaticFields import StaticFields
from chj.reporting.TaintOrigins import TaintOrigins
from typing import Any, Dict, List, Tuple, Union, TYPE_CHECKING
if TYPE_CHECKING:
from chj.app.JavaClass import JavaClass
from chj.app.JavaMethod import JavaMethod
# ======================================================================
# Rest API
# ======================================================================
app = Flask(__name__)
@app.route('/')
def index() -> str:
return render_template('index.html')
@app.route('/loadprojects')
def loadprojects() -> Response:
result: Dict[str, Dict[str, Any]] = {}
result['meta'] = {}
try:
projects: Dict[str, List[str]] = {}
appfile = UF.get_engagements_data_file()
if not appfile is None:
for e in sorted(appfile):
if not e in projects: projects[e] = []
for app in sorted(appfile[e]['apps']):
projects[e].append(app)
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = projects
return jsonify(result)
@app.route('/branches/<engagement>/<project>')
def loadbranches(engagement: str, project: str) -> Response:
result: Dict[str, Dict[str, Any]] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
branchsummary = BranchConditions(app).as_dictionary()
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = branchsummary
return jsonify(result)
@app.route('/costs/<engagement>/<project>')
def loadcosts(engagement: str, project: str) -> Response:
result: Dict[str, Dict[str, Any]] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
costreport = CostSummary(app)
costsummary = costreport.as_dictionary()
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = costsummary
return jsonify(result)
@app.route('/exceptions/<engagement>/<project>')
def loadexceptions(engagement: str, project: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
exceptionreport = ExceptionHandlers(app)
exceptionsummary = exceptionreport.as_dictionary()
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = exceptionsummary
return jsonify(result)
@app.route('/loops/<engagement>/<project>')
def loadloops(engagement: str, project: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
loopsummary = LoopSummary(app).as_dictionary()
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = loopsummary
return jsonify(result)
@app.route('/project/<engagement>/<project>')
def loadproject(engagement: str, project: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
classes = {}
def f(myclass: "JavaClass") -> None:
classes[myclass.get_name()] = str(myclass.cnix)
app.iter_classes(f)
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = classes
return jsonify(result)
@app.route('/strings/<engagement>/<project>')
def loadstrings(engagement: str, project: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
strings = app.get_loaded_strings()
stringsummary = {}
for (cmsix, methodresults) in sorted(strings):
if len(methodresults) == 0: continue
methodname = str(app.jd.get_cms(cmsix).get_aqname())
methodstrings: Dict[str, Any] = {}
methodstrings['name'] = methodname
methodstrings['pcs'] = {}
for (pc, instr) in sorted(methodresults):
methodstrings['pcs'][pc] = instr
stringsummary[cmsix] = methodstrings
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = stringsummary
return jsonify(result)
@app.route('/recursive/<engagement>/<project>')
def loadrecursive(engagement: str, project: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
recursionsummary = Recursion(app).as_dictionary()
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = recursionsummary
return jsonify(result)
@app.route('/reflective/<engagement>/<project>')
def loadreflective(engagement: str, project: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
reflective_names = [
"forName",
"getDeclaredClassed",
"getDeclaredConstructors",
"getDeclaredField",
"getDeclaredFields",
"getDeclaredMethod",
"getDeclaredMethods",
"getField",
"getFields",
"getMethod",
"getMethods"
]
methods = []
def f(cmsix: int, m: "JavaMethod") -> None:
for n in reflective_names:
methods.append((cmsix,m.get_named_method_calls(n)))
app.iter_methods(f)
reflectionsummary: Dict[int, Any] = {}
for (cmsix,mmethodcalls) in methods:
if len(mmethodcalls) > 0:
name = app.jd.get_cms(cmsix).get_aqname()
pcs = {}
for (pc,i) in mmethodcalls:
pcs[pc] = str(i)
if not cmsix in reflectionsummary:
reflectionsummary[cmsix] = {}
reflectionsummary[cmsix]['name'] = name
reflectionsummary[cmsix]['pcs'] = pcs
else:
reflectionsummary[cmsix]['pcs'].update(pcs)
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = reflectionsummary
return jsonify(result)
@app.route('/staticfieldinits/<engagement>/<project>')
def loadstaticfieldinits(engagement: str, project: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
sfsummary = StaticFields(app).as_dictionary()
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = sfsummary
return jsonify(result)
@app.route('/taintorigins/<engagement>/<project>')
def loadtaintorigins(engagement: str, project: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
taintsummary = TaintOrigins(app).as_dictionary()
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = taintsummary
return jsonify(result)
@app.route('/taint/<engagement>/<project>/<index>', methods=['GET', 'POST'])
def loadtaintgraph(engagement: str, project: str, index: str) -> Union[str, Dict[str, Any]]:
result: Dict[str, Any] = {}
result['meta'] = {}
loops = False
sink = None
try:
title = engagement + ":" + project + ":" + index
app = load_engagement_app(engagement, project)
if app.jd.ttd is None:
raise UF.CHJError('Taint analysis results do not exist! Please create them first')
name = str(app.jd.ttd.get_taint_origin(int(index)))
new_app = UA.analyze_taint_propagation(project, int(index))
if new_app is not None:
app = new_app
if request.method == 'POST':
req = request.form
loops = True if 'loops' in req else False
sink = req['sinkid'] if 'sinkid' in req else None
taintgraph = TaintGraph(app, project, index, loops=loops, sink=sink)
dotgraph = taintgraph.as_dot(index)
svggraph = UG.get_svg(app.path, dotgraph)
svg = ET.tostring(svggraph.getroot(), encoding='unicode', method='html')
if request.method == 'GET':
template = render_template('taint.html', title=title, body=Markup(svg), name=name,
eng=engagement, proj=project, index=index)
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
return result
else:
if request.method == 'GET':
return template
elif request.method == 'POST':
result['meta']['status'] = 'ok'
result['content'] = {}
result['content']['svg'] = Markup(svg)
return result
else:
raise UF.CHJError("Unknown Methods Parameter. Options are \"Get\" and \"Post\"")
def load_engagement_app(engagement: str, project: str) -> AP.AppAccess:
(path, jars) = UF.get_engagement_app_data(project)
UF.check_analysisdir(path)
app = AP.AppAccess(path)
return app
def get_method_body(engagement: str, project: str, cmsix: int) -> Tuple[str, str]:
app = load_engagement_app(engagement, project)
mname = app.get_method(int(cmsix)).get_qname()
bytecodereport = BytecodeReport(app, int(cmsix)).as_list()
body = ET.tostring(mk_method_code_table(bytecodereport),
encoding='unicode', method='html')
return (mname, body)
@app.route('/method/<engagement>/<project>/<cmsix>')
def load_method(engagement: str, project: str, cmsix: str) -> Union[str, Response]:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
(mname, body) = get_method_body(engagement, project, int(cmsix))
title = engagement + ":" + project + ":" + cmsix
template = render_template('method.html', title=title, body=Markup(body), name=mname,
eng=engagement, proj=project, index=cmsix)
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
return jsonify(result)
else:
return template
@app.route('/class/<engagement>/<project>/<cnix>')
def load_bytecode(engagement: str, project: str, cnix: str) -> Union[str, Response]:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
cname = app.get_class(int(cnix)).get_qname()
bytecode = app.get_class(int(cnix)).as_dictionary()
body = Markup(ET.tostring(mk_class_code_table(bytecode, engagement, project),
encoding='unicode', method='html'))
title = engagement + ":" + project + ":" + cnix
template = render_template('class.html', title=title, body=body, name=cname,
eng=engagement, proj=project, index=cnix)
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
return jsonify(result)
else:
return template
@app.route('/methodcg/<engagement>/<project>/<cmsix>')
def load_method_cg(engagement: str, project: str, cmsix: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
cg = app.get_callgraph()
(nodes, dotgraph) = cg.as_dot(int(cmsix))
svggraph = UG.get_svg(app.path, dotgraph)
UG.append_cmsixs(svggraph, nodes)
svg = ET.tostring(svggraph.getroot(), encoding='unicode', method='html')
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = {}
result['content']['svg'] = svg
return jsonify(result)
@app.route('/methodrevcg/<engagement>/<project>/<cmsix>')
def load_method_rev_cg(engagement: str, project: str, cmsix: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
revcg = app.get_callgraph()
dotgraph = revcg.as_rev_dot(int(cmsix))
svggraph = UG.get_svg(app.path, dotgraph)
svg = ET.tostring(svggraph.getroot(), encoding='unicode', method='html')
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = {}
result['content']['svg'] = svg
return jsonify(result)
@app.route('/methodcfg/<engagement>/<project>/<cmsix>')
def load_method_cfg(engagement: str, project: str, cmsix: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
cfg = app.get_method(int(cmsix)).get_cfg()
(nodes, dotgraph) = cfg.as_dot()
svggraph = UG.get_svg(app.path, dotgraph)
loop_levels = cfg.get_loop_level_counts()
UG.append_pcs(svggraph, nodes)
UG.append_loop_levels(svggraph, loop_levels)
svg = ET.tostring(svggraph.getroot(), encoding='unicode', method='html')
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = {}
result['content']['svg'] = svg
return jsonify(result)
@app.route('/methodcfgcost/<engagement>/<project>/<cmsix>')
def load_method_cfg_cost(engagement: str, project: str, cmsix: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
cfg = app.get_method(int(cmsix)).get_cfg()
methodcost = app.get_costmodel().get_method_cost(int(cmsix))
(nodes, dotgraph) = cfg.as_dot(methodcost=methodcost)
svggraph = UG.get_svg(app.path, dotgraph)
loop_levels = cfg.get_loop_level_counts()
UG.append_pcs(svggraph, nodes)
UG.append_loop_levels(svggraph, loop_levels)
svg = ET.tostring(svggraph.getroot(), encoding='unicode', method='html')
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = {}
result['content']['svg'] = svg
return jsonify(result)
@app.route('/methodsimplecfgcost/<engagement>/<project>/<cmsix>')
def load_method_simple_cfg_cost(engagement: str, project: str, cmsix: str) -> Response:
result: Dict[str, Any] = {}
result['meta'] = {}
try:
app = load_engagement_app(engagement, project)
cfg = app.get_method(int(cmsix)).get_cfg()
methodcost = app.get_costmodel().get_method_cost(int(cmsix))
(nodes, dotgraph) = cfg.as_dot(methodcost=methodcost,simplecost=True)
svggraph = UG.get_svg(app.path, dotgraph)
loop_levels = cfg.get_loop_level_counts()
UG.append_pcs(svggraph, nodes)
UG.append_loop_levels(svggraph, loop_levels)
svg = ET.tostring(svggraph.getroot(), encoding='unicode', method='html')
except Exception as e:
result['meta']['status'] = 'fail'
result['meta']['reason'] = str(e)
traceback.print_exc()
else:
result['meta']['status'] = 'ok'
result['content'] = {}
result['content']['svg'] = svg
return jsonify(result)
#@<EMAIL>.route('/', defaults={'path': ''})
#@app.route('/<path:path>')
#def catch_all(path):
# result = {}
# result['meta'] = {}
# result['meta']['status'] = 'fail'
# result['meta']
def mk_class_code_table(f: Dict[str, Dict[str, Any]],
                        engagement: str,
                        project: str) -> ET.Element:
    """Build the HTML code table for all methods of a class.

    Each entry of *f* maps a method index string (cmsix) to a dict with keys
    'methodstring' (display name) and 'result' (list of [pc, instruction]
    pairs).  *engagement* and *project* are kept for interface stability even
    though the link-building code that used them has been removed.
    """
    # Removed the commented-out per-method <a href='/method/...'> experiment;
    # methods are now tagged with cmsix/name attributes for the front end.
    table = ET.Element('div')
    table.set('id', 'codetable')
    mt = ET.Element('table')
    mt.set('class', 'methodtable balanced')
    mt.append(mk_header(['pc', 'instruction']))
    for cmsix in f:
        mtr = ET.Element('tr')
        mdname = ET.Element('td')
        if len(f[cmsix]['result']) == 0:
            mdname.text = f[cmsix]['methodstring']
        else:
            # Only methods that have instructions are tagged for interaction.
            mdname.text = f[cmsix]['methodstring']
            mdname.set('cmsix', cmsix)
            mdname.set('name', 'method')
        mdname.set('colSpan', '2')
        mdname.set('style', 'border-style:none;')
        mtr.extend([mdname])
        mt.append(mtr)
        # One row per instruction: pc in the first cell, opcode in the second.
        for instr in f[cmsix]['result']:
            mtr = ET.Element('tr')
            tdindex = ET.Element('td')
            tdindex.text = instr[0]
            tdopcode = ET.Element('td')
            tdopcode.text = instr[1]
            mtr.extend([tdindex, tdopcode])
            mt.append(mtr)
        # Blank spacer row between methods (non-breaking space keeps height).
        mtr = ET.Element('tr')
        empty = ET.Element('td')
        empty.text = u'\xa0'
        empty.set('colSpan', '2')
        empty.set('style', 'border-style:none;')
        mtr.append(empty)
        mt.append(mtr)
    table.append(mt)
    return table
def mk_header(labels: List[str]) -> ET.Element:
    """Return a <tr> header row with one <th><button>label</button> per label."""
    row = ET.Element('tr')
    for text in labels:
        btn = ET.Element('button')
        btn.text = text
        cell = ET.Element('th')
        cell.append(btn)
        row.append(cell)
    return row
def mk_method_code_table(f: List[List[str]]) -> ET.Element:
    """Render a list of [pc, instruction] pairs as the method code table."""
    wrapper = ET.Element('div')
    wrapper.set('id', 'codetable')
    tbl = ET.Element('table')
    tbl.set('class', 'methodtable balanced')
    tbl.append(mk_header(['pc', 'instruction']))
    for entry in f:
        row = ET.Element('tr')
        cell_pc = ET.Element('td')
        cell_pc.text = entry[0]
        cell_instr = ET.Element('td')
        cell_instr.text = entry[1]
        row.extend([cell_pc, cell_instr])
        tbl.append(row)
    wrapper.append(tbl)
    return wrapper
"""
def mk_display_body(mk_table, report):
body = ET.Element('body')
mainpage = ET.Element('div')
mainpage.set('id','mainpage')
header = ET.Element('header')
header.text = 'CodeHawk Java Analyzer'
nav = ET.Element('nav')
navdiv = ET.Element('div')
navul = ET.Element('ul')
navlihome = ET.Element('li')
navlihome.text = 'HOME'
navul.append(navlihome)
navdiv.append(navul)
nav.append(navdiv)
codetable = mk_table(report)
footer = ET.Element('footer')
footer.text = '© 2019-2020, Kestrel Technology, LLC, Palo Alto, CA 94304'
mainpage.extend([ header, nav, codetable, footer ])
body.append(mainpage)
return body
"""
|
en
| 0.425344
|
# ------------------------------------------------------------------------------ # CodeHawk Binary Analyzer # Author: <NAME> # ------------------------------------------------------------------------------ # The MIT License (MIT) # # Copyright (c) 2016-2020 Kestrel Technology LLC # Copyright (c) 2021 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# ------------------------------------------------------------------------------ # ====================================================================== # Rest API # ====================================================================== #@<EMAIL>.route('/', defaults={'path': ''}) #@app.route('/<path:path>') #def catch_all(path): # result = {} # result['meta'] = {} # result['meta']['status'] = 'fail' # result['meta'] #mta = ET.Element('a') #mta.text = f[cmsix]['methodstring'] #linktxt = '/method/' + engagement + '/' + project + '/' + cmsix #mta.set('href', linktxt) #mta.set('target','_blank') #mdname.append(mta) def mk_display_body(mk_table, report): body = ET.Element('body') mainpage = ET.Element('div') mainpage.set('id','mainpage') header = ET.Element('header') header.text = 'CodeHawk Java Analyzer' nav = ET.Element('nav') navdiv = ET.Element('div') navul = ET.Element('ul') navlihome = ET.Element('li') navlihome.text = 'HOME' navul.append(navlihome) navdiv.append(navul) nav.append(navdiv) codetable = mk_table(report) footer = ET.Element('footer') footer.text = '© 2019-2020, Kestrel Technology, LLC, Palo Alto, CA 94304' mainpage.extend([ header, nav, codetable, footer ]) body.append(mainpage) return body
| 1.353099
| 1
|
scripts/create_icons.py
|
tdmorello/imagecatalog
| 0
|
6626155
|
"""Script to create a set of images."""
import random
from pathlib import Path
from random import randrange
from typing import List, Tuple, Union
from PIL import Image, ImageDraw
PathLike = Union[str, bytes, Path]
def make_random_image(im_size, single_color=None):
    """Create an RGB image of *im_size*.

    If *single_color* is truthy it produces a solid image (a random hex colour
    when *single_color* is not already a colour string); otherwise every pixel
    is independently random.
    """

    def _random_hex():
        return f"#{randrange(0x1000000):06x}"

    def _random_pixel():
        return (randrange(0, 256), randrange(0, 256), randrange(0, 256))

    if single_color:
        color = single_color if isinstance(single_color, str) else _random_hex()
        return Image.new("RGB", im_size, color)
    width, height = im_size
    noise = [_random_pixel() for _ in range(width * height)]
    im = Image.new("RGB", im_size)
    im.putdata(noise)
    return im
def make_sample_images(n, im_size=(128, 128), single_color=True):
    """Return a list of *n* images built with make_random_image."""
    images = []
    for _ in range(n):
        images.append(make_random_image(im_size, single_color))
    return images
def make_sample_files(
    dir, n, ext=".jpg", im_size=(128, 128), single_color=True, namestamp=True
):
    """Write *n* sample images into *dir* and return their file paths.

    Files are named image_00<ext>, image_01<ext>, ...  When *namestamp* is
    true, each file's own name is drawn onto the image before saving.
    """
    # make_sample_images already returns a list; the old element-wise list
    # comprehension copy was redundant.
    images = make_sample_images(n, im_size, single_color)

    def save_image(fpath, im):
        if namestamp:
            draw = ImageDraw.Draw(im)
            draw.text((10, 10), Path(fpath).name)
        im.save(fpath)
        return fpath

    return [
        save_image(Path(dir) / f"image_{i:02}{ext}", im) for i, im in enumerate(images)
    ]
if __name__ == "__main__":
    # Ensure the output directory exists before images and the CSV are
    # written into it (previously this crashed on a fresh checkout).
    Path("images").mkdir(parents=True, exist_ok=True)
    images = make_sample_files("images", 12, ".png", single_color="green")
    with open("images/sample.csv", "w") as fp:
        fp.write("image,label,note\n")
        for i, fpath in enumerate(images):
            fp.write(f"{fpath},Image {i},image {i} note\n")
|
"""Script to create a set of images."""
import random
from pathlib import Path
from random import randrange
from typing import List, Tuple, Union
from PIL import Image, ImageDraw
PathLike = Union[str, bytes, Path]
def make_random_image(im_size, single_color=None):
def get_random_hex():
return f"#{randrange(0x1000000):06x}"
def get_random_pixel():
return (randrange(0, 256), randrange(0, 256), randrange(0, 256))
if single_color:
if not isinstance(single_color, str):
single_color = get_random_hex()
im = Image.new("RGB", im_size, single_color)
else:
data = [get_random_pixel() for _ in range(im_size[0] * im_size[1])]
im = Image.new("RGB", im_size)
im.putdata(data)
return im
def make_sample_images(n, im_size=(128, 128), single_color=True):
return [make_random_image(im_size, single_color) for _ in range(n)]
def make_sample_files(
dir, n, ext=".jpg", im_size=(128, 128), single_color=True, namestamp=True
):
images = [im for im in make_sample_images(n, im_size, single_color)]
def save_image(fpath, im):
if namestamp:
draw = ImageDraw.Draw(im)
draw.text((10, 10), Path(fpath).name)
im.save(fpath)
return fpath
return [
save_image(Path(dir) / f"image_{i:02}{ext}", im) for i, im in enumerate(images)
]
if __name__ == "__main__":
images = make_sample_files("images", 12, ".png", single_color="green")
with open("images/sample.csv", "w") as fp:
fp.write("image,label,note\n")
for i, fpath in enumerate(images):
fp.write(f"{fpath},Image {i},image {i} note\n")
|
en
| 0.723258
|
Script to create a set of images.
| 3.430656
| 3
|
docs/extensions/promises.py
|
stephendwolff/django-localflavor
| 0
|
6626156
|
<gh_stars>0
try:
from django.utils.encoding import force_unicode
except ImportError:
# We are on Python 3, force_text is force_unicode.
from django.utils.encoding import force_text as force_unicode
from django.utils.functional import Promise
from sphinx.util.inspect import object_description
def list_or_tuple(x):
    """Return True when *x* is a list or a tuple (PEP 8: def, not a named lambda)."""
    return isinstance(x, (tuple, list))
def lazy_repr(obj):
    """Recursively force Django lazy translation objects (Promise) to text.

    Lists/tuples and dicts are rebuilt with their elements (and dict keys)
    processed; tuples stay tuples.  Any Promise leaf is forced with
    force_unicode; everything else is returned unchanged.
    """
    if list_or_tuple(obj):
        forced = [lazy_repr(item) for item in obj]
        return tuple(forced) if isinstance(obj, tuple) else forced
    if isinstance(obj, dict):
        return {lazy_repr(key): lazy_repr(value) for key, value in obj.items()}
    return force_unicode(obj) if isinstance(obj, Promise) else obj
def setup(app):
    """Sphinx extension hook: patch autodoc.safe_repr so default values that
    are Django lazy strings render as their forced text."""
    from sphinx.ext import autodoc

    def _lazy_safe_repr(obj):
        return object_description(lazy_repr(obj))

    autodoc.safe_repr = _lazy_safe_repr  # noqa
|
try:
from django.utils.encoding import force_unicode
except ImportError:
# We are on Python 3, force_text is force_unicode.
from django.utils.encoding import force_text as force_unicode
from django.utils.functional import Promise
from sphinx.util.inspect import object_description
list_or_tuple = lambda x: isinstance(x, (tuple, list))
def lazy_repr(obj):
if list_or_tuple(obj):
values = []
for item in obj:
values.append(lazy_repr(item))
if isinstance(obj, tuple):
values = tuple(values)
return values
elif isinstance(obj, dict):
values = {}
for key, value in obj.items():
values[lazy_repr(key)] = lazy_repr(value)
return values
else:
if isinstance(obj, Promise):
obj = force_unicode(obj)
return obj
def setup(app):
from sphinx.ext import autodoc
def lazy_safe_repr(object):
return object_description(lazy_repr(object))
autodoc.safe_repr = lazy_safe_repr # noqa
|
en
| 0.658266
|
# We are on Python 3, force_text is force_unicode. # noqa
| 2.15318
| 2
|
AutotestWebD/apps/mock_server/views/http_interface_debug.py
|
yangjourney/sosotest
| 422
|
6626157
|
from django.shortcuts import render,HttpResponse
from apps.interface.services.HTTP_interface_debugService import HTTP_interfaceDebugService
from apps.common.func.CommonFunc import *
from apps.common.func.LanguageFunc import *
from apps.common.helper.ApiReturn import ApiReturn
from AutotestWebD import settings
import json,time,logging
from apps.common.func.WebFunc import *
from apps.common.model.RedisDBConfig import *
from all_models.models import *
logger = logging.getLogger("web")
def interfaceDebugAdd(request):
    """Stage an HTTP-interface debug run.

    Parses the posted interface data, persists any uploaded request-body
    files, seeds the initial execution state, and caches the whole payload in
    Redis under a per-user debug id, which is returned to the client for the
    follow-up debug/poll calls.
    """
    # Debug id is unique per login name + millisecond timestamp.
    testDebugId = "interfaceDebug_%s_%s" % (request.session.get("loginName"),int(time.time() * 1000))
    if request.method != 'POST':
        addUserLog(request, "单接口管理->添加接口调试->请求方式错误", "FAIL")
        return HttpResponse(ApiReturn(ApiReturn.CODE_METHOD_ERROR,"请求方式错误","").toJson())
    data = json.loads(request.POST.get("interfaceData"))
    data['version'] = request.session.get("version")
    # GET/HEAD carry no request body, so file handling only applies otherwise.
    if data["method"] != "GET" and data["method"] != "HEAD":
        file = request.FILES
        bodyContent = data["bodyContent"]
        bodyType = data["bodyType"]
        if bodyType == "binary":
            # 'realPath' already present means the file was uploaded earlier;
            # just re-serialize the body.
            if "realPath" in bodyContent:
                data["bodyContent"] = json.dumps(bodyContent,ensure_ascii=False)
            else:
                # NOTE(review): falls through with thisFile=None when no file
                # was posted -- updateFileSave would then receive None; confirm
                # whether this path is reachable from the front end.
                if not file.get("file"):
                    pass
                thisFile = file.get("file")
                contentRealPath = updateFileSave(request.session.get("loginName"), thisFile, "0")
                bodyContent["realPath"] = contentRealPath
                bodyContent["fileType"] = thisFile.content_type
                data["bodyContent"] = json.dumps(bodyContent,ensure_ascii=False)
        elif bodyType == "form-data":
            fileDict = request.FILES
            # Occurrence counter per form key: repeated file keys are matched
            # positionally against request.FILES.getlist(key).
            keyCountDict = {}
            for i in range(0,len(bodyContent)):
                tmpAttr = bodyContent[i]
                if tmpAttr['type'] == "file":
                    # Already uploaded earlier -> keep the existing realPath.
                    if "realPath" in bodyContent[i]["value"]:
                        continue
                    fileKey = tmpAttr['key']
                    if fileKey in keyCountDict.keys():
                        keyCountDict[fileKey] += 1
                    else:
                        keyCountDict[fileKey] = 0
                    tmpFileTempObj = fileDict.getlist(fileKey)[keyCountDict[fileKey]]
                    contentRealPath = updateFileSave(request.session.get("loginName"), tmpFileTempObj, keyCountDict[fileKey])
                    bodyContent[i]['value']['fileType'] = tmpFileTempObj.content_type
                    bodyContent[i]['value']['realPath'] = contentRealPath
            data["bodyContent"] = json.dumps(bodyContent,ensure_ascii=False)
    # Initial execution state for the debug run (filled in by the runner).
    data['actualResult'] = ''
    data['assertResult'] = ''
    data['testResult'] = "NOTRUN"
    data['execStatus'] = 1
    data['beforeExecuteTakeTime'] = 0
    data['executeTakeTime'] = 0
    data['afterExecuteTakeTime'] = 0
    data['totalTakeTime'] = 0
    data['businessLineId'] = data["businessLineId_id"]
    data['moduleId'] = data["moduleId_id"]
    data['httpConfKey'] = data["httpConfKey_id"]
    langDict = getLangTextDict(request)
    try:
        RedisCache().set_data(testDebugId,json.dumps(data),60*60)
        # The cached debug payload initially lives for one hour.
        # debugId = HTTP_interfaceDebugService.interfaceDebugAdd(data,request.session.get("loginName")).id
    except Exception as e:
        logging.error(traceback.format_exc())
        addUserLog(request, "单接口管理->添加接口调试->插入失败,原因\n%s" % ApiReturn(ApiReturn.CODE_INTERFACE_DEBUG_ADD_EXCEPITON,langDict['web']['httpInterfaceDebugAddException'],"%s" % e).toJson(), "FAIL")
        return HttpResponse(ApiReturn(ApiReturn.CODE_INTERFACE_DEBUG_ADD_EXCEPITON,langDict['web']['httpInterfaceDebugAddException'],"%s" % e).toJson())
    addUserLog(request, "单接口管理->添加接口调试->成功" , "PASS")
    # return HttpResponse(ApiReturn(ApiReturn.CODE_OK,langDict['web']['httpInterfaceDebugAdd'],str(debugId)).toJson())
    return HttpResponse(ApiReturn(ApiReturn.CODE_OK,langDict['web']['httpInterfaceDebugAdd'],testDebugId).toJson())
def debugInterface(request):
    """Ask the TCP agent to execute a previously staged debug request."""
    langDict = getLangTextDict(request)
    testDebugId = request.POST.get("body")
    payload = '{"do":1,"InterfaceDebugId":"%s"}' % testDebugId
    tcp_result = send_tcp_request(payload)
    if tcp_result.code == ApiReturn.CODE_OK:
        addUserLog(request, "单接口管理->接口调试->发送TCP请求->成功", "PASS")
        return HttpResponse(ApiReturn(ApiReturn.CODE_OK, langDict['web']['httpInterfaceDebugSuccess']).toJson())
    # Agent rejected the request: build the failure record (kept for the
    # currently disabled setDebugFail call) and drop the cached payload.
    reason = str(tcp_result.code) + ":" + tcp_result.message
    debugMsg = {}
    debugMsg["execStatus"] = 4
    debugMsg["actualResult"] = reason
    debugMsg["assertResult"] = reason
    debugMsg["modTime"] = datetime.datetime.now()
    RedisCache().del_data(testDebugId)
    # HTTP_interfaceDebugService.setDebugFail(request.session.get("loginName"),debugMsg)
    addUserLog(request, "单接口管理->接口调试->发送TCP请求->失败,原因\n%s" % tcp_result.toJson(), "FAIL")
    return HttpResponse(tcp_result.toJson())
def getDebugResult(request):
    """Poll Redis for the debug run's result and render it.

    Returns a timeout ApiReturn if the result does not reach a terminal
    execStatus within roughly one second.
    """
    langDict = getLangTextDict(request)
    startTime = time.time()
    testDebugId = request.POST.get("body")
    # NOTE(review): busy-wait loop with no sleep -- hits Redis as fast as
    # possible for up to 1 second; consider a short time.sleep() per pass.
    while True:
        if (time.time() - startTime) >= 1:
            return HttpResponse(ApiReturn(ApiReturn.CODE_DEBUG_TIMEOUT, langDict['web']['httpDebugTimeout']).toJson())
        try:
            debugResult = json.loads(RedisCache().get_data(testDebugId))
        except Exception as e:
            print(traceback.format_exc())
            return HttpResponse(ApiReturn(ApiReturn.CODE_ERROR, "调试结果解析异常,请联系管理员").toJson())
        # execStatus 3 / 4 appear to be the terminal states -- only then is the
        # result rendered and the cache entry cleaned up.
        if debugResult["execStatus"] == 3 or debugResult["execStatus"] == 4:
            debugResult["alias"] = TbConfigHttp.objects.get(httpConfKey=debugResult["httpConfKey"]).alias
            RedisCache().del_data(testDebugId)
            addUserLog(request, "单接口管理->接口调试->获取结果->成功", "PASS")
            return render(request,"InterfaceTest/HTTPInterface/SubPages/HTTP_interface_debug_page.html",debugResult)
if __name__=="__main__":
    # Manual smoke check: print the configured project base directory.
    print(settings.BASE_DIR)
|
from django.shortcuts import render,HttpResponse
from apps.interface.services.HTTP_interface_debugService import HTTP_interfaceDebugService
from apps.common.func.CommonFunc import *
from apps.common.func.LanguageFunc import *
from apps.common.helper.ApiReturn import ApiReturn
from AutotestWebD import settings
import json,time,logging
from apps.common.func.WebFunc import *
from apps.common.model.RedisDBConfig import *
from all_models.models import *
logger = logging.getLogger("web")
def interfaceDebugAdd(request):
testDebugId = "interfaceDebug_%s_%s" % (request.session.get("loginName"),int(time.time() * 1000))
if request.method != 'POST':
addUserLog(request, "单接口管理->添加接口调试->请求方式错误", "FAIL")
return HttpResponse(ApiReturn(ApiReturn.CODE_METHOD_ERROR,"请求方式错误","").toJson())
data = json.loads(request.POST.get("interfaceData"))
data['version'] = request.session.get("version")
if data["method"] != "GET" and data["method"] != "HEAD":
file = request.FILES
bodyContent = data["bodyContent"]
bodyType = data["bodyType"]
if bodyType == "binary":
if "realPath" in bodyContent:
data["bodyContent"] = json.dumps(bodyContent,ensure_ascii=False)
else:
if not file.get("file"):
pass
thisFile = file.get("file")
contentRealPath = updateFileSave(request.session.get("loginName"), thisFile, "0")
bodyContent["realPath"] = contentRealPath
bodyContent["fileType"] = thisFile.content_type
data["bodyContent"] = json.dumps(bodyContent,ensure_ascii=False)
elif bodyType == "form-data":
fileDict = request.FILES
keyCountDict = {}
for i in range(0,len(bodyContent)):
tmpAttr = bodyContent[i]
if tmpAttr['type'] == "file":
if "realPath" in bodyContent[i]["value"]:
continue
fileKey = tmpAttr['key']
if fileKey in keyCountDict.keys():
keyCountDict[fileKey] += 1
else:
keyCountDict[fileKey] = 0
tmpFileTempObj = fileDict.getlist(fileKey)[keyCountDict[fileKey]]
contentRealPath = updateFileSave(request.session.get("loginName"), tmpFileTempObj, keyCountDict[fileKey])
bodyContent[i]['value']['fileType'] = tmpFileTempObj.content_type
bodyContent[i]['value']['realPath'] = contentRealPath
data["bodyContent"] = json.dumps(bodyContent,ensure_ascii=False)
data['actualResult'] = ''
data['assertResult'] = ''
data['testResult'] = "NOTRUN"
data['execStatus'] = 1
data['beforeExecuteTakeTime'] = 0
data['executeTakeTime'] = 0
data['afterExecuteTakeTime'] = 0
data['totalTakeTime'] = 0
data['businessLineId'] = data["businessLineId_id"]
data['moduleId'] = data["moduleId_id"]
data['httpConfKey'] = data["httpConfKey_id"]
langDict = getLangTextDict(request)
try:
RedisCache().set_data(testDebugId,json.dumps(data),60*60)
#初始设置接口debug的时间是1小时
# debugId = HTTP_interfaceDebugService.interfaceDebugAdd(data,request.session.get("loginName")).id
except Exception as e:
logging.error(traceback.format_exc())
addUserLog(request, "单接口管理->添加接口调试->插入失败,原因\n%s" % ApiReturn(ApiReturn.CODE_INTERFACE_DEBUG_ADD_EXCEPITON,langDict['web']['httpInterfaceDebugAddException'],"%s" % e).toJson(), "FAIL")
return HttpResponse(ApiReturn(ApiReturn.CODE_INTERFACE_DEBUG_ADD_EXCEPITON,langDict['web']['httpInterfaceDebugAddException'],"%s" % e).toJson())
addUserLog(request, "单接口管理->添加接口调试->成功" , "PASS")
# return HttpResponse(ApiReturn(ApiReturn.CODE_OK,langDict['web']['httpInterfaceDebugAdd'],str(debugId)).toJson())
return HttpResponse(ApiReturn(ApiReturn.CODE_OK,langDict['web']['httpInterfaceDebugAdd'],testDebugId).toJson())
def debugInterface(request):
langDict = getLangTextDict(request)
testDebugId = request.POST.get("body")
#aaabbb
tcpStr = '{"do":1,"InterfaceDebugId":"%s"}' % testDebugId
retApiResult = send_tcp_request(tcpStr)
if retApiResult.code != ApiReturn.CODE_OK:
debugMsg = {}
debugMsg["execStatus"] = 4
debugMsg["actualResult"] = str(retApiResult.code)+":"+retApiResult.message
debugMsg["assertResult"] = str(retApiResult.code)+":"+retApiResult.message
debugMsg["modTime"] = datetime.datetime.now()
RedisCache().del_data(testDebugId)
# HTTP_interfaceDebugService.setDebugFail(request.session.get("loginName"),debugMsg)
addUserLog(request, "单接口管理->接口调试->发送TCP请求->失败,原因\n%s" % retApiResult.toJson(), "FAIL")
return HttpResponse(retApiResult.toJson())
else:
addUserLog(request, "单接口管理->接口调试->发送TCP请求->成功", "PASS")
return HttpResponse(ApiReturn(ApiReturn.CODE_OK, langDict['web']['httpInterfaceDebugSuccess']).toJson())
def getDebugResult(request):
langDict = getLangTextDict(request)
startTime = time.time()
testDebugId = request.POST.get("body")
while True:
if (time.time() - startTime) >= 1:
return HttpResponse(ApiReturn(ApiReturn.CODE_DEBUG_TIMEOUT, langDict['web']['httpDebugTimeout']).toJson())
try:
debugResult = json.loads(RedisCache().get_data(testDebugId))
except Exception as e:
print(traceback.format_exc())
return HttpResponse(ApiReturn(ApiReturn.CODE_ERROR, "调试结果解析异常,请联系管理员").toJson())
if debugResult["execStatus"] == 3 or debugResult["execStatus"] == 4:
debugResult["alias"] = TbConfigHttp.objects.get(httpConfKey=debugResult["httpConfKey"]).alias
RedisCache().del_data(testDebugId)
addUserLog(request, "单接口管理->接口调试->获取结果->成功", "PASS")
return render(request,"InterfaceTest/HTTPInterface/SubPages/HTTP_interface_debug_page.html",debugResult)
if __name__=="__main__":
print(settings.BASE_DIR)
|
en
| 0.067893
|
#初始设置接口debug的时间是1小时 # debugId = HTTP_interfaceDebugService.interfaceDebugAdd(data,request.session.get("loginName")).id # return HttpResponse(ApiReturn(ApiReturn.CODE_OK,langDict['web']['httpInterfaceDebugAdd'],str(debugId)).toJson()) #aaabbb # HTTP_interfaceDebugService.setDebugFail(request.session.get("loginName"),debugMsg)
| 1.958606
| 2
|
nempy/market_constraints.py
|
bje-/nempy
| 0
|
6626158
|
<gh_stars>0
from nempy import helper_functions as hf
import pandas as pd
def energy(demand, next_constraint_id):
    """Create the constraints that ensure the amount of supply dispatched in each region equals demand.

    If only one region exists then the constraint will be of the form:

        unit 1 output + unit 2 output +. . .+ unit n output = region demand

    If multiple regions exist then a constraint will be created for each region. If there were 2 units A and B in region
    X, and 2 units C and D in region Y, then the constraints would be of the form:

        constraint 1: unit A output + unit B output = region X demand
        constraint 2: unit C output + unit D output = region Y demand

    Examples
    --------
    >>> import pandas

    Defined the unit capacities.

    >>> demand = pd.DataFrame({
    ...   'region': ['X', 'Y'],
    ...   'demand': [1000.0, 2000.0]})

    >>> next_constraint_id = 0

    Create the constraint information.

    >>> type_and_rhs, variable_map = energy(demand, next_constraint_id)

    >>> print(type_and_rhs)
      region  constraint_id type     rhs
    0      X              0    =  1000.0
    1      Y              1    =  2000.0

    >>> print(variable_map)
       constraint_id region service  coefficient
    0              0      X  energy          1.0
    1              1      Y  energy          1.0

    Parameters
    ----------
    demand : pd.DataFrame
        Demand by region.

        ========  =====================================================================================
        Columns:  Description:
        region    unique identifier of a region (as `str`)
        demand    the non dispatchable demand, in MW (as `np.float64`)
        ========  =====================================================================================

    next_constraint_id : int
        The next integer to start using for constraint ids.

    Returns
    -------
    type_and_rhs : pd.DataFrame
        The type and rhs of each constraint.

        =============  ===============================================================
        Columns:       Description:
        region         unique identifier of a market region (as `str`)
        constraint_id  the id of the constraint (as `int`)
        type           the type of the constraint, e.g. "=" (as `str`)
        rhs            the rhs of the constraint (as `np.float64`)
        =============  ===============================================================

    variable_map : pd.DataFrame
        The type of variables that should appear on the lhs of the constraint.

        =============  ==========================================================================
        Columns:       Description:
        constraint_id  the id of the constraint (as `np.int64`)
        region         the regional variables the constraint should map to (as `str`)
        service        the service type of the variables the constraint should map to (as `str`)
        coefficient    the constraint's lhs coefficient for matching variables, always 1.0 (as `np.float64`)
        =============  ==========================================================================
    """
    # Create an index for each constraint.
    type_and_rhs = hf.save_index(demand, 'constraint_id', next_constraint_id)
    type_and_rhs['type'] = '='  # Supply and interconnector flow must exactly equal demand.
    type_and_rhs['rhs'] = type_and_rhs['demand']
    type_and_rhs = type_and_rhs.loc[:, ['region', 'constraint_id', 'type', 'rhs']]
    # Map constraints to energy variables in their region.
    variable_map = type_and_rhs.loc[:, ['constraint_id', 'region']]
    variable_map['service'] = 'energy'
    variable_map['coefficient'] = 1.0
    return type_and_rhs, variable_map
def fcas(fcas_requirements, next_constraint_id):
    """Create the constraints that ensure the amount of FCAS supply dispatched equals requirements.

    Examples
    --------
    >>> import pandas

    Defined the unit capacities.

    >>> fcas_requirements = pd.DataFrame({
    ...   'set': ['raise_reg_main', 'raise_reg_main', 'raise_reg_main', 'raise_reg_main'],
    ...   'service': ['raise_reg', 'raise_reg', 'raise_reg', 'raise_reg'],
    ...   'region': ['QLD', 'NSW', 'VIC', 'SA'],
    ...   'volume': [100.0, 100.0, 100.0, 100.0]})

    >>> next_constraint_id = 0

    Create the constraint information.

    >>> type_and_rhs, variable_map = fcas(fcas_requirements, next_constraint_id)

    >>> print(type_and_rhs)
                  set  constraint_id type    rhs
    0  raise_reg_main              0    =  100.0

    >>> print(variable_map)
       constraint_id    service region  coefficient
    0              0  raise_reg    QLD          1.0
    1              0  raise_reg    NSW          1.0
    2              0  raise_reg    VIC          1.0
    3              0  raise_reg     SA          1.0

    Parameters
    ----------
    fcas_requirements : pd.DataFrame
        requirement by set and the regions and service the requirement applies to.

        ========  ===================================================================
        Columns:  Description:
        set       unique identifier of the requirement set (as `str`)
        service   the service or services the requirement set applies to (as `str`)
        region    unique identifier of a region (as `str`)
        volume    the amount of service required, in MW (as `np.float64`)
        ========  ===================================================================

    next_constraint_id : int
        The next integer to start using for constraint ids.

    Returns
    -------
    type_and_rhs : pd.DataFrame
        The type and rhs of each constraint.

        =============  ===================================================================
        Columns:       Description:
        set            unique identifier of the requirement set (as `str`)
        constraint_id  the id of the constraint (as `int`)
        type           the type of the constraint, e.g. "=" (as `str`)
        rhs            the rhs of the constraint (as `np.float64`)
        =============  ===================================================================

    variable_map : pd.DataFrame
        The type of variables that should appear on the lhs of the constraint.

        =============  ==========================================================================
        Columns:       Description:
        constraint_id  the id of the constraint (as `np.int64`)
        region         the regional variables the constraint should map to (as `str`)
        service        the service type of the variables the constraint should map to (as `str`)
        coefficient    the constraint's lhs coefficient for matching variables, always 1.0 (as `np.float64`)
        =============  ==========================================================================
    """
    # Create an index for each constraint (one per unique requirement set).
    type_and_rhs = fcas_requirements.loc[:, ['set', 'volume']]
    type_and_rhs = type_and_rhs.drop_duplicates('set')
    type_and_rhs = hf.save_index(type_and_rhs, 'constraint_id', next_constraint_id)
    type_and_rhs['type'] = '='  # FCAS dispatch must exactly equal the requirement volume.
    type_and_rhs['rhs'] = type_and_rhs['volume']
    type_and_rhs = type_and_rhs.loc[:, ['set', 'constraint_id', 'type', 'rhs']]
    # Map each constraint to the FCAS variables of its (region, service) pairs.
    variable_map = fcas_requirements.loc[:, ['set', 'service', 'region']]
    variable_map = pd.merge(variable_map, type_and_rhs.loc[:, ['set', 'constraint_id']], 'inner', on='set')
    variable_map['coefficient'] = 1.0
    variable_map = variable_map.loc[:, ['constraint_id', 'service', 'region', 'coefficient']]
    return type_and_rhs, variable_map
|
from nempy import helper_functions as hf
import pandas as pd
def energy(demand, next_constraint_id):
"""Create the constraints that ensure the amount of supply dispatched in each region equals demand.
If only one region exists then the constraint will be of the form:
unit 1 output + unit 2 output +. . .+ unit n output = region demand
If multiple regions exist then a constraint will ne created for each region. If there were 2 units A and B in region
X, and 2 units C and D in region Y, then the constraints would be of the form:
constraint 1: unit A output + unit B output = region X demand
constraint 2: unit C output + unit D output = region Y demand
Examples
--------
>>> import pandas
Defined the unit capacities.
>>> demand = pd.DataFrame({
... 'region': ['X', 'Y'],
... 'demand': [1000.0, 2000.0]})
>>> next_constraint_id = 0
Create the constraint information.
>>> type_and_rhs, variable_map = energy(demand, next_constraint_id)
>>> print(type_and_rhs)
region constraint_id type rhs
0 X 0 = 1000.0
1 Y 1 = 2000.0
>>> print(variable_map)
constraint_id region service coefficient
0 0 X energy 1.0
1 1 Y energy 1.0
Parameters
----------
demand : pd.DataFrame
Demand by region.
======== =====================================================================================
Columns: Description:
region unique identifier of a region (as `str`)
demand the non dispatchable demand, in MW (as `np.float64`)
======== =====================================================================================
next_constraint_id : int
The next integer to start using for constraint ids.
Returns
-------
type_and_rhs : pd.DataFrame
The type and rhs of each constraint.
============= ===============================================================
Columns: Description:
region unique identifier of a market region (as `str`)
constraint_id the id of the variable (as `int`)
type the type of the constraint, e.g. "=" (as `str`)
rhs the rhs of the constraint (as `np.float64`)
============= ===============================================================
variable_map : pd.DataFrame
The type of variables that should appear on the lhs of the constraint.
============= ==========================================================================
Columns: Description:
constraint_id the id of the constraint (as `np.int64`)
region the regional variables the constraint should map too (as `str`)
service the service type of the variables the constraint should map to (as `str`)
coefficient the upper bound of the variable, the volume bid (as `np.float64`)
============= ==========================================================================
"""
# Create an index for each constraint.
type_and_rhs = hf.save_index(demand, 'constraint_id', next_constraint_id)
type_and_rhs['type'] = '=' # Supply and interconnector flow must exactly equal demand.
type_and_rhs['rhs'] = type_and_rhs['demand']
type_and_rhs = type_and_rhs.loc[:, ['region', 'constraint_id', 'type', 'rhs']]
# Map constraints to energy variables in their region.
variable_map = type_and_rhs.loc[:, ['constraint_id', 'region']]
variable_map['service'] = 'energy'
variable_map['coefficient'] = 1.0
return type_and_rhs, variable_map
def fcas(fcas_requirements, next_constraint_id):
"""Create the constraints that ensure the amount of FCAS supply dispatched equals requirements.
Examples
--------
>>> import pandas
Defined the unit capacities.
>>> fcas_requirements = pd.DataFrame({
... 'set': ['raise_reg_main', 'raise_reg_main', 'raise_reg_main', 'raise_reg_main'],
... 'service': ['raise_reg', 'raise_reg', 'raise_reg', 'raise_reg'],
... 'region': ['QLD', 'NSW', 'VIC', 'SA'],
... 'volume': [100.0, 100.0, 100.0, 100.0]})
>>> next_constraint_id = 0
Create the constraint information.
>>> type_and_rhs, variable_map = fcas(fcas_requirements, next_constraint_id)
>>> print(type_and_rhs)
set constraint_id type rhs
0 raise_reg_main 0 = 100.0
>>> print(variable_map)
constraint_id service region coefficient
0 0 raise_reg QLD 1.0
1 0 raise_reg NSW 1.0
2 0 raise_reg VIC 1.0
3 0 raise_reg SA 1.0
Parameters
----------
fcas_requirements : pd.DataFrame
requirement by set and the regions and service the requirement applies to.
======== ===================================================================
Columns: Description:
set unique identifier of the requirement set (as `str`)
service the service or services the requirement set applies to (as `str`)
region unique identifier of a region (as `str`)
volume the amount of service required, in MW (as `np.float64`)
======== ===================================================================
next_constraint_id : int
The next integer to start using for constraint ids.
Returns
-------
type_and_rhs : pd.DataFrame
The type and rhs of each constraint.
============= ===================================================================
Columns: Description:
set unique identifier of a market region (as `str`)
constraint_id the id of the variable (as `int`)
type the type of the constraint, e.g. "=" (as `str`)
rhs the rhs of the constraint (as `np.float64`)
============= ===================================================================
variable_map : pd.DataFrame
The type of variables that should appear on the lhs of the constraint.
============= ==========================================================================
Columns: Description:
constraint_id the id of the constraint (as `np.int64`)
region the regional variables the constraint should map too (as `str`)
service the service type of the variables the constraint should map to (as `str`)
coefficient the upper bound of the variable, the volume bid (as `np.float64`)
============= ==========================================================================
"""
# Create an index for each constraint.
type_and_rhs = fcas_requirements.loc[:, ['set', 'volume']]
type_and_rhs = type_and_rhs.drop_duplicates('set')
type_and_rhs = hf.save_index(type_and_rhs, 'constraint_id', next_constraint_id)
type_and_rhs['type'] = '=' # Supply and interconnector flow must exactly equal demand.
type_and_rhs['rhs'] = type_and_rhs['volume']
type_and_rhs = type_and_rhs.loc[:, ['set', 'constraint_id', 'type', 'rhs']]
# Map constraints to energy variables in their region.
variable_map = fcas_requirements.loc[:, ['set', 'service', 'region']]
variable_map = pd.merge(variable_map, type_and_rhs.loc[:, ['set', 'constraint_id']], 'inner', on='set')
variable_map['coefficient'] = 1.0
variable_map = variable_map.loc[:, ['constraint_id', 'service', 'region', 'coefficient']]
return type_and_rhs, variable_map
|
en
| 0.665062
|
Create the constraints that ensure the amount of supply dispatched in each region equals demand. If only one region exists then the constraint will be of the form: unit 1 output + unit 2 output +. . .+ unit n output = region demand If multiple regions exist then a constraint will ne created for each region. If there were 2 units A and B in region X, and 2 units C and D in region Y, then the constraints would be of the form: constraint 1: unit A output + unit B output = region X demand constraint 2: unit C output + unit D output = region Y demand Examples -------- >>> import pandas Defined the unit capacities. >>> demand = pd.DataFrame({ ... 'region': ['X', 'Y'], ... 'demand': [1000.0, 2000.0]}) >>> next_constraint_id = 0 Create the constraint information. >>> type_and_rhs, variable_map = energy(demand, next_constraint_id) >>> print(type_and_rhs) region constraint_id type rhs 0 X 0 = 1000.0 1 Y 1 = 2000.0 >>> print(variable_map) constraint_id region service coefficient 0 0 X energy 1.0 1 1 Y energy 1.0 Parameters ---------- demand : pd.DataFrame Demand by region. ======== ===================================================================================== Columns: Description: region unique identifier of a region (as `str`) demand the non dispatchable demand, in MW (as `np.float64`) ======== ===================================================================================== next_constraint_id : int The next integer to start using for constraint ids. Returns ------- type_and_rhs : pd.DataFrame The type and rhs of each constraint. ============= =============================================================== Columns: Description: region unique identifier of a market region (as `str`) constraint_id the id of the variable (as `int`) type the type of the constraint, e.g. 
"=" (as `str`) rhs the rhs of the constraint (as `np.float64`) ============= =============================================================== variable_map : pd.DataFrame The type of variables that should appear on the lhs of the constraint. ============= ========================================================================== Columns: Description: constraint_id the id of the constraint (as `np.int64`) region the regional variables the constraint should map too (as `str`) service the service type of the variables the constraint should map to (as `str`) coefficient the upper bound of the variable, the volume bid (as `np.float64`) ============= ========================================================================== # Create an index for each constraint. # Supply and interconnector flow must exactly equal demand. # Map constraints to energy variables in their region. Create the constraints that ensure the amount of FCAS supply dispatched equals requirements. Examples -------- >>> import pandas Defined the unit capacities. >>> fcas_requirements = pd.DataFrame({ ... 'set': ['raise_reg_main', 'raise_reg_main', 'raise_reg_main', 'raise_reg_main'], ... 'service': ['raise_reg', 'raise_reg', 'raise_reg', 'raise_reg'], ... 'region': ['QLD', 'NSW', 'VIC', 'SA'], ... 'volume': [100.0, 100.0, 100.0, 100.0]}) >>> next_constraint_id = 0 Create the constraint information. >>> type_and_rhs, variable_map = fcas(fcas_requirements, next_constraint_id) >>> print(type_and_rhs) set constraint_id type rhs 0 raise_reg_main 0 = 100.0 >>> print(variable_map) constraint_id service region coefficient 0 0 raise_reg QLD 1.0 1 0 raise_reg NSW 1.0 2 0 raise_reg VIC 1.0 3 0 raise_reg SA 1.0 Parameters ---------- fcas_requirements : pd.DataFrame requirement by set and the regions and service the requirement applies to. 
======== =================================================================== Columns: Description: set unique identifier of the requirement set (as `str`) service the service or services the requirement set applies to (as `str`) region unique identifier of a region (as `str`) volume the amount of service required, in MW (as `np.float64`) ======== =================================================================== next_constraint_id : int The next integer to start using for constraint ids. Returns ------- type_and_rhs : pd.DataFrame The type and rhs of each constraint. ============= =================================================================== Columns: Description: set unique identifier of a market region (as `str`) constraint_id the id of the variable (as `int`) type the type of the constraint, e.g. "=" (as `str`) rhs the rhs of the constraint (as `np.float64`) ============= =================================================================== variable_map : pd.DataFrame The type of variables that should appear on the lhs of the constraint. ============= ========================================================================== Columns: Description: constraint_id the id of the constraint (as `np.int64`) region the regional variables the constraint should map too (as `str`) service the service type of the variables the constraint should map to (as `str`) coefficient the upper bound of the variable, the volume bid (as `np.float64`) ============= ========================================================================== # Create an index for each constraint. # Supply and interconnector flow must exactly equal demand. # Map constraints to energy variables in their region.
| 3.863979
| 4
|
pretraining.py
|
gregsherrid/docker_bert_as_service
| 1
|
6626159
|
from bert_serving.client import BertClient
bc = BertClient(check_length=False)
txt = input("Name output file: ")
txt +='.txt'
encoded_array = bc.encode(["The antagonist in the test is Julian. Julian is the antagonist because he was a bully to Auggie through the entire school year and anybody who is as mean as Julian should be a 'bad guy.' If I was getting bullied by Julian I would think of him as a bully or bad guy.","The book, House of Secrets, reminds me of <NAME> because both books involve three main people going on adventures. Also, the two novels involve some magic. The two novels are different because House of Secrets is about three kids being transported into a world of books(all by the same author), while <NAME> is about a boy who turns out to be a wizard."])
print(encoded_array)
with open(txt, 'w') as filehandle:
for listitem in encoded_array:
filehandle.write('%s\n' % listitem)
|
from bert_serving.client import BertClient
bc = BertClient(check_length=False)
txt = input("Name output file: ")
txt +='.txt'
encoded_array = bc.encode(["The antagonist in the test is Julian. Julian is the antagonist because he was a bully to Auggie through the entire school year and anybody who is as mean as Julian should be a 'bad guy.' If I was getting bullied by Julian I would think of him as a bully or bad guy.","The book, House of Secrets, reminds me of <NAME> because both books involve three main people going on adventures. Also, the two novels involve some magic. The two novels are different because House of Secrets is about three kids being transported into a world of books(all by the same author), while <NAME> is about a boy who turns out to be a wizard."])
print(encoded_array)
with open(txt, 'w') as filehandle:
for listitem in encoded_array:
filehandle.write('%s\n' % listitem)
|
none
| 1
| 3.165273
| 3
|
|
splider.py
|
lyc-123/audioStory
| 0
|
6626160
|
<filename>splider.py
import requests
import urllib
import re
import os
import time
class YsSpider:
def __init__(self, name):
self.search_name = name
self.search_url = "http://www.ting89.com/search.asp?searchword="
self.home_url = "http://www.ting89.com/books/"
self.index_pattern = r"""<a href="/books/([0-9]+).html" title="(.+?)" target='_blank'>"""
self.chapter_pattern=r"""<a href='(/down/\?[^-]+-\d+.html)' target="_blank">(.+?)</a>"""
self.down_pattern=r"""url=(.*)/(.+?)\.mp3"""
self.book_id = ''
self.book_name = ''
self.Chapter_list = []
# 返回搜索书目的id
def searchbook(self):
file = requests.get(self.search_url + urllib.parse.quote(self.search_name, encoding='gb2312'))
data = file.content.decode('gbk')
result = re.findall(self.index_pattern, data)
if len(result):
for index, i in enumerate(result):
print('%d.%s'%(index+1,i[1]))
# str = input("输入你要下载的书目名称序号: ")
str = '1'
self.book_name = result[int(str)-1][1]
self.book_id = result[int(str)-1][0]
return self.book_id
else:
print('*******没有找到你输入的相关书籍,请更换后重新运行程序*******')
exit()
def get_chapter_list(self):#获取各章节list和url
data = requests.get(self.home_url+self.searchbook()+'.html').content.decode('gbk')
result = re.findall(self.chapter_pattern, data)
return result
def _getAllUrl(self):# 获得所有的章节的下载地址
chapter_list = self.get_chapter_list()
chapter = [x[0] for x in chapter_list]
self.Chapter_list= [x[1] for x in chapter_list]
_list = [x[1] for x in chapter_list]
data = requests.get("http://www.ting89.com" + chapter[0]).content.decode('gbk')
result = re.findall(self.down_pattern, data)
# return result
return self.sub_get_url(result[0][0],_list, re.search("^0.*1$", result[0][1]))
def sub_get_url(self, down_url, _list, down_url_flag):
url = []
if down_url_flag:
xulie = list(range(len(_list)))
weishu = len(str(xulie[-1]))
for i in xulie:
i1 = i + 1
tmp_url = down_url+'/' + str(i1).zfill(weishu) + '.mp3'
url.append(urllib.request.quote(tmp_url, safe='/:?='))
else:
for item in _list:
tmp_url = down_url + '/'+item + ".mp3"
url.append(urllib.request.quote(tmp_url, safe='/:?='))
return url
# 保存指定URL的文件
def save_a_file(self, url, path, chapter):
try:
print('尝试下载',chapter)
if not os.path.exists(path):
response = requests.get(url)
with open(path, 'wb') as f:
f.write(response.content)
f.close
print(chapter,'保存成功')
response.close()
time.sleep(1)
else:
print('文件已经存在')
except:
print('爬取失败,已下载至',chapter,'即将重新尝试下载')
self.save_a_file(url, path, chapter)
def download_files(self):
result = self._getAllUrl()# 所有的章节对应的下载地址
root = os.path.join(os.getcwd(), self.book_name)
if not os.path.exists(root):
os.mkdir(root)
for index,i in enumerate(result):
path = os.path.join(root, self.Chapter_list[index])+'.mp3'
self.save_a_file(i, path, self.Chapter_list[index])
|
<filename>splider.py
import requests
import urllib
import re
import os
import time
class YsSpider:
def __init__(self, name):
self.search_name = name
self.search_url = "http://www.ting89.com/search.asp?searchword="
self.home_url = "http://www.ting89.com/books/"
self.index_pattern = r"""<a href="/books/([0-9]+).html" title="(.+?)" target='_blank'>"""
self.chapter_pattern=r"""<a href='(/down/\?[^-]+-\d+.html)' target="_blank">(.+?)</a>"""
self.down_pattern=r"""url=(.*)/(.+?)\.mp3"""
self.book_id = ''
self.book_name = ''
self.Chapter_list = []
# 返回搜索书目的id
def searchbook(self):
file = requests.get(self.search_url + urllib.parse.quote(self.search_name, encoding='gb2312'))
data = file.content.decode('gbk')
result = re.findall(self.index_pattern, data)
if len(result):
for index, i in enumerate(result):
print('%d.%s'%(index+1,i[1]))
# str = input("输入你要下载的书目名称序号: ")
str = '1'
self.book_name = result[int(str)-1][1]
self.book_id = result[int(str)-1][0]
return self.book_id
else:
print('*******没有找到你输入的相关书籍,请更换后重新运行程序*******')
exit()
def get_chapter_list(self):#获取各章节list和url
data = requests.get(self.home_url+self.searchbook()+'.html').content.decode('gbk')
result = re.findall(self.chapter_pattern, data)
return result
def _getAllUrl(self):# 获得所有的章节的下载地址
chapter_list = self.get_chapter_list()
chapter = [x[0] for x in chapter_list]
self.Chapter_list= [x[1] for x in chapter_list]
_list = [x[1] for x in chapter_list]
data = requests.get("http://www.ting89.com" + chapter[0]).content.decode('gbk')
result = re.findall(self.down_pattern, data)
# return result
return self.sub_get_url(result[0][0],_list, re.search("^0.*1$", result[0][1]))
def sub_get_url(self, down_url, _list, down_url_flag):
url = []
if down_url_flag:
xulie = list(range(len(_list)))
weishu = len(str(xulie[-1]))
for i in xulie:
i1 = i + 1
tmp_url = down_url+'/' + str(i1).zfill(weishu) + '.mp3'
url.append(urllib.request.quote(tmp_url, safe='/:?='))
else:
for item in _list:
tmp_url = down_url + '/'+item + ".mp3"
url.append(urllib.request.quote(tmp_url, safe='/:?='))
return url
# 保存指定URL的文件
def save_a_file(self, url, path, chapter):
try:
print('尝试下载',chapter)
if not os.path.exists(path):
response = requests.get(url)
with open(path, 'wb') as f:
f.write(response.content)
f.close
print(chapter,'保存成功')
response.close()
time.sleep(1)
else:
print('文件已经存在')
except:
print('爬取失败,已下载至',chapter,'即将重新尝试下载')
self.save_a_file(url, path, chapter)
def download_files(self):
result = self._getAllUrl()# 所有的章节对应的下载地址
root = os.path.join(os.getcwd(), self.book_name)
if not os.path.exists(root):
os.mkdir(root)
for index,i in enumerate(result):
path = os.path.join(root, self.Chapter_list[index])+'.mp3'
self.save_a_file(i, path, self.Chapter_list[index])
|
zh
| 0.395821
|
<a href="/books/([0-9]+).html" title="(.+?)" target='_blank'> <a href='(/down/\?[^-]+-\d+.html)' target="_blank">(.+?)</a> url=(.*)/(.+?)\.mp3 # 返回搜索书目的id # str = input("输入你要下载的书目名称序号: ") #获取各章节list和url # 获得所有的章节的下载地址 # return result # 保存指定URL的文件 # 所有的章节对应的下载地址
| 3.142861
| 3
|
tests/api/endpoints/test_file_comments.py
|
odontomachus/seahub
| 0
|
6626161
|
import json
from django.core.urlresolvers import reverse
import seaserv
from seaserv import seafile_api, ccnet_api
from seahub.base.models import FileComment
from seahub.notifications.models import UserNotification
from seahub.test_utils import BaseTestCase
from seahub.file_participants.models import FileParticipant
from seahub.tags.models import FileUUIDMap
class FileCommentsTest(BaseTestCase):
def setUp(self):
self.tmp_user = self.create_user()
self.login_as(self.user)
self.endpoint = reverse('api2-file-comments', args=[self.repo.id]) + '?p=' + self.file
def tearDown(self):
self.remove_repo()
self.remove_user(self.tmp_user.email)
def test_can_list(self):
for i in xrange(10):
o = FileComment.objects.add_by_file_path(repo_id=self.repo.id,
file_path=self.file,
author=self.tmp_user.username,
comment='test comment'+str(i))
resp = self.client.get(self.endpoint + '&page=2&per_page=5')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(resp._headers.get('links')) == 2
assert resp._headers.get('links')[0] == 'Links'
link = reverse('api2-file-comments', args=[self.repo.id]) + '?per_page=5&page=1'
assert link in resp._headers.get('links')[1]
assert len(json_resp['comments']) == 5
assert json_resp['comments'][0]['comment'] == 'test comment5'
assert json_resp['comments'][0]['user_email'] == self.tmp_user.email
assert 'avatars' in json_resp['comments'][0]['avatar_url']
assert json_resp['total_count'] == 10
def test_can_list_with_avatar_size(self):
o = FileComment.objects.add_by_file_path(repo_id=self.repo.id,
file_path=self.file,
author=self.tmp_user.username,
comment='test comment')
resp = self.client.get(self.endpoint + '&avatar_size=20')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['comments']) == 1
assert json_resp['comments'][0]['comment'] == o.comment
assert json_resp['comments'][0]['user_email'] == self.tmp_user.email
assert 'avatars' in json_resp['comments'][0]['avatar_url']
assert json_resp['total_count'] == 1
def test_can_post(self):
resp = self.client.post(self.endpoint, {
'comment': 'new comment'
})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['comment'] == 'new comment'
assert 'avatars' in json_resp['avatar_url']
def test_can_post_with_avatar_size(self):
resp = self.client.post(self.endpoint + '&avatar_size=20', {
'comment': 'new comment'
})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['comment'] == 'new comment'
assert 'avatars' in json_resp['avatar_url']
def test_invalid_user(self):
self.logout()
self.login_as(self.admin)
resp = self.client.get(self.endpoint)
self.assertEqual(403, resp.status_code)
resp = self.client.post(self.endpoint, {
'comment': 'new comment'
})
self.assertEqual(403, resp.status_code)
def test_can_notify_participant(self):
assert len(UserNotification.objects.all()) == 0
# share repo and add participant
seafile_api.share_repo(self.repo.id, self.user.username, self.admin.username, 'rw')
file_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap_by_path(self.repo.id, self.file, False)
FileParticipant.objects.add_participant(file_uuid, self.admin.username)
resp = self.client.post(self.endpoint, {
'comment': 'new comment'
})
self.assertEqual(201, resp.status_code)
assert len(UserNotification.objects.all()) == 1
assert UserNotification.objects.all()[0].to_user == self.admin.username
|
import json
from django.core.urlresolvers import reverse
import seaserv
from seaserv import seafile_api, ccnet_api
from seahub.base.models import FileComment
from seahub.notifications.models import UserNotification
from seahub.test_utils import BaseTestCase
from seahub.file_participants.models import FileParticipant
from seahub.tags.models import FileUUIDMap
class FileCommentsTest(BaseTestCase):
def setUp(self):
self.tmp_user = self.create_user()
self.login_as(self.user)
self.endpoint = reverse('api2-file-comments', args=[self.repo.id]) + '?p=' + self.file
def tearDown(self):
self.remove_repo()
self.remove_user(self.tmp_user.email)
def test_can_list(self):
for i in xrange(10):
o = FileComment.objects.add_by_file_path(repo_id=self.repo.id,
file_path=self.file,
author=self.tmp_user.username,
comment='test comment'+str(i))
resp = self.client.get(self.endpoint + '&page=2&per_page=5')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(resp._headers.get('links')) == 2
assert resp._headers.get('links')[0] == 'Links'
link = reverse('api2-file-comments', args=[self.repo.id]) + '?per_page=5&page=1'
assert link in resp._headers.get('links')[1]
assert len(json_resp['comments']) == 5
assert json_resp['comments'][0]['comment'] == 'test comment5'
assert json_resp['comments'][0]['user_email'] == self.tmp_user.email
assert 'avatars' in json_resp['comments'][0]['avatar_url']
assert json_resp['total_count'] == 10
def test_can_list_with_avatar_size(self):
o = FileComment.objects.add_by_file_path(repo_id=self.repo.id,
file_path=self.file,
author=self.tmp_user.username,
comment='test comment')
resp = self.client.get(self.endpoint + '&avatar_size=20')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert len(json_resp['comments']) == 1
assert json_resp['comments'][0]['comment'] == o.comment
assert json_resp['comments'][0]['user_email'] == self.tmp_user.email
assert 'avatars' in json_resp['comments'][0]['avatar_url']
assert json_resp['total_count'] == 1
def test_can_post(self):
resp = self.client.post(self.endpoint, {
'comment': 'new comment'
})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['comment'] == 'new comment'
assert 'avatars' in json_resp['avatar_url']
def test_can_post_with_avatar_size(self):
resp = self.client.post(self.endpoint + '&avatar_size=20', {
'comment': 'new comment'
})
self.assertEqual(201, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['comment'] == 'new comment'
assert 'avatars' in json_resp['avatar_url']
def test_invalid_user(self):
self.logout()
self.login_as(self.admin)
resp = self.client.get(self.endpoint)
self.assertEqual(403, resp.status_code)
resp = self.client.post(self.endpoint, {
'comment': 'new comment'
})
self.assertEqual(403, resp.status_code)
def test_can_notify_participant(self):
assert len(UserNotification.objects.all()) == 0
# share repo and add participant
seafile_api.share_repo(self.repo.id, self.user.username, self.admin.username, 'rw')
file_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap_by_path(self.repo.id, self.file, False)
FileParticipant.objects.add_participant(file_uuid, self.admin.username)
resp = self.client.post(self.endpoint, {
'comment': 'new comment'
})
self.assertEqual(201, resp.status_code)
assert len(UserNotification.objects.all()) == 1
assert UserNotification.objects.all()[0].to_user == self.admin.username
|
en
| 0.70325
|
# share repo and add participant
| 2.244789
| 2
|
ex073.py
|
jgabriel1607/Python
| 0
|
6626162
|
<gh_stars>0
print('Vamos ver alguns times do Campeonato Brasileiro 2021')
times = ('Palmeiras', 'Atlético-MG', 'Fortaleza', 'Bragantino', 'Athletico-PR', 'Flamengo', 'Ceará SC',
'Atlético-GO', 'Bahia', 'Corinthians', 'Fluminense', 'Santos', 'Juventude', 'Internacional',
'Cuiabá', 'Sport Recife', 'São Paulo', 'América-MG', 'Grêmio', 'Chapecoense')
pos = 0
print('=' * 30)
print('Os cinco primeiros colocados são: ')
while True:
print(times[pos])
pos += 1
if pos >= 5:
break
pos = 16
print('=' * 30)
print('Os últimos quatro colocados são: ')
while True:
print(times[pos])
pos += 1
if pos >= 20:
break
print('=' * 30)
print('Organizando os times em ordem alfabética: ')
print(sorted(times))
print('=' * 30)
print(f'O time da Chapecoense está na posição: {times.index("Chapecoense") + 1}.')
f'''
Outra forma de resolver esse exercício de forma mais fácil.
print('=' * 30)
print(f'Lista de Times do Campeonato Brasileiro: {times}')
print('=' * 30)
print(f'Os 5 primeiros são {times[0:5]}')
print('=' * 30)
print(f'Os 4 útimos são {times[-4:]}')
print('=' * 30)
print(f'Em ordem alfabética ficam: {sorted(times)}')
print('=' * 30)
print(f'O time da Chapecoense está na posição: {times.index("Chapecoense") + 1}.')
'''
|
print('Vamos ver alguns times do Campeonato Brasileiro 2021')
times = ('Palmeiras', 'Atlético-MG', 'Fortaleza', 'Bragantino', 'Athletico-PR', 'Flamengo', 'Ceará SC',
'Atlético-GO', 'Bahia', 'Corinthians', 'Fluminense', 'Santos', 'Juventude', 'Internacional',
'Cuiabá', 'Sport Recife', 'São Paulo', 'América-MG', 'Grêmio', 'Chapecoense')
pos = 0
print('=' * 30)
print('Os cinco primeiros colocados são: ')
while True:
print(times[pos])
pos += 1
if pos >= 5:
break
pos = 16
print('=' * 30)
print('Os últimos quatro colocados são: ')
while True:
print(times[pos])
pos += 1
if pos >= 20:
break
print('=' * 30)
print('Organizando os times em ordem alfabética: ')
print(sorted(times))
print('=' * 30)
print(f'O time da Chapecoense está na posição: {times.index("Chapecoense") + 1}.')
f'''
Outra forma de resolver esse exercício de forma mais fácil.
print('=' * 30)
print(f'Lista de Times do Campeonato Brasileiro: {times}')
print('=' * 30)
print(f'Os 5 primeiros são {times[0:5]}')
print('=' * 30)
print(f'Os 4 útimos são {times[-4:]}')
print('=' * 30)
print(f'Em ordem alfabética ficam: {sorted(times)}')
print('=' * 30)
print(f'O time da Chapecoense está na posição: {times.index("Chapecoense") + 1}.')
'''
|
pt
| 0.790792
|
Outra forma de resolver esse exercício de forma mais fácil. print('=' * 30) print(f'Lista de Times do Campeonato Brasileiro: {times}') print('=' * 30) print(f'Os 5 primeiros são {times[0:5]}') print('=' * 30) print(f'Os 4 útimos são {times[-4:]}') print('=' * 30) print(f'Em ordem alfabética ficam: {sorted(times)}') print('=' * 30) print(f'O time da Chapecoense está na posição: {times.index("Chapecoense") + 1}.')
| 4.103332
| 4
|
movierecommender/datahandler/DataExtractors.py
|
apostolis1/Movie-recommender
| 0
|
6626163
|
<reponame>apostolis1/Movie-recommender
import pandas as pd
import numpy as np
from movierecommender.datahandler.DbHandler import DbHandler
from sqlalchemy import text
import pathlib
import os
root_dir = pathlib.Path(__file__).parent.parent.parent.resolve()
DATA_PATH = os.path.join(root_dir, "data/")
class BaseExtractor:
def __init__(self, tsv_file=None):
self.tsv_file = tsv_file
self.df = None
self.reader = None
def read_tsv(self):
self.df = pd.read_csv(self.tsv_file, sep='\t', header=0)
return
def get_reader(self):
chunksize = 10 ** 6
self.reader = pd.read_csv(self.tsv_file, sep='\t', header=0, chunksize=chunksize)
return
def filter_nan_from_tsv(self):
self.df.replace(r'\N', np.nan, inplace=True)
return
class TitleBasicsExtractor(BaseExtractor):
def __init__(self, tsv_file=None):
super(TitleBasicsExtractor, self).__init__(tsv_file=tsv_file)
self.filtered_df = None
return
def filter_movies_from_df(self):
is_movie = self.df["titleType"] == "movie"
self.filtered_df = self.df[is_movie]
return
def filter_columns(self):
"""tconst titleType primaryTitle originalTitle isAdult startYear endYear runtimeMinutes genres"""
columns_to_keep = ['tconst', 'primaryTitle', 'startYear', 'genres']
self.filtered_df = self.filtered_df[columns_to_keep]
return
def insert_to_db(self):
myDbHandler = DbHandler()
myDbHandler.connect()
self.filtered_df.to_sql("title_basics", myDbHandler.conn, if_exists='append', index=False)
return
def filter_and_insert(self):
"""
E2E function that handles everything from reading the tsv to inserting in the db after filtering
:return:
"""
self.read_tsv()
self.filter_nan_from_tsv()
print("Successfully replaced Nan")
self.filter_movies_from_df()
print("Successfully filtered movies")
self.filter_columns()
print("Successfully filtered columns")
self.insert_to_db()
print("Successfully inserted to db")
return
class TitleNameExtractor(BaseExtractor):
def __init__(self, tsv_file=None):
super(TitleNameExtractor, self).__init__(tsv_file=tsv_file)
self.filtered_df = None
return
def filter_columns(self):
"""tconst ordering nconst category job characters"""
columns_to_keep = ['tconst', 'nconst']
self.filtered_df = self.df[columns_to_keep]
del self.df
return
def filter_foreign_keys(self):
myDbHandler = DbHandler()
myDbHandler.connect()
tconst_ids = [row["tconst"] for row in myDbHandler.conn.execute(text("SELECT tconst FROM title_basics"))]
self.filtered_df = self.filtered_df[self.filtered_df.tconst.isin(tconst_ids)]
self.filtered_df = self.filtered_df.groupby(['tconst', 'nconst']).size().reset_index()
columns_to_keep = ['tconst', 'nconst']
self.filtered_df = self.filtered_df[columns_to_keep]
return
def insert_to_db(self):
myDbHandler = DbHandler()
myDbHandler.connect()
self.filtered_df.to_sql("title_principals", myDbHandler.conn, if_exists='append', index=False)
return
def filter_and_insert(self):
"""
E2E function that handles everything from reading the tsv to inserting in the db after filtering
We don't use self.read_tsv() here, we use the reader to break the file in chunks
:return:
"""
self.filter_nan_from_tsv()
print("Successfully replaced Nan")
self.filter_columns()
print("Successfully filtered columns")
self.filter_foreign_keys()
print("Successfully filtered foreign key constraints")
self.insert_to_db()
print("Successfully inserted to db")
return
class NameBasicsExtractor(BaseExtractor):
def __init__(self, tsv_file=None):
super(NameBasicsExtractor, self).__init__(tsv_file=tsv_file)
self.filtered_df = None
return
def filter_columns(self):
"""tconst ordering nconst category job characters"""
columns_to_keep = ['nconst', 'primaryName']
self.filtered_df = self.df[columns_to_keep]
del self.df
return
def filter_foreign_keys(self):
myDbHandler = DbHandler()
myDbHandler.connect()
nconst_ids = [row["nconst"] for row in myDbHandler.conn.execute(text("SELECT DISTINCT nconst \ "
"FROM title_principals"))]
self.filtered_df = self.filtered_df[self.filtered_df.nconst.isin(nconst_ids)]
return
def insert_to_db(self):
myDbHandler = DbHandler()
myDbHandler.connect()
self.filtered_df.to_sql("name_basics", myDbHandler.conn, if_exists='append', index=False)
return
def filter_and_insert(self):
"""
E2E function that handles everything from reading the tsv to inserting in the db after filtering
We don't use self.read_tsv() here, we use the reader to break the file in chunks
:return:
"""
self.filter_nan_from_tsv()
print("Successfully replaced Nan")
self.filter_columns()
print("Successfully filtered columns")
self.filter_foreign_keys()
print("Successfully filtered foreign key constraints")
self.insert_to_db()
print("Successfully inserted to db")
return
class TitleRatingsExtractor(BaseExtractor):
def __init__(self, tsv_file=None):
super(TitleRatingsExtractor, self).__init__(tsv_file=tsv_file)
self.filtered_df = None
return
def filter_columns(self):
"""
tconst averageRating numVotes
we need everything for ratings
:return:
"""
self.filtered_df = self.df
del self.df
return
def filter_foreign_keys(self):
"""
Keep only the tconst ids that are in the title_basics table
:return:
"""
myDbHandler = DbHandler()
myDbHandler.connect()
tconst_ids = [row["tconst"] for row in myDbHandler.conn.execute(text("SELECT tconst FROM title_basics"))]
self.filtered_df = self.filtered_df[self.filtered_df.tconst.isin(tconst_ids)]
return
def insert_to_db(self):
"""
Insert the filtered_df in the db, tablename: title_ratings
:return:
"""
myDbHandler = DbHandler()
myDbHandler.connect()
self.filtered_df.to_sql("title_ratings", myDbHandler.conn, if_exists='append', index=False)
return
def filter_and_insert(self):
"""
E2E function that handles everything from reading the tsv to inserting in the db after filtering
We don't use self.read_tsv() here, we use the reader to break the file in chunks
:return:
"""
self.filter_nan_from_tsv()
print("Successfully replaced Nan")
self.filter_columns()
print("Successfully filtered columns")
self.filter_foreign_keys()
print("Successfully filtered foreign key constraints")
self.insert_to_db()
print("Successfully inserted to db")
return
class SoupCreator:
def __init__(self):
myDbHandler = DbHandler()
keywords_list = myDbHandler.exec_select_sql_from_file(os.path.join(DATA_PATH, 'sql/select_genres_keywords.sql'))
self.keywords_df = pd.DataFrame(keywords_list, columns=['tconst', 'genres', 'keywords'])
print(self.keywords_df.head())
actors_list = myDbHandler.exec_select_sql_from_file(os.path.join(DATA_PATH, 'sql/select_actors.sql'))
self.actors_df = pd.DataFrame(actors_list, columns=['tconst', 'PrimaryName'])
print(self.actors_df.head())
self.soup_df = pd.DataFrame(columns=['tconst', 'soup'])
def group_actors(self):
soup_list = []
for index, row in self.keywords_df.iterrows():
tconst = row["tconst"]
actors_tconst_df = self.actors_df.loc[self.actors_df['tconst'] == tconst]
actors_soup = ','.join(actors_tconst_df.PrimaryName)
final_soup = ",".join([row["genres"], actors_soup, row["keywords"]])
# print(final_soup)
soup_list.append((tconst, final_soup))
self.soup_df = pd.DataFrame(data=soup_list, columns=['tconst', 'soup'])
print(self.soup_df.head())
return
def get_keywords_df(self) -> pd.DataFrame:
return self.keywords_df
def get_soup_df(self) -> pd.DataFrame:
return self.soup_df
def insert_to_db(self) -> None:
"""
Assuming the group_actors function is already called and the self.soup_df df is created, insert it
in the title_soup table
:return:
"""
myDbHandler = DbHandler()
myDbHandler.connect()
self.soup_df.to_sql("title_soup", myDbHandler.conn, if_exists='append', index=False)
print("Successfully inserted values in the db")
return
|
import pandas as pd
import numpy as np
from movierecommender.datahandler.DbHandler import DbHandler
from sqlalchemy import text
import pathlib
import os
root_dir = pathlib.Path(__file__).parent.parent.parent.resolve()
DATA_PATH = os.path.join(root_dir, "data/")
class BaseExtractor:
    """Shared scaffolding for the IMDb ``.tsv`` extractors.

    Holds the tsv path plus either a fully loaded frame (``df``) or a
    chunked reader (``reader``), depending on which loader was called.
    """

    def __init__(self, tsv_file=None):
        # Path to the tab-separated IMDb dump; nothing is read until
        # read_tsv() or get_reader() is invoked.
        self.tsv_file = tsv_file
        self.df = None
        self.reader = None

    def read_tsv(self):
        """Load the entire tsv into ``self.df`` in one pass."""
        self.df = pd.read_csv(self.tsv_file, sep='\t', header=0)

    def get_reader(self):
        """Create a chunked reader in ``self.reader`` (10**6 rows/chunk)."""
        self.reader = pd.read_csv(self.tsv_file, sep='\t', header=0,
                                  chunksize=10 ** 6)

    def filter_nan_from_tsv(self):
        """Replace IMDb's literal ``\\N`` missing-value marker with NaN, in place."""
        self.df.replace(r'\N', np.nan, inplace=True)
class TitleBasicsExtractor(BaseExtractor):
    """Filters movie rows out of ``title.basics.tsv`` and loads them into
    the ``title_basics`` table."""

    def __init__(self, tsv_file=None):
        super().__init__(tsv_file=tsv_file)
        # Populated by the filter_* steps; only this frame is ever inserted.
        self.filtered_df = None

    def filter_movies_from_df(self):
        """Keep only rows whose titleType is exactly ``movie``."""
        self.filtered_df = self.df[self.df["titleType"] == "movie"]

    def filter_columns(self):
        """Source columns: tconst titleType primaryTitle originalTitle
        isAdult startYear endYear runtimeMinutes genres -- keep the four
        the db schema needs."""
        keep = ['tconst', 'primaryTitle', 'startYear', 'genres']
        self.filtered_df = self.filtered_df[keep]

    def insert_to_db(self):
        """Append ``filtered_df`` to the ``title_basics`` table."""
        handler = DbHandler()
        handler.connect()
        self.filtered_df.to_sql("title_basics", handler.conn,
                                if_exists='append', index=False)

    def filter_and_insert(self):
        """
        E2E function that handles everything from reading the tsv to inserting in the db after filtering
        :return:
        """
        self.read_tsv()
        self.filter_nan_from_tsv()
        print("Successfully replaced Nan")
        self.filter_movies_from_df()
        print("Successfully filtered movies")
        self.filter_columns()
        print("Successfully filtered columns")
        self.insert_to_db()
        print("Successfully inserted to db")
class TitleNameExtractor(BaseExtractor):
    """Extracts (title, principal) link pairs from ``title.principals.tsv``
    and loads them into the ``title_principals`` table."""

    def __init__(self, tsv_file=None):
        super(TitleNameExtractor, self).__init__(tsv_file=tsv_file)
        # Populated by filter_columns(); only this frame is inserted.
        self.filtered_df = None
        return

    def filter_columns(self):
        """Source columns: tconst ordering nconst category job characters.

        Keep only the (tconst, nconst) link columns; drop the full frame
        to free memory (the principals dump is large).
        """
        columns_to_keep = ['tconst', 'nconst']
        self.filtered_df = self.df[columns_to_keep]
        del self.df
        return

    def filter_foreign_keys(self):
        """Drop rows whose tconst is not in ``title_basics``, then
        de-duplicate (tconst, nconst) pairs.

        Replaces the previous ``groupby(...).size().reset_index()`` dedup,
        which built a throwaway count column; dropna/drop_duplicates keeps
        the same output (groupby silently drops NaN keys and sorts them).
        """
        myDbHandler = DbHandler()
        myDbHandler.connect()
        # Set lookup: isin() membership over a set instead of a list.
        tconst_ids = {row["tconst"] for row in
                      myDbHandler.conn.execute(text("SELECT tconst FROM title_basics"))}
        self.filtered_df = self.filtered_df[self.filtered_df.tconst.isin(tconst_ids)]
        self.filtered_df = (self.filtered_df
                            .dropna(subset=['tconst', 'nconst'])
                            .drop_duplicates()
                            .sort_values(['tconst', 'nconst'])
                            .reset_index(drop=True))
        return

    def insert_to_db(self):
        """Append ``filtered_df`` to the ``title_principals`` table."""
        myDbHandler = DbHandler()
        myDbHandler.connect()
        self.filtered_df.to_sql("title_principals", myDbHandler.conn, if_exists='append', index=False)
        return

    def filter_and_insert(self):
        """
        E2E function that handles everything from reading the tsv to inserting in the db after filtering
        We don't use self.read_tsv() here, we use the reader to break the file in chunks
        :return:
        """
        self.filter_nan_from_tsv()
        print("Successfully replaced Nan")
        self.filter_columns()
        print("Successfully filtered columns")
        self.filter_foreign_keys()
        print("Successfully filtered foreign key constraints")
        self.insert_to_db()
        print("Successfully inserted to db")
        return
class NameBasicsExtractor(BaseExtractor):
    """Extracts person names from ``name.basics.tsv`` and loads them into
    the ``name_basics`` table, restricted to people referenced by
    ``title_principals``."""

    def __init__(self, tsv_file=None):
        super(NameBasicsExtractor, self).__init__(tsv_file=tsv_file)
        # Populated by filter_columns(); only this frame is inserted.
        self.filtered_df = None
        return

    def filter_columns(self):
        """Keep only the person id and display name; drop the full frame
        to free memory."""
        columns_to_keep = ['nconst', 'primaryName']
        self.filtered_df = self.df[columns_to_keep]
        del self.df
        return

    def filter_foreign_keys(self):
        """Keep only people that actually appear in ``title_principals``.

        BUG FIX: the query was previously assembled from two adjacent
        string literals as ``SELECT DISTINCT nconst \\ FROM title_principals``
        -- the backslash intended as a line continuation ended up inside
        the string, producing invalid SQL.
        """
        myDbHandler = DbHandler()
        myDbHandler.connect()
        nconst_ids = [row["nconst"] for row in myDbHandler.conn.execute(
            text("SELECT DISTINCT nconst FROM title_principals"))]
        self.filtered_df = self.filtered_df[self.filtered_df.nconst.isin(nconst_ids)]
        return

    def insert_to_db(self):
        """Append ``filtered_df`` to the ``name_basics`` table."""
        myDbHandler = DbHandler()
        myDbHandler.connect()
        self.filtered_df.to_sql("name_basics", myDbHandler.conn, if_exists='append', index=False)
        return

    def filter_and_insert(self):
        """
        E2E function that handles everything from reading the tsv to inserting in the db after filtering
        We don't use self.read_tsv() here, we use the reader to break the file in chunks
        :return:
        """
        self.filter_nan_from_tsv()
        print("Successfully replaced Nan")
        self.filter_columns()
        print("Successfully filtered columns")
        self.filter_foreign_keys()
        print("Successfully filtered foreign key constraints")
        self.insert_to_db()
        print("Successfully inserted to db")
        return
class TitleRatingsExtractor(BaseExtractor):
    """Loads ``title.ratings.tsv`` into the ``title_ratings`` table,
    restricted to titles already present in ``title_basics``."""

    def __init__(self, tsv_file=None):
        super().__init__(tsv_file=tsv_file)
        self.filtered_df = None

    def filter_columns(self):
        """
        tconst averageRating numVotes
        we need everything for ratings
        :return:
        """
        self.filtered_df = self.df
        del self.df

    def filter_foreign_keys(self):
        """Keep only rows whose tconst exists in ``title_basics``."""
        handler = DbHandler()
        handler.connect()
        known_ids = [row["tconst"] for row in
                     handler.conn.execute(text("SELECT tconst FROM title_basics"))]
        self.filtered_df = self.filtered_df[self.filtered_df.tconst.isin(known_ids)]

    def insert_to_db(self):
        """Append ``filtered_df`` to the ``title_ratings`` table."""
        handler = DbHandler()
        handler.connect()
        self.filtered_df.to_sql("title_ratings", handler.conn,
                                if_exists='append', index=False)

    def filter_and_insert(self):
        """
        E2E function that handles everything from reading the tsv to inserting in the db after filtering
        We don't use self.read_tsv() here, we use the reader to break the file in chunks
        :return:
        """
        self.filter_nan_from_tsv()
        print("Successfully replaced Nan")
        self.filter_columns()
        print("Successfully filtered columns")
        self.filter_foreign_keys()
        print("Successfully filtered foreign key constraints")
        self.insert_to_db()
        print("Successfully inserted to db")
class SoupCreator:
    """Builds the per-title "soup" string (genres + actor names + keywords)
    used downstream for content-based similarity, and stores it in the
    ``title_soup`` table."""

    def __init__(self):
        myDbHandler = DbHandler()
        keywords_list = myDbHandler.exec_select_sql_from_file(os.path.join(DATA_PATH, 'sql/select_genres_keywords.sql'))
        self.keywords_df = pd.DataFrame(keywords_list, columns=['tconst', 'genres', 'keywords'])
        print(self.keywords_df.head())
        actors_list = myDbHandler.exec_select_sql_from_file(os.path.join(DATA_PATH, 'sql/select_actors.sql'))
        self.actors_df = pd.DataFrame(actors_list, columns=['tconst', 'PrimaryName'])
        print(self.actors_df.head())
        # Filled by group_actors().
        self.soup_df = pd.DataFrame(columns=['tconst', 'soup'])

    def group_actors(self):
        """Build ``soup_df``: one comma-joined string of genres, actor
        names and keywords per title."""
        rows = []
        for _, record in self.keywords_df.iterrows():
            title_id = record["tconst"]
            names = self.actors_df.loc[self.actors_df['tconst'] == title_id, 'PrimaryName']
            soup = ",".join([record["genres"], ','.join(names), record["keywords"]])
            rows.append((title_id, soup))
        self.soup_df = pd.DataFrame(data=rows, columns=['tconst', 'soup'])
        print(self.soup_df.head())

    def get_keywords_df(self) -> pd.DataFrame:
        return self.keywords_df

    def get_soup_df(self) -> pd.DataFrame:
        return self.soup_df

    def insert_to_db(self) -> None:
        """
        Assuming the group_actors function is already called and the self.soup_df df is created, insert it
        in the title_soup table
        :return:
        """
        handler = DbHandler()
        handler.connect()
        self.soup_df.to_sql("title_soup", handler.conn, if_exists='append', index=False)
        print("Successfully inserted values in the db")
|
en
| 0.782677
|
tconst titleType primaryTitle originalTitle isAdult startYear endYear runtimeMinutes genres E2E function that handles everything from reading the tsv to inserting in the db after filtering :return: tconst ordering nconst category job characters E2E function that handles everything from reading the tsv to inserting in the db after filtering We don't use self.read_tsv() here, we use the reader to break the file in chunks :return: tconst ordering nconst category job characters E2E function that handles everything from reading the tsv to inserting in the db after filtering We don't use self.read_tsv() here, we use the reader to break the file in chunks :return: tconst averageRating numVotes we need everything for ratings :return: Keep only the tconst ids that are in the title_basics table :return: Insert the filtered_df in the db, tablename: title_ratings :return: E2E function that handles everything from reading the tsv to inserting in the db after filtering We don't use self.read_tsv() here, we use the reader to break the file in chunks :return: # print(final_soup) Assuming the group_actors function is already called and the self.soup_df df is created, insert it in the title_soup table :return:
| 2.800485
| 3
|
vc_assistance/indico_vc_assistance/util.py
|
OmeGak/indico-plugins-cern
| 4
|
6626164
|
<filename>vc_assistance/indico_vc_assistance/util.py<gh_stars>1-10
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2021 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from datetime import time
from sqlalchemy.orm import joinedload, subqueryload, undefer
from indico.core.db import db
from indico.core.db.sqlalchemy.util.queries import limit_groups
from indico.modules.events import Event
from indico.modules.events.contributions import Contribution
from indico.modules.events.models.events import EventType
from indico.modules.events.requests.models.requests import Request, RequestState
from indico.modules.events.sessions import Session
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.rb.models.equipment import EquipmentType
from indico.modules.rb.models.room_features import RoomFeature
from indico.modules.rb.models.rooms import Room
from indico.modules.vc import VCRoomEventAssociation
from indico.util.caching import memoize_request
WORKING_TIME_PERIOD = (time(8, 30), time(17, 30))
def can_request_assistance(user):
    """Return True if *user* may submit a VC-assistance request
    (admin, or listed in the plugin's ``authorized`` ACL)."""
    return _is_in_acl(user, 'authorized')
def is_vc_support(user):
    """Return True if *user* belongs to the VC support team
    (admin, or listed in the plugin's ``vc_support`` ACL)."""
    return _is_in_acl(user, 'vc_support')
def _is_in_acl(user, acl):
    """Return whether *user* is an Indico admin or a member of the plugin ACL *acl*."""
    # Imported lazily to avoid a circular import with the plugin module.
    from indico_vc_assistance.plugin import VCAssistanceRequestPlugin
    if user.is_admin:
        # Admins implicitly pass every ACL check.
        return True
    return VCAssistanceRequestPlugin.settings.acls.contains_user(acl, user)
def has_vc_rooms(event):
    """True if the event, or any of its contributions/sessions, has at
    least one VC room created (hidden ones included)."""
    associations = VCRoomEventAssociation.find_for_event(event, include_hidden=True)
    return any(associations)
def has_vc_capable_rooms(event):
    """
    Check whether the event or any of its contributions and sessions has some
    vc capable room attached.

    Idiom fix: the previous version built a list of (room, room) tuples --
    which are always truthy -- so ``any`` only tested list non-emptiness
    and never short-circuited. Plain boolean generators express the same
    condition directly and stop at the first match.
    """
    capable_rooms = get_vc_capable_rooms()
    if event.room in capable_rooms:
        return True
    if any(c.room in capable_rooms for c in event.contributions):
        return True
    return any(s.room in capable_rooms or sb.room in capable_rooms
               for s in event.sessions for sb in s.blocks)
def has_vc_rooms_attached_to_capable(event):
    """True if some VC room of the event is linked to an object whose
    physical room is VC-capable."""
    capable = get_vc_capable_rooms()
    for assoc in VCRoomEventAssociation.find_for_event(event, include_hidden=True):
        room = assoc.link_object.room
        if room is not None and room in capable:
            return True
    return False
def find_requests(from_dt=None, to_dt=None, contribs_and_sessions=True):
    """Finds requests matching certain criteria.
    :param from_dt: earliest event/contribution to include
    :param to_dt: latest event/contribution to include
    :param contribs_and_sessions: whether it should return contributions and sessions or only request
    """
    # Imported lazily to avoid a circular import with the plugin definition.
    from .definition import VCAssistanceRequest
    # Only accepted VC-assistance requests on non-deleted events.
    query = Request.query.join(Event).filter(~Event.is_deleted,
                                             Request.type == VCAssistanceRequest.name,
                                             Request.state == RequestState.accepted)
    if from_dt is not None or to_dt is not None:
        query = query.filter(Event.happens_between(from_dt, to_dt))
    # We only want the latest one for each event
    query = limit_groups(query, Request, Request.event_id, Request.created_dt.desc(), 1)
    query = query.options(joinedload('event'))
    for req in query:
        event = req.event
        # Drop events starting after to_dt (happens_between presumably
        # matches by overlap -- confirm against the Event model).
        if to_dt is not None and event.start_dt > to_dt:
            continue
        if not contribs_and_sessions:
            yield req
        else:
            # get_capable yields (obj, capable, custom_room) tuples; keep
            # only the objects themselves.
            contribs = [x[0] for x in get_capable(req, get_contributions)]
            session_blocks = [x[0] for x in get_capable(req, get_session_blocks)]
            yield req, contribs, session_blocks
@memoize_request
def get_vc_capable_rooms():
    """Return the set of non-deleted rooms equipped for videoconferencing.

    An empty set is returned when the plugin has no ``room_feature``
    configured.
    """
    from indico_vc_assistance.plugin import VCAssistanceRequestPlugin
    feature = VCAssistanceRequestPlugin.settings.get('room_feature')
    if not feature:
        return set()
    has_feature = Room.available_equipment.any(
        EquipmentType.features.any(RoomFeature.name == feature.name))
    return set(Room.query.filter(~Room.is_deleted, has_feature))
def _contrib_key(contrib):
return (contrib.start_dt,
contrib.title,
contrib.friendly_id)
@memoize_request
def get_contributions(event):
    """Returns a list of contributions in rooms with VC equipment
    :return: a list of ``(contribution, capable, custom_room)`` tuples
    """
    # Scheduled contributions that are not in poster sessions: either no
    # session, or a session whose type is unset / not a poster type.
    contribs = (Contribution.query
                .with_parent(event)
                .filter(Contribution.is_scheduled)
                .filter(db.or_(Contribution.session == None,  # noqa
                               Contribution.session.has(db.or_(Session.type == None,  # noqa
                                                               Session.type.has(is_poster=False)))))
                .options(joinedload('timetable_entry').load_only('start_dt'),
                         joinedload('session_block'),
                         subqueryload('person_links'),
                         undefer('is_scheduled'))
                .all())
    # Chronological order (start_dt, then title, then friendly_id).
    all_contribs = sorted(contribs, key=_contrib_key)
    vc_capable_rooms = get_vc_capable_rooms()
    event_room = event.room
    # custom_room is set only when the contribution's room differs from the
    # event's own room (i.e. it is worth displaying separately).
    return [(c,
             c.room in vc_capable_rooms,
             c.room_name if c.room and c.room != event_room else None)
            for c in all_contribs]
@memoize_request
def get_session_blocks(event):
    """Return the event's session blocks with VC-capability info.

    :return: a list of ``(session_block, capable, custom_room)`` tuples,
        where ``capable`` is True when the block's room has VC equipment
        and ``custom_room`` is the room name when it differs from the
        event's own room.
    """
    blocks = SessionBlock.query.filter(
        SessionBlock.session.has(event=event, is_deleted=False))
    capable = get_vc_capable_rooms()
    default_room = event.room
    result = []
    for block in blocks:
        custom = block.room_name if block.room and block.room != default_room else None
        result.append((block, block.room in capable, custom))
    return result
def get_capable(req, get_contribs_or_session_blocks):
    """Gets the capable contributions/session blocks with associated vc room for a request.
    :return: list of ``contribution`` or ``session block``
    """
    if req.event.type_ == EventType.lecture:
        # Lectures never expose contributions/session blocks here.
        return []
    entries = get_contribs_or_session_blocks(req.event)
    # entry == (obj, capable, custom_room); keep capable entries that
    # actually have VC rooms attached.
    return [entry for entry in entries if entry[1] and entry[0].vc_room_associations]
def start_time_within_working_hours(event):
    """True if the event's local start time falls inside WORKING_TIME_PERIOD."""
    earliest, latest = WORKING_TIME_PERIOD
    return earliest <= event.start_dt_local.time() <= latest
|
<filename>vc_assistance/indico_vc_assistance/util.py<gh_stars>1-10
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2021 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from datetime import time
from sqlalchemy.orm import joinedload, subqueryload, undefer
from indico.core.db import db
from indico.core.db.sqlalchemy.util.queries import limit_groups
from indico.modules.events import Event
from indico.modules.events.contributions import Contribution
from indico.modules.events.models.events import EventType
from indico.modules.events.requests.models.requests import Request, RequestState
from indico.modules.events.sessions import Session
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.rb.models.equipment import EquipmentType
from indico.modules.rb.models.room_features import RoomFeature
from indico.modules.rb.models.rooms import Room
from indico.modules.vc import VCRoomEventAssociation
from indico.util.caching import memoize_request
WORKING_TIME_PERIOD = (time(8, 30), time(17, 30))
def can_request_assistance(user):
"""Check if a user can request VC assistance"""
return _is_in_acl(user, 'authorized')
def is_vc_support(user):
"""Check if a user is VC support"""
return _is_in_acl(user, 'vc_support')
def _is_in_acl(user, acl):
from indico_vc_assistance.plugin import VCAssistanceRequestPlugin
if user.is_admin:
return True
return VCAssistanceRequestPlugin.settings.acls.contains_user(acl, user)
def has_vc_rooms(event):
"""
Check whether the event or any of its contributions and sessions has some
vc room created.
"""
return any(VCRoomEventAssociation.find_for_event(event, include_hidden=True))
def has_vc_capable_rooms(event):
"""
Check whether the event or any of its contributions and sessions has some
vc capable room attached.
"""
capable_rooms = get_vc_capable_rooms()
return (event.room in capable_rooms
or any(c.room for c in event.contributions if c.room in capable_rooms)
or any([(s.room, sb.room) for s in event.sessions for sb in s.blocks
if sb.room in capable_rooms or s.room in capable_rooms]))
def has_vc_rooms_attached_to_capable(event):
"""Check whether the event or any of its contributions and sessions has some
vc room created and those are linked to a vc capable room.
"""
return any(vc for vc in VCRoomEventAssociation.find_for_event(event, include_hidden=True)
if vc.link_object.room is not None and vc.link_object.room in get_vc_capable_rooms())
def find_requests(from_dt=None, to_dt=None, contribs_and_sessions=True):
"""Finds requests matching certain criteria.
:param from_dt: earliest event/contribution to include
:param to_dt: latest event/contribution to include
:param contribs_and_sessions: whether it should return contributions and sessions or only request
"""
from .definition import VCAssistanceRequest
query = Request.query.join(Event).filter(~Event.is_deleted,
Request.type == VCAssistanceRequest.name,
Request.state == RequestState.accepted)
if from_dt is not None or to_dt is not None:
query = query.filter(Event.happens_between(from_dt, to_dt))
# We only want the latest one for each event
query = limit_groups(query, Request, Request.event_id, Request.created_dt.desc(), 1)
query = query.options(joinedload('event'))
for req in query:
event = req.event
if to_dt is not None and event.start_dt > to_dt:
continue
if not contribs_and_sessions:
yield req
else:
contribs = [x[0] for x in get_capable(req, get_contributions)]
session_blocks = [x[0] for x in get_capable(req, get_session_blocks)]
yield req, contribs, session_blocks
@memoize_request
def get_vc_capable_rooms():
"""Returns a list of rooms with VC equipment"""
from indico_vc_assistance.plugin import VCAssistanceRequestPlugin
feature = VCAssistanceRequestPlugin.settings.get('room_feature')
if not feature:
return set()
feature_criterion = Room.available_equipment.any(EquipmentType.features.any(RoomFeature.name == feature.name))
return set(Room.query.filter(~Room.is_deleted, feature_criterion))
def _contrib_key(contrib):
return (contrib.start_dt,
contrib.title,
contrib.friendly_id)
@memoize_request
def get_contributions(event):
"""Returns a list of contributions in rooms with VC equipment
:return: a list of ``(contribution, capable, custom_room)`` tuples
"""
contribs = (Contribution.query
.with_parent(event)
.filter(Contribution.is_scheduled)
.filter(db.or_(Contribution.session == None, # noqa
Contribution.session.has(db.or_(Session.type == None, # noqa
Session.type.has(is_poster=False)))))
.options(joinedload('timetable_entry').load_only('start_dt'),
joinedload('session_block'),
subqueryload('person_links'),
undefer('is_scheduled'))
.all())
all_contribs = sorted(contribs, key=_contrib_key)
vc_capable_rooms = get_vc_capable_rooms()
event_room = event.room
return [(c,
c.room in vc_capable_rooms,
c.room_name if c.room and c.room != event_room else None)
for c in all_contribs]
@memoize_request
def get_session_blocks(event):
"""Returns a list of contributions in rooms with VC equipment
:return: a list of ``(contribution, capable, custom_room)`` tuples
"""
session_blocks = (SessionBlock.query
.filter(SessionBlock.session.has(event=event, is_deleted=False)))
vc_capable_rooms = get_vc_capable_rooms()
event_room = event.room
return [(sb,
sb.room in vc_capable_rooms,
sb.room_name if sb.room and sb.room != event_room else None)
for sb in session_blocks]
def get_capable(req, get_contribs_or_session_blocks):
"""Gets the capable contributions/session blocks with associated vc room for a request.
:return: list of ``contribution`` or ``session block``
"""
if req.event.type_ == EventType.lecture:
return []
return [x for x in get_contribs_or_session_blocks(req.event) if x[1] and x[0].vc_room_associations]
def start_time_within_working_hours(event):
return WORKING_TIME_PERIOD[0] <= event.start_dt_local.time() <= WORKING_TIME_PERIOD[1]
|
en
| 0.879485
|
# This file is part of the CERN Indico plugins. # Copyright (C) 2014 - 2021 CERN # # The CERN Indico plugins are free software; you can redistribute # them and/or modify them under the terms of the MIT License; see # the LICENSE file for more details. Check if a user can request VC assistance Check if a user is VC support Check whether the event or any of its contributions and sessions has some vc room created. Check whether the event or any of its contributions and sessions has some vc capable room attached. Check whether the event or any of its contributions and sessions has some vc room created and those are linked to a vc capable room. Finds requests matching certain criteria. :param from_dt: earliest event/contribution to include :param to_dt: latest event/contribution to include :param contribs_and_sessions: whether it should return contributions and sessions or only request # We only want the latest one for each event Returns a list of rooms with VC equipment Returns a list of contributions in rooms with VC equipment :return: a list of ``(contribution, capable, custom_room)`` tuples # noqa # noqa Returns a list of contributions in rooms with VC equipment :return: a list of ``(contribution, capable, custom_room)`` tuples Gets the capable contributions/session blocks with associated vc room for a request. :return: list of ``contribution`` or ``session block``
| 2.096877
| 2
|
scripts/sources/S_PredictAssess.py
|
dpopadic/arpmRes
| 6
|
6626165
|
<filename>scripts/sources/S_PredictAssess.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_PredictAssess [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_PredictAssess&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-predictor-assess).
# ## Prepare the environment
# +
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
import numpy as np
from numpy import arange, ones, zeros, mean, sqrt
from numpy.random import randint, permutation
from scipy.stats import norm
import matplotlib.pyplot as plt
plt.style.use('seaborn')
from OrdLeastSquareFPNReg import OrdLeastSquareFPNReg
from RelEntropyMultivariateNormal import RelEntropyMultivariateNormal
# input parameters
t_ = 1000 # len of time series
beta = 1 # real value of beta
alpha = 0 # real value of alpha
sigma2 = 4 # real value of sigma
lsub = 200 # len of sub-samples
# -
# ## Generate simulations of factor
Z = norm.rvs(0, 1, [1, t_])
# ## Generate time series of residuals
U = norm.rvs(0, sqrt(sigma2), [1, t_])
# ## Compute simulations of target variable and time series
# Linear factor model: I = alpha + beta*Z + U
I = U + alpha + beta*Z
# ## Reshuffle the training set
# +
# Random permutation so the in-/out-of-sample splits below are not ordered.
perm = permutation(arange(t_))
I_perm = I[0,perm].reshape(1,-1)
Z_perm = Z[0,perm].reshape(1,-1)
# number of samples
k_ = int(t_ / lsub)
MLobj = zeros((1, k_))  # per-fold score of the maximum likelihood predictor
NonSobj = zeros((1, k_))  # per-fold score of the nonsensical predictor
t_vec = range(t_)
# Cross-validation-style loop: for each fold, estimate the predictor on the
# in-sample observations and on the complementary out-of-sample observations,
# then score the in-sample fit by its relative entropy from the
# out-of-sample one.
for m in range(k_):
    t_in = arange(m*lsub, (m + 1)*lsub)  # in-sample observations
    t_out = np.setdiff1d(t_vec, t_in)  # out-of-sample observations
    # extract sub-samples
    I_in = I_perm[0,t_in].reshape(1,-1)
    I_out = I_perm[0,t_out].reshape(1,-1)
    Z_in = Z_perm[0,t_in].reshape(1,-1)
    Z_out = Z_perm[0,t_out].reshape(1,-1)
    # set flat flexible probabilities
    sub_t = I_in.shape[1]
    p = ones((1, sub_t)) / sub_t
    csub_t = I_out.shape[1]
    c_p = ones((1, csub_t)) / csub_t
    # maximum likelihood predictor (OLS with flexible probabilities)
    alpha_OLSFP, beta_OLSFP, s2_OLSFP,_ = OrdLeastSquareFPNReg(I_in, Z_in, p)
    c_alpha_OLSFP, c_beta_OLSFP, c_s2_OLSFP,_= OrdLeastSquareFPNReg(I_out, Z_out, c_p)
    mu = alpha_OLSFP + beta_OLSFP*Z[0,-1]
    c_mu = c_alpha_OLSFP + c_beta_OLSFP*Z[0,-1]
    MLobj[0,m] = RelEntropyMultivariateNormal(mu, s2_OLSFP, c_mu, c_s2_OLSFP)
    # nonsensical predictor: "estimates" built from arbitrary products of
    # single observations, used as a baseline
    alpha_cap = 0
    beta_cap = I_in[0,-1]*Z_in[0,0]
    sigma2_cap = I_in[0,-1]**2*I_in[0,0] ** 2
    c_alpha_cap = 0
    c_beta_cap = I_out[0,-1]*Z_out[0,0]
    c_sigma2_cap = I_out[0,-1] ** 2*I_out[0,0] ** 2
    mu = alpha_cap + beta_cap*Z[0,-1]
    c_mu = c_alpha_cap + c_beta_cap*Z[0,-1]
    NonSobj[0,m] = RelEntropyMultivariateNormal(np.atleast_1d(mu), np.atleast_2d(sigma2_cap), np.atleast_1d(c_mu),
                                                np.atleast_2d(c_sigma2_cap))
vML = mean(MLobj)  # average score of the ML predictor across folds
vNonS = mean(NonSobj)  # average score of the nonsensical predictor
|
<filename>scripts/sources/S_PredictAssess.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_PredictAssess [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_PredictAssess&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-predictor-assess).
# ## Prepare the environment
# +
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
import numpy as np
from numpy import arange, ones, zeros, mean, sqrt
from numpy.random import randint, permutation
from scipy.stats import norm
import matplotlib.pyplot as plt
plt.style.use('seaborn')
from OrdLeastSquareFPNReg import OrdLeastSquareFPNReg
from RelEntropyMultivariateNormal import RelEntropyMultivariateNormal
# input parameters
t_ = 1000 # len of time series
beta = 1 # real value of beta
alpha = 0 # real value of alpha
sigma2 = 4 # real value of sigma
lsub = 200 # len of sub-samples
# -
# ## Generate simulations of factor
Z = norm.rvs(0, 1, [1, t_])
# ## Generate time series of residuals
U = norm.rvs(0, sqrt(sigma2), [1, t_])
# ## Compute simulations of target variable and time series
I = U + alpha + beta*Z
# ## Reshuffle the training set
# +
perm = permutation(arange(t_))
I_perm = I[0,perm].reshape(1,-1)
Z_perm = Z[0,perm].reshape(1,-1)
# number of samples
k_ = int(t_ / lsub)
MLobj = zeros((1, k_))
NonSobj = zeros((1, k_))
t_vec = range(t_)
for m in range(k_):
t_in = arange(m*lsub, (m + 1)*lsub) # in-sample observations
t_out = np.setdiff1d(t_vec, t_in) # out-of-sample observations
# extract sub-samples
I_in = I_perm[0,t_in].reshape(1,-1)
I_out = I_perm[0,t_out].reshape(1,-1)
Z_in = Z_perm[0,t_in].reshape(1,-1)
Z_out = Z_perm[0,t_out].reshape(1,-1)
# set flat flexible probabilities
sub_t = I_in.shape[1]
p = ones((1, sub_t)) / sub_t
csub_t = I_out.shape[1]
c_p = ones((1, csub_t)) / csub_t
# maximum likelihood predictor
alpha_OLSFP, beta_OLSFP, s2_OLSFP,_ = OrdLeastSquareFPNReg(I_in, Z_in, p)
c_alpha_OLSFP, c_beta_OLSFP, c_s2_OLSFP,_= OrdLeastSquareFPNReg(I_out, Z_out, c_p)
mu = alpha_OLSFP + beta_OLSFP*Z[0,-1]
c_mu = c_alpha_OLSFP + c_beta_OLSFP*Z[0,-1]
MLobj[0,m] = RelEntropyMultivariateNormal(mu, s2_OLSFP, c_mu, c_s2_OLSFP)
# nonsensical predictor
alpha_cap = 0
beta_cap = I_in[0,-1]*Z_in[0,0]
sigma2_cap = I_in[0,-1]**2*I_in[0,0] ** 2
c_alpha_cap = 0
c_beta_cap = I_out[0,-1]*Z_out[0,0]
c_sigma2_cap = I_out[0,-1] ** 2*I_out[0,0] ** 2
mu = alpha_cap + beta_cap*Z[0,-1]
c_mu = c_alpha_cap + c_beta_cap*Z[0,-1]
NonSobj[0,m] = RelEntropyMultivariateNormal(np.atleast_1d(mu), np.atleast_2d(sigma2_cap), np.atleast_1d(c_mu),
np.atleast_2d(c_sigma2_cap))
vML = mean(MLobj)
vNonS = mean(NonSobj)
|
en
| 0.533549
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.4' # jupytext_version: 1.1.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # S_PredictAssess [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_PredictAssess&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-predictor-assess). # ## Prepare the environment # + # input parameters # len of time series # real value of beta # real value of alpha # real value of sigma # len of sub-samples # - # ## Generate simulations of factor # ## Generate time series of residuals # ## Compute simulations of target variable and time series # ## Reshuffle the training set # + # number of samples # in-sample observations # out-of-sample observations # extract sub-samples # set flat flexible probabilities # maximum likelihood predictor # nonsensical predictor
| 1.924883
| 2
|
verres/architecture/layers/regu_relu.py
|
csxeba/Verres
| 0
|
6626166
|
import tensorflow as tf
class TargetedL2(tf.keras.regularizers.Regularizer):
    """Penalizes a weight tensor by its ``tf.norm`` distance from a fixed
    target value, pulling the weights toward ``target_value``."""

    def __init__(self, target_value, ord="euclidean"):
        # Value the regularized weights are pulled toward.
        self.target_value = target_value
        # Norm order forwarded to tf.norm (e.g. "euclidean", 1, np.inf).
        self.ord = ord

    def __call__(self, x):
        return tf.norm(self.target_value - x, ord=self.ord)

    def get_config(self):
        # Required by the Keras Regularizer API so that models using this
        # regularizer can be serialized/deserialized.
        return {"target_value": self.target_value, "ord": self.ord}
class LinearPReLUInitializer(tf.keras.initializers.Initializer):
    """Initializes every PReLU alpha to -1.

    With alpha == -1 a PReLU computes x for x > 0 and -x otherwise, i.e.
    |x|; presumably the intended "linear-like" starting point -- TODO
    confirm the naming intent with the author.
    """
    def __call__(self, shape, dtype=None, partition_info=None):
        # partition_info is accepted for Keras initializer API
        # compatibility and ignored.
        return -tf.keras.backend.ones(shape, dtype)
class ReguReLU(tf.keras.layers.PReLU):
    """PReLU variant whose alpha weights start at -1 and are regularized
    toward -1 via a targeted norm penalty."""
    # NOTE(review): never read or incremented inside this class --
    # presumably meant for unique layer naming; confirm against callers.
    name_counter = 1

    def __init__(self, regularization_ord="euclidean", **prelu_kwargs):
        # regularization_ord: norm order forwarded to tf.norm inside TargetedL2.
        super().__init__(alpha_initializer=LinearPReLUInitializer(),
                         alpha_regularizer=TargetedL2(target_value=-1, ord=regularization_ord),
                         **prelu_kwargs)
|
import tensorflow as tf
class TargetedL2(tf.keras.regularizers.Regularizer):
def __init__(self, target_value, ord="euclidean"):
self.target_value = target_value
self.ord = ord
def __call__(self, x):
return tf.norm(self.target_value - x, ord=self.ord)
class LinearPReLUInitializer(tf.keras.initializers.Initializer):
def __call__(self, shape, dtype=None, partition_info=None):
return -tf.keras.backend.ones(shape, dtype)
class ReguReLU(tf.keras.layers.PReLU):
name_counter = 1
def __init__(self, regularization_ord="euclidean", **prelu_kwargs):
super().__init__(alpha_initializer=LinearPReLUInitializer(),
alpha_regularizer=TargetedL2(target_value=-1, ord=regularization_ord),
**prelu_kwargs)
|
none
| 1
| 2.626847
| 3
|
|
datasets/otbdataset.py
|
bit-ml/sftrackpp
| 1
|
6626167
|
import numpy as np
from ltr.data.image_loader import jpeg4py_loader
from pytracking.evaluation.data import BaseDataset, Sequence, SequenceList
from pytracking.utils.load_text import load_text
class OTBDataset(BaseDataset):
""" OTB-2015 dataset
Publication:
Object Tracking Benchmark
<NAME>, <NAME>, and <NAME>
TPAMI, 2015
http://faculty.ucmerced.edu/mhyang/papers/pami15_tracking_benchmark.pdf
Download the dataset from http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html
"""
    def __init__(self):
        """Resolve the dataset root from the environment settings and
        build the static per-sequence metadata list."""
        super().__init__()
        # env_settings is provided by BaseDataset -- presumably populated
        # in its __init__; confirm against pytracking's BaseDataset.
        self.base_path = self.env_settings.otb_path
        self.sequence_info_list = self._get_sequence_info_list()
def get_sequence_list(self):
return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])
def _construct_sequence(self, sequence_info):
sequence_path = sequence_info['path']
nz = sequence_info['nz']
ext = sequence_info['ext']
start_frame = sequence_info['startFrame']
end_frame = sequence_info['endFrame']
init_omit = 0
if 'initOmit' in sequence_info:
init_omit = sequence_info['initOmit']
frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,
sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]
anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])
# NOTE: OTB has some weird annos which panda cannot handle
ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy')
return Sequence(sequence_info['name'], frames, 'otb', ground_truth_rect[init_omit:,:],
object_class=sequence_info['object_class'])
def __len__(self):
return len(self.sequence_info_list)
def _get_sequence_info_list(self):
sequence_info_list = [
{"name": "Basketball", "path": "Basketball/img", "startFrame": 1, "endFrame": 725, "nz": 4, "ext": "jpg", "anno_path": "Basketball/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Biker", "path": "Biker/img", "startFrame": 1, "endFrame": 142, "nz": 4, "ext": "jpg", "anno_path": "Biker/groundtruth_rect.txt",
"object_class": "person head"},
{"name": "Bird1", "path": "Bird1/img", "startFrame": 1, "endFrame": 408, "nz": 4, "ext": "jpg", "anno_path": "Bird1/groundtruth_rect.txt",
"object_class": "bird"},
{"name": "Bird2", "path": "Bird2/img", "startFrame": 1, "endFrame": 99, "nz": 4, "ext": "jpg", "anno_path": "Bird2/groundtruth_rect.txt",
"object_class": "bird"},
{"name": "BlurBody", "path": "BlurBody/img", "startFrame": 1, "endFrame": 334, "nz": 4, "ext": "jpg", "anno_path": "BlurBody/groundtruth_rect.txt",
"object_class": "person"},
{"name": "BlurCar1", "path": "BlurCar1/img", "startFrame": 247, "endFrame": 988, "nz": 4, "ext": "jpg", "anno_path": "BlurCar1/groundtruth_rect.txt",
"object_class": "car"},
{"name": "BlurCar2", "path": "BlurCar2/img", "startFrame": 1, "endFrame": 585, "nz": 4, "ext": "jpg", "anno_path": "BlurCar2/groundtruth_rect.txt",
"object_class": "car"},
{"name": "BlurCar3", "path": "BlurCar3/img", "startFrame": 3, "endFrame": 359, "nz": 4, "ext": "jpg", "anno_path": "BlurCar3/groundtruth_rect.txt",
"object_class": "car"},
{"name": "BlurCar4", "path": "BlurCar4/img", "startFrame": 18, "endFrame": 397, "nz": 4, "ext": "jpg", "anno_path": "BlurCar4/groundtruth_rect.txt",
"object_class": "car"},
{"name": "BlurFace", "path": "BlurFace/img", "startFrame": 1, "endFrame": 493, "nz": 4, "ext": "jpg", "anno_path": "BlurFace/groundtruth_rect.txt",
"object_class": "face"},
{"name": "BlurOwl", "path": "BlurOwl/img", "startFrame": 1, "endFrame": 631, "nz": 4, "ext": "jpg", "anno_path": "BlurOwl/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Board", "path": "Board/img", "startFrame": 1, "endFrame": 698, "nz": 5, "ext": "jpg", "anno_path": "Board/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Bolt", "path": "Bolt/img", "startFrame": 1, "endFrame": 350, "nz": 4, "ext": "jpg", "anno_path": "Bolt/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Bolt2", "path": "Bolt2/img", "startFrame": 1, "endFrame": 293, "nz": 4, "ext": "jpg", "anno_path": "Bolt2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Box", "path": "Box/img", "startFrame": 1, "endFrame": 1161, "nz": 4, "ext": "jpg", "anno_path": "Box/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Boy", "path": "Boy/img", "startFrame": 1, "endFrame": 602, "nz": 4, "ext": "jpg", "anno_path": "Boy/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Car1", "path": "Car1/img", "startFrame": 1, "endFrame": 1020, "nz": 4, "ext": "jpg", "anno_path": "Car1/groundtruth_rect.txt",
"object_class": "car"},
{"name": "Car2", "path": "Car2/img", "startFrame": 1, "endFrame": 913, "nz": 4, "ext": "jpg", "anno_path": "Car2/groundtruth_rect.txt",
"object_class": "car"},
{"name": "Car24", "path": "Car24/img", "startFrame": 1, "endFrame": 3059, "nz": 4, "ext": "jpg", "anno_path": "Car24/groundtruth_rect.txt",
"object_class": "car"},
{"name": "Car4", "path": "Car4/img", "startFrame": 1, "endFrame": 659, "nz": 4, "ext": "jpg", "anno_path": "Car4/groundtruth_rect.txt",
"object_class": "car"},
{"name": "CarDark", "path": "CarDark/img", "startFrame": 1, "endFrame": 393, "nz": 4, "ext": "jpg", "anno_path": "CarDark/groundtruth_rect.txt",
"object_class": "car"},
{"name": "CarScale", "path": "CarScale/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "CarScale/groundtruth_rect.txt",
"object_class": "car"},
{"name": "ClifBar", "path": "ClifBar/img", "startFrame": 1, "endFrame": 472, "nz": 4, "ext": "jpg", "anno_path": "ClifBar/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Coke", "path": "Coke/img", "startFrame": 1, "endFrame": 291, "nz": 4, "ext": "jpg", "anno_path": "Coke/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Couple", "path": "Couple/img", "startFrame": 1, "endFrame": 140, "nz": 4, "ext": "jpg", "anno_path": "Couple/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Coupon", "path": "Coupon/img", "startFrame": 1, "endFrame": 327, "nz": 4, "ext": "jpg", "anno_path": "Coupon/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Crossing", "path": "Crossing/img", "startFrame": 1, "endFrame": 120, "nz": 4, "ext": "jpg", "anno_path": "Crossing/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Crowds", "path": "Crowds/img", "startFrame": 1, "endFrame": 347, "nz": 4, "ext": "jpg", "anno_path": "Crowds/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Dancer", "path": "Dancer/img", "startFrame": 1, "endFrame": 225, "nz": 4, "ext": "jpg", "anno_path": "Dancer/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Dancer2", "path": "Dancer2/img", "startFrame": 1, "endFrame": 150, "nz": 4, "ext": "jpg", "anno_path": "Dancer2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "David", "path": "David/img", "startFrame": 300, "endFrame": 770, "nz": 4, "ext": "jpg", "anno_path": "David/groundtruth_rect.txt",
"object_class": "face"},
{"name": "David2", "path": "David2/img", "startFrame": 1, "endFrame": 537, "nz": 4, "ext": "jpg", "anno_path": "David2/groundtruth_rect.txt",
"object_class": "face"},
{"name": "David3", "path": "David3/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "David3/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Deer", "path": "Deer/img", "startFrame": 1, "endFrame": 71, "nz": 4, "ext": "jpg", "anno_path": "Deer/groundtruth_rect.txt",
"object_class": "mammal"},
{"name": "Diving", "path": "Diving/img", "startFrame": 1, "endFrame": 215, "nz": 4, "ext": "jpg", "anno_path": "Diving/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Dog", "path": "Dog/img", "startFrame": 1, "endFrame": 127, "nz": 4, "ext": "jpg", "anno_path": "Dog/groundtruth_rect.txt",
"object_class": "dog"},
{"name": "Dog1", "path": "Dog1/img", "startFrame": 1, "endFrame": 1350, "nz": 4, "ext": "jpg", "anno_path": "Dog1/groundtruth_rect.txt",
"object_class": "dog"},
{"name": "Doll", "path": "Doll/img", "startFrame": 1, "endFrame": 3872, "nz": 4, "ext": "jpg", "anno_path": "Doll/groundtruth_rect.txt",
"object_class": "other"},
{"name": "DragonBaby", "path": "DragonBaby/img", "startFrame": 1, "endFrame": 113, "nz": 4, "ext": "jpg", "anno_path": "DragonBaby/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Dudek", "path": "Dudek/img", "startFrame": 1, "endFrame": 1145, "nz": 4, "ext": "jpg", "anno_path": "Dudek/groundtruth_rect.txt",
"object_class": "face"},
{"name": "FaceOcc1", "path": "FaceOcc1/img", "startFrame": 1, "endFrame": 892, "nz": 4, "ext": "jpg", "anno_path": "FaceOcc1/groundtruth_rect.txt",
"object_class": "face"},
{"name": "FaceOcc2", "path": "FaceOcc2/img", "startFrame": 1, "endFrame": 812, "nz": 4, "ext": "jpg", "anno_path": "FaceOcc2/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Fish", "path": "Fish/img", "startFrame": 1, "endFrame": 476, "nz": 4, "ext": "jpg", "anno_path": "Fish/groundtruth_rect.txt",
"object_class": "other"},
{"name": "FleetFace", "path": "FleetFace/img", "startFrame": 1, "endFrame": 707, "nz": 4, "ext": "jpg", "anno_path": "FleetFace/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Football", "path": "Football/img", "startFrame": 1, "endFrame": 362, "nz": 4, "ext": "jpg", "anno_path": "Football/groundtruth_rect.txt",
"object_class": "person head"},
{"name": "Football1", "path": "Football1/img", "startFrame": 1, "endFrame": 74, "nz": 4, "ext": "jpg", "anno_path": "Football1/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Freeman1", "path": "Freeman1/img", "startFrame": 1, "endFrame": 326, "nz": 4, "ext": "jpg", "anno_path": "Freeman1/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Freeman3", "path": "Freeman3/img", "startFrame": 1, "endFrame": 460, "nz": 4, "ext": "jpg", "anno_path": "Freeman3/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Freeman4", "path": "Freeman4/img", "startFrame": 1, "endFrame": 283, "nz": 4, "ext": "jpg", "anno_path": "Freeman4/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Girl", "path": "Girl/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "Girl/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Girl2", "path": "Girl2/img", "startFrame": 1, "endFrame": 1500, "nz": 4, "ext": "jpg", "anno_path": "Girl2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Gym", "path": "Gym/img", "startFrame": 1, "endFrame": 767, "nz": 4, "ext": "jpg", "anno_path": "Gym/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human2", "path": "Human2/img", "startFrame": 1, "endFrame": 1128, "nz": 4, "ext": "jpg", "anno_path": "Human2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human3", "path": "Human3/img", "startFrame": 1, "endFrame": 1698, "nz": 4, "ext": "jpg", "anno_path": "Human3/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human4_2", "path": "Human4/img", "startFrame": 1, "endFrame": 667, "nz": 4, "ext": "jpg", "anno_path": "Human4/groundtruth_rect.2.txt",
"object_class": "person"},
{"name": "Human5", "path": "Human5/img", "startFrame": 1, "endFrame": 713, "nz": 4, "ext": "jpg", "anno_path": "Human5/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human6", "path": "Human6/img", "startFrame": 1, "endFrame": 792, "nz": 4, "ext": "jpg", "anno_path": "Human6/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human7", "path": "Human7/img", "startFrame": 1, "endFrame": 250, "nz": 4, "ext": "jpg", "anno_path": "Human7/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human8", "path": "Human8/img", "startFrame": 1, "endFrame": 128, "nz": 4, "ext": "jpg", "anno_path": "Human8/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human9", "path": "Human9/img", "startFrame": 1, "endFrame": 305, "nz": 4, "ext": "jpg", "anno_path": "Human9/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Ironman", "path": "Ironman/img", "startFrame": 1, "endFrame": 166, "nz": 4, "ext": "jpg", "anno_path": "Ironman/groundtruth_rect.txt",
"object_class": "person head"},
{"name": "Jogging_1", "path": "Jogging/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "Jogging/groundtruth_rect.1.txt",
"object_class": "person"},
{"name": "Jogging_2", "path": "Jogging/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "Jogging/groundtruth_rect.2.txt",
"object_class": "person"},
{"name": "Jump", "path": "Jump/img", "startFrame": 1, "endFrame": 122, "nz": 4, "ext": "jpg", "anno_path": "Jump/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Jumping", "path": "Jumping/img", "startFrame": 1, "endFrame": 313, "nz": 4, "ext": "jpg", "anno_path": "Jumping/groundtruth_rect.txt",
"object_class": "face"},
{"name": "KiteSurf", "path": "KiteSurf/img", "startFrame": 1, "endFrame": 84, "nz": 4, "ext": "jpg", "anno_path": "KiteSurf/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Lemming", "path": "Lemming/img", "startFrame": 1, "endFrame": 1336, "nz": 4, "ext": "jpg", "anno_path": "Lemming/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Liquor", "path": "Liquor/img", "startFrame": 1, "endFrame": 1741, "nz": 4, "ext": "jpg", "anno_path": "Liquor/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Man", "path": "Man/img", "startFrame": 1, "endFrame": 134, "nz": 4, "ext": "jpg", "anno_path": "Man/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Matrix", "path": "Matrix/img", "startFrame": 1, "endFrame": 100, "nz": 4, "ext": "jpg", "anno_path": "Matrix/groundtruth_rect.txt",
"object_class": "person head"},
{"name": "Mhyang", "path": "Mhyang/img", "startFrame": 1, "endFrame": 1490, "nz": 4, "ext": "jpg", "anno_path": "Mhyang/groundtruth_rect.txt",
"object_class": "face"},
{"name": "MotorRolling", "path": "MotorRolling/img", "startFrame": 1, "endFrame": 164, "nz": 4, "ext": "jpg", "anno_path": "MotorRolling/groundtruth_rect.txt",
"object_class": "vehicle"},
{"name": "MountainBike", "path": "MountainBike/img", "startFrame": 1, "endFrame": 228, "nz": 4, "ext": "jpg", "anno_path": "MountainBike/groundtruth_rect.txt",
"object_class": "bicycle"},
{"name": "Panda", "path": "Panda/img", "startFrame": 1, "endFrame": 1000, "nz": 4, "ext": "jpg", "anno_path": "Panda/groundtruth_rect.txt",
"object_class": "mammal"},
{"name": "RedTeam", "path": "RedTeam/img", "startFrame": 1, "endFrame": 1918, "nz": 4, "ext": "jpg", "anno_path": "RedTeam/groundtruth_rect.txt",
"object_class": "vehicle"},
{"name": "Rubik", "path": "Rubik/img", "startFrame": 1, "endFrame": 1997, "nz": 4, "ext": "jpg", "anno_path": "Rubik/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Shaking", "path": "Shaking/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "Shaking/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Singer1", "path": "Singer1/img", "startFrame": 1, "endFrame": 351, "nz": 4, "ext": "jpg", "anno_path": "Singer1/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Singer2", "path": "Singer2/img", "startFrame": 1, "endFrame": 366, "nz": 4, "ext": "jpg", "anno_path": "Singer2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Skater", "path": "Skater/img", "startFrame": 1, "endFrame": 160, "nz": 4, "ext": "jpg", "anno_path": "Skater/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Skater2", "path": "Skater2/img", "startFrame": 1, "endFrame": 435, "nz": 4, "ext": "jpg", "anno_path": "Skater2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Skating1", "path": "Skating1/img", "startFrame": 1, "endFrame": 400, "nz": 4, "ext": "jpg", "anno_path": "Skating1/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Skating2_1", "path": "Skating2/img", "startFrame": 1, "endFrame": 473, "nz": 4, "ext": "jpg", "anno_path": "Skating2/groundtruth_rect.1.txt",
"object_class": "person"},
{"name": "Skating2_2", "path": "Skating2/img", "startFrame": 1, "endFrame": 473, "nz": 4, "ext": "jpg", "anno_path": "Skating2/groundtruth_rect.2.txt",
"object_class": "person"},
{"name": "Skiing", "path": "Skiing/img", "startFrame": 1, "endFrame": 81, "nz": 4, "ext": "jpg", "anno_path": "Skiing/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Soccer", "path": "Soccer/img", "startFrame": 1, "endFrame": 392, "nz": 4, "ext": "jpg", "anno_path": "Soccer/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Subway", "path": "Subway/img", "startFrame": 1, "endFrame": 175, "nz": 4, "ext": "jpg", "anno_path": "Subway/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Surfer", "path": "Surfer/img", "startFrame": 1, "endFrame": 376, "nz": 4, "ext": "jpg", "anno_path": "Surfer/groundtruth_rect.txt",
"object_class": "person head"},
{"name": "Suv", "path": "Suv/img", "startFrame": 1, "endFrame": 945, "nz": 4, "ext": "jpg", "anno_path": "Suv/groundtruth_rect.txt",
"object_class": "car"},
{"name": "Sylvester", "path": "Sylvester/img", "startFrame": 1, "endFrame": 1345, "nz": 4, "ext": "jpg", "anno_path": "Sylvester/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Tiger1", "path": "Tiger1/img", "startFrame": 1, "endFrame": 354, "nz": 4, "ext": "jpg", "anno_path": "Tiger1/groundtruth_rect.txt", "initOmit": 5,
"object_class": "other"},
{"name": "Tiger2", "path": "Tiger2/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "Tiger2/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Toy", "path": "Toy/img", "startFrame": 1, "endFrame": 271, "nz": 4, "ext": "jpg", "anno_path": "Toy/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Trans", "path": "Trans/img", "startFrame": 1, "endFrame": 124, "nz": 4, "ext": "jpg", "anno_path": "Trans/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Trellis", "path": "Trellis/img", "startFrame": 1, "endFrame": 569, "nz": 4, "ext": "jpg", "anno_path": "Trellis/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Twinnings", "path": "Twinnings/img", "startFrame": 1, "endFrame": 472, "nz": 4, "ext": "jpg", "anno_path": "Twinnings/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Vase", "path": "Vase/img", "startFrame": 1, "endFrame": 271, "nz": 4, "ext": "jpg", "anno_path": "Vase/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Walking", "path": "Walking/img", "startFrame": 1, "endFrame": 412, "nz": 4, "ext": "jpg", "anno_path": "Walking/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Walking2", "path": "Walking2/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "Walking2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Woman", "path": "Woman/img", "startFrame": 1, "endFrame": 597, "nz": 4, "ext": "jpg", "anno_path": "Woman/groundtruth_rect.txt",
"object_class": "person"}
]
return sequence_info_list
|
import numpy as np
from ltr.data.image_loader import jpeg4py_loader
from pytracking.evaluation.data import BaseDataset, Sequence, SequenceList
from pytracking.utils.load_text import load_text
class OTBDataset(BaseDataset):
""" OTB-2015 dataset
Publication:
Object Tracking Benchmark
<NAME>, <NAME>, and <NAME>
TPAMI, 2015
http://faculty.ucmerced.edu/mhyang/papers/pami15_tracking_benchmark.pdf
Download the dataset from http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html
"""
    def __init__(self):
        super().__init__()
        # Root directory containing the OTB sequences (from local environment settings).
        self.base_path = self.env_settings.otb_path
        # Static per-sequence metadata (frame range, annotation file, object class, ...).
        self.sequence_info_list = self._get_sequence_info_list()
def get_sequence_list(self):
return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])
def _construct_sequence(self, sequence_info):
sequence_path = sequence_info['path']
nz = sequence_info['nz']
ext = sequence_info['ext']
start_frame = sequence_info['startFrame']
end_frame = sequence_info['endFrame']
init_omit = 0
if 'initOmit' in sequence_info:
init_omit = sequence_info['initOmit']
frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,
sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]
anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])
# NOTE: OTB has some weird annos which panda cannot handle
ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy')
return Sequence(sequence_info['name'], frames, 'otb', ground_truth_rect[init_omit:,:],
object_class=sequence_info['object_class'])
    def __len__(self):
        # Number of sequences in the dataset (100 for OTB-2015).
        return len(self.sequence_info_list)
def _get_sequence_info_list(self):
sequence_info_list = [
{"name": "Basketball", "path": "Basketball/img", "startFrame": 1, "endFrame": 725, "nz": 4, "ext": "jpg", "anno_path": "Basketball/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Biker", "path": "Biker/img", "startFrame": 1, "endFrame": 142, "nz": 4, "ext": "jpg", "anno_path": "Biker/groundtruth_rect.txt",
"object_class": "person head"},
{"name": "Bird1", "path": "Bird1/img", "startFrame": 1, "endFrame": 408, "nz": 4, "ext": "jpg", "anno_path": "Bird1/groundtruth_rect.txt",
"object_class": "bird"},
{"name": "Bird2", "path": "Bird2/img", "startFrame": 1, "endFrame": 99, "nz": 4, "ext": "jpg", "anno_path": "Bird2/groundtruth_rect.txt",
"object_class": "bird"},
{"name": "BlurBody", "path": "BlurBody/img", "startFrame": 1, "endFrame": 334, "nz": 4, "ext": "jpg", "anno_path": "BlurBody/groundtruth_rect.txt",
"object_class": "person"},
{"name": "BlurCar1", "path": "BlurCar1/img", "startFrame": 247, "endFrame": 988, "nz": 4, "ext": "jpg", "anno_path": "BlurCar1/groundtruth_rect.txt",
"object_class": "car"},
{"name": "BlurCar2", "path": "BlurCar2/img", "startFrame": 1, "endFrame": 585, "nz": 4, "ext": "jpg", "anno_path": "BlurCar2/groundtruth_rect.txt",
"object_class": "car"},
{"name": "BlurCar3", "path": "BlurCar3/img", "startFrame": 3, "endFrame": 359, "nz": 4, "ext": "jpg", "anno_path": "BlurCar3/groundtruth_rect.txt",
"object_class": "car"},
{"name": "BlurCar4", "path": "BlurCar4/img", "startFrame": 18, "endFrame": 397, "nz": 4, "ext": "jpg", "anno_path": "BlurCar4/groundtruth_rect.txt",
"object_class": "car"},
{"name": "BlurFace", "path": "BlurFace/img", "startFrame": 1, "endFrame": 493, "nz": 4, "ext": "jpg", "anno_path": "BlurFace/groundtruth_rect.txt",
"object_class": "face"},
{"name": "BlurOwl", "path": "BlurOwl/img", "startFrame": 1, "endFrame": 631, "nz": 4, "ext": "jpg", "anno_path": "BlurOwl/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Board", "path": "Board/img", "startFrame": 1, "endFrame": 698, "nz": 5, "ext": "jpg", "anno_path": "Board/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Bolt", "path": "Bolt/img", "startFrame": 1, "endFrame": 350, "nz": 4, "ext": "jpg", "anno_path": "Bolt/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Bolt2", "path": "Bolt2/img", "startFrame": 1, "endFrame": 293, "nz": 4, "ext": "jpg", "anno_path": "Bolt2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Box", "path": "Box/img", "startFrame": 1, "endFrame": 1161, "nz": 4, "ext": "jpg", "anno_path": "Box/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Boy", "path": "Boy/img", "startFrame": 1, "endFrame": 602, "nz": 4, "ext": "jpg", "anno_path": "Boy/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Car1", "path": "Car1/img", "startFrame": 1, "endFrame": 1020, "nz": 4, "ext": "jpg", "anno_path": "Car1/groundtruth_rect.txt",
"object_class": "car"},
{"name": "Car2", "path": "Car2/img", "startFrame": 1, "endFrame": 913, "nz": 4, "ext": "jpg", "anno_path": "Car2/groundtruth_rect.txt",
"object_class": "car"},
{"name": "Car24", "path": "Car24/img", "startFrame": 1, "endFrame": 3059, "nz": 4, "ext": "jpg", "anno_path": "Car24/groundtruth_rect.txt",
"object_class": "car"},
{"name": "Car4", "path": "Car4/img", "startFrame": 1, "endFrame": 659, "nz": 4, "ext": "jpg", "anno_path": "Car4/groundtruth_rect.txt",
"object_class": "car"},
{"name": "CarDark", "path": "CarDark/img", "startFrame": 1, "endFrame": 393, "nz": 4, "ext": "jpg", "anno_path": "CarDark/groundtruth_rect.txt",
"object_class": "car"},
{"name": "CarScale", "path": "CarScale/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "CarScale/groundtruth_rect.txt",
"object_class": "car"},
{"name": "ClifBar", "path": "ClifBar/img", "startFrame": 1, "endFrame": 472, "nz": 4, "ext": "jpg", "anno_path": "ClifBar/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Coke", "path": "Coke/img", "startFrame": 1, "endFrame": 291, "nz": 4, "ext": "jpg", "anno_path": "Coke/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Couple", "path": "Couple/img", "startFrame": 1, "endFrame": 140, "nz": 4, "ext": "jpg", "anno_path": "Couple/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Coupon", "path": "Coupon/img", "startFrame": 1, "endFrame": 327, "nz": 4, "ext": "jpg", "anno_path": "Coupon/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Crossing", "path": "Crossing/img", "startFrame": 1, "endFrame": 120, "nz": 4, "ext": "jpg", "anno_path": "Crossing/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Crowds", "path": "Crowds/img", "startFrame": 1, "endFrame": 347, "nz": 4, "ext": "jpg", "anno_path": "Crowds/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Dancer", "path": "Dancer/img", "startFrame": 1, "endFrame": 225, "nz": 4, "ext": "jpg", "anno_path": "Dancer/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Dancer2", "path": "Dancer2/img", "startFrame": 1, "endFrame": 150, "nz": 4, "ext": "jpg", "anno_path": "Dancer2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "David", "path": "David/img", "startFrame": 300, "endFrame": 770, "nz": 4, "ext": "jpg", "anno_path": "David/groundtruth_rect.txt",
"object_class": "face"},
{"name": "David2", "path": "David2/img", "startFrame": 1, "endFrame": 537, "nz": 4, "ext": "jpg", "anno_path": "David2/groundtruth_rect.txt",
"object_class": "face"},
{"name": "David3", "path": "David3/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "David3/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Deer", "path": "Deer/img", "startFrame": 1, "endFrame": 71, "nz": 4, "ext": "jpg", "anno_path": "Deer/groundtruth_rect.txt",
"object_class": "mammal"},
{"name": "Diving", "path": "Diving/img", "startFrame": 1, "endFrame": 215, "nz": 4, "ext": "jpg", "anno_path": "Diving/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Dog", "path": "Dog/img", "startFrame": 1, "endFrame": 127, "nz": 4, "ext": "jpg", "anno_path": "Dog/groundtruth_rect.txt",
"object_class": "dog"},
{"name": "Dog1", "path": "Dog1/img", "startFrame": 1, "endFrame": 1350, "nz": 4, "ext": "jpg", "anno_path": "Dog1/groundtruth_rect.txt",
"object_class": "dog"},
{"name": "Doll", "path": "Doll/img", "startFrame": 1, "endFrame": 3872, "nz": 4, "ext": "jpg", "anno_path": "Doll/groundtruth_rect.txt",
"object_class": "other"},
{"name": "DragonBaby", "path": "DragonBaby/img", "startFrame": 1, "endFrame": 113, "nz": 4, "ext": "jpg", "anno_path": "DragonBaby/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Dudek", "path": "Dudek/img", "startFrame": 1, "endFrame": 1145, "nz": 4, "ext": "jpg", "anno_path": "Dudek/groundtruth_rect.txt",
"object_class": "face"},
{"name": "FaceOcc1", "path": "FaceOcc1/img", "startFrame": 1, "endFrame": 892, "nz": 4, "ext": "jpg", "anno_path": "FaceOcc1/groundtruth_rect.txt",
"object_class": "face"},
{"name": "FaceOcc2", "path": "FaceOcc2/img", "startFrame": 1, "endFrame": 812, "nz": 4, "ext": "jpg", "anno_path": "FaceOcc2/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Fish", "path": "Fish/img", "startFrame": 1, "endFrame": 476, "nz": 4, "ext": "jpg", "anno_path": "Fish/groundtruth_rect.txt",
"object_class": "other"},
{"name": "FleetFace", "path": "FleetFace/img", "startFrame": 1, "endFrame": 707, "nz": 4, "ext": "jpg", "anno_path": "FleetFace/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Football", "path": "Football/img", "startFrame": 1, "endFrame": 362, "nz": 4, "ext": "jpg", "anno_path": "Football/groundtruth_rect.txt",
"object_class": "person head"},
{"name": "Football1", "path": "Football1/img", "startFrame": 1, "endFrame": 74, "nz": 4, "ext": "jpg", "anno_path": "Football1/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Freeman1", "path": "Freeman1/img", "startFrame": 1, "endFrame": 326, "nz": 4, "ext": "jpg", "anno_path": "Freeman1/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Freeman3", "path": "Freeman3/img", "startFrame": 1, "endFrame": 460, "nz": 4, "ext": "jpg", "anno_path": "Freeman3/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Freeman4", "path": "Freeman4/img", "startFrame": 1, "endFrame": 283, "nz": 4, "ext": "jpg", "anno_path": "Freeman4/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Girl", "path": "Girl/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "Girl/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Girl2", "path": "Girl2/img", "startFrame": 1, "endFrame": 1500, "nz": 4, "ext": "jpg", "anno_path": "Girl2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Gym", "path": "Gym/img", "startFrame": 1, "endFrame": 767, "nz": 4, "ext": "jpg", "anno_path": "Gym/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human2", "path": "Human2/img", "startFrame": 1, "endFrame": 1128, "nz": 4, "ext": "jpg", "anno_path": "Human2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human3", "path": "Human3/img", "startFrame": 1, "endFrame": 1698, "nz": 4, "ext": "jpg", "anno_path": "Human3/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human4_2", "path": "Human4/img", "startFrame": 1, "endFrame": 667, "nz": 4, "ext": "jpg", "anno_path": "Human4/groundtruth_rect.2.txt",
"object_class": "person"},
{"name": "Human5", "path": "Human5/img", "startFrame": 1, "endFrame": 713, "nz": 4, "ext": "jpg", "anno_path": "Human5/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human6", "path": "Human6/img", "startFrame": 1, "endFrame": 792, "nz": 4, "ext": "jpg", "anno_path": "Human6/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human7", "path": "Human7/img", "startFrame": 1, "endFrame": 250, "nz": 4, "ext": "jpg", "anno_path": "Human7/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human8", "path": "Human8/img", "startFrame": 1, "endFrame": 128, "nz": 4, "ext": "jpg", "anno_path": "Human8/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Human9", "path": "Human9/img", "startFrame": 1, "endFrame": 305, "nz": 4, "ext": "jpg", "anno_path": "Human9/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Ironman", "path": "Ironman/img", "startFrame": 1, "endFrame": 166, "nz": 4, "ext": "jpg", "anno_path": "Ironman/groundtruth_rect.txt",
"object_class": "person head"},
{"name": "Jogging_1", "path": "Jogging/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "Jogging/groundtruth_rect.1.txt",
"object_class": "person"},
{"name": "Jogging_2", "path": "Jogging/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "Jogging/groundtruth_rect.2.txt",
"object_class": "person"},
{"name": "Jump", "path": "Jump/img", "startFrame": 1, "endFrame": 122, "nz": 4, "ext": "jpg", "anno_path": "Jump/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Jumping", "path": "Jumping/img", "startFrame": 1, "endFrame": 313, "nz": 4, "ext": "jpg", "anno_path": "Jumping/groundtruth_rect.txt",
"object_class": "face"},
{"name": "KiteSurf", "path": "KiteSurf/img", "startFrame": 1, "endFrame": 84, "nz": 4, "ext": "jpg", "anno_path": "KiteSurf/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Lemming", "path": "Lemming/img", "startFrame": 1, "endFrame": 1336, "nz": 4, "ext": "jpg", "anno_path": "Lemming/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Liquor", "path": "Liquor/img", "startFrame": 1, "endFrame": 1741, "nz": 4, "ext": "jpg", "anno_path": "Liquor/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Man", "path": "Man/img", "startFrame": 1, "endFrame": 134, "nz": 4, "ext": "jpg", "anno_path": "Man/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Matrix", "path": "Matrix/img", "startFrame": 1, "endFrame": 100, "nz": 4, "ext": "jpg", "anno_path": "Matrix/groundtruth_rect.txt",
"object_class": "person head"},
{"name": "Mhyang", "path": "Mhyang/img", "startFrame": 1, "endFrame": 1490, "nz": 4, "ext": "jpg", "anno_path": "Mhyang/groundtruth_rect.txt",
"object_class": "face"},
{"name": "MotorRolling", "path": "MotorRolling/img", "startFrame": 1, "endFrame": 164, "nz": 4, "ext": "jpg", "anno_path": "MotorRolling/groundtruth_rect.txt",
"object_class": "vehicle"},
{"name": "MountainBike", "path": "MountainBike/img", "startFrame": 1, "endFrame": 228, "nz": 4, "ext": "jpg", "anno_path": "MountainBike/groundtruth_rect.txt",
"object_class": "bicycle"},
{"name": "Panda", "path": "Panda/img", "startFrame": 1, "endFrame": 1000, "nz": 4, "ext": "jpg", "anno_path": "Panda/groundtruth_rect.txt",
"object_class": "mammal"},
{"name": "RedTeam", "path": "RedTeam/img", "startFrame": 1, "endFrame": 1918, "nz": 4, "ext": "jpg", "anno_path": "RedTeam/groundtruth_rect.txt",
"object_class": "vehicle"},
{"name": "Rubik", "path": "Rubik/img", "startFrame": 1, "endFrame": 1997, "nz": 4, "ext": "jpg", "anno_path": "Rubik/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Shaking", "path": "Shaking/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "Shaking/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Singer1", "path": "Singer1/img", "startFrame": 1, "endFrame": 351, "nz": 4, "ext": "jpg", "anno_path": "Singer1/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Singer2", "path": "Singer2/img", "startFrame": 1, "endFrame": 366, "nz": 4, "ext": "jpg", "anno_path": "Singer2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Skater", "path": "Skater/img", "startFrame": 1, "endFrame": 160, "nz": 4, "ext": "jpg", "anno_path": "Skater/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Skater2", "path": "Skater2/img", "startFrame": 1, "endFrame": 435, "nz": 4, "ext": "jpg", "anno_path": "Skater2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Skating1", "path": "Skating1/img", "startFrame": 1, "endFrame": 400, "nz": 4, "ext": "jpg", "anno_path": "Skating1/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Skating2_1", "path": "Skating2/img", "startFrame": 1, "endFrame": 473, "nz": 4, "ext": "jpg", "anno_path": "Skating2/groundtruth_rect.1.txt",
"object_class": "person"},
{"name": "Skating2_2", "path": "Skating2/img", "startFrame": 1, "endFrame": 473, "nz": 4, "ext": "jpg", "anno_path": "Skating2/groundtruth_rect.2.txt",
"object_class": "person"},
{"name": "Skiing", "path": "Skiing/img", "startFrame": 1, "endFrame": 81, "nz": 4, "ext": "jpg", "anno_path": "Skiing/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Soccer", "path": "Soccer/img", "startFrame": 1, "endFrame": 392, "nz": 4, "ext": "jpg", "anno_path": "Soccer/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Subway", "path": "Subway/img", "startFrame": 1, "endFrame": 175, "nz": 4, "ext": "jpg", "anno_path": "Subway/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Surfer", "path": "Surfer/img", "startFrame": 1, "endFrame": 376, "nz": 4, "ext": "jpg", "anno_path": "Surfer/groundtruth_rect.txt",
"object_class": "person head"},
{"name": "Suv", "path": "Suv/img", "startFrame": 1, "endFrame": 945, "nz": 4, "ext": "jpg", "anno_path": "Suv/groundtruth_rect.txt",
"object_class": "car"},
{"name": "Sylvester", "path": "Sylvester/img", "startFrame": 1, "endFrame": 1345, "nz": 4, "ext": "jpg", "anno_path": "Sylvester/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Tiger1", "path": "Tiger1/img", "startFrame": 1, "endFrame": 354, "nz": 4, "ext": "jpg", "anno_path": "Tiger1/groundtruth_rect.txt", "initOmit": 5,
"object_class": "other"},
{"name": "Tiger2", "path": "Tiger2/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "Tiger2/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Toy", "path": "Toy/img", "startFrame": 1, "endFrame": 271, "nz": 4, "ext": "jpg", "anno_path": "Toy/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Trans", "path": "Trans/img", "startFrame": 1, "endFrame": 124, "nz": 4, "ext": "jpg", "anno_path": "Trans/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Trellis", "path": "Trellis/img", "startFrame": 1, "endFrame": 569, "nz": 4, "ext": "jpg", "anno_path": "Trellis/groundtruth_rect.txt",
"object_class": "face"},
{"name": "Twinnings", "path": "Twinnings/img", "startFrame": 1, "endFrame": 472, "nz": 4, "ext": "jpg", "anno_path": "Twinnings/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Vase", "path": "Vase/img", "startFrame": 1, "endFrame": 271, "nz": 4, "ext": "jpg", "anno_path": "Vase/groundtruth_rect.txt",
"object_class": "other"},
{"name": "Walking", "path": "Walking/img", "startFrame": 1, "endFrame": 412, "nz": 4, "ext": "jpg", "anno_path": "Walking/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Walking2", "path": "Walking2/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "Walking2/groundtruth_rect.txt",
"object_class": "person"},
{"name": "Woman", "path": "Woman/img", "startFrame": 1, "endFrame": 597, "nz": 4, "ext": "jpg", "anno_path": "Woman/groundtruth_rect.txt",
"object_class": "person"}
]
return sequence_info_list
|
en
| 0.665257
|
OTB-2015 dataset Publication: Object Tracking Benchmark <NAME>, <NAME>, and <NAME> TPAMI, 2015 http://faculty.ucmerced.edu/mhyang/papers/pami15_tracking_benchmark.pdf Download the dataset from http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html # NOTE: OTB has some weird annos which panda cannot handle
| 2.33147
| 2
|
evaluation/models/ordering.py
|
airKlizz/passage-ordering
| 1
|
6626168
|
<gh_stars>1-10
from training.scripts.models.bart_simple import BartForSequenceOrdering
from evaluation.model import Model
class OrderingModel(Model):
"""
Class for BART for the ordering model
"""
def __init__(
self,
name,
model_name,
tokenizer_name,
device,
quantization,
onnx,
onnx_convert_kwargs,
ordering_parameters={},
):
super().__init__(
name, BartForSequenceOrdering, model_name, tokenizer_name, device, quantization, onnx, onnx_convert_kwargs
)
self.ordering_parameters = ordering_parameters
def _predict(self, x):
x = x[0]
pt_batch = self.tokenizer(
[" </s> <s> ".join(sequences) + " </s> <s>" for sequences in x],
padding=True,
truncation=True,
max_length=self.tokenizer.max_len,
return_tensors="pt",
)
outputs = self.model.order(
input_ids=pt_batch["input_ids"].to(self.device),
attention_mask=pt_batch["attention_mask"].to(self.device),
**self.ordering_parameters,
)
for output, sequences in zip(outputs, x):
output.remove(max(output))
for i in range(len(sequences)):
if i not in output:
output.append(i)
while max(output) > len(sequences) - 1:
print(
f"INFO: Before second verification: sequences: {len(sequences)} - output: {len(output)} --- \n output:\n{output}"
)
output.remove(max(output))
assert len(output) == len(sequences), f"sequences: {sequences} - output: {output}"
return outputs
|
from training.scripts.models.bart_simple import BartForSequenceOrdering
from evaluation.model import Model
class OrderingModel(Model):
"""
Class for BART for the ordering model
"""
def __init__(
self,
name,
model_name,
tokenizer_name,
device,
quantization,
onnx,
onnx_convert_kwargs,
ordering_parameters={},
):
super().__init__(
name, BartForSequenceOrdering, model_name, tokenizer_name, device, quantization, onnx, onnx_convert_kwargs
)
self.ordering_parameters = ordering_parameters
def _predict(self, x):
x = x[0]
pt_batch = self.tokenizer(
[" </s> <s> ".join(sequences) + " </s> <s>" for sequences in x],
padding=True,
truncation=True,
max_length=self.tokenizer.max_len,
return_tensors="pt",
)
outputs = self.model.order(
input_ids=pt_batch["input_ids"].to(self.device),
attention_mask=pt_batch["attention_mask"].to(self.device),
**self.ordering_parameters,
)
for output, sequences in zip(outputs, x):
output.remove(max(output))
for i in range(len(sequences)):
if i not in output:
output.append(i)
while max(output) > len(sequences) - 1:
print(
f"INFO: Before second verification: sequences: {len(sequences)} - output: {len(output)} --- \n output:\n{output}"
)
output.remove(max(output))
assert len(output) == len(sequences), f"sequences: {sequences} - output: {output}"
return outputs
|
en
| 0.898041
|
Class for BART for the ordering model
| 2.627981
| 3
|
pygon/problem.py
|
TsarN/pygon
| 4
|
6626169
|
<gh_stars>1-10
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module defines class for working with problems."""
import os
import subprocess
import glob
from shutil import rmtree
import yaml
from loguru import logger
from pygon.testcase import FileName, SolutionTest, CheckerTest, Verdict
from pygon.testcase import expand_generator_command, ValidatorTest
from pygon.config import TEST_FORMAT, BUILD_DIR
from pygon.ejudge import export_problem as ejudge_export
class ProblemConfigurationError(Exception):
pass
class Problem:
"""A problem.
Attributes:
root: path to problem root
internal_name (str): problem internal name (e.g. "aplusb")
input_file (FileName): input file
output_file (FileName): output file
interactive (bool): is this problem interactive?
time_limit (float): time limit in seconds
memory_limit (float): memory limit in MiB
active_checker (Checker): active checker for the problem (or None)
active_interactor (Interactor): active interactor for the problem (or None)
active_validators (list): list of active Validators for the problem
"""
def __init__(self, root):
"""Constructs a Problem
Args:
root: path to problem root
"""
self.root = root
self.internal_name = os.path.basename(root)
self.input_file = FileName(stdio=True)
self.output_file = FileName(stdio=True)
self.interactive = False
self.time_limit = 1.0
self.memory_limit = 256.0
self.active_checker = None
self.active_interactor = None
self.active_validators = []
def load(self):
"""Load itself from descriptor."""
from pygon.checker import Checker
from pygon.validator import Validator
from pygon.interactor import Interactor
with open(self.get_descriptor_path()) as desc:
data = yaml.safe_load(desc.read())
self.internal_name = data["internal_name"]
self.input_file = FileName(data.get("input_file", "standard_io"))
self.output_file = FileName(data.get("output_file", "standard_io"))
self.interactive = data.get("interactive", False)
self.time_limit = data.get("time_limit", 1.0)
self.memory_limit = data.get("memory_limit", 256.0)
chk = data.get("active_checker")
if chk:
self.active_checker = Checker.from_identifier(chk, self)
else:
self.active_checker = None
itr = data.get("active_interactor")
if itr:
self.active_interactor = Interactor.from_identifier(itr, self)
else:
self.active_interactor = None
self.active_validators = []
for i in data.get("active_validators", []):
self.active_validators.append(Validator.from_identifier(i, self))
def get_source_filename(self, directory, name):
"""Get a source filename from identifier, or determine that
the source doesn't exist.
Args:
directory: directory where to look for the source (e.g. "checkers")
name: identifier of the source (e.g. "check")
Returns:
None if source was not found, filename of the source otherwise
(e.g. "check.cpp")
"""
lst = os.listdir(os.path.join(self.root, directory))
for i in lst:
if os.path.splitext(i)[0] == name and not i.endswith(".yaml"):
return i
return None
def get_descriptor_path(self):
"""Returns a path to problem's descriptor file"""
return os.path.join(self.root, "problem.yaml")
def get_sources(self, directory):
"""Returns a list of all sources' filenames.
Args:
directory: directory where to look for the source (e.g. "solutions")
Returns:
list: source filenames (e.g. ["solve_ok.cpp", "solve_wa.cpp"])
"""
try:
lst = set(os.listdir(os.path.join(self.root, directory)))
except FileNotFoundError:
return []
res = []
for i in lst:
if i.endswith(".yaml"):
continue
base = os.path.splitext(i)[0]
if '{}.yaml'.format(base) not in lst:
continue
res.append(i)
res.sort()
return res
def discover_sources(self, cls):
"""Discover sources that lack descriptors and create them.
Args:
cls: a Source subclass
"""
dirname = os.path.join(self.root, cls.directory_name)
try:
lst = set(os.listdir(dirname))
except FileNotFoundError:
return
for src in lst:
if src.endswith(".yaml"):
continue
base = os.path.splitext(src)[0]
if '{}.yaml'.format(base) in lst:
continue
logger.success("{} {} discovered", cls.__name__, src)
obj = cls(problem=self, name=src)
obj.save()
def get_statements(self):
"""Returns list of all Statements."""
from pygon.statement import Statement
res = []
for lang in os.listdir(os.path.join(self.root, "statements")):
if lang == "tests":
continue
with open(os.path.join(self.root, "statements", lang, "name.txt")) as f:
name = f.read().strip()
res.append(Statement(problem=self, name=name, language=lang))
return res
def get_main_solution(self):
"""Returns the problem's main Solution.
Raises:
ProblemConfigurationError - no/more than one main solution is found
"""
from pygon.solution import Solution
if hasattr(self, "_main_solution"):
return self._main_solution
res = []
for i in self.get_sources(Solution.directory_name):
sol = Solution(name=i, problem=self)
sol.load()
if sol.tag.tag == "main":
res.append(sol)
if not res:
raise ProblemConfigurationError("No main solution found")
if len(res) > 1:
raise ProblemConfigurationError("More than one main solution found")
self._main_solution = res[0]
return self._main_solution
def get_tests(self, cls):
"""Collects all of the tests of type cls:
solution/checker/validator tests.
"""
res = []
try:
lst = set(os.listdir(os.path.join(self.root, cls.directory)))
except OSError:
return res
for i in lst:
if not i.endswith(".yaml"):
base = i
else:
base = i[:-5]
try:
index = int(base)
except ValueError:
continue
if TEST_FORMAT.format(index) != base or index < 1:
continue
test = cls(index, problem=self)
if not i.endswith(".yaml"):
if "{}.yaml".format(i) in lst:
continue
# There's no descriptor, so this test is has default settings,
# so we don't run load.
else:
test.load()
res.append(test)
res.sort(key=lambda test: test.index)
return res
def get_solution_tests(self):
"""Collects all of the `SolutionTest`s from the file system and
returns it as a list, sorted by index.
"""
return self.get_tests(SolutionTest)
def get_checker_tests(self):
"""Collects all of the `CheckerTest`s from the file system and
returns it as a list, sorted by index.
"""
return self.get_tests(CheckerTest)
def get_validator_tests(self):
"""Collects all of the `ValidatorTest`s from the file system and
returns it as a list, sorted by index.
"""
return self.get_tests(ValidatorTest)
def edit_solution_tests(self):
"""Returns a editable multiline value for managing tests."""
res = """\
# Managing tests of problem {problem}
#
# Each non-empty line of this file, except comments, which begin with '#'
# signifies a test. Test may be either manually entered or generated.
#
# Manually entered tests are lines beginning with 'M', then flags,
# then a path to the input file, relative to the problem root.
# Globs are supported (you can use /something/*), tests are ordered
# lexicographically.
#
# Generated tests are lines beginning with 'G', then flags,
# then generator command. By default, ranges are expanded into
# several tests. For example, generator command "gen [1..3]" expands
# into three tests, with generator commands "gen 1", "gen 2" and "gen 3"
# respectively. You can also use several ranges in one command and
# specify step, for example "gen 10 [1,3..9] 20 [5,4..1]".
#
# List of flags:
# S - this test is a sample
# R - do not expand ranges or globs in this test
#
# For example, following line means a manually entered test that is
# included in the statements and is located at PROBLEMROOT/tests/01:
#
# MS tests/01
#
# Edit your tests, then save this file and exit the editor
""".format(problem=self.internal_name)
lines = []
for test in self.get_solution_tests():
if test.generate:
line = "G"
exp = expand_generator_command(test.generate)
if len(exp) != 1 or exp[0] != test.generate:
line += "R"
else:
line = "M"
if test.sample:
line += "S"
line += " "
if test.generate:
line += test.generate
else:
line += os.path.join("tests", TEST_FORMAT.format(test.index))
lines.append(line)
return res + "\n".join(lines)
def update_solution_tests(self, text):
"""Updates SolutionTests from editable text
(see edit_solution_tests).
"""
tests = []
dirname = os.path.join(self.root, "tests")
for line in text.split("\n"):
l = line.strip()
if l.startswith("#") or not l:
continue
if " " not in l:
raise ValueError("Malformed line: '{}'".format(l))
flags = l[:l.find(" ")]
arg = l[l.find(" ")+1:]
test = dict(sample="S" in flags)
if flags[0] == "M":
if "R" not in flags:
for i in sorted(glob.glob(os.path.join(self.root, arg))):
test = test.copy()
with open(i, 'rb') as f:
test['data'] = f.read()
tests.append(test)
else:
with open(os.path.join(self.root, arg), 'rb') as f:
test['data'] = f.read()
tests.append(test)
elif flags[0] == "G":
if "R" not in flags:
for i in expand_generator_command(arg):
test = test.copy()
test["generate"] = i
tests.append(test)
else:
test["generate"] = arg
tests.append(test)
else:
raise ValueError("Malformed line: '{}'".format(l))
to_remove = set(os.listdir(dirname))
for i, test in enumerate(tests):
index = TEST_FORMAT.format(i + 1)
if 'data' in test:
to_remove.discard(index)
to_remove.discard(index + ".yaml")
obj = SolutionTest(index=i + 1, problem=self,
sample=test['sample'],
generate=test.get('generate'))
obj.save()
if 'data' in test:
with open(os.path.join(dirname, index), 'wb') as f:
f.write(test['data'])
for i in to_remove:
os.remove(os.path.join(dirname, i))
def add_statement(self, lang, name):
"""Add new statement.
Args:
lang: statement language.
name: name of the problem in that language.
"""
root = os.path.join(self.root, "statements", lang)
if os.path.exists(root):
logger.error("{} already exists", root)
return False
os.makedirs(root, exist_ok=True)
with open(os.path.join(root, "name.txt"), "w") as f:
print(name, file=f)
with open(os.path.join(root, "problem.tex"), "w") as f:
print("""\
% Write problem legend here
\\InputFile
% Write input format here
\\OutputFile
% Write output format here
% Sample tests replace the following line, if you remove it,
% they will not be displayed.
\\SAMPLES
\\Explanations
% Write your explanations here. You can also remove this (or any other)
% section entirely. It's up to you.
""", file=f)
return True
def build(self, statements=True):
"""Build the problem verifying that:
- There is an active checker and it compiles
- (If interactive) There is an active interactor and it compiles
- There is a main solution and it compiles
- All active validators compile
- All tests are generated and valid
- Main solution gets OK
Should be ran prior to verification.
"""
if not self.active_checker:
raise ProblemConfigurationError("Active checker is not set")
try:
self.active_checker.ensure_compile()
except subprocess.CalledProcessError:
raise ProblemConfigurationError("Active checker compilation failed")
if self.interactive:
if not self.input_file.stdio or not self.output_file.stdio:
raise ProblemConfigurationError("Interactive problems must use stdio")
if not self.active_interactor:
raise ProblemConfigurationError("Active interactor is not set")
try:
self.active_interactor.ensure_compile()
except subprocess.CalledProcessError:
raise ProblemConfigurationError("Active interactor compilation failed")
main_solution = self.get_main_solution()
try:
main_solution.ensure_compile()
except subprocess.CalledProcessError:
raise ProblemConfigurationError("Main solution compilation failed")
for validator in self.active_validators:
try:
validator.ensure_compile()
except subprocess.CalledProcessError:
raise ProblemConfigurationError(
"Validator {} compilation failed".format(validator)
)
tests = self.get_solution_tests()
for test in tests:
try:
test.build()
except subprocess.CalledProcessError:
raise ProblemConfigurationError("Generator compilation failed")
for validator in self.active_validators:
verdict = validator.validate(test.get_input_path())
if verdict.verdict != Verdict.OK:
raise ProblemConfigurationError(
"Validator {} rejects test {}: {}".format(
validator.identifier,
test.index,
verdict.comment
))
for test in tests:
verdict = main_solution.judge(test)
if not main_solution.tag.check_one(verdict.verdict):
raise ProblemConfigurationError(
"Main solution {} gets {} on test {}: {}".format(
main_solution.identifier,
verdict.verdict,
test.index,
verdict.comment
))
if statements:
for stmt in self.get_statements():
try:
stmt.build()
except subprocess.CalledProcessError:
raise ProblemConfigurationError(
"Failed to build {} statement. See '{}' for details".format(
stmt.language, stmt.get_log_path()
))
logger.success("Problem built successfully")
def verify(self):
"""Build and lint problem for configuration errors.
Raises errors when:
- Problem fails to build correctly (see `Problem.build`).
- Solutions have incorrect tags.
- Active checker doesn't pass all checker tests.
- Active validators don't pass all validator tests.
Reports warnings when:
- Checker tests are missing and custom checker is used.
- Validator tests are missing and custom validator is used.
- Custom validator is not used.
- No tests.
- Sample tests are not first.
"""
from pygon.solution import Solution
self.build(statements=False)
solutions = Solution.all(self)
tests = self.get_solution_tests()
for solution in solutions:
verdicts = []
for test in tests:
verdicts.append(solution.judge(test).verdict)
if solution.tag.check_all(verdicts):
logger.success("Solution {} has correct tag"
.format(solution.identifier))
else:
raise ProblemConfigurationError("Solution {} has incorrect tag"
.format(solution.identifier))
checker_tests = self.get_checker_tests()
for test in checker_tests:
test.validate(self.active_checker)
if checker_tests:
logger.success("Checker passed all tests")
elif not self.active_checker.standard:
logger.warning("No checker tests found, please consider adding them")
validator_tests = self.get_validator_tests()
for test in self.get_validator_tests():
test.validate(self.active_validators)
if any(not i.standard for i in self.active_validators):
if validator_tests:
logger.success("Validators passed all tests")
else:
logger.warning("No validator tests found, please consider adding them")
else:
logger.warning("No custom validators found, please consider adding them")
if tests:
prefix = True
for test in tests:
if test.sample:
if not prefix:
logger.warning(
"Test case {} is a sample, but is not among "
"the first tests for the problem".format(test.index))
else:
prefix = False
else:
logger.warning("No test cases found")
def ejudge_export(self, language=None):
"""Export problem in ejudge format to BUILD_ROOT/ejudge"""
path = os.path.join(self.root, BUILD_DIR, "ejudge")
rmtree(path, ignore_errors=True)
ejudge_export(self, path, language=language)
|
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module defines class for working with problems."""
import os
import subprocess
import glob
from shutil import rmtree
import yaml
from loguru import logger
from pygon.testcase import FileName, SolutionTest, CheckerTest, Verdict
from pygon.testcase import expand_generator_command, ValidatorTest
from pygon.config import TEST_FORMAT, BUILD_DIR
from pygon.ejudge import export_problem as ejudge_export
class ProblemConfigurationError(Exception):
pass
class Problem:
"""A problem.
Attributes:
root: path to problem root
internal_name (str): problem internal name (e.g. "aplusb")
input_file (FileName): input file
output_file (FileName): output file
interactive (bool): is this problem interactive?
time_limit (float): time limit in seconds
memory_limit (float): memory limit in MiB
active_checker (Checker): active checker for the problem (or None)
active_interactor (Interactor): active interactor for the problem (or None)
active_validators (list): list of active Validators for the problem
"""
def __init__(self, root):
"""Constructs a Problem
Args:
root: path to problem root
"""
self.root = root
self.internal_name = os.path.basename(root)
self.input_file = FileName(stdio=True)
self.output_file = FileName(stdio=True)
self.interactive = False
self.time_limit = 1.0
self.memory_limit = 256.0
self.active_checker = None
self.active_interactor = None
self.active_validators = []
def load(self):
"""Load itself from descriptor."""
from pygon.checker import Checker
from pygon.validator import Validator
from pygon.interactor import Interactor
with open(self.get_descriptor_path()) as desc:
data = yaml.safe_load(desc.read())
self.internal_name = data["internal_name"]
self.input_file = FileName(data.get("input_file", "standard_io"))
self.output_file = FileName(data.get("output_file", "standard_io"))
self.interactive = data.get("interactive", False)
self.time_limit = data.get("time_limit", 1.0)
self.memory_limit = data.get("memory_limit", 256.0)
chk = data.get("active_checker")
if chk:
self.active_checker = Checker.from_identifier(chk, self)
else:
self.active_checker = None
itr = data.get("active_interactor")
if itr:
self.active_interactor = Interactor.from_identifier(itr, self)
else:
self.active_interactor = None
self.active_validators = []
for i in data.get("active_validators", []):
self.active_validators.append(Validator.from_identifier(i, self))
def get_source_filename(self, directory, name):
"""Get a source filename from identifier, or determine that
the source doesn't exist.
Args:
directory: directory where to look for the source (e.g. "checkers")
name: identifier of the source (e.g. "check")
Returns:
None if source was not found, filename of the source otherwise
(e.g. "check.cpp")
"""
lst = os.listdir(os.path.join(self.root, directory))
for i in lst:
if os.path.splitext(i)[0] == name and not i.endswith(".yaml"):
return i
return None
def get_descriptor_path(self):
"""Returns a path to problem's descriptor file"""
return os.path.join(self.root, "problem.yaml")
def get_sources(self, directory):
"""Returns a list of all sources' filenames.
Args:
directory: directory where to look for the source (e.g. "solutions")
Returns:
list: source filenames (e.g. ["solve_ok.cpp", "solve_wa.cpp"])
"""
try:
lst = set(os.listdir(os.path.join(self.root, directory)))
except FileNotFoundError:
return []
res = []
for i in lst:
if i.endswith(".yaml"):
continue
base = os.path.splitext(i)[0]
if '{}.yaml'.format(base) not in lst:
continue
res.append(i)
res.sort()
return res
def discover_sources(self, cls):
"""Discover sources that lack descriptors and create them.
Args:
cls: a Source subclass
"""
dirname = os.path.join(self.root, cls.directory_name)
try:
lst = set(os.listdir(dirname))
except FileNotFoundError:
return
for src in lst:
if src.endswith(".yaml"):
continue
base = os.path.splitext(src)[0]
if '{}.yaml'.format(base) in lst:
continue
logger.success("{} {} discovered", cls.__name__, src)
obj = cls(problem=self, name=src)
obj.save()
def get_statements(self):
"""Returns list of all Statements."""
from pygon.statement import Statement
res = []
for lang in os.listdir(os.path.join(self.root, "statements")):
if lang == "tests":
continue
with open(os.path.join(self.root, "statements", lang, "name.txt")) as f:
name = f.read().strip()
res.append(Statement(problem=self, name=name, language=lang))
return res
def get_main_solution(self):
"""Returns the problem's main Solution.
Raises:
ProblemConfigurationError - no/more than one main solution is found
"""
from pygon.solution import Solution
if hasattr(self, "_main_solution"):
return self._main_solution
res = []
for i in self.get_sources(Solution.directory_name):
sol = Solution(name=i, problem=self)
sol.load()
if sol.tag.tag == "main":
res.append(sol)
if not res:
raise ProblemConfigurationError("No main solution found")
if len(res) > 1:
raise ProblemConfigurationError("More than one main solution found")
self._main_solution = res[0]
return self._main_solution
def get_tests(self, cls):
"""Collects all of the tests of type cls:
solution/checker/validator tests.
"""
res = []
try:
lst = set(os.listdir(os.path.join(self.root, cls.directory)))
except OSError:
return res
for i in lst:
if not i.endswith(".yaml"):
base = i
else:
base = i[:-5]
try:
index = int(base)
except ValueError:
continue
if TEST_FORMAT.format(index) != base or index < 1:
continue
test = cls(index, problem=self)
if not i.endswith(".yaml"):
if "{}.yaml".format(i) in lst:
continue
# There's no descriptor, so this test is has default settings,
# so we don't run load.
else:
test.load()
res.append(test)
res.sort(key=lambda test: test.index)
return res
def get_solution_tests(self):
"""Collects all of the `SolutionTest`s from the file system and
returns it as a list, sorted by index.
"""
return self.get_tests(SolutionTest)
def get_checker_tests(self):
"""Collects all of the `CheckerTest`s from the file system and
returns it as a list, sorted by index.
"""
return self.get_tests(CheckerTest)
def get_validator_tests(self):
"""Collects all of the `ValidatorTest`s from the file system and
returns it as a list, sorted by index.
"""
return self.get_tests(ValidatorTest)
def edit_solution_tests(self):
"""Returns a editable multiline value for managing tests."""
res = """\
# Managing tests of problem {problem}
#
# Each non-empty line of this file, except comments, which begin with '#'
# signifies a test. Test may be either manually entered or generated.
#
# Manually entered tests are lines beginning with 'M', then flags,
# then a path to the input file, relative to the problem root.
# Globs are supported (you can use /something/*), tests are ordered
# lexicographically.
#
# Generated tests are lines beginning with 'G', then flags,
# then generator command. By default, ranges are expanded into
# several tests. For example, generator command "gen [1..3]" expands
# into three tests, with generator commands "gen 1", "gen 2" and "gen 3"
# respectively. You can also use several ranges in one command and
# specify step, for example "gen 10 [1,3..9] 20 [5,4..1]".
#
# List of flags:
# S - this test is a sample
# R - do not expand ranges or globs in this test
#
# For example, following line means a manually entered test that is
# included in the statements and is located at PROBLEMROOT/tests/01:
#
# MS tests/01
#
# Edit your tests, then save this file and exit the editor
""".format(problem=self.internal_name)
lines = []
for test in self.get_solution_tests():
if test.generate:
line = "G"
exp = expand_generator_command(test.generate)
if len(exp) != 1 or exp[0] != test.generate:
line += "R"
else:
line = "M"
if test.sample:
line += "S"
line += " "
if test.generate:
line += test.generate
else:
line += os.path.join("tests", TEST_FORMAT.format(test.index))
lines.append(line)
return res + "\n".join(lines)
def update_solution_tests(self, text):
    """Updates SolutionTests from editable text
    (see edit_solution_tests).

    Parses the 'M'/'G' + flags mini-language, rewrites the test
    descriptors under PROBLEMROOT/tests, and removes files belonging
    to tests that no longer exist.
    """
    tests = []
    dirname = os.path.join(self.root, "tests")
    for line in text.split("\n"):
        l = line.strip()
        # Skip comments and blank lines.
        if l.startswith("#") or not l:
            continue
        if " " not in l:
            raise ValueError("Malformed line: '{}'".format(l))
        # A line is "<flags> <argument>"; flags start with 'M' or 'G'.
        flags = l[:l.find(" ")]
        arg = l[l.find(" ")+1:]
        test = dict(sample="S" in flags)
        if flags[0] == "M":
            if "R" not in flags:
                # Expand globs; files are taken in lexicographic order.
                for i in sorted(glob.glob(os.path.join(self.root, arg))):
                    test = test.copy()
                    with open(i, 'rb') as f:
                        test['data'] = f.read()
                    tests.append(test)
            else:
                # 'R': treat the path literally, no glob expansion.
                with open(os.path.join(self.root, arg), 'rb') as f:
                    test['data'] = f.read()
                tests.append(test)
        elif flags[0] == "G":
            if "R" not in flags:
                # Expand range syntax into one test per generated command.
                for i in expand_generator_command(arg):
                    test = test.copy()
                    test["generate"] = i
                    tests.append(test)
            else:
                test["generate"] = arg
                tests.append(test)
        else:
            raise ValueError("Malformed line: '{}'".format(l))
    # Anything left in to_remove after the sync loop is stale and deleted.
    to_remove = set(os.listdir(dirname))
    for i, test in enumerate(tests):
        index = TEST_FORMAT.format(i + 1)
        if 'data' in test:
            # Manual test: its input file must be kept.
            to_remove.discard(index)
        # Descriptor is (re)written by obj.save() for every test, keep it.
        # NOTE(review): indentation reconstructed — confirm the .yaml
        # discard applies to all tests, not only manual ones.
        to_remove.discard(index + ".yaml")
        obj = SolutionTest(index=i + 1, problem=self,
                           sample=test['sample'],
                           generate=test.get('generate'))
        obj.save()
        if 'data' in test:
            with open(os.path.join(dirname, index), 'wb') as f:
                f.write(test['data'])
    for i in to_remove:
        os.remove(os.path.join(dirname, i))
def add_statement(self, lang, name):
    """Add new statement.
    Args:
        lang: statement language.
        name: name of the problem in that language.
    Returns:
        bool: False if a statement for `lang` already exists,
        True once the skeleton has been created.
    """
    root = os.path.join(self.root, "statements", lang)
    if os.path.exists(root):
        logger.error("{} already exists", root)
        return False
    os.makedirs(root, exist_ok=True)
    # Human-readable problem name for this language.
    with open(os.path.join(root, "name.txt"), "w") as f:
        print(name, file=f)
    # LaTeX skeleton for the statement; content is written verbatim.
    with open(os.path.join(root, "problem.tex"), "w") as f:
        print("""\
% Write problem legend here
\\InputFile
% Write input format here
\\OutputFile
% Write output format here
% Sample tests replace the following line, if you remove it,
% they will not be displayed.
\\SAMPLES
\\Explanations
% Write your explanations here. You can also remove this (or any other)
% section entirely. It's up to you.
""", file=f)
    return True
def build(self, statements=True):
    """Build the problem verifying that:
    - There is an active checker and it compiles
    - (If interactive) There is an active interactor and it compiles
    - There is a main solution and it compiles
    - All active validators compile
    - All tests are generated and valid
    - Main solution gets OK
    Should be ran prior to verification.

    Args:
        statements: also build the LaTeX statements when True.
    Raises:
        ProblemConfigurationError: on the first failed step.
    """
    if not self.active_checker:
        raise ProblemConfigurationError("Active checker is not set")
    try:
        self.active_checker.ensure_compile()
    except subprocess.CalledProcessError:
        raise ProblemConfigurationError("Active checker compilation failed")
    if self.interactive:
        # Interactive problems talk to the interactor over stdin/stdout.
        if not self.input_file.stdio or not self.output_file.stdio:
            raise ProblemConfigurationError("Interactive problems must use stdio")
        if not self.active_interactor:
            raise ProblemConfigurationError("Active interactor is not set")
        try:
            self.active_interactor.ensure_compile()
        except subprocess.CalledProcessError:
            raise ProblemConfigurationError("Active interactor compilation failed")
    main_solution = self.get_main_solution()
    try:
        main_solution.ensure_compile()
    except subprocess.CalledProcessError:
        raise ProblemConfigurationError("Main solution compilation failed")
    for validator in self.active_validators:
        try:
            validator.ensure_compile()
        except subprocess.CalledProcessError:
            raise ProblemConfigurationError(
                "Validator {} compilation failed".format(validator)
            )
    tests = self.get_solution_tests()
    # Generate each test, then run every active validator against it.
    for test in tests:
        try:
            test.build()
        except subprocess.CalledProcessError:
            raise ProblemConfigurationError("Generator compilation failed")
        for validator in self.active_validators:
            verdict = validator.validate(test.get_input_path())
            if verdict.verdict != Verdict.OK:
                raise ProblemConfigurationError(
                    "Validator {} rejects test {}: {}".format(
                        validator.identifier,
                        test.index,
                        verdict.comment
                    ))
    # The main solution must match its tag (normally OK) on every test.
    for test in tests:
        verdict = main_solution.judge(test)
        if not main_solution.tag.check_one(verdict.verdict):
            raise ProblemConfigurationError(
                "Main solution {} gets {} on test {}: {}".format(
                    main_solution.identifier,
                    verdict.verdict,
                    test.index,
                    verdict.comment
                ))
    if statements:
        for stmt in self.get_statements():
            try:
                stmt.build()
            except subprocess.CalledProcessError:
                raise ProblemConfigurationError(
                    "Failed to build {} statement. See '{}' for details".format(
                        stmt.language, stmt.get_log_path()
                    ))
    logger.success("Problem built successfully")
def verify(self):
    """Build and lint problem for configuration errors.
    Raises errors when:
    - Problem fails to build correctly (see `Problem.build`).
    - Solutions have incorrect tags.
    - Active checker doesn't pass all checker tests.
    - Active validators don't pass all validator tests.
    Reports warnings when:
    - Checker tests are missing and custom checker is used.
    - Validator tests are missing and custom validator is used.
    - Custom validator is not used.
    - No tests.
    - Sample tests are not first.
    """
    from pygon.solution import Solution
    self.build(statements=False)
    solutions = Solution.all(self)
    tests = self.get_solution_tests()
    # Every solution must behave according to its declared tag
    # (e.g. OK / WA / TL) across the whole test set.
    for solution in solutions:
        verdicts = []
        for test in tests:
            verdicts.append(solution.judge(test).verdict)
        if solution.tag.check_all(verdicts):
            logger.success("Solution {} has correct tag"
                           .format(solution.identifier))
        else:
            raise ProblemConfigurationError("Solution {} has incorrect tag"
                                            .format(solution.identifier))
    checker_tests = self.get_checker_tests()
    for test in checker_tests:
        test.validate(self.active_checker)
    if checker_tests:
        logger.success("Checker passed all tests")
    elif not self.active_checker.standard:
        logger.warning("No checker tests found, please consider adding them")
    validator_tests = self.get_validator_tests()
    # Reuse the list fetched above instead of re-scanning the file system
    # (previously get_validator_tests() was called a second time here).
    for test in validator_tests:
        test.validate(self.active_validators)
    if any(not i.standard for i in self.active_validators):
        if validator_tests:
            logger.success("Validators passed all tests")
        else:
            logger.warning("No validator tests found, please consider adding them")
    else:
        logger.warning("No custom validators found, please consider adding them")
    # Samples must form a prefix of the test list so that they are
    # the ones shown in the statements.
    if tests:
        prefix = True
        for test in tests:
            if test.sample:
                if not prefix:
                    logger.warning(
                        "Test case {} is a sample, but is not among "
                        "the first tests for the problem".format(test.index))
            else:
                prefix = False
    else:
        logger.warning("No test cases found")
def ejudge_export(self, language=None):
    """Export problem in ejudge format to BUILD_ROOT/ejudge."""
    target = os.path.join(self.root, BUILD_DIR, "ejudge")
    # Start from a clean directory; the module-level ejudge_export
    # function (shadowed by this method name) does the actual export.
    rmtree(target, ignore_errors=True)
    ejudge_export(self, target, language=language)
|
en
| 0.832702
|
# Copyright (c) 2019 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. This module defines class for working with problems. A problem. Attributes: root: path to problem root internal_name (str): problem internal name (e.g. "aplusb") input_file (FileName): input file output_file (FileName): output file interactive (bool): is this problem interactive? time_limit (float): time limit in seconds memory_limit (float): memory limit in MiB active_checker (Checker): active checker for the problem (or None) active_interactor (Interactor): active interactor for the problem (or None) active_validators (list): list of active Validators for the problem Constructs a Problem Args: root: path to problem root Load itself from descriptor. Get a source filename from identifier, or determine that the source doesn't exist. Args: directory: directory where to look for the source (e.g. "checkers") name: identifier of the source (e.g. 
"check") Returns: None if source was not found, filename of the source otherwise (e.g. "check.cpp") Returns a path to problem's descriptor file Returns a list of all sources' filenames. Args: directory: directory where to look for the source (e.g. "solutions") Returns: list: source filenames (e.g. ["solve_ok.cpp", "solve_wa.cpp"]) Discover sources that lack descriptors and create them. Args: cls: a Source subclass Returns list of all Statements. Returns the problem's main Solution. Raises: ProblemConfigurationError - no/more than one main solution is found Collects all of the tests of type cls: solution/checker/validator tests. # There's no descriptor, so this test is has default settings, # so we don't run load. Collects all of the `SolutionTest`s from the file system and returns it as a list, sorted by index. Collects all of the `CheckerTest`s from the file system and returns it as a list, sorted by index. Collects all of the `ValidatorTest`s from the file system and returns it as a list, sorted by index. Returns a editable multiline value for managing tests. \ # Managing tests of problem {problem} # # Each non-empty line of this file, except comments, which begin with '#' # signifies a test. Test may be either manually entered or generated. # # Manually entered tests are lines beginning with 'M', then flags, # then a path to the input file, relative to the problem root. # Globs are supported (you can use /something/*), tests are ordered # lexicographically. # # Generated tests are lines beginning with 'G', then flags, # then generator command. By default, ranges are expanded into # several tests. For example, generator command "gen [1..3]" expands # into three tests, with generator commands "gen 1", "gen 2" and "gen 3" # respectively. You can also use several ranges in one command and # specify step, for example "gen 10 [1,3..9] 20 [5,4..1]". 
# # List of flags: # S - this test is a sample # R - do not expand ranges or globs in this test # # For example, following line means a manually entered test that is # included in the statements and is located at PROBLEMROOT/tests/01: # # MS tests/01 # # Edit your tests, then save this file and exit the editor Updates SolutionTests from editable text (see edit_solution_tests). Add new statement. Args: lang: statement language. name: name of the problem in that language. \ % Write problem legend here \\InputFile % Write input format here \\OutputFile % Write output format here % Sample tests replace the following line, if you remove it, % they will not be displayed. \\SAMPLES \\Explanations % Write your explanations here. You can also remove this (or any other) % section entirely. It's up to you. Build the problem verifying that: - There is an active checker and it compiles - (If interactive) There is an active interactor and it compiles - There is a main solution and it compiles - All active validators compile - All tests are generated and valid - Main solution gets OK Should be ran prior to verification. Build and lint problem for configuration errors. Raises errors when: - Problem fails to build correctly (see `Problem.build`). - Solutions have incorrect tags. - Active checker doesn't pass all checker tests. - Active validators don't pass all validator tests. Reports warnings when: - Checker tests are missing and custom checker is used. - Validator tests are missing and custom validator is used. - Custom validator is not used. - No tests. - Sample tests are not first. Export problem in ejudge format to BUILD_ROOT/ejudge
| 2.047965
| 2
|
1_basics/3_luftballon.py
|
Coding-for-the-Arts/drawbot-samples
| 0
|
6626170
|
<filename>1_basics/3_luftballon.py
"""
Heissluftballon
"""
newPage(1000, 1000)
oval(300, 400, 500, 500)
rect(100, 100, 200, 200)
"""
- Klicke auf «run» (cmd r)
- Ändere die Werte in den Klammern und klicke auf «run» (cmd r)
- Wähle einen Wert aus, drücke die cmd-Taste und drücke/ziehe am Trackpad nach links/rechts.
- Bewege den Korb unter den Ballon.
"""
|
<filename>1_basics/3_luftballon.py
"""
Heissluftballon
"""
newPage(1000, 1000)
oval(300, 400, 500, 500)
rect(100, 100, 200, 200)
"""
- Klicke auf «run» (cmd r)
- Ändere die Werte in den Klammern und klicke auf «run» (cmd r)
- Wähle einen Wert aus, drücke die cmd-Taste und drücke/ziehe am Trackpad nach links/rechts.
- Bewege den Korb unter den Ballon.
"""
|
de
| 0.985989
|
Heissluftballon - Klicke auf «run» (cmd r) - Ändere die Werte in den Klammern und klicke auf «run» (cmd r) - Wähle einen Wert aus, drücke die cmd-Taste und drücke/ziehe am Trackpad nach links/rechts. - Bewege den Korb unter den Ballon.
| 1.879408
| 2
|
test/benchmark/throughput.py
|
lzjzx1122/FaaSFlow
| 24
|
6626171
|
<gh_stars>10-100
from typing_extensions import final
from gevent import monkey; monkey.patch_all(thread=False)
import requests
from tqdm import tqdm
import repository
import gevent
import uuid
import time
# import prepare_basic_input
import sys
repo = repository.Repository()  # project-local data-access facade
speed = 400 # request / minute
latency_results = []  # per-request end-to-end latencies in seconds
workflow_name = sys.argv[1]  # workflow under test, from the command line
def trigger_function(request_id, function_name):
    """Dispatch one function invocation of the workflow to its worker node."""
    info = repo.get_function_info(function_name, workflow_name + '_function_info')
    url = 'http://{}/request'.format(info['ip'])
    data = {
        'request_id': request_id,
        'workflow_name': workflow_name,
        'function_name': function_name,
        'no_parent_execution': True
    }
    try:
        requests.post(url, json=data)
    except Exception:
        # Best-effort cleanup: on any dispatch failure ask the master
        # node to clear the request's state. (Deliberately broad catch.)
        print('Exception Happend, Clearing...')
        master_addr = repo.get_all_addrs(workflow_name + '_workflow_metadata')[0]
        clear_url = 'http://{}/clear'.format(master_addr)
        requests.post(clear_url, json={'request_id': request_id, 'master': True, 'workflow_name': workflow_name})
def run_workflow():
    """Dispatch one workflow request and record its end-to-end latency."""
    global speed
    # Re-arm before doing any work: open-loop arrival process with
    # `speed` requests per minute, i.e. 60 / speed seconds apart.
    gevent.spawn_later(60 / speed, run_workflow)
    request_id = str(uuid.uuid4())
    repo.allocate_db(request_id)
    # print('----preparing input ', request_id, '----')
    # prepare_basic_input.prepare_basic_input(request_id)
    print('----dispatching request ', request_id, '----')
    start = time.time()
    # Fan out the workflow's start functions concurrently and wait for all.
    start_functions = repo.get_start_functions(workflow_name + '_workflow_metadata')
    jobs = []
    for n in start_functions:
        jobs.append(gevent.spawn(trigger_function, request_id, n))
    gevent.joinall(jobs)
    # Ask the master node to clear the finished request's state.
    master_addr = repo.get_all_addrs(workflow_name + '_workflow_metadata')[0]
    clear_url = 'http://{}/clear'.format(master_addr)
    requests.post(clear_url, json={'request_id': request_id, 'master': True, 'workflow_name': workflow_name})
    end = time.time()
    print('----ending ', request_id, '----')
    latency_results.append(end - start)
    repo.save_latency(end - start)
def analyze():
    """Periodically report how many latency samples were collected."""
    # Re-arm ourselves first so reporting keeps running every 5 seconds.
    gevent.spawn_later(5, analyze)
    global latency_results
    count = len(latency_results)
    print('!!!! we have ', count, ' results by now !!!!')
    if count >= 20:
        print(latency_results[-10:])
    if count >= 110:
        # Look at the latest window of 105 samples, largest values last.
        results = sorted(latency_results[-105:])
        print('max: ', results[-15:])
        # print('!!!! 95%: ', results[-5], ' 99%: ', results[-1], ' !!!!')
def run():
    """Reset backend state and launch the load generator plus reporter."""
    global speed
    # `speed` is requests per *minute* (run_workflow waits 60 / speed
    # seconds between dispatches), so report it in the correct unit.
    # The original message said 'request / s', which was misleading.
    print('----running workflow ', speed, ' request / minute----')
    repo.mem_clearall()
    repo.reset_all_mem(clear_function_data=True)
    repo.clear_latency_db()
    gevent.spawn_later(1, run_workflow)
    gevent.spawn_later(5, analyze)
    gevent.wait()  # block forever; greenlets keep re-arming themselves
if __name__ == '__main__':
    # prepare()
    run()  # starts the benchmark and blocks in gevent.wait()
|
from typing_extensions import final
from gevent import monkey; monkey.patch_all(thread=False)
import requests
from tqdm import tqdm
import repository
import gevent
import uuid
import time
# import prepare_basic_input
import sys
repo = repository.Repository()
speed = 400 # request / minute
latency_results = []
workflow_name = sys.argv[1]
def trigger_function(request_id, function_name):
info = repo.get_function_info(function_name, workflow_name + '_function_info')
url = 'http://{}/request'.format(info['ip'])
data = {
'request_id': request_id,
'workflow_name': workflow_name,
'function_name': function_name,
'no_parent_execution': True
}
try:
requests.post(url, json=data)
except Exception:
print('Exception Happend, Clearing...')
master_addr = repo.get_all_addrs(workflow_name + '_workflow_metadata')[0]
clear_url = 'http://{}/clear'.format(master_addr)
requests.post(clear_url, json={'request_id': request_id, 'master': True, 'workflow_name': workflow_name})
def run_workflow():
global speed
gevent.spawn_later(60 / speed, run_workflow)
request_id = str(uuid.uuid4())
repo.allocate_db(request_id)
# print('----preparing input ', request_id, '----')
# prepare_basic_input.prepare_basic_input(request_id)
print('----dispatching request ', request_id, '----')
start = time.time()
start_functions = repo.get_start_functions(workflow_name + '_workflow_metadata')
jobs = []
for n in start_functions:
jobs.append(gevent.spawn(trigger_function, request_id, n))
gevent.joinall(jobs)
master_addr = repo.get_all_addrs(workflow_name + '_workflow_metadata')[0]
clear_url = 'http://{}/clear'.format(master_addr)
requests.post(clear_url, json={'request_id': request_id, 'master': True, 'workflow_name': workflow_name})
end = time.time()
print('----ending ', request_id, '----')
latency_results.append(end - start)
repo.save_latency(end - start)
def analyze():
gevent.spawn_later(5, analyze)
global latency_results
print('!!!! we have ', len(latency_results), ' results by now !!!!')
if len(latency_results) >= 20:
print(latency_results[-10:])
if len(latency_results) >= 110:
results = latency_results[-105:]
results.sort()
print('max: ', results[-15:])
# print('!!!! 95%: ', results[-5], ' 99%: ', results[-1], ' !!!!')
def run():
global speed
print('----running workflow ', speed, ' request / s----')
repo.mem_clearall()
repo.reset_all_mem(clear_function_data=True)
repo.clear_latency_db()
gevent.spawn_later(1, run_workflow)
gevent.spawn_later(5, analyze)
gevent.wait()
if __name__ == '__main__':
# prepare()
run()
|
en
| 0.19968
|
# import prepare_basic_input # request / minute # print('----preparing input ', request_id, '----') # prepare_basic_input.prepare_basic_input(request_id) # print('!!!! 95%: ', results[-5], ' 99%: ', results[-1], ' !!!!') # prepare()
| 2.137427
| 2
|
python/torch/exp11_blocked_matrices.py
|
Navid-github/LAMP_benchmark
| 4
|
6626172
|
import logging
from benchmarker import benchmark
import torch as torch
logger = logging.getLogger('exp11_blocked_matrices')
@benchmark
def blocked_solve_naive(A1, A2, B, C):
    """Solve [[A1, 0], [0, A2]] X = B by materialising the full
    block-diagonal matrix and issuing a single solve."""
    pad_top = torch.zeros((A1.shape[0], A1.shape[1]), dtype=torch.float64)
    pad_bottom = torch.zeros((A2.shape[0], A2.shape[1]), dtype=torch.float64)
    upper = torch.cat((A1, pad_top), dim=1)
    lower = torch.cat((pad_bottom, A2), dim=1)
    full = torch.cat((upper, lower), dim=0)
    C = torch.linalg.solve(full, B)
    return C
@benchmark
def blocked_solve_recommended(A1, A2, B, C):
    """Solve the block-diagonal system per block: each diagonal block
    is solved against its own horizontal slice of B."""
    split = A1.shape[0]
    rhs_top = B[0:split, 0:B.shape[1]]
    rhs_bottom = B[split:, 0:B.shape[1]]
    top_solution = torch.linalg.solve(A1, rhs_top)
    bottom_solution = torch.linalg.solve(A2, rhs_bottom)
    C = torch.cat((top_solution, bottom_solution), dim=0)
    return C
def exp11_blocked_matrices(b, n):
    """Benchmark the naive vs. blocked solve and check they agree.

    Each diagonal block is (n/2) x (n/2); B and C cover the full
    2*(n/2) system.
    """
    bm_n = int(n / 2)
    A1 = torch.randn((bm_n, bm_n), dtype=torch.float64)
    A2 = torch.randn((bm_n, bm_n), dtype=torch.float64)
    B = torch.randn((2 * bm_n, 2 * bm_n), dtype=torch.float64)
    C = torch.zeros((2 * bm_n, 2 * bm_n), dtype=torch.float64)
    res1 = b.benchmark("compact", blocked_solve_naive, A1, A2, B, C)
    res2 = b.benchmark("blocked", blocked_solve_recommended, A1, A2, B, C)
    # Both strategies must yield (numerically) the same solution.
    logger.info('PartitionedMatrices correctness: {}'.format(torch.allclose(res1, res2)))
|
import logging
from benchmarker import benchmark
import torch as torch
logger = logging.getLogger('exp11_blocked_matrices')
@benchmark
def blocked_solve_naive(A1, A2, B, C):
C = torch.linalg.solve(torch.cat(
(torch.cat((A1, torch.zeros((A1.shape[0], A1.shape[1]), dtype=torch.float64)), dim=1),
torch.cat((torch.zeros((A2.shape[0], A2.shape[1]), dtype=torch.float64), A2), dim=1)), dim=0), B)
return C
@benchmark
def blocked_solve_recommended(A1, A2, B, C):
b1 = B[0:A1.shape[0], 0:B.shape[1]]
b2 = B[A1.shape[0]:, 0:B.shape[1]]
C = torch.cat((torch.linalg.solve(A1, b1), torch.linalg.solve(A2, b2)), dim=0)
return C
def exp11_blocked_matrices(b, n):
bm_n = int(n / 2)
A1 = torch.randn((bm_n, bm_n), dtype=torch.float64)
A2 = torch.randn((bm_n, bm_n), dtype=torch.float64)
B = torch.randn((2 * bm_n, 2 * bm_n), dtype=torch.float64)
C = torch.zeros((2 * bm_n, 2 * bm_n), dtype=torch.float64)
res1 = b.benchmark("compact", blocked_solve_naive, A1, A2, B, C)
res2 = b.benchmark("blocked", blocked_solve_recommended, A1, A2, B, C)
logger.info('PartitionedMatrices correctness: {}'.format(torch.allclose(res1, res2)))
|
none
| 1
| 2.313904
| 2
|
|
src/dynamic_graph/sot_talos_balance/test/test_dcmZmpCopControl.py
|
nim65s/sot-talos-balance
| 0
|
6626173
|
<filename>src/dynamic_graph/sot_talos_balance/test/test_dcmZmpCopControl.py
'''Test CoM admittance control as described in paper.'''
from time import sleep
from dynamic_graph.sot_talos_balance.utils.run_test_utils import (ask_for_confirmation, run_ft_calibration, run_test,
                                                                  runCommandClient)
try:
    # Python 2
    input = raw_input  # noqa
except NameError:
    pass

# Launch the test application and calibrate the force/torque sensors.
run_test('appli_dcmZmpCopControl.py')
run_ft_calibration('robot.ftc')
input("Wait before running the test")
# Connect ZMP reference and reset controllers
print('Set controller')
runCommandClient('plug(robot.zmp_estimator.emergencyStop,robot.cm.emergencyStop_zmp)')
runCommandClient('plug(robot.distribute.emergencyStop,robot.cm.emergencyStop_distribute)')
runCommandClient('plug(robot.distribute.zmpRef,robot.com_admittance_control.zmpDes)')
runCommandClient('robot.com_admittance_control.setState(robot.wp.comDes.value,[0.0,0.0,0.0])')
runCommandClient('robot.com_admittance_control.Kp.value = Kp_adm')
runCommandClient('robot.rightAnkleController.gainsXY.value = Kp_ankles')
runCommandClient('robot.leftAnkleController.gainsXY.value = Kp_ankles')
runCommandClient('robot.dcm_control.resetDcmIntegralError()')
runCommandClient('robot.dcm_control.Ki.value = Ki_dcm')

# --- Phase 1: optional CoM sinusoid ---------------------------------------
c = ask_for_confirmation("Execute a sinusoid?")
if c:
    print("Putting the robot in position...")
    runCommandClient('robot.comTrajGen.move(1,-0.025,1.0)')
    sleep(1.0)
    print("Robot is in position!")
    c2 = ask_for_confirmation("Confirm executing the sinusoid?")
    if c2:
        print("Executing the sinusoid...")
        runCommandClient('robot.comTrajGen.startSinusoid(1,0.025,2.0)')
        print("Sinusoid started!")
    else:
        print("Not executing the sinusoid")
    c3 = ask_for_confirmation("Put the robot back?")
    if c3:
        print("Stopping the robot...")
        runCommandClient('robot.comTrajGen.stop(1)')
        sleep(5.0)
        print("Putting the robot back...")
        runCommandClient('robot.comTrajGen.move(1,0.0,1.0)')
        sleep(1.0)
        print("The robot is back in position!")
    else:
        print("Not putting the robot back")
else:
    print("Not executing the sinusoid")

# --- Phase 2: optional single-support foot raise --------------------------
c = ask_for_confirmation("Raise the foot?")
if c:
    # Shift the CoM and weight distribution towards the support foot first.
    print("Putting the robot in position...")
    runCommandClient('robot.comTrajGen.move(1,-0.08,10.0)')
    runCommandClient('robot.rhoTrajGen.move(0,0.4,10.0)')
    sleep(10.0)
    print("Robot is in position!")
    c2 = ask_for_confirmation("Confirm raising the foot?")
    if c2:
        print("Raising the foot...")
        runCommandClient('robot.distribute.phase.value = -1')
        runCommandClient('h = robot.dynamic.LF.value[2][3]')
        runCommandClient('robot.lfTrajGen.move(2,h+0.05,10.0)')
        sleep(10.0)
        print("Foot has been raised!")
        c3 = ask_for_confirmation("Put the foot back?")
    else:
        print("Not raising the foot")
        c3 = False
    if c3:
        print("Putting the foot back...")
        runCommandClient('robot.lfTrajGen.move(2,h,10.0)')
        sleep(10.0)
        runCommandClient('robot.distribute.phase.value = 0')
        print("The foot is back in position!")
    else:
        print("Not putting the foot back")
    # Only offer to re-center the robot if the foot is back down
    # (or was never raised).
    if c3 or not c2:
        c4 = ask_for_confirmation("Put the robot back?")
    else:
        c4 = False
    if c4:
        print("Putting the robot back...")
        runCommandClient('robot.comTrajGen.move(1,0.0,10.0)')
        runCommandClient('robot.rhoTrajGen.move(0,0.5,10.0)')
        sleep(10.0)
        print("The robot is back in position!")
# NOTE(review): indentation reconstructed — this final else is read as
# pairing with the outer "Raise the foot?" branch; verify against upstream.
else:
    print("Not raising the foot")
# raw_input("Wait before dumping the data")
# runCommandClient('dump_tracer(robot.tracer)')
|
<filename>src/dynamic_graph/sot_talos_balance/test/test_dcmZmpCopControl.py
'''Test CoM admittance control as described in paper.'''
from time import sleep
from dynamic_graph.sot_talos_balance.utils.run_test_utils import (ask_for_confirmation, run_ft_calibration, run_test,
runCommandClient)
try:
# Python 2
input = raw_input # noqa
except NameError:
pass
run_test('appli_dcmZmpCopControl.py')
run_ft_calibration('robot.ftc')
input("Wait before running the test")
# Connect ZMP reference and reset controllers
print('Set controller')
runCommandClient('plug(robot.zmp_estimator.emergencyStop,robot.cm.emergencyStop_zmp)')
runCommandClient('plug(robot.distribute.emergencyStop,robot.cm.emergencyStop_distribute)')
runCommandClient('plug(robot.distribute.zmpRef,robot.com_admittance_control.zmpDes)')
runCommandClient('robot.com_admittance_control.setState(robot.wp.comDes.value,[0.0,0.0,0.0])')
runCommandClient('robot.com_admittance_control.Kp.value = Kp_adm')
runCommandClient('robot.rightAnkleController.gainsXY.value = Kp_ankles')
runCommandClient('robot.leftAnkleController.gainsXY.value = Kp_ankles')
runCommandClient('robot.dcm_control.resetDcmIntegralError()')
runCommandClient('robot.dcm_control.Ki.value = Ki_dcm')
c = ask_for_confirmation("Execute a sinusoid?")
if c:
print("Putting the robot in position...")
runCommandClient('robot.comTrajGen.move(1,-0.025,1.0)')
sleep(1.0)
print("Robot is in position!")
c2 = ask_for_confirmation("Confirm executing the sinusoid?")
if c2:
print("Executing the sinusoid...")
runCommandClient('robot.comTrajGen.startSinusoid(1,0.025,2.0)')
print("Sinusoid started!")
else:
print("Not executing the sinusoid")
c3 = ask_for_confirmation("Put the robot back?")
if c3:
print("Stopping the robot...")
runCommandClient('robot.comTrajGen.stop(1)')
sleep(5.0)
print("Putting the robot back...")
runCommandClient('robot.comTrajGen.move(1,0.0,1.0)')
sleep(1.0)
print("The robot is back in position!")
else:
print("Not putting the robot back")
else:
print("Not executing the sinusoid")
c = ask_for_confirmation("Raise the foot?")
if c:
print("Putting the robot in position...")
runCommandClient('robot.comTrajGen.move(1,-0.08,10.0)')
runCommandClient('robot.rhoTrajGen.move(0,0.4,10.0)')
sleep(10.0)
print("Robot is in position!")
c2 = ask_for_confirmation("Confirm raising the foot?")
if c2:
print("Raising the foot...")
runCommandClient('robot.distribute.phase.value = -1')
runCommandClient('h = robot.dynamic.LF.value[2][3]')
runCommandClient('robot.lfTrajGen.move(2,h+0.05,10.0)')
sleep(10.0)
print("Foot has been raised!")
c3 = ask_for_confirmation("Put the foot back?")
else:
print("Not raising the foot")
c3 = False
if c3:
print("Putting the foot back...")
runCommandClient('robot.lfTrajGen.move(2,h,10.0)')
sleep(10.0)
runCommandClient('robot.distribute.phase.value = 0')
print("The foot is back in position!")
else:
print("Not putting the foot back")
if c3 or not c2:
c4 = ask_for_confirmation("Put the robot back?")
else:
c4 = False
if c4:
print("Putting the robot back...")
runCommandClient('robot.comTrajGen.move(1,0.0,10.0)')
runCommandClient('robot.rhoTrajGen.move(0,0.5,10.0)')
sleep(10.0)
print("The robot is back in position!")
else:
print("Not raising the foot")
# raw_input("Wait before dumping the data")
# runCommandClient('dump_tracer(robot.tracer)')
|
en
| 0.760115
|
Test CoM admittance control as described in paper. # Python 2 # noqa # Connect ZMP reference and reset controllers # raw_input("Wait before dumping the data") # runCommandClient('dump_tracer(robot.tracer)')
| 2.373072
| 2
|
Algorithms/tf2algos/iqn.py
|
Abluceli/HRG-SAC
| 5
|
6626174
|
<filename>Algorithms/tf2algos/iqn.py
import numpy as np
import tensorflow as tf
import Nn
from utils.sth import sth
from Algorithms.tf2algos.base.off_policy import Off_Policy
from utils.expl_expt import ExplorationExploitationClass
from utils.tf2_utils import huber_loss
class IQN(Off_Policy):
'''
Implicit Quantile Networks, https://arxiv.org/abs/1806.06923
Double DQN
'''
def __init__(self,
             s_dim,
             visual_sources,
             visual_resolution,
             a_dim_or_list,
             is_continuous,
             online_quantiles=8,
             target_quantiles=8,
             select_quantiles=32,
             quantiles_idx=64,
             huber_delta=1.,
             lr=5.0e-4,
             eps_init=1,
             eps_mid=0.2,
             eps_final=0.01,
             init2mid_annealing_episode=100,
             assign_interval=2,
             hidden_units={
                 'q_net': [128, 64],
                 'quantile': [128, 64],
                 'tile': [64]
             },
             **kwargs):
    # NOTE(review): `hidden_units` is a mutable default argument; it is
    # only read here, but replacing it with None + a fresh dict would be
    # safer. Left unchanged to preserve the signature.
    # online/target_quantiles: number of sampled quantiles (N, N') used
    # for the online and target networks; select_quantiles: number used
    # when picking greedy actions; quantiles_idx: size of the cosine
    # embedding of each tau.
    assert not is_continuous, 'iqn only support discrete action space'
    super().__init__(
        s_dim=s_dim,
        visual_sources=visual_sources,
        visual_resolution=visual_resolution,
        a_dim_or_list=a_dim_or_list,
        is_continuous=is_continuous,
        **kwargs)
    self.pi = tf.constant(np.pi)  # used in the cosine quantile embedding
    self.online_quantiles = online_quantiles
    self.target_quantiles = target_quantiles
    self.select_quantiles = select_quantiles
    self.quantiles_idx = quantiles_idx
    self.huber_delta = huber_delta
    self.assign_interval = assign_interval  # target-net sync period (steps)
    # Epsilon-greedy exploration schedule annealed over episodes.
    self.expl_expt_mng = ExplorationExploitationClass(eps_init=eps_init,
                                                      eps_mid=eps_mid,
                                                      eps_final=eps_final,
                                                      init2mid_annealing_episode=init2mid_annealing_episode,
                                                      max_episode=self.max_episode)
    # Online and target networks share the visual feature extractor.
    self.visual_net = Nn.VisualNet('visual_net', self.visual_dim)
    self.q_net = Nn.iqn_net(self.s_dim, self.a_counts, self.quantiles_idx, 'q_net', hidden_units, visual_net=self.visual_net)
    self.q_target_net = Nn.iqn_net(self.s_dim, self.a_counts, self.quantiles_idx, 'q_target_net', hidden_units, visual_net=self.visual_net)
    self.update_target_net_weights(self.q_target_net.weights, self.q_net.weights)
    self.lr = tf.keras.optimizers.schedules.PolynomialDecay(lr, self.max_episode, 1e-10, power=1.0)
    self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr(self.episode))
def show_logo(self):
    """Log the algorithm's ASCII-art banner ("IQN")."""
    # The banner string is logged verbatim; do not reformat it.
    self.recorder.logger.info('''
xxxxxxxx xxxxxxx xxx xxx
xxxxxxxx xxxxxxxxx xxxx xxx
xxx xxxx xxxx xxxxx xxx
xxx xxx xxx xxxxx xxx
xxx xxxx xxx xxxxxx xxx
xxx xxxx xxx xxxxxxxxxx
xxx xxxx xxx xxx xxxxxx
xxx xxxx xxxx xxx xxxxxx
xxxxxxxx xxxxxxxxx xxx xxxxx
xxxxxxxx xxxxxxx xxx xxxx
xxxx
xxxx
xxxx
''')
def choose_action(self, s, visual_s, evaluation=False):
    """Epsilon-greedy action selection over a batch of states.

    Explores with probability given by the annealing schedule
    (exploration is typically disabled when `evaluation` is True);
    otherwise picks the greedy action from the online network.
    """
    if np.random.uniform() < self.expl_expt_mng.get_esp(self.episode, evaluation=evaluation):
        a = np.random.randint(0, self.a_counts, len(s))
    else:
        a = self._get_action(s, visual_s).numpy()
    # Convert flat action indices into the environment's action format.
    return sth.int2action_index(a, self.a_dim_or_list)
@tf.function
def _get_action(self, s, visual_s):
    """Greedy action: argmax over Q-values averaged across sampled quantiles."""
    s, visual_s = self.cast(s, visual_s)
    with tf.device(self.device):
        # Sample `select_quantiles` taus per state for the estimate.
        _, select_quantiles_tiled = self._generate_quantiles(  # [N*B, 64]
            batch_size=s.shape[0],
            quantiles_num=self.select_quantiles,
            quantiles_idx=self.quantiles_idx
        )
        _, q_values = self.q_net(s, visual_s, select_quantiles_tiled, quantiles_num=self.select_quantiles)  # [B, A]
        return tf.argmax(q_values, axis=-1)  # [B,]
@tf.function
def _generate_quantiles(self, batch_size, quantiles_num, quantiles_idx):
    """Sample quantile fractions tau and their cosine embeddings.

    Returns:
        _quantiles: tau values, shape [B, N, 1].
        _quantiles_tiled: cos(pi * i * tau) embeddings, shape [N*B, 64],
            fed to the quantile embedding branch of the network.
    """
    with tf.device(self.device):
        _quantiles = tf.random.uniform([batch_size * quantiles_num, 1], minval=0, maxval=1)  # [N*B, 1]
        _quantiles_tiled = tf.tile(_quantiles, [1, quantiles_idx])  # [N*B, 1] => [N*B, 64]
        _quantiles_tiled = tf.cast(tf.range(quantiles_idx), tf.float32) * self.pi * _quantiles_tiled  # pi * i * tau [N*B, 64] * [64, ] => [N*B, 64]
        _quantiles_tiled = tf.cos(_quantiles_tiled)  # [N*B, 64]
        _quantiles = tf.reshape(_quantiles, [batch_size, quantiles_num, 1])  # [N*B, 1] => [B, N, 1]
        return _quantiles, _quantiles_tiled
def learn(self, **kwargs):
    """Run `kwargs['step']` training iterations for the current episode.

    Each iteration samples a replay batch (once the buffer is large
    enough), performs one gradient step, optionally updates PER
    priorities, and periodically syncs the target network.
    """
    self.episode = kwargs['episode']
    for i in range(kwargs['step']):
        if self.data.is_lg_batch_size:
            s, visual_s, a, r, s_, visual_s_, done = self.data.sample()
            if self.use_priority:
                # Importance-sampling weights for prioritized replay.
                self.IS_w = self.data.get_IS_w()
            td_error, summaries = self.train(s, visual_s, a, r, s_, visual_s_, done)
            if self.use_priority:
                td_error = np.squeeze(td_error.numpy())
                self.data.update(td_error, self.episode)
            # Hard-copy online weights into the target net every
            # `assign_interval` global steps.
            if self.global_step % self.assign_interval == 0:
                self.update_target_net_weights(self.q_target_net.weights, self.q_net.weights)
            summaries.update(dict([
                ['LEARNING_RATE/lr', self.lr(self.episode)]
            ]))
            self.write_training_summaries(self.global_step, summaries)
    @tf.function(experimental_relax_shapes=True)
    def train(self, s, visual_s, a, r, s_, visual_s_, done):
        """One gradient step of the IQN quantile-regression (Huber) loss.

        Shapes in comments: B = batch, N = online quantiles, N' = target
        quantiles, A = action count.  `a` is a one-hot action batch [B, A].
        Returns (td_error [B, 1], summaries dict).
        """
        s, visual_s, a, r, s_, visual_s_, done = self.cast(s, visual_s, a, r, s_, visual_s_, done)
        with tf.device(self.device):
            with tf.GradientTape() as tape:
                quantiles, quantiles_tiled = self._generate_quantiles(  # [B, N, 1], [N*B, 64]
                    batch_size=s.shape[0],
                    quantiles_num=self.online_quantiles,
                    quantiles_idx=self.quantiles_idx
                )
                quantiles_value, q = self.q_net(s, visual_s, quantiles_tiled, quantiles_num=self.online_quantiles)  # [N, B, A], [B, A]
                _a = tf.reshape(tf.tile(a, [self.online_quantiles, 1]), [self.online_quantiles, -1, self.a_counts])  # [B, A] => [N*B, A] => [N, B, A]
                quantiles_value = tf.reduce_sum(quantiles_value * _a, axis=-1, keepdims=True)  # [N, B, A] => [N, B, 1]
                q_eval = tf.reduce_sum(q * a, axis=-1, keepdims=True)  # [B, A] => [B, 1]
                # Double-DQN: the next action is chosen by the *online* network.
                next_max_action = self._get_action(s_, visual_s_)  # [B,]
                next_max_action = tf.one_hot(tf.squeeze(next_max_action), self.a_counts, 1., 0., dtype=tf.float32)  # [B, A]
                _next_max_action = tf.reshape(tf.tile(next_max_action, [self.target_quantiles, 1]), [self.target_quantiles, -1, self.a_counts])  # [B, A] => [N'*B, A] => [N', B, A]
                _, target_quantiles_tiled = self._generate_quantiles(  # [N'*B, 64]
                    batch_size=s_.shape[0],
                    quantiles_num=self.target_quantiles,
                    quantiles_idx=self.quantiles_idx
                )
                target_quantiles_value, target_q = self.q_target_net(s_, visual_s_, target_quantiles_tiled, quantiles_num=self.target_quantiles)  # [N', B, A], [B, A]
                target_quantiles_value = tf.reduce_sum(target_quantiles_value * _next_max_action, axis=-1, keepdims=True)  # [N', B, A] => [N', B, 1]
                # NOTE(review): target_q is weighted by the *current* action `a`
                # rather than next_max_action — looks suspicious; it only affects
                # the td_error used for priorities/summaries, not the loss. Confirm intent.
                target_q = tf.reduce_sum(target_q * a, axis=-1, keepdims=True)  # [B, A] => [B, 1]
                q_target = tf.stop_gradient(r + self.gamma * (1 - done) * target_q)  # [B, 1]
                td_error = q_eval - q_target  # [B, 1]
                _r = tf.reshape(tf.tile(r, [self.target_quantiles, 1]), [self.target_quantiles, -1, 1])  # [B, 1] => [N'*B, 1] => [N', B, 1]
                _done = tf.reshape(tf.tile(done, [self.target_quantiles, 1]), [self.target_quantiles, -1, 1])  # [B, 1] => [N'*B, 1] => [N', B, 1]
                quantiles_value_target = tf.stop_gradient(_r + self.gamma * (1 - _done) * target_quantiles_value)  # [N', B, 1]
                quantiles_value_target = tf.transpose(quantiles_value_target, [1, 2, 0])  # [B, 1, N']
                quantiles_value_online = tf.transpose(quantiles_value, [1, 0, 2])  # [B, N, 1]
                quantile_error = quantiles_value_online - quantiles_value_target  # [B, N, 1] - [B, 1, N'] => [B, N, N']
                huber = huber_loss(quantile_error, delta=self.huber_delta)  # [B, N, N']
                # |tau - 1{error < 0}|: asymmetric quantile weight from the IQN paper.
                huber_abs = tf.abs(quantiles - tf.where(quantile_error < 0, tf.ones_like(quantile_error), tf.zeros_like(quantile_error)))  # [B, N, 1] - [B, N, N'] => [B, N, N']
                loss = tf.reduce_mean(huber_abs * huber, axis=-1)  # [B, N, N'] => [B, N]
                loss = tf.reduce_sum(loss, axis=-1)  # [B, N] => [B, ]
                loss = tf.reduce_mean(loss * self.IS_w)  # [B, ] => 1
            grads = tape.gradient(loss, self.q_net.tv)
            self.optimizer.apply_gradients(
                zip(grads, self.q_net.tv)
            )
            self.global_step.assign_add(1)
            return td_error, dict([
                ['LOSS/loss', loss],
                ['Statistics/q_max', tf.reduce_max(q_eval)],
                ['Statistics/q_min', tf.reduce_min(q_eval)],
                ['Statistics/q_mean', tf.reduce_mean(q_eval)]
            ])
|
<filename>Algorithms/tf2algos/iqn.py
import numpy as np
import tensorflow as tf
import Nn
from utils.sth import sth
from Algorithms.tf2algos.base.off_policy import Off_Policy
from utils.expl_expt import ExplorationExploitationClass
from utils.tf2_utils import huber_loss
class IQN(Off_Policy):
'''
Implicit Quantile Networks, https://arxiv.org/abs/1806.06923
Double DQN
'''
def __init__(self,
s_dim,
visual_sources,
visual_resolution,
a_dim_or_list,
is_continuous,
online_quantiles=8,
target_quantiles=8,
select_quantiles=32,
quantiles_idx=64,
huber_delta=1.,
lr=5.0e-4,
eps_init=1,
eps_mid=0.2,
eps_final=0.01,
init2mid_annealing_episode=100,
assign_interval=2,
hidden_units={
'q_net': [128, 64],
'quantile': [128, 64],
'tile': [64]
},
**kwargs):
assert not is_continuous, 'iqn only support discrete action space'
super().__init__(
s_dim=s_dim,
visual_sources=visual_sources,
visual_resolution=visual_resolution,
a_dim_or_list=a_dim_or_list,
is_continuous=is_continuous,
**kwargs)
self.pi = tf.constant(np.pi)
self.online_quantiles = online_quantiles
self.target_quantiles = target_quantiles
self.select_quantiles = select_quantiles
self.quantiles_idx = quantiles_idx
self.huber_delta = huber_delta
self.assign_interval = assign_interval
self.expl_expt_mng = ExplorationExploitationClass(eps_init=eps_init,
eps_mid=eps_mid,
eps_final=eps_final,
init2mid_annealing_episode=init2mid_annealing_episode,
max_episode=self.max_episode)
self.visual_net = Nn.VisualNet('visual_net', self.visual_dim)
self.q_net = Nn.iqn_net(self.s_dim, self.a_counts, self.quantiles_idx, 'q_net', hidden_units, visual_net=self.visual_net)
self.q_target_net = Nn.iqn_net(self.s_dim, self.a_counts, self.quantiles_idx, 'q_target_net', hidden_units, visual_net=self.visual_net)
self.update_target_net_weights(self.q_target_net.weights, self.q_net.weights)
self.lr = tf.keras.optimizers.schedules.PolynomialDecay(lr, self.max_episode, 1e-10, power=1.0)
self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr(self.episode))
    def show_logo(self):
        # ASCII-art banner ("IQN") written to the training log at startup.
        self.recorder.logger.info('''
   xxxxxxxx       xxxxxxx       xxx xxx
   xxxxxxxx      xxxxxxxxx      xxxx xxx
   xxx xxxx     xxxx xxxxx     xxxxx xxx
   xxx xxx     xxx xxxxx       xxx
   xxx xxxx    xxx xxxxxx      xxx
   xxx xxxx    xxx xxxxxxxxxx
   xxx xxxx    xxx xxx xxxxxx
   xxx xxxx     xxxx xxx xxxxxx
   xxxxxxxx      xxxxxxxxx      xxx xxxxx
   xxxxxxxx       xxxxxxx       xxx xxxx
                      xxxx
                       xxxx
                        xxxx
        ''')
def choose_action(self, s, visual_s, evaluation=False):
if np.random.uniform() < self.expl_expt_mng.get_esp(self.episode, evaluation=evaluation):
a = np.random.randint(0, self.a_counts, len(s))
else:
a = self._get_action(s, visual_s).numpy()
return sth.int2action_index(a, self.a_dim_or_list)
    @tf.function
    def _get_action(self, s, visual_s):
        """Greedy actions for a batch: sample `select_quantiles` taus, run the
        online IQN head and argmax its Q output.  Returns indices of shape [B]."""
        s, visual_s = self.cast(s, visual_s)
        with tf.device(self.device):
            _, select_quantiles_tiled = self._generate_quantiles(  # [N*B, 64]
                batch_size=s.shape[0],
                quantiles_num=self.select_quantiles,
                quantiles_idx=self.quantiles_idx
            )
            # q_values: per-action values aggregated over the sampled taus
            # (aggregation happens inside q_net — internals not visible here).
            _, q_values = self.q_net(s, visual_s, select_quantiles_tiled, quantiles_num=self.select_quantiles)  # [B, A]
        return tf.argmax(q_values, axis=-1)  # [B,]
    @tf.function
    def _generate_quantiles(self, batch_size, quantiles_num, quantiles_idx):
        """Sample tau ~ U(0, 1) and build the cosine embedding cos(pi * i * tau).

        Returns:
            _quantiles: sampled taus, shape [B, N, 1].
            _quantiles_tiled: cosine features fed to the net, shape [N*B, quantiles_idx].
        """
        with tf.device(self.device):
            _quantiles = tf.random.uniform([batch_size * quantiles_num, 1], minval=0, maxval=1)  # [N*B, 1]
            _quantiles_tiled = tf.tile(_quantiles, [1, quantiles_idx])  # [N*B, 1] => [N*B, 64]
            _quantiles_tiled = tf.cast(tf.range(quantiles_idx), tf.float32) * self.pi * _quantiles_tiled  # pi * i * tau [N*B, 64] * [64, ] => [N*B, 64]
            _quantiles_tiled = tf.cos(_quantiles_tiled)  # [N*B, 64]
            _quantiles = tf.reshape(_quantiles, [batch_size, quantiles_num, 1])  # [N*B, 1] => [B, N, 1]
        return _quantiles, _quantiles_tiled
    def learn(self, **kwargs):
        """Run `kwargs['step']` training iterations for the given episode.

        Samples from the replay buffer once it holds at least a batch,
        applies prioritized-replay IS weights / priority updates when
        enabled, and periodically syncs the target network.
        """
        self.episode = kwargs['episode']
        for i in range(kwargs['step']):
            if self.data.is_lg_batch_size:
                s, visual_s, a, r, s_, visual_s_, done = self.data.sample()
                if self.use_priority:
                    self.IS_w = self.data.get_IS_w()  # importance-sampling weights
                td_error, summaries = self.train(s, visual_s, a, r, s_, visual_s_, done)
                if self.use_priority:
                    td_error = np.squeeze(td_error.numpy())
                    self.data.update(td_error, self.episode)  # refresh priorities
                if self.global_step % self.assign_interval == 0:
                    self.update_target_net_weights(self.q_target_net.weights, self.q_net.weights)
                summaries.update(dict([
                    ['LEARNING_RATE/lr', self.lr(self.episode)]
                ]))
                self.write_training_summaries(self.global_step, summaries)
    @tf.function(experimental_relax_shapes=True)
    def train(self, s, visual_s, a, r, s_, visual_s_, done):
        """One gradient step of the IQN quantile-regression (Huber) loss.

        Shapes in comments: B = batch, N = online quantiles, N' = target
        quantiles, A = action count.  `a` is a one-hot action batch [B, A].
        Returns (td_error [B, 1], summaries dict).
        """
        s, visual_s, a, r, s_, visual_s_, done = self.cast(s, visual_s, a, r, s_, visual_s_, done)
        with tf.device(self.device):
            with tf.GradientTape() as tape:
                quantiles, quantiles_tiled = self._generate_quantiles(  # [B, N, 1], [N*B, 64]
                    batch_size=s.shape[0],
                    quantiles_num=self.online_quantiles,
                    quantiles_idx=self.quantiles_idx
                )
                quantiles_value, q = self.q_net(s, visual_s, quantiles_tiled, quantiles_num=self.online_quantiles)  # [N, B, A], [B, A]
                _a = tf.reshape(tf.tile(a, [self.online_quantiles, 1]), [self.online_quantiles, -1, self.a_counts])  # [B, A] => [N*B, A] => [N, B, A]
                quantiles_value = tf.reduce_sum(quantiles_value * _a, axis=-1, keepdims=True)  # [N, B, A] => [N, B, 1]
                q_eval = tf.reduce_sum(q * a, axis=-1, keepdims=True)  # [B, A] => [B, 1]
                # Double-DQN: the next action is chosen by the *online* network.
                next_max_action = self._get_action(s_, visual_s_)  # [B,]
                next_max_action = tf.one_hot(tf.squeeze(next_max_action), self.a_counts, 1., 0., dtype=tf.float32)  # [B, A]
                _next_max_action = tf.reshape(tf.tile(next_max_action, [self.target_quantiles, 1]), [self.target_quantiles, -1, self.a_counts])  # [B, A] => [N'*B, A] => [N', B, A]
                _, target_quantiles_tiled = self._generate_quantiles(  # [N'*B, 64]
                    batch_size=s_.shape[0],
                    quantiles_num=self.target_quantiles,
                    quantiles_idx=self.quantiles_idx
                )
                target_quantiles_value, target_q = self.q_target_net(s_, visual_s_, target_quantiles_tiled, quantiles_num=self.target_quantiles)  # [N', B, A], [B, A]
                target_quantiles_value = tf.reduce_sum(target_quantiles_value * _next_max_action, axis=-1, keepdims=True)  # [N', B, A] => [N', B, 1]
                # NOTE(review): target_q is weighted by the *current* action `a`
                # rather than next_max_action — looks suspicious; it only affects
                # the td_error used for priorities/summaries, not the loss. Confirm intent.
                target_q = tf.reduce_sum(target_q * a, axis=-1, keepdims=True)  # [B, A] => [B, 1]
                q_target = tf.stop_gradient(r + self.gamma * (1 - done) * target_q)  # [B, 1]
                td_error = q_eval - q_target  # [B, 1]
                _r = tf.reshape(tf.tile(r, [self.target_quantiles, 1]), [self.target_quantiles, -1, 1])  # [B, 1] => [N'*B, 1] => [N', B, 1]
                _done = tf.reshape(tf.tile(done, [self.target_quantiles, 1]), [self.target_quantiles, -1, 1])  # [B, 1] => [N'*B, 1] => [N', B, 1]
                quantiles_value_target = tf.stop_gradient(_r + self.gamma * (1 - _done) * target_quantiles_value)  # [N', B, 1]
                quantiles_value_target = tf.transpose(quantiles_value_target, [1, 2, 0])  # [B, 1, N']
                quantiles_value_online = tf.transpose(quantiles_value, [1, 0, 2])  # [B, N, 1]
                quantile_error = quantiles_value_online - quantiles_value_target  # [B, N, 1] - [B, 1, N'] => [B, N, N']
                huber = huber_loss(quantile_error, delta=self.huber_delta)  # [B, N, N']
                # |tau - 1{error < 0}|: asymmetric quantile weight from the IQN paper.
                huber_abs = tf.abs(quantiles - tf.where(quantile_error < 0, tf.ones_like(quantile_error), tf.zeros_like(quantile_error)))  # [B, N, 1] - [B, N, N'] => [B, N, N']
                loss = tf.reduce_mean(huber_abs * huber, axis=-1)  # [B, N, N'] => [B, N]
                loss = tf.reduce_sum(loss, axis=-1)  # [B, N] => [B, ]
                loss = tf.reduce_mean(loss * self.IS_w)  # [B, ] => 1
            grads = tape.gradient(loss, self.q_net.tv)
            self.optimizer.apply_gradients(
                zip(grads, self.q_net.tv)
            )
            self.global_step.assign_add(1)
            return td_error, dict([
                ['LOSS/loss', loss],
                ['Statistics/q_max', tf.reduce_max(q_eval)],
                ['Statistics/q_min', tf.reduce_min(q_eval)],
                ['Statistics/q_mean', tf.reduce_mean(q_eval)]
            ])
|
ja
| 0.548474
|
Implicit Quantile Networks, https://arxiv.org/abs/1806.06923 Double DQN xxxxxxxx xxxxxxx xxx xxx xxxxxxxx xxxxxxxxx xxxx xxx xxx xxxx xxxx xxxxx xxx xxx xxx xxx xxxxx xxx xxx xxxx xxx xxxxxx xxx xxx xxxx xxx xxxxxxxxxx xxx xxxx xxx xxx xxxxxx xxx xxxx xxxx xxx xxxxxx xxxxxxxx xxxxxxxxx xxx xxxxx xxxxxxxx xxxxxxx xxx xxxx xxxx xxxx xxxx # [N*B, 64] # [B, A] # [B,] # [N*B, 1] # [N*B, 1] => [N*B, 64] # pi * i * tau [N*B, 64] * [64, ] => [N*B, 64] # [N*B, 64] # [N*B, 1] => [B, N, 1] # [B, N, 1], [N*B, 64] # [N, B, A], [B, A] # [B, A] => [N*B, A] => [N, B, A] # [N, B, A] => [N, B, 1] # [B, A] => [B, 1] # [B,] # [B, A] # [B, A] => [N'*B, A] => [N', B, A] # [N'*B, 64] # [N', B, A], [B, A] # [N', B, A] => [N', B, 1] # [B, A] => [B, 1] # [B, 1] # [B, 1] # [B, 1] => [N'*B, 1] => [N', B, 1] # [B, 1] => [N'*B, 1] => [N', B, 1] # [N', B, 1] # [B, 1, N'] # [B, N, 1] # [B, N, 1] - [B, 1, N'] => [B, N, N'] # [B, N, N'] # [B, N, 1] - [B, N, N'] => [B, N, N'] # [B, N, N'] => [B, N] # [B, N] => [B, ] # [B, ] => 1
| 2.114041
| 2
|
google-cloud-sdk/platform/gsutil/third_party/apitools/apitools/gen/service_registry.py
|
bopopescu/searchparty
| 1
|
6626175
|
<gh_stars>1-10
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service registry for apitools."""
import collections
import logging
import re
import textwrap
from apitools.base.py import base_api
from apitools.gen import util
# We're a code generator. I don't care.
# pylint:disable=too-many-statements
# Case-insensitive matcher for MIME patterns such as 'image/*' or
# 'application/json' (used to sanity-check upload 'accept' entries).
_MIME_PATTERN_RE = re.compile(r'(?i)[a-z0-9_*-]+/[a-z0-9_*-]+')
class ServiceRegistry(object):
"""Registry for service types."""
    def __init__(self, client_info, message_registry, command_registry,
                 names, root_package, base_files_package,
                 unelidable_request_methods):
        """Create a registry.

        Args:
          client_info: generated-client metadata (package, version, scopes, ...).
          message_registry: registry of generated message types.
          command_registry: registry that CLI commands are added to.
          names: name normalizer for class/field/method names.
          root_package: package prefix used when importing the messages module.
          base_files_package: package providing base_api in generated code.
          unelidable_request_methods: method ids that must always get a
              wrapper request type.
        """
        self.__client_info = client_info
        self.__package = client_info.package
        self.__names = names
        self.__service_method_info_map = collections.OrderedDict()
        self.__message_registry = message_registry
        self.__command_registry = command_registry
        self.__root_package = root_package
        self.__base_files_package = base_files_package
        self.__unelidable_request_methods = unelidable_request_methods
        # Union of client-level scopes and every per-method scope seen.
        self.__all_scopes = set(self.__client_info.scopes)
    def Validate(self):
        """Validate registered types (delegates to the message registry)."""
        self.__message_registry.Validate()
@property
def scopes(self):
return sorted(list(self.__all_scopes))
    def __GetServiceClassName(self, service_name):
        # e.g. 'objects' -> 'ObjectsService' (ClassName applied twice: once to
        # the raw name, once to the suffixed result).
        return self.__names.ClassName(
            '%sService' % self.__names.ClassName(service_name))
def __PrintDocstring(self, printer, method_info, method_name, name):
"""Print a docstring for a service method."""
if method_info.description:
description = util.CleanDescription(method_info.description)
first_line, newline, remaining = method_info.description.partition(
'\n')
if not first_line.endswith('.'):
first_line = '%s.' % first_line
description = '%s%s%s' % (first_line, newline, remaining)
else:
description = '%s method for the %s service.' % (method_name, name)
with printer.CommentContext():
printer('"""%s' % description)
printer()
printer('Args:')
printer(' request: (%s) input message', method_info.request_type_name)
printer(' global_params: (StandardQueryParameters, default: None) '
'global arguments')
if method_info.upload_config:
printer(' upload: (Upload, default: None) If present, upload')
printer(' this stream with the request.')
if method_info.supports_download:
printer(
' download: (Download, default: None) If present, download')
printer(' data from the request via this stream.')
printer('Returns:')
printer(' (%s) The response message.', method_info.response_type_name)
printer('"""')
    def __WriteSingleService(
            self, printer, name, method_info_map, client_class_name):
        """Emit one generated service class: method configs, upload configs,
        then one Python method per API method."""
        printer()
        class_name = self.__GetServiceClassName(name)
        printer('class %s(base_api.BaseApiService):', class_name)
        with printer.Indent():
            printer('"""Service class for the %s resource."""', name)
            printer()
            printer('_NAME = %s', repr(name))
            # Print the configs for the methods first.
            printer()
            printer('def __init__(self, client):')
            with printer.Indent():
                printer('super(%s.%s, self).__init__(client)',
                        client_class_name, class_name)
                printer('self._method_configs = {')
                with printer.Indent(indent='    '):
                    for method_name, method_info in method_info_map.items():
                        printer("'%s': base_api.ApiMethodInfo(", method_name)
                        with printer.Indent(indent='    '):
                            attrs = sorted(
                                x.name for x in method_info.all_fields())
                            for attr in attrs:
                                # upload_config is emitted separately below;
                                # description only belongs in the docstring.
                                if attr in ('upload_config', 'description'):
                                    continue
                                printer(
                                    '%s=%r,', attr, getattr(method_info, attr))
                        printer('),')
                    printer('}')
                printer()
                printer('self._upload_configs = {')
                with printer.Indent(indent='    '):
                    for method_name, method_info in method_info_map.items():
                        upload_config = method_info.upload_config
                        if upload_config is not None:
                            printer(
                                "'%s': base_api.ApiUploadInfo(", method_name)
                            with printer.Indent(indent='    '):
                                attrs = sorted(
                                    x.name for x in upload_config.all_fields())
                                for attr in attrs:
                                    printer('%s=%r,',
                                            attr, getattr(upload_config, attr))
                            printer('),')
                    printer('}')
            # Now write each method in turn.
            for method_name, method_info in method_info_map.items():
                printer()
                params = ['self', 'request', 'global_params=None']
                if method_info.upload_config:
                    params.append('upload=None')
                if method_info.supports_download:
                    params.append('download=None')
                printer('def %s(%s):', method_name, ', '.join(params))
                with printer.Indent():
                    self.__PrintDocstring(
                        printer, method_info, method_name, name)
                    printer("config = self.GetMethodConfig('%s')", method_name)
                    upload_config = method_info.upload_config
                    if upload_config is not None:
                        printer("upload_config = self.GetUploadConfig('%s')",
                                method_name)
                    arg_lines = [
                        'config, request, global_params=global_params']
                    if method_info.upload_config:
                        arg_lines.append(
                            'upload=upload, upload_config=upload_config')
                    if method_info.supports_download:
                        arg_lines.append('download=download')
                    printer('return self._RunMethod(')
                    with printer.Indent(indent='    '):
                        for line in arg_lines[:-1]:
                            printer('%s,', line)
                        printer('%s)', arg_lines[-1])
    def __WriteProtoServiceDeclaration(self, printer, name, method_info_map):
        """Write a single service declaration to a proto file."""
        printer()
        printer('service %s {', self.__GetServiceClassName(name))
        with printer.Indent():
            for method_name, method_info in method_info_map.items():
                # Wrap the description into '//' comment lines that fit the
                # printer's width (minus the '// ' prefix).
                for line in textwrap.wrap(method_info.description,
                                          printer.CalculateWidth() - 3):
                    printer('// %s', line)
                printer('rpc %s (%s) returns (%s);',
                        method_name,
                        method_info.request_type_name,
                        method_info.response_type_name)
        printer('}')
    def WriteProtoFile(self, printer):
        """Write the services in this registry to out as proto (proto2 syntax)."""
        self.Validate()
        client_info = self.__client_info
        printer('// Generated services for %s version %s.',
                client_info.package, client_info.version)
        printer()
        printer('syntax = "proto2";')
        printer('package %s;', self.__package)
        printer('import "%s";', client_info.messages_proto_file_name)
        printer()
        for name, method_info_map in self.__service_method_info_map.items():
            self.__WriteProtoServiceDeclaration(printer, name, method_info_map)
    def WriteFile(self, printer):
        """Write the services in this registry to out.

        Emits the generated Python client module: imports, the client class
        with its constants and __init__, then one service class per
        registered service.
        """
        self.Validate()
        client_info = self.__client_info
        printer('"""Generated client library for %s version %s."""',
                client_info.package, client_info.version)
        printer('# NOTE: This file is autogenerated and should not be edited '
                'by hand.')
        printer('from %s import base_api', self.__base_files_package)
        if self.__root_package:
            import_prefix = 'from {0} '.format(self.__root_package)
        else:
            import_prefix = ''
        printer('%simport %s as messages', import_prefix,
                client_info.messages_rule_name)
        printer()
        printer()
        printer('class %s(base_api.BaseApiClient):',
                client_info.client_class_name)
        with printer.Indent():
            printer(
                '"""Generated client library for service %s version %s."""',
                client_info.package, client_info.version)
            printer()
            printer('MESSAGES_MODULE = messages')
            printer('BASE_URL = {0!r}'.format(client_info.base_url))
            printer()
            printer('_PACKAGE = {0!r}'.format(client_info.package))
            printer('_SCOPES = {0!r}'.format(
                client_info.scopes or
                ['https://www.googleapis.com/auth/userinfo.email']))
            printer('_VERSION = {0!r}'.format(client_info.version))
            printer('_CLIENT_ID = {0!r}'.format(client_info.client_id))
            printer('_CLIENT_SECRET = {0!r}'.format(client_info.client_secret))
            printer('_USER_AGENT = {0!r}'.format(client_info.user_agent))
            printer('_CLIENT_CLASS_NAME = {0!r}'.format(
                client_info.client_class_name))
            printer('_URL_VERSION = {0!r}'.format(client_info.url_version))
            printer('_API_KEY = {0!r}'.format(client_info.api_key))
            printer()
            printer("def __init__(self, url='', credentials=None,")
            # Continuation lines of the generated __init__ signature.
            with printer.Indent(indent='             '):
                printer('get_credentials=True, http=None, model=None,')
                printer('log_request=False, log_response=False,')
                printer('credentials_args=None, default_global_params=None,')
                printer('additional_http_headers=None):')
            with printer.Indent():
                printer('"""Create a new %s handle."""', client_info.package)
                printer('url = url or self.BASE_URL')
                printer(
                    'super(%s, self).__init__(', client_info.client_class_name)
                printer('    url, credentials=credentials,')
                printer('    get_credentials=get_credentials, http=http, '
                        'model=model,')
                printer('    log_request=log_request, '
                        'log_response=log_response,')
                printer('    credentials_args=credentials_args,')
                printer('    default_global_params=default_global_params,')
                printer('    additional_http_headers=additional_http_headers)')
                # One service attribute per registered service.
                for name in self.__service_method_info_map.keys():
                    printer('self.%s = self.%s(self)',
                            name, self.__GetServiceClassName(name))
        for name, method_info in self.__service_method_info_map.items():
            self.__WriteSingleService(
                printer, name, method_info, client_info.client_class_name)
def __RegisterService(self, service_name, method_info_map):
if service_name in self.__service_method_info_map:
raise ValueError(
'Attempt to re-register descriptor %s' % service_name)
self.__service_method_info_map[service_name] = method_info_map
    def __CreateRequestType(self, method_description, body_type=None):
        """Create a request type for this method.

        Builds a synthetic '<Method>Request' schema from the method's
        parameters (plus an optional body field), registers it with the
        message registry and returns its id.
        """
        schema = {}
        schema['id'] = self.__names.ClassName('%sRequest' % (
            self.__names.ClassName(method_description['id'], separator='.'),))
        schema['type'] = 'object'
        schema['properties'] = collections.OrderedDict()
        # Honor the discovery doc's parameterOrder first, then append any
        # parameters it did not mention.
        if 'parameterOrder' not in method_description:
            ordered_parameters = list(method_description.get('parameters', []))
        else:
            ordered_parameters = method_description['parameterOrder'][:]
            for k in method_description['parameters']:
                if k not in ordered_parameters:
                    ordered_parameters.append(k)
        for parameter_name in ordered_parameters:
            field_name = self.__names.CleanName(parameter_name)
            field = dict(method_description['parameters'][parameter_name])
            if 'type' not in field:
                raise ValueError('No type found in parameter %s' % field)
            schema['properties'][field_name] = field
        if body_type is not None:
            body_field_name = self.__GetRequestField(
                method_description, body_type)
            if body_field_name in schema['properties']:
                raise ValueError('Failed to normalize request resource name')
            if 'description' not in body_type:
                body_type['description'] = (
                    'A %s resource to be passed as the request body.' % (
                        self.__GetRequestType(body_type),))
            schema['properties'][body_field_name] = body_type
        self.__message_registry.AddDescriptorFromSchema(schema['id'], schema)
        return schema['id']
    def __CreateVoidResponseType(self, method_description):
        """Create an empty response type."""
        schema = {}
        method_name = self.__names.ClassName(
            method_description['id'], separator='.')
        schema['id'] = self.__names.ClassName('%sResponse' % method_name)
        schema['type'] = 'object'
        schema['description'] = 'An empty %s response.' % method_name
        # Register the synthetic schema so a message gets generated for it.
        self.__message_registry.AddDescriptorFromSchema(schema['id'], schema)
        return schema['id']
    def __NeedRequestType(self, method_description, request_type):
        """Determine if this method needs a new request type created.

        A wrapper request type is needed unless every parameter is a path
        parameter already present as a field on the referenced message.
        """
        if not request_type:
            return True
        method_id = method_description.get('id', '')
        if method_id in self.__unelidable_request_methods:
            return True
        message = self.__message_registry.LookupDescriptorOrDie(request_type)
        if message is None:
            return True
        field_names = [x.name for x in message.fields]
        parameters = method_description.get('parameters', {})
        for param_name, param_info in parameters.items():
            if (param_info.get('location') != 'path' or
                    self.__names.CleanName(param_name) not in field_names):
                break
        else:
            # for/else: reached only when no parameter triggered the break,
            # i.e. the existing message already covers every parameter.
            return False
        return True
def __MaxSizeToInt(self, max_size):
"""Convert max_size to an int."""
size_groups = re.match(r'(?P<size>\d+)(?P<unit>.B)?$', max_size)
if size_groups is None:
raise ValueError('Could not parse maxSize')
size, unit = size_groups.group('size', 'unit')
shift = 0
if unit is not None:
unit_dict = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}
shift = unit_dict.get(unit.upper())
if shift is None:
raise ValueError('Unknown unit %s' % unit)
return int(size) * (1 << shift)
def __ComputeUploadConfig(self, media_upload_config, method_id):
"""Fill out the upload config for this method."""
config = base_api.ApiUploadInfo()
if 'maxSize' in media_upload_config:
config.max_size = self.__MaxSizeToInt(
media_upload_config['maxSize'])
if 'accept' not in media_upload_config:
logging.warn(
'No accept types found for upload configuration in '
'method %s, using */*', method_id)
config.accept.extend([
str(a) for a in media_upload_config.get('accept', '*/*')])
for accept_pattern in config.accept:
if not _MIME_PATTERN_RE.match(accept_pattern):
logging.warn('Unexpected MIME type: %s', accept_pattern)
protocols = media_upload_config.get('protocols', {})
for protocol in ('simple', 'resumable'):
media = protocols.get(protocol, {})
for attr in ('multipart', 'path'):
if attr in media:
setattr(config, '%s_%s' % (protocol, attr), media[attr])
return config
    def __ComputeMethodInfo(self, method_description, request, response,
                            request_field):
        """Compute the base_api.ApiMethodInfo for this method."""
        relative_path = self.__names.NormalizeRelativePath(
            ''.join((self.__client_info.base_path,
                     method_description['path'])))
        method_id = method_description['id']
        # Only required parameters, in the discovery doc's declared order.
        ordered_params = []
        for param_name in method_description.get('parameterOrder', []):
            param_info = method_description['parameters'][param_name]
            if param_info.get('required', False):
                ordered_params.append(param_name)
        method_info = base_api.ApiMethodInfo(
            relative_path=relative_path,
            method_id=method_id,
            http_method=method_description['httpMethod'],
            description=util.CleanDescription(
                method_description.get('description', '')),
            query_params=[],
            path_params=[],
            ordered_params=ordered_params,
            request_type_name=self.__names.ClassName(request),
            response_type_name=self.__names.ClassName(response),
            request_field=request_field,
        )
        if method_description.get('supportsMediaUpload', False):
            method_info.upload_config = self.__ComputeUploadConfig(
                method_description.get('mediaUpload'), method_id)
        method_info.supports_download = method_description.get(
            'supportsMediaDownload', False)
        # Accumulate per-method scopes into the registry-wide set.
        self.__all_scopes.update(method_description.get('scopes', ()))
        # Sort parameters into query vs. path by their declared location.
        for param, desc in method_description.get('parameters', {}).items():
            param = self.__names.CleanName(param)
            location = desc['location']
            if location == 'query':
                method_info.query_params.append(param)
            elif location == 'path':
                method_info.path_params.append(param)
            else:
                raise ValueError(
                    'Unknown parameter location %s for parameter %s' % (
                        location, param))
        method_info.path_params.sort()
        method_info.query_params.sort()
        return method_info
def __BodyFieldName(self, body_type):
if body_type is None:
return ''
return self.__names.FieldName(body_type['$ref'])
    def __GetRequestType(self, body_type):
        # Class name of the schema referenced by the request body's $ref.
        return self.__names.ClassName(body_type.get('$ref'))
def __GetRequestField(self, method_description, body_type):
"""Determine the request field for this method."""
body_field_name = self.__BodyFieldName(body_type)
if body_field_name in method_description.get('parameters', {}):
body_field_name = self.__names.FieldName(
'%s_resource' % body_field_name)
# It's exceedingly unlikely that we'd get two name collisions, which
# means it's bound to happen at some point.
while body_field_name in method_description.get('parameters', {}):
body_field_name = self.__names.FieldName(
'%s_body' % body_field_name)
return body_field_name
    def AddServiceFromResource(self, service_name, methods):
        """Add a new service named service_name with the given methods.

        Walks the resource's method descriptions (sorted for determinism),
        creating request/response types as needed, then recurses into
        nested resources and finally registers the service.
        """
        method_descriptions = methods.get('methods', {})
        method_info_map = collections.OrderedDict()
        items = sorted(method_descriptions.items())
        for method_name, method_description in items:
            method_name = self.__names.MethodName(method_name)
            # NOTE: According to the discovery document, if the request or
            # response is present, it will simply contain a `$ref`.
            body_type = method_description.get('request')
            if body_type is None:
                request_type = None
            else:
                request_type = self.__GetRequestType(body_type)
            if self.__NeedRequestType(method_description, request_type):
                request = self.__CreateRequestType(
                    method_description, body_type=body_type)
                request_field = self.__GetRequestField(
                    method_description, body_type)
            else:
                # The referenced message itself serves as the request body.
                request = request_type
                request_field = base_api.REQUEST_IS_BODY
            if 'response' in method_description:
                response = method_description['response']['$ref']
            else:
                response = self.__CreateVoidResponseType(method_description)
            method_info_map[method_name] = self.__ComputeMethodInfo(
                method_description, request, response, request_field)
            self.__command_registry.AddCommandForMethod(
                service_name, method_name, method_info_map[method_name],
                request, response)
        nested_services = methods.get('resources', {})
        services = sorted(nested_services.items())
        for subservice_name, submethods in services:
            # Recurse into nested resources, flattening names with '_'.
            new_service_name = '%s_%s' % (service_name, subservice_name)
            self.AddServiceFromResource(new_service_name, submethods)
        self.__RegisterService(service_name, method_info_map)
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service registry for apitools."""
import collections
import logging
import re
import textwrap
from apitools.base.py import base_api
from apitools.gen import util
# We're a code generator. I don't care.
# pylint:disable=too-many-statements
# Case-insensitive matcher for MIME patterns such as 'image/*' or
# 'application/json' (used to sanity-check upload 'accept' entries).
_MIME_PATTERN_RE = re.compile(r'(?i)[a-z0-9_*-]+/[a-z0-9_*-]+')
class ServiceRegistry(object):
"""Registry for service types."""
    def __init__(self, client_info, message_registry, command_registry,
                 names, root_package, base_files_package,
                 unelidable_request_methods):
        """Create a registry.

        Args:
          client_info: generated-client metadata (package, version, scopes, ...).
          message_registry: registry of generated message types.
          command_registry: registry that CLI commands are added to.
          names: name normalizer for class/field/method names.
          root_package: package prefix used when importing the messages module.
          base_files_package: package providing base_api in generated code.
          unelidable_request_methods: method ids that must always get a
              wrapper request type.
        """
        self.__client_info = client_info
        self.__package = client_info.package
        self.__names = names
        self.__service_method_info_map = collections.OrderedDict()
        self.__message_registry = message_registry
        self.__command_registry = command_registry
        self.__root_package = root_package
        self.__base_files_package = base_files_package
        self.__unelidable_request_methods = unelidable_request_methods
        # Union of client-level scopes and every per-method scope seen.
        self.__all_scopes = set(self.__client_info.scopes)
    def Validate(self):
        """Validate registered types (delegates to the message registry)."""
        self.__message_registry.Validate()
    @property
    def scopes(self):
        # Sorted list of every OAuth scope seen (client plus per-method).
        return sorted(list(self.__all_scopes))
    def __GetServiceClassName(self, service_name):
        # e.g. 'objects' -> 'ObjectsService' (ClassName applied twice: once to
        # the raw name, once to the suffixed result).
        return self.__names.ClassName(
            '%sService' % self.__names.ClassName(service_name))
def __PrintDocstring(self, printer, method_info, method_name, name):
"""Print a docstring for a service method."""
if method_info.description:
description = util.CleanDescription(method_info.description)
first_line, newline, remaining = method_info.description.partition(
'\n')
if not first_line.endswith('.'):
first_line = '%s.' % first_line
description = '%s%s%s' % (first_line, newline, remaining)
else:
description = '%s method for the %s service.' % (method_name, name)
with printer.CommentContext():
printer('"""%s' % description)
printer()
printer('Args:')
printer(' request: (%s) input message', method_info.request_type_name)
printer(' global_params: (StandardQueryParameters, default: None) '
'global arguments')
if method_info.upload_config:
printer(' upload: (Upload, default: None) If present, upload')
printer(' this stream with the request.')
if method_info.supports_download:
printer(
' download: (Download, default: None) If present, download')
printer(' data from the request via this stream.')
printer('Returns:')
printer(' (%s) The response message.', method_info.response_type_name)
printer('"""')
def __WriteSingleService(
self, printer, name, method_info_map, client_class_name):
printer()
class_name = self.__GetServiceClassName(name)
printer('class %s(base_api.BaseApiService):', class_name)
with printer.Indent():
printer('"""Service class for the %s resource."""', name)
printer()
printer('_NAME = %s', repr(name))
# Print the configs for the methods first.
printer()
printer('def __init__(self, client):')
with printer.Indent():
printer('super(%s.%s, self).__init__(client)',
client_class_name, class_name)
printer('self._method_configs = {')
with printer.Indent(indent=' '):
for method_name, method_info in method_info_map.items():
printer("'%s': base_api.ApiMethodInfo(", method_name)
with printer.Indent(indent=' '):
attrs = sorted(
x.name for x in method_info.all_fields())
for attr in attrs:
if attr in ('upload_config', 'description'):
continue
printer(
'%s=%r,', attr, getattr(method_info, attr))
printer('),')
printer('}')
printer()
printer('self._upload_configs = {')
with printer.Indent(indent=' '):
for method_name, method_info in method_info_map.items():
upload_config = method_info.upload_config
if upload_config is not None:
printer(
"'%s': base_api.ApiUploadInfo(", method_name)
with printer.Indent(indent=' '):
attrs = sorted(
x.name for x in upload_config.all_fields())
for attr in attrs:
printer('%s=%r,',
attr, getattr(upload_config, attr))
printer('),')
printer('}')
# Now write each method in turn.
for method_name, method_info in method_info_map.items():
printer()
params = ['self', 'request', 'global_params=None']
if method_info.upload_config:
params.append('upload=None')
if method_info.supports_download:
params.append('download=None')
printer('def %s(%s):', method_name, ', '.join(params))
with printer.Indent():
self.__PrintDocstring(
printer, method_info, method_name, name)
printer("config = self.GetMethodConfig('%s')", method_name)
upload_config = method_info.upload_config
if upload_config is not None:
printer("upload_config = self.GetUploadConfig('%s')",
method_name)
arg_lines = [
'config, request, global_params=global_params']
if method_info.upload_config:
arg_lines.append(
'upload=upload, upload_config=upload_config')
if method_info.supports_download:
arg_lines.append('download=download')
printer('return self._RunMethod(')
with printer.Indent(indent=' '):
for line in arg_lines[:-1]:
printer('%s,', line)
printer('%s)', arg_lines[-1])
def __WriteProtoServiceDeclaration(self, printer, name, method_info_map):
"""Write a single service declaration to a proto file."""
printer()
printer('service %s {', self.__GetServiceClassName(name))
with printer.Indent():
for method_name, method_info in method_info_map.items():
for line in textwrap.wrap(method_info.description,
printer.CalculateWidth() - 3):
printer('// %s', line)
printer('rpc %s (%s) returns (%s);',
method_name,
method_info.request_type_name,
method_info.response_type_name)
printer('}')
def WriteProtoFile(self, printer):
"""Write the services in this registry to out as proto."""
self.Validate()
client_info = self.__client_info
printer('// Generated services for %s version %s.',
client_info.package, client_info.version)
printer()
printer('syntax = "proto2";')
printer('package %s;', self.__package)
printer('import "%s";', client_info.messages_proto_file_name)
printer()
for name, method_info_map in self.__service_method_info_map.items():
self.__WriteProtoServiceDeclaration(printer, name, method_info_map)
def WriteFile(self, printer):
"""Write the services in this registry to out."""
self.Validate()
client_info = self.__client_info
printer('"""Generated client library for %s version %s."""',
client_info.package, client_info.version)
printer('# NOTE: This file is autogenerated and should not be edited '
'by hand.')
printer('from %s import base_api', self.__base_files_package)
if self.__root_package:
import_prefix = 'from {0} '.format(self.__root_package)
else:
import_prefix = ''
printer('%simport %s as messages', import_prefix,
client_info.messages_rule_name)
printer()
printer()
printer('class %s(base_api.BaseApiClient):',
client_info.client_class_name)
with printer.Indent():
printer(
'"""Generated client library for service %s version %s."""',
client_info.package, client_info.version)
printer()
printer('MESSAGES_MODULE = messages')
printer('BASE_URL = {0!r}'.format(client_info.base_url))
printer()
printer('_PACKAGE = {0!r}'.format(client_info.package))
printer('_SCOPES = {0!r}'.format(
client_info.scopes or
['https://www.googleapis.com/auth/userinfo.email']))
printer('_VERSION = {0!r}'.format(client_info.version))
printer('_CLIENT_ID = {0!r}'.format(client_info.client_id))
printer('_CLIENT_SECRET = {0!r}'.format(client_info.client_secret))
printer('_USER_AGENT = {0!r}'.format(client_info.user_agent))
printer('_CLIENT_CLASS_NAME = {0!r}'.format(
client_info.client_class_name))
printer('_URL_VERSION = {0!r}'.format(client_info.url_version))
printer('_API_KEY = {0!r}'.format(client_info.api_key))
printer()
printer("def __init__(self, url='', credentials=None,")
with printer.Indent(indent=' '):
printer('get_credentials=True, http=None, model=None,')
printer('log_request=False, log_response=False,')
printer('credentials_args=None, default_global_params=None,')
printer('additional_http_headers=None):')
with printer.Indent():
printer('"""Create a new %s handle."""', client_info.package)
printer('url = url or self.BASE_URL')
printer(
'super(%s, self).__init__(', client_info.client_class_name)
printer(' url, credentials=credentials,')
printer(' get_credentials=get_credentials, http=http, '
'model=model,')
printer(' log_request=log_request, '
'log_response=log_response,')
printer(' credentials_args=credentials_args,')
printer(' default_global_params=default_global_params,')
printer(' additional_http_headers=additional_http_headers)')
for name in self.__service_method_info_map.keys():
printer('self.%s = self.%s(self)',
name, self.__GetServiceClassName(name))
for name, method_info in self.__service_method_info_map.items():
self.__WriteSingleService(
printer, name, method_info, client_info.client_class_name)
def __RegisterService(self, service_name, method_info_map):
if service_name in self.__service_method_info_map:
raise ValueError(
'Attempt to re-register descriptor %s' % service_name)
self.__service_method_info_map[service_name] = method_info_map
def __CreateRequestType(self, method_description, body_type=None):
"""Create a request type for this method."""
schema = {}
schema['id'] = self.__names.ClassName('%sRequest' % (
self.__names.ClassName(method_description['id'], separator='.'),))
schema['type'] = 'object'
schema['properties'] = collections.OrderedDict()
if 'parameterOrder' not in method_description:
ordered_parameters = list(method_description.get('parameters', []))
else:
ordered_parameters = method_description['parameterOrder'][:]
for k in method_description['parameters']:
if k not in ordered_parameters:
ordered_parameters.append(k)
for parameter_name in ordered_parameters:
field_name = self.__names.CleanName(parameter_name)
field = dict(method_description['parameters'][parameter_name])
if 'type' not in field:
raise ValueError('No type found in parameter %s' % field)
schema['properties'][field_name] = field
if body_type is not None:
body_field_name = self.__GetRequestField(
method_description, body_type)
if body_field_name in schema['properties']:
raise ValueError('Failed to normalize request resource name')
if 'description' not in body_type:
body_type['description'] = (
'A %s resource to be passed as the request body.' % (
self.__GetRequestType(body_type),))
schema['properties'][body_field_name] = body_type
self.__message_registry.AddDescriptorFromSchema(schema['id'], schema)
return schema['id']
def __CreateVoidResponseType(self, method_description):
"""Create an empty response type."""
schema = {}
method_name = self.__names.ClassName(
method_description['id'], separator='.')
schema['id'] = self.__names.ClassName('%sResponse' % method_name)
schema['type'] = 'object'
schema['description'] = 'An empty %s response.' % method_name
self.__message_registry.AddDescriptorFromSchema(schema['id'], schema)
return schema['id']
def __NeedRequestType(self, method_description, request_type):
"""Determine if this method needs a new request type created."""
if not request_type:
return True
method_id = method_description.get('id', '')
if method_id in self.__unelidable_request_methods:
return True
message = self.__message_registry.LookupDescriptorOrDie(request_type)
if message is None:
return True
field_names = [x.name for x in message.fields]
parameters = method_description.get('parameters', {})
for param_name, param_info in parameters.items():
if (param_info.get('location') != 'path' or
self.__names.CleanName(param_name) not in field_names):
break
else:
return False
return True
def __MaxSizeToInt(self, max_size):
"""Convert max_size to an int."""
size_groups = re.match(r'(?P<size>\d+)(?P<unit>.B)?$', max_size)
if size_groups is None:
raise ValueError('Could not parse maxSize')
size, unit = size_groups.group('size', 'unit')
shift = 0
if unit is not None:
unit_dict = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}
shift = unit_dict.get(unit.upper())
if shift is None:
raise ValueError('Unknown unit %s' % unit)
return int(size) * (1 << shift)
def __ComputeUploadConfig(self, media_upload_config, method_id):
"""Fill out the upload config for this method."""
config = base_api.ApiUploadInfo()
if 'maxSize' in media_upload_config:
config.max_size = self.__MaxSizeToInt(
media_upload_config['maxSize'])
if 'accept' not in media_upload_config:
logging.warn(
'No accept types found for upload configuration in '
'method %s, using */*', method_id)
config.accept.extend([
str(a) for a in media_upload_config.get('accept', '*/*')])
for accept_pattern in config.accept:
if not _MIME_PATTERN_RE.match(accept_pattern):
logging.warn('Unexpected MIME type: %s', accept_pattern)
protocols = media_upload_config.get('protocols', {})
for protocol in ('simple', 'resumable'):
media = protocols.get(protocol, {})
for attr in ('multipart', 'path'):
if attr in media:
setattr(config, '%s_%s' % (protocol, attr), media[attr])
return config
def __ComputeMethodInfo(self, method_description, request, response,
request_field):
"""Compute the base_api.ApiMethodInfo for this method."""
relative_path = self.__names.NormalizeRelativePath(
''.join((self.__client_info.base_path,
method_description['path'])))
method_id = method_description['id']
ordered_params = []
for param_name in method_description.get('parameterOrder', []):
param_info = method_description['parameters'][param_name]
if param_info.get('required', False):
ordered_params.append(param_name)
method_info = base_api.ApiMethodInfo(
relative_path=relative_path,
method_id=method_id,
http_method=method_description['httpMethod'],
description=util.CleanDescription(
method_description.get('description', '')),
query_params=[],
path_params=[],
ordered_params=ordered_params,
request_type_name=self.__names.ClassName(request),
response_type_name=self.__names.ClassName(response),
request_field=request_field,
)
if method_description.get('supportsMediaUpload', False):
method_info.upload_config = self.__ComputeUploadConfig(
method_description.get('mediaUpload'), method_id)
method_info.supports_download = method_description.get(
'supportsMediaDownload', False)
self.__all_scopes.update(method_description.get('scopes', ()))
for param, desc in method_description.get('parameters', {}).items():
param = self.__names.CleanName(param)
location = desc['location']
if location == 'query':
method_info.query_params.append(param)
elif location == 'path':
method_info.path_params.append(param)
else:
raise ValueError(
'Unknown parameter location %s for parameter %s' % (
location, param))
method_info.path_params.sort()
method_info.query_params.sort()
return method_info
def __BodyFieldName(self, body_type):
if body_type is None:
return ''
return self.__names.FieldName(body_type['$ref'])
def __GetRequestType(self, body_type):
return self.__names.ClassName(body_type.get('$ref'))
def __GetRequestField(self, method_description, body_type):
"""Determine the request field for this method."""
body_field_name = self.__BodyFieldName(body_type)
if body_field_name in method_description.get('parameters', {}):
body_field_name = self.__names.FieldName(
'%s_resource' % body_field_name)
# It's exceedingly unlikely that we'd get two name collisions, which
# means it's bound to happen at some point.
while body_field_name in method_description.get('parameters', {}):
body_field_name = self.__names.FieldName(
'%s_body' % body_field_name)
return body_field_name
def AddServiceFromResource(self, service_name, methods):
"""Add a new service named service_name with the given methods."""
method_descriptions = methods.get('methods', {})
method_info_map = collections.OrderedDict()
items = sorted(method_descriptions.items())
for method_name, method_description in items:
method_name = self.__names.MethodName(method_name)
# NOTE: According to the discovery document, if the request or
# response is present, it will simply contain a `$ref`.
body_type = method_description.get('request')
if body_type is None:
request_type = None
else:
request_type = self.__GetRequestType(body_type)
if self.__NeedRequestType(method_description, request_type):
request = self.__CreateRequestType(
method_description, body_type=body_type)
request_field = self.__GetRequestField(
method_description, body_type)
else:
request = request_type
request_field = base_api.REQUEST_IS_BODY
if 'response' in method_description:
response = method_description['response']['$ref']
else:
response = self.__CreateVoidResponseType(method_description)
method_info_map[method_name] = self.__ComputeMethodInfo(
method_description, request, response, request_field)
self.__command_registry.AddCommandForMethod(
service_name, method_name, method_info_map[method_name],
request, response)
nested_services = methods.get('resources', {})
services = sorted(nested_services.items())
for subservice_name, submethods in services:
new_service_name = '%s_%s' % (service_name, subservice_name)
self.AddServiceFromResource(new_service_name, submethods)
self.__RegisterService(service_name, method_info_map)
|
en
| 0.779645
|
#!/usr/bin/env python # # Copyright 2015 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Service registry for apitools. # We're a code generator. I don't care. # pylint:disable=too-many-statements Registry for service types. Print a docstring for a service method. %s' % description) printer() printer('Args:') printer(' request: (%s) input message', method_info.request_type_name) printer(' global_params: (StandardQueryParameters, default: None) ' 'global arguments') if method_info.upload_config: printer(' upload: (Upload, default: None) If present, upload') printer(' this stream with the request.') if method_info.supports_download: printer( ' download: (Download, default: None) If present, download') printer(' data from the request via this stream.') printer('Returns:') printer(' (%s) The response message.', method_info.response_type_name) printer(' Service class for the %s resource. # Print the configs for the methods first. # Now write each method in turn. Write a single service declaration to a proto file. Write the services in this registry to out as proto. Write the services in this registry to out. Generated client library for %s version %s. Generated client library for service %s version %s. Create a new %s handle. Create a request type for this method. Create an empty response type. Determine if this method needs a new request type created. Convert max_size to an int. Fill out the upload config for this method. Compute the base_api.ApiMethodInfo for this method. 
Determine the request field for this method. # It's exceedingly unlikely that we'd get two name collisions, which # means it's bound to happen at some point. Add a new service named service_name with the given methods. # NOTE: According to the discovery document, if the request or # response is present, it will simply contain a `$ref`.
| 1.862594
| 2
|
mv_gaussian/low_dim_w_five_obs/run_script_snpe_c.py
|
SamuelWiqvist/snpla
| 2
|
6626176
|
# Imports
import sys
import torch
import os
import numpy as np
import time
from torch.distributions.multivariate_normal import MultivariateNormal
from sbi.inference import SNPE_C, prepare_for_sbi
# Initial set up
# CLI: lunarc flag (cluster vs local paths), problem dimension, RNG seed,
# data seed, and hyper-parameter-tuning sample index.
lunarc = int(sys.argv[1])
dim = int(sys.argv[2])
seed = int(sys.argv[3])
seed_data = int(sys.argv[4])
hp_tuning = int(sys.argv[5])  # if hp_tuning = 0, no hyper-param tuning, else hp_tuning for that sample of the hp
# normal run: seed = 1:10, hp_tuning = 0
# hp search: seed = 11, hp_tuning = 1:10
print("Input args:")
print("Dim: " + str(dim))
print("seed: " + str(seed))
print("seed_data: " + str(seed_data))
# Set wd
print(os.getcwd())
# set the wd to the base folder for the project
if lunarc == 1:
    os.chdir('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev')
else:
    os.chdir('/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev')
sys.path.append('./')
print(os.getcwd())
# Job id used in all output file names: dim_seed_seeddata[_hptuning].
id_job = str(dim) + '_' + str(seed) + '_' + str(seed_data)
if hp_tuning > 0:
    id_job = id_job + "_" + str(hp_tuning)
# Load all utility functions for all methods
import mv_gaussian.low_dim_w_five_obs.functions as func
print(hp_tuning)
print(func.sample_hp("snpe_c", hp_tuning))
print(torch.rand(1))
print(func.sample_hp("snpe_c", hp_tuning)[0].item())
print(torch.rand(1))
# Set model and generate data
x_o, conj_model, analytical_posterior = func.set_up_model(seed)
def simulator(theta):
    # Simulate conj_model.N observations per parameter row and flatten them
    # into the vector shape expected by sbi.
    N_samples = theta.shape[0]
    x = torch.zeros(N_samples, conj_model.N, dim)
    for i in range(N_samples):
        model_tmp = MultivariateNormal(theta[i], conj_model.model.covariance_matrix)
        x[i, :, :] = model_tmp.rsample(sample_shape=(conj_model.N,))
    # return calc_summary_stats(x), theta #/math.sqrt(5) # div with std of prior to nomarlize data
    return func.flatten(x)
# check simulator and prior
simulator, prior = prepare_for_sbi(simulator, conj_model.prior)
# function that builds the network
def build_custom_post_net(batch_theta, batch_x):
    # Only the posterior flow is used; the likelihood flow is discarded.
    flow_lik, flow_post = func.set_up_networks()
    return flow_post
inference = SNPE_C(simulator, prior, density_estimator=build_custom_post_net)
learning_rate = 0.0005  # default value
if hp_tuning > 0:
    # NOTE(review): this samples the "snl" hyper-parameter set although the
    # rest of this snpe_c script uses "snpe_c" (see the prints above) --
    # confirm whether "snl" is intentional here.
    learning_rate = func.sample_hp("snl", hp_tuning)[0].item()
start = time.time()
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
num_rounds = 10
x_o = x_o.flatten()
posteriors = []
proposal = None
# Sequential rounds: each round simulates from the current proposal and
# refines the posterior estimate conditioned on x_o.
for i in range(num_rounds):
    posterior = inference(num_simulations=2500, proposal=proposal, max_num_epochs=100, learning_rate=learning_rate)
    posteriors.append(posterior)
    proposal = posterior.set_default_x(x_o)
end = time.time()
run_time = end - start
print("")
print("Runtime:" + str(round(run_time, 2)))
kl_divs_trained = []
start = time.time()
# Evaluate each round's posterior: sample it and record the KL divergence
# against the analytical posterior; samples are also written to disk.
for i in range(num_rounds):
    print(i)
    posterior_sample = posteriors[i].sample((1000,), x=x_o)
    kl_divs_trained.append(conj_model.kl_div(analytical_posterior, posterior_sample))
    if hp_tuning == 0:
        np.savetxt('mv_gaussian/low_dim_w_five_obs/data/post_samples_snpec_' + str(i + 1) + "_" + id_job + '.csv',
                   posterior_sample.detach().numpy(), delimiter=",")
    else:
        np.savetxt('mv_gaussian/low_dim_w_five_obs/hp_tuning/post_samples_snpec_' + str(i + 1) + "_" + id_job + '.csv',
                   posterior_sample.detach().numpy(), delimiter=",")
end = time.time()
run_time_inference = (end - start) / num_rounds
# Write results
if hp_tuning == 0:
    with open('mv_gaussian/low_dim_w_five_obs/results/snpec_' + id_job + '.txt', 'w') as f:
        f.write('%.4f\n' % run_time)
        f.write('%.4f\n' % run_time_inference)
        for i in range(num_rounds):
            f.write('%.4f\n' % kl_divs_trained[i])
else:
    with open('mv_gaussian/low_dim_w_five_obs/hp_tuning/snpec_' + id_job + '.txt', 'w') as f:
        f.write('%.4f\n' % hp_tuning)
        f.write('%.6f\n' % learning_rate)
        f.write('%.4f\n' % run_time)
        f.write('%.4f\n' % run_time_inference)
        for i in range(num_rounds):
            f.write('%.4f\n' % kl_divs_trained[i])
|
# Imports
import sys
import torch
import os
import numpy as np
import time
from torch.distributions.multivariate_normal import MultivariateNormal
from sbi.inference import SNPE_C, prepare_for_sbi
# Initial set up
lunarc = int(sys.argv[1])
dim = int(sys.argv[2])
seed = int(sys.argv[3])
seed_data = int(sys.argv[4])
hp_tuning = int(sys.argv[5]) # if hp_tuning = 0, no hyper-param tuning, else hp_tuning for that sample of the hp
# normal run: seed = 1:10, hp_tuning = 0
# hp search: seed = 11, hp_tuning = 1:10
print("Input args:")
print("Dim: " + str(dim))
print("seed: " + str(seed))
print("seed_data: " + str(seed_data))
# Set wd
print(os.getcwd())
# set the wd to the base folder for the project
if lunarc == 1:
os.chdir('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev')
else:
os.chdir('/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev')
sys.path.append('./')
print(os.getcwd())
id_job = str(dim) + '_' + str(seed) + '_' + str(seed_data)
if hp_tuning > 0:
id_job = id_job + "_" + str(hp_tuning)
# Load all utility functions for all methods
import mv_gaussian.low_dim_w_five_obs.functions as func
print(hp_tuning)
print(func.sample_hp("snpe_c", hp_tuning))
print(torch.rand(1))
print(func.sample_hp("snpe_c", hp_tuning)[0].item())
print(torch.rand(1))
# Set model and generate data
x_o, conj_model, analytical_posterior = func.set_up_model(seed)
def simulator(theta):
N_samples = theta.shape[0]
x = torch.zeros(N_samples, conj_model.N, dim)
for i in range(N_samples):
model_tmp = MultivariateNormal(theta[i], conj_model.model.covariance_matrix)
x[i, :, :] = model_tmp.rsample(sample_shape=(conj_model.N,))
# return calc_summary_stats(x), theta #/math.sqrt(5) # div with std of prior to nomarlize data
return func.flatten(x)
# check simulator and prior
simulator, prior = prepare_for_sbi(simulator, conj_model.prior)
# function that builds the network
def build_custom_post_net(batch_theta, batch_x):
flow_lik, flow_post = func.set_up_networks()
return flow_post
inference = SNPE_C(simulator, prior, density_estimator=build_custom_post_net)
learning_rate = 0.0005 # default value
if hp_tuning > 0:
learning_rate = func.sample_hp("snl", hp_tuning)[0].item()
start = time.time()
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
num_rounds = 10
x_o = x_o.flatten()
posteriors = []
proposal = None
for i in range(num_rounds):
posterior = inference(num_simulations=2500, proposal=proposal, max_num_epochs=100, learning_rate=learning_rate)
posteriors.append(posterior)
proposal = posterior.set_default_x(x_o)
end = time.time()
run_time = end - start
print("")
print("Runtime:" + str(round(run_time, 2)))
kl_divs_trained = []
start = time.time()
for i in range(num_rounds):
print(i)
posterior_sample = posteriors[i].sample((1000,), x=x_o)
kl_divs_trained.append(conj_model.kl_div(analytical_posterior, posterior_sample))
if hp_tuning == 0:
np.savetxt('mv_gaussian/low_dim_w_five_obs/data/post_samples_snpec_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
else:
np.savetxt('mv_gaussian/low_dim_w_five_obs/hp_tuning/post_samples_snpec_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
end = time.time()
run_time_inference = (end - start) / num_rounds
# Write results
if hp_tuning == 0:
with open('mv_gaussian/low_dim_w_five_obs/results/snpec_' + id_job + '.txt', 'w') as f:
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(num_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
else:
with open('mv_gaussian/low_dim_w_five_obs/hp_tuning/snpec_' + id_job + '.txt', 'w') as f:
f.write('%.4f\n' % hp_tuning)
f.write('%.6f\n' % learning_rate)
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(num_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
|
en
| 0.730469
|
# Imports # Initial set up # if hp_tuning = 0, no hyper-param tuning, else hp_tuning for that sample of the hp # normal run: seed = 1:10, hp_tuning = 0 # hp search: seed = 11, hp_tuning = 1:10 # Set wd # set the wd to the base folder for the project # Load all utility functions for all methods # Set model and generate data # return calc_summary_stats(x), theta #/math.sqrt(5) # div with std of prior to nomarlize data # check simulator and prior # function that builds the network # default value # Write results
| 1.907861
| 2
|
qm3/actions/neb.py
|
sergio-marti/ren-qm3
| 0
|
6626177
|
import math
import numpy
import typing
def distribute( nodes: int, guess: list ) -> list:
    """Linearly interpolate a band of replicas along the anchor structures.

    guess (list) MUST contain at least the initial (coor_0) and end (coor_f)
    coordinate numpy.arrays; points are allotted to each segment in
    proportion to its Cartesian length, and the final anchor is included.
    """
    # length of each segment between consecutive anchors
    seg_len = [numpy.linalg.norm(b - a) for a, b in zip(guess[:-1], guess[1:])]
    total = sum(seg_len)
    # number of replicas per segment, proportional to its share of the path
    count = [int(round(d / total * (nodes + 1), 0)) for d in seg_len]
    # per-segment displacement between consecutive replicas
    step = [(b - a) / c for a, b, c in zip(guess[:-1], guess[1:], count)]
    # one extra point on the last segment so the final anchor is emitted
    count[-1] += 1
    path = []
    for start, disp, c in zip(guess[:-1], step, count):
        for k in range(c):
            path.append(start + k * disp)
    return path
class serial( object ):
    """
    Serial nudged-elastic-band (NEB) wrapper: exposes the whole band as one
    composite system (func/grad/coor) so a generic minimizer can relax it.
    the value of 'kumb' should be approx the same of the potential energy barrier
    when optimizing the whole band, set the 'gradient_tolerance' equal to [0.1:0.5] * nodes (_kJ/mol.A)
    J. Chem. Phys. v113, p9978 (2000) [10.1063/1.1323224]
    """
    def __init__( self, mol: object, guess: list, kumb: float ):
        # mol:   molecule object providing coor / grad / actv and get_grad()
        # guess: one coordinate array per band node (ends act as fixed refs)
        # kumb:  umbrella (spring) force constant between adjacent nodes
        self.mole = mol
        self.kumb = kumb
        # indices of the active (movable) atoms of the molecule
        self.sele = numpy.argwhere( mol.actv.ravel() ).ravel()
        self.dime = len( self.sele )
        self.node = len( guess )
        # composite system: node consecutive copies of the active selection
        self.natm = self.dime * self.node
        self.actv = numpy.ones( ( self.natm, 1 ), dtype=numpy.bool_ )
        self.coor = numpy.zeros( ( self.natm, 3 ), dtype=numpy.float64 )
        for i in range( self.node ):
            ii = i * self.dime
            self.coor[ii:ii+self.dime] = guess[i]
    def neb_data( self, node: int ):
        # snapshot one node: energy in a REMARK line plus a PDB dump
        with open( "node.%02d"%( node ), "wt" ) as f:
            f.write( "REMARK func = %20.3lf\n"%( self.mole.func ) )
            self.mole.pdb_write( f )
    def get_grad( self ):
        """Evaluate the band energy (self.func) and gradient (self.grad)."""
        # ----------------------------------------------------------------------
        def __calc_tau( potm, poti, potp, crdm, crdi, crdp ):
            # Tangent estimate at node i from its neighbours (m = i-1,
            # p = i+1): take the uphill displacement, or an energy-weighted
            # mix at extrema, per the reference in the class docstring.
            # Returns the unit tangent and the spring (umbrella) gradient.
            dcM = crdp - crdi
            dcm = crdi - crdm
            dpM = max( math.fabs( potp - poti ), math.fabs( potm - poti ) )
            dpm = min( math.fabs( potp - poti ), math.fabs( potm - poti ) )
            if( potp > poti and poti > potm ):
                tau = dcM.copy()
            elif( potp < poti and poti < potm ):
                tau = dcm.copy()
            else:
                if( potp > potm ):
                    tau = dpM * dcM + dpm * dcm
                else:
                    tau = dpm * dcM + dpM * dcm
            tau /= numpy.linalg.norm( tau )
#            fum = self.kumb * numpy.sum( ( dcm - dcM ) * tau )
#            gum = fum * tau
#            return( tau, fum * numpy.sum( ( dcm - dcM ) * tau ), gum )
            gum = self.kumb * numpy.sum( ( dcm - dcM ) * tau ) * tau
            return( tau, gum )
        # ----------------------------------------------------------------------
        vpot = []
        self.grad = numpy.zeros( ( self.natm, 3 ), dtype=numpy.float64 )
        # individuals
        for who in range( self.node ):
            ii = who * self.dime
            self.mole.coor[self.sele] = self.coor[ii:ii+self.dime]
            self.mole.get_grad()
            self.mole.project_gRT()
            vpot.append( self.mole.func )
            self.grad[ii:ii+self.dime] = self.mole.grad[self.sele]
            self.neb_data( who )
        self.func = sum( vpot )
        # connections (first and last are fixed references)
        for who in range( 1, self.node - 1 ):
            ii = who * self.dime
            jj = ii + self.dime
#            tau, fum, gum = __calc_tau( vpot[who-1], vpot[who], vpot[who+1],
            tau, gum = __calc_tau( vpot[who-1], vpot[who], vpot[who+1],
                    self.coor[ii-self.dime:ii],
                    self.coor[ii:jj],
                    self.coor[jj:jj+self.dime] )
#            self.func += fum * 0.5
            # replace the parallel component of the true gradient by the
            # spring gradient along the tangent (standard NEB projection)
            self.grad[ii:jj] += gum - numpy.sum( tau * self.grad[ii:jj] ) * tau
class parall( object ):
    """
    Parallel nudged-elastic-band (NEB) wrapper: node energies/gradients are
    distributed over workers in chunks via 'opar' (appears to be an MPI-like
    communicator exposing barrier/send_i4/send_r8/recv_r8 -- confirm).
    the value of 'kumb' should be approx the same of the potential energy barrier
    when optimizing the whole band, set the 'gradient_tolerance' equal to [0.1:0.5] * nodes (_kJ/mol.A)
    J. Chem. Phys. v113, p9978 (2000) [10.1063/1.1323224]
    """
    def __init__( self, mol: object, guess: list, kumb: float, chunks: list, opar: object ):
        # chunks: per-worker lists of node indices; chunks[0] is this
        #         process's own share, the rest are remote workers
        # opar:   communicator used to ship coordinates out and collect
        #         energies/gradients back
        self.mole = mol
        self.kumb = kumb
        # indices of the active (movable) atoms of the molecule
        self.sele = numpy.argwhere( mol.actv.ravel() ).ravel()
        self.dime = len( self.sele )
        self.node = len( guess )
        self.chnk = chunks[:]
        self.ncpu = len( chunks )
        self.opar = opar
        # composite system: node consecutive copies of the active selection
        self.natm = self.dime * self.node
        self.actv = numpy.ones( ( self.natm, 1 ), dtype=numpy.bool_ )
        self.coor = numpy.zeros( ( self.natm, 3 ), dtype=numpy.float64 )
        for i in range( self.node ):
            ii = i * self.dime
            self.coor[ii:ii+self.dime] = guess[i]
    def neb_data( self, node: int ):
        # snapshot one node: energy in a REMARK line plus a PDB dump
        with open( "node.%02d"%( node ), "wt" ) as f:
            f.write( "REMARK func = %20.3lf\n"%( self.mole.func ) )
            self.mole.pdb_write( f )
    def get_grad( self ):
        """Evaluate the band energy (self.func) and gradient (self.grad)."""
        # ----------------------------------------------------------------------
        def __calc_tau( potm, poti, potp, crdm, crdi, crdp ):
            # Tangent estimate at node i from its neighbours (m = i-1,
            # p = i+1): take the uphill displacement, or an energy-weighted
            # mix at extrema, per the reference in the class docstring.
            # Returns the unit tangent and the spring (umbrella) gradient.
            dcM = crdp - crdi
            dcm = crdi - crdm
            dpM = max( math.fabs( potp - poti ), math.fabs( potm - poti ) )
            dpm = min( math.fabs( potp - poti ), math.fabs( potm - poti ) )
            if( potp > poti and poti > potm ):
                tau = dcM.copy()
            elif( potp < poti and poti < potm ):
                tau = dcm.copy()
            else:
                if( potp > potm ):
                    tau = dpM * dcM + dpm * dcm
                else:
                    tau = dpm * dcM + dpM * dcm
            tau /= numpy.linalg.norm( tau )
#            fum = self.kumb * numpy.sum( ( dcm - dcM ) * tau )
#            gum = fum * tau
#            return( tau, fum * numpy.sum( ( dcm - dcM ) * tau ), gum )
            gum = self.kumb * numpy.sum( ( dcm - dcM ) * tau ) * tau
            return( tau, gum )
        # ----------------------------------------------------------------------
        vpot = [ 0.0 for i in range( self.node ) ]
        self.grad = numpy.zeros( ( self.natm, 3 ), dtype=numpy.float64 )
        # sync coordinates to nodes (chunks)
        self.opar.barrier()
        for who in range( 1, self.ncpu ):
            # the leading [1] looks like a "keep working" control flag for
            # the remote worker loop -- confirm against the worker side
            self.opar.send_i4( who, [ 1 ] )
            tmp = []
            for itm in self.chnk[who]:
                ii = itm * self.dime
                tmp += self.coor[ii:ii+self.dime].ravel().tolist()
            self.opar.send_r8( who, tmp )
        # individuals (own chunk)
        for who in self.chnk[0]:
            ii = who * self.dime
            self.mole.coor[self.sele] = self.coor[ii:ii+self.dime]
            self.mole.get_grad()
            self.mole.project_gRT()
            vpot[who] = self.mole.func
            self.grad[ii:ii+self.dime] = self.mole.grad[self.sele]
            self.neb_data( who )
        # sync function and gradients from nodes (chunks)
        self.opar.barrier()
        for who in range( 1, self.ncpu ):
            siz = len( self.chnk[who] )
            fun = self.opar.recv_r8( who, siz )
            tmp = siz * self.dime * 3
            grd = numpy.array( self.opar.recv_r8( who, tmp ) )
            grd.shape = ( self.dime * siz, 3 )
            # scatter the received per-node results back into band order
            for i in range( len( self.chnk[who] ) ):
                vpot[self.chnk[who][i]] = fun[i]
                ii = self.chnk[who][i] * self.dime
                jj = i * self.dime
                self.grad[ii:ii+self.dime] = grd[jj:jj+self.dime]
        self.func = sum( vpot )
        # connections (first and last are fixed references)
        for who in range( 1, self.node - 1 ):
            ii = who * self.dime
            jj = ii + self.dime
#            tau, fum, gum = __calc_tau( vpot[who-1], vpot[who], vpot[who+1],
            tau, gum = __calc_tau( vpot[who-1], vpot[who], vpot[who+1],
                    self.coor[ii-self.dime:ii],
                    self.coor[ii:jj],
                    self.coor[jj:jj+self.dime] )
#            self.func += fum * 0.5
            # replace the parallel component of the true gradient by the
            # spring gradient along the tangent (standard NEB projection)
            self.grad[ii:jj] += gum - numpy.sum( tau * self.grad[ii:jj] ) * tau
|
import math
import numpy
import typing
def distribute( nodes: int, guess: list ) -> list:
"""
guess (list) MUST contain at least the initial (coor_0) and end (coor_f) coordinate numpy.arrays
"""
delt = []
for i in range( 1, len( guess ) ):
delt.append( numpy.linalg.norm( guess[i] - guess[i-1] ) )
dtot = sum( delt )
npts = [ int( round( delt[i] / dtot * ( nodes + 1 ), 0 ) ) for i in range( len( delt ) ) ]
delt = []
for i in range( 1, len( guess ) ):
delt.append( ( guess[i] - guess[i-1] ) / npts[i-1] )
npts[-1] += 1
coor = []
for i in range( len( guess ) - 1 ):
for n in range( npts[i] ):
coor.append( guess[i] + n * delt[i] )
return( coor )
class serial( object ):
    """
    Serial (single-process) nudged elastic band (NEB) object built from a band
    of nodes interpolated between reactant and product geometries.

    the value of 'kumb' should be approx the same of the potential energy barrier
    when optimizing the whole band, set the 'gradient_tolerance' equal to [0.1:0.5] * nodes (_kJ/mol.A)
    J. Chem. Phys. v113, p9978 (2000) [10.1063/1.1323224]
    """
    def __init__( self, mol: object, guess: list, kumb: float ):
        # mol:   molecule-like object exposing coor, actv, get_grad(),
        #        project_gRT(), func and grad (see get_grad below)
        # guess: list of per-node coordinate arrays forming the initial band
        # kumb:  spring force constant linking neighbouring band nodes
        self.mole = mol
        self.kumb = kumb
        # indices of the active (movable) atoms of the molecule
        self.sele = numpy.argwhere( mol.actv.ravel() ).ravel()
        self.dime = len( self.sele )
        self.node = len( guess )
        self.natm = self.dime * self.node
        self.actv = numpy.ones( ( self.natm, 1 ), dtype=numpy.bool_ )
        self.coor = numpy.zeros( ( self.natm, 3 ), dtype=numpy.float64 )
        # stack the per-node guesses into one contiguous band coordinate array
        for i in range( self.node ):
            ii = i * self.dime
            self.coor[ii:ii+self.dime] = guess[i]
    def neb_data( self, node: int ):
        # dump the current energy/geometry of one node as "node.NN" (PDB)
        with open( "node.%02d"%( node ), "wt" ) as f:
            f.write( "REMARK func = %20.3lf\n"%( self.mole.func ) )
            self.mole.pdb_write( f )
    def get_grad( self ):
        """Evaluate the band: sets self.func (sum of node energies) and
        self.grad (per-atom gradient with NEB spring/tangent corrections)."""
        # ----------------------------------------------------------------------
        def __calc_tau( potm, poti, potp, crdm, crdi, crdp ):
            # upwinded tangent of node i plus its spring gradient
            # (see the reference cited in the class docstring)
            dcM = crdp - crdi   # displacement towards the next node
            dcm = crdi - crdm   # displacement from the previous node
            dpM = max( math.fabs( potp - poti ), math.fabs( potm - poti ) )
            dpm = min( math.fabs( potp - poti ), math.fabs( potm - poti ) )
            if( potp > poti and poti > potm ):
                tau = dcM.copy()
            elif( potp < poti and poti < potm ):
                tau = dcm.copy()
            else:
                # local extremum: blend both directions, weighted by energy gaps
                if( potp > potm ):
                    tau = dpM * dcM + dpm * dcm
                else:
                    tau = dpm * dcM + dpM * dcm
            tau /= numpy.linalg.norm( tau )
            # fum = self.kumb * numpy.sum( ( dcm - dcM ) * tau )
            # gum = fum * tau
            # return( tau, fum * numpy.sum( ( dcm - dcM ) * tau ), gum )
            gum = self.kumb * numpy.sum( ( dcm - dcM ) * tau ) * tau
            return( tau, gum )
        # ----------------------------------------------------------------------
        vpot = []
        self.grad = numpy.zeros( ( self.natm, 3 ), dtype=numpy.float64 )
        # individuals: evaluate every node's energy and raw gradient
        for who in range( self.node ):
            ii = who * self.dime
            self.mole.coor[self.sele] = self.coor[ii:ii+self.dime]
            self.mole.get_grad()
            self.mole.project_gRT()
            vpot.append( self.mole.func )
            self.grad[ii:ii+self.dime] = self.mole.grad[self.sele]
            self.neb_data( who )
        self.func = sum( vpot )
        # connections (first and last are fixed references): replace the
        # parallel gradient component with the spring term along the tangent
        for who in range( 1, self.node - 1 ):
            ii = who * self.dime
            jj = ii + self.dime
            # tau, fum, gum = __calc_tau( vpot[who-1], vpot[who], vpot[who+1],
            tau, gum = __calc_tau( vpot[who-1], vpot[who], vpot[who+1],
                    self.coor[ii-self.dime:ii],
                    self.coor[ii:jj],
                    self.coor[jj:jj+self.dime] )
            # self.func += fum * 0.5
            self.grad[ii:jj] += gum - numpy.sum( tau * self.grad[ii:jj] ) * tau
class parall( object ):
    """
    Parallel nudged elastic band (NEB) object: node evaluations are farmed out
    to worker ranks in chunks and collected back through an opaque communicator.

    the value of 'kumb' should be approx the same of the potential energy barrier
    when optimizing the whole band, set the 'gradient_tolerance' equal to [0.1:0.5] * nodes (_kJ/mol.A)
    J. Chem. Phys. v113, p9978 (2000) [10.1063/1.1323224]
    """
    def __init__( self, mol: object, guess: list, kumb: float, chunks: list, opar: object ):
        # chunks: per-rank lists of node indices; chunks[0] is evaluated locally
        # opar:   communicator exposing barrier/send_i4/send_r8/recv_r8 --
        #         presumably MPI-like; verify against the caller
        self.mole = mol
        self.kumb = kumb
        # indices of the active (movable) atoms of the molecule
        self.sele = numpy.argwhere( mol.actv.ravel() ).ravel()
        self.dime = len( self.sele )
        self.node = len( guess )
        self.chnk = chunks[:]
        self.ncpu = len( chunks )
        self.opar = opar
        self.natm = self.dime * self.node
        self.actv = numpy.ones( ( self.natm, 1 ), dtype=numpy.bool_ )
        self.coor = numpy.zeros( ( self.natm, 3 ), dtype=numpy.float64 )
        # stack the per-node guesses into one contiguous band coordinate array
        for i in range( self.node ):
            ii = i * self.dime
            self.coor[ii:ii+self.dime] = guess[i]
    def neb_data( self, node: int ):
        # dump the current energy/geometry of one node as "node.NN" (PDB)
        with open( "node.%02d"%( node ), "wt" ) as f:
            f.write( "REMARK func = %20.3lf\n"%( self.mole.func ) )
            self.mole.pdb_write( f )
    def get_grad( self ):
        """Evaluate the band, distributing node energy/gradient evaluations to
        worker ranks; sets self.func and self.grad like serial.get_grad."""
        # ----------------------------------------------------------------------
        def __calc_tau( potm, poti, potp, crdm, crdi, crdp ):
            # upwinded tangent of node i plus its spring gradient
            # (see the reference cited in the class docstring)
            dcM = crdp - crdi   # displacement towards the next node
            dcm = crdi - crdm   # displacement from the previous node
            dpM = max( math.fabs( potp - poti ), math.fabs( potm - poti ) )
            dpm = min( math.fabs( potp - poti ), math.fabs( potm - poti ) )
            if( potp > poti and poti > potm ):
                tau = dcM.copy()
            elif( potp < poti and poti < potm ):
                tau = dcm.copy()
            else:
                # local extremum: blend both directions, weighted by energy gaps
                if( potp > potm ):
                    tau = dpM * dcM + dpm * dcm
                else:
                    tau = dpm * dcM + dpM * dcm
            tau /= numpy.linalg.norm( tau )
            # fum = self.kumb * numpy.sum( ( dcm - dcM ) * tau )
            # gum = fum * tau
            # return( tau, fum * numpy.sum( ( dcm - dcM ) * tau ), gum )
            gum = self.kumb * numpy.sum( ( dcm - dcM ) * tau ) * tau
            return( tau, gum )
        # ----------------------------------------------------------------------
        vpot = [ 0.0 for i in range( self.node ) ]
        self.grad = numpy.zeros( ( self.natm, 3 ), dtype=numpy.float64 )
        # sync coordinates to nodes (chunks)
        self.opar.barrier()
        for who in range( 1, self.ncpu ):
            self.opar.send_i4( who, [ 1 ] )   # control flag sent before the data
            tmp = []
            for itm in self.chnk[who]:
                ii = itm * self.dime
                tmp += self.coor[ii:ii+self.dime].ravel().tolist()
            self.opar.send_r8( who, tmp )
        # individuals (own chunk): evaluate the locally-assigned nodes
        for who in self.chnk[0]:
            ii = who * self.dime
            self.mole.coor[self.sele] = self.coor[ii:ii+self.dime]
            self.mole.get_grad()
            self.mole.project_gRT()
            vpot[who] = self.mole.func
            self.grad[ii:ii+self.dime] = self.mole.grad[self.sele]
            self.neb_data( who )
        # sync function and gradients from nodes (chunks)
        self.opar.barrier()
        for who in range( 1, self.ncpu ):
            siz = len( self.chnk[who] )
            fun = self.opar.recv_r8( who, siz )
            tmp = siz * self.dime * 3
            grd = numpy.array( self.opar.recv_r8( who, tmp ) )
            grd.shape = ( self.dime * siz, 3 )
            # scatter the received per-node results back into the band arrays
            for i in range( len( self.chnk[who] ) ):
                vpot[self.chnk[who][i]] = fun[i]
                ii = self.chnk[who][i] * self.dime
                jj = i * self.dime
                self.grad[ii:ii+self.dime] = grd[jj:jj+self.dime]
        self.func = sum( vpot )
        # connections (first and last are fixed references): replace the
        # parallel gradient component with the spring term along the tangent
        for who in range( 1, self.node - 1 ):
            ii = who * self.dime
            jj = ii + self.dime
            # tau, fum, gum = __calc_tau( vpot[who-1], vpot[who], vpot[who+1],
            tau, gum = __calc_tau( vpot[who-1], vpot[who], vpot[who+1],
                    self.coor[ii-self.dime:ii],
                    self.coor[ii:jj],
                    self.coor[jj:jj+self.dime] )
            # self.func += fum * 0.5
            self.grad[ii:jj] += gum - numpy.sum( tau * self.grad[ii:jj] ) * tau
|
en
| 0.403737
|
guess (list) MUST contain at least the initial (coor_0) and end (coor_f) coordinate numpy.arrays the value of 'kumb' should be approx the same of the potential energy barrier when optimizing the whole band, set the 'gradient_tolerance' equal to [0.1:0.5] * nodes (_kJ/mol.A) J. Chem. Phys. v113, p9978 (2000) [10.1063/1.1323224] # ---------------------------------------------------------------------- # fum = self.kumb * numpy.sum( ( dcm - dcM ) * tau ) # gum = fum * tau # return( tau, fum * numpy.sum( ( dcm - dcM ) * tau ), gum ) # ---------------------------------------------------------------------- # individuals # connections (first and last are fixed references) # tau, fum, gum = __calc_tau( vpot[who-1], vpot[who], vpot[who+1], # self.func += fum * 0.5 the value of 'kumb' should be approx the same of the potential energy barrier when optimizing the whole band, set the 'gradient_tolerance' equal to [0.1:0.5] * nodes (_kJ/mol.A) J. Chem. Phys. v113, p9978 (2000) [10.1063/1.1323224] # ---------------------------------------------------------------------- # fum = self.kumb * numpy.sum( ( dcm - dcM ) * tau ) # gum = fum * tau # return( tau, fum * numpy.sum( ( dcm - dcM ) * tau ), gum ) # ---------------------------------------------------------------------- # sync coordinates to nodes (chunks) # individuals (own chunk) # sync function and gradients from nodes (chunks) # connections (first and last are fixed references) # tau, fum, gum = __calc_tau( vpot[who-1], vpot[who], vpot[who+1], # self.func += fum * 0.5
| 2.988815
| 3
|
gruvii.py
|
smthnspcl/gruvii
| 0
|
6626178
|
#!/usr/bin/python3
from sys import argv
from os.path import isdir, exists
from os import listdir, makedirs, system
from pipes import quote
import numpy as np
import scipy.io.wavfile as wav
import tensorflow as tf
class Configuration(object):
    """Command-line configuration for gruvii training runs."""
    # Declared at class level so the knobs are discoverable on the class itself.
    dataset_directory = None
    model_iterations = None
    sampling_frequency = None
    clip_length = None
    hidden_dimensions = None
    epochs = None

    def __init__(self):
        # Defaults; the flags shown by help() override them.
        self.dataset_directory = "./dataset/test/"
        self.model_iterations = 50
        self.sampling_frequency = 44100
        self.clip_length = 10
        self.hidden_dimensions = 1024
        self.batch_size = 5
        self.epochs = 25

    @staticmethod
    def help():
        """Print usage information and exit the process."""
        print("usage: gruvii.py {arguments}")
        print("{arguments}\t\t\t\t{default value}")
        print("\t--help")
        print("\t-d\t--dataset-directory\t./dataset/test/")
        print("\t-i\t--iterations\t\t50")
        print("\t-s\t--sampling-frequency\t44100")
        print("\t-c\t--clip-length\t\t10")
        print("\t-h\t--hidden-dimensions\t1024")
        print("\t-b\t--batch-size\t\t5")
        print("\t-e\t--epochs\t\t25")
        exit()

    @staticmethod
    def parse():
        """Build a Configuration from sys.argv.

        Fixes vs. the original: argv[0] (the script path) is no longer
        inspected as a flag, option values are skipped instead of being
        re-read as flags on the next iteration, and a flag missing its
        value shows usage instead of raising IndexError.
        """
        # flag -> (attribute name, converter for the following value)
        setters = {
            "-d": ("dataset_directory", str), "--dataset-directory": ("dataset_directory", str),
            "-i": ("model_iterations", int), "--iterations": ("model_iterations", int),
            "-s": ("sampling_frequency", int), "--sampling-frequency": ("sampling_frequency", int),
            "-c": ("clip_length", int), "--clip-length": ("clip_length", int),
            "-h": ("hidden_dimensions", int), "--hidden-dimensions": ("hidden_dimensions", int),
            "-b": ("batch_size", int), "--batch-size": ("batch_size", int),
            "-e": ("epochs", int), "--epochs": ("epochs", int),
        }
        c = Configuration()
        i = 1  # argv[0] is the script path
        while i < len(argv):
            a = argv[i]
            if a == "--help":
                Configuration.help()
            elif a in setters:
                if i + 1 >= len(argv):
                    Configuration.help()  # flag given without its value
                attr, conv = setters[a]
                setattr(c, attr, conv(argv[i + 1]))
                i += 1  # skip the consumed value
            i += 1
        return c
class Trainer(object):
    """Converts an audio dataset (mp3/flac -> mono wav -> FFT block tensors)
    and trains an LSTM sequence model on the result."""
    config = None
    block_size = None      # samples per block (a quarter second of audio)
    max_seq_length = None  # blocks per training clip

    def __init__(self, config):
        self.config = config
        self._calc()

    def _calc(self):
        # A block is a quarter second of audio, so a clip of clip_length
        # seconds spans clip_length * 4 blocks.
        self.block_size = self.config.sampling_frequency / 4
        self.max_seq_length = int(round((self.config.sampling_frequency * self.config.clip_length) / self.block_size))

    def prepare_data(self):
        """Convert the dataset directory to wavs and then to .npy tensors;
        return the output file prefix used for the .npy files."""
        print("preparing data")
        nd = self.convert_folder_to_wav(self.config.dataset_directory, self.config.sampling_frequency)
        print("wrote waves to", nd)
        # derive the output prefix from the last path component of the dataset dir
        if self.config.dataset_directory.endswith("/"):
            of = self.config.dataset_directory.split("/")[-2]
        else:
            of = self.config.dataset_directory.split("/")[-1]
        print("output file prefix:", of)
        self.convert_wav_files_to_nptensor(nd, self.block_size, self.max_seq_length, of)
        return of

    @staticmethod
    def convert_folder_to_wav(directory, sample_rate=44100):
        """Convert every .mp3/.flac in *directory* to mono wav files under
        <directory>wave/; return that output directory."""
        od = directory + "wave/"
        if isdir(od):
            return od  # already converted on a previous run
        for file in listdir(directory):
            full_filename = directory + file
            if file.endswith('.mp3'):
                Trainer.convert_mp3_to_wav(filename=full_filename, sample_frequency=sample_rate)
            if file.endswith('.flac'):
                Trainer.convert_flac_to_wav(filename=full_filename, sample_frequency=sample_rate)
        return od

    @staticmethod
    def convert_flac_to_wav(filename, sample_frequency):
        """Convert one flac file to a mono wav via sox; return the new path."""
        new_path, tmp_path, orig_filename = Trainer.filter_ext(".flac", filename)
        # BUGFIX: filter_ext already appends 'wave'; the original appended it
        # again here, writing into a spurious '...wavewave' directory that
        # convert_folder_to_wav never reads back.
        if not exists(new_path):
            makedirs(new_path)
        new_name = new_path + '/' + orig_filename + '.wav'
        cmd = 'sox {0} {1} channels 1 rate {2}'.format(quote(filename), quote(new_name), sample_frequency)
        system(cmd)
        return new_name

    @staticmethod
    def filter_ext(ext, filename):
        """Split *filename* into (wave output dir, tmp dir, stem), or None when
        the filename does not end with *ext*."""
        # BUGFIX: the original assigned the slice to 'ext' itself, so the
        # mismatch guard compared 'ext' with 'ext' and could never fire.
        actual_ext = filename[-len(ext):]
        if actual_ext != ext:
            return
        files = filename.split('/')
        orig_filename = files[-1][0:-len(ext)]
        new_path = ''
        if filename[0] == '/':
            new_path = '/'
        for i in range(len(files) - 1):
            new_path += files[i] + '/'
        tmp_path = new_path + 'tmp'
        new_path += 'wave'
        return new_path, tmp_path, orig_filename

    @staticmethod
    def convert_mp3_to_wav(filename, sample_frequency):
        """Downmix one mp3 to mono and decode it to wav at the requested
        sample rate via two lame invocations; return the new path."""
        new_path, tmp_path, orig_filename = Trainer.filter_ext(".mp3", filename)
        if not exists(new_path):
            makedirs(new_path)
        if not exists(tmp_path):
            makedirs(tmp_path)
        filename_tmp = tmp_path + '/' + orig_filename + '.mp3'
        new_name = new_path + '/' + orig_filename + '.wav'
        sample_freq_str = "{0:.1f}".format(float(sample_frequency) / 1000.0)
        cmd = 'lame -a -m m {0} {1}'.format(quote(filename), quote(filename_tmp))
        system(cmd)
        cmd = 'lame --decode {0} {1} --resample {2}'.format(quote(filename_tmp), quote(new_name), sample_freq_str)
        system(cmd)
        return new_name

    @staticmethod
    def read_wav_as_np(filename):
        """Read a wav file; return (samples scaled by 1/32767, sample rate)."""
        data = wav.read(filename)
        np_arr = data[1].astype('float32') / 32767.0  # Normalize 16-bit input to [-1, 1] range
        np_arr = np.array(np_arr)
        return np_arr, data[0]

    @staticmethod
    def convert_np_audio_to_sample_blocks(song_np, block_size):
        """Chop *song_np* into consecutive blocks of *block_size* samples,
        zero-padding the last block to full length."""
        # NOTE(review): this int cast truncates the [-1, 1] normalized samples
        # produced by read_wav_as_np to (mostly) zeros -- confirm intended.
        song_np = song_np.astype('int')
        block_lists = []
        total_samples = song_np.shape[0]
        num_samples_so_far = 0
        while num_samples_so_far < total_samples:
            block = song_np[num_samples_so_far:num_samples_so_far + int(block_size)]
            if block.shape[0] < block_size:
                # zero-pad the final, short block up to block_size
                padding = np.zeros((int(block_size) - block.shape[0]))
                block = np.concatenate((block, padding))
            block_lists.append(block)
            num_samples_so_far += block_size
            num_samples_so_far = int(num_samples_so_far)
        return block_lists

    @staticmethod
    def time_blocks_to_fft_blocks(blocks_time_domain):
        """FFT each block and concatenate the real and imaginary parts into
        one real-valued vector per block."""
        fft_blocks = []
        for block in blocks_time_domain:
            fft_block = np.fft.fft(block)
            new_block = np.concatenate((np.real(fft_block), np.imag(fft_block)))
            fft_blocks.append(new_block)
        return fft_blocks

    @staticmethod
    def load_training_example(filename, block_size=2048, use_time_domain=False):
        """Return (x, y) block sequences for one wav file, where y is x
        shifted one block ahead (next-block prediction)."""
        data, bitrate = Trainer.read_wav_as_np(filename)
        x_t = Trainer.convert_np_audio_to_sample_blocks(data, block_size)
        y_t = x_t[1:]
        y_t.append(np.zeros(int(block_size)))  # Add special end block composed of all zeros
        if use_time_domain:
            return x_t, y_t
        x = Trainer.time_blocks_to_fft_blocks(x_t)
        y = Trainer.time_blocks_to_fft_blocks(y_t)
        return x, y

    @staticmethod
    def convert_wav_files_to_nptensor(directory, block_size, max_seq_len, out_file, max_files=20,
                                      use_time_domain=False):
        """Build normalized (num_examples, max_seq_len, dims) tensors from the
        wavs in *directory* and save mean/var/x/y as '<out_file>_*.npy'."""
        files = []
        for file in listdir(directory):
            if file.endswith('.wav'):
                files.append(directory + file)
        print("converting", files, "to nptensors")
        chunks_x = []
        chunks_y = []
        num_files = len(files)
        if num_files > max_files:
            num_files = max_files  # cap how much of the dataset is used
        for file_idx in range(num_files):
            file = files[file_idx]
            print('Processing: ', (file_idx + 1), '/', num_files)
            print('Filename: ', file)
            x, y = Trainer.load_training_example(file, block_size, use_time_domain=use_time_domain)
            # slice the song into non-overlapping clips of max_seq_len blocks
            cur_seq = 0
            total_seq = len(x)
            print("total_seq:", total_seq, "max_seq_len:", max_seq_len)
            while cur_seq + max_seq_len < total_seq:
                chunks_x.append(x[cur_seq:cur_seq + max_seq_len])
                chunks_y.append(y[cur_seq:cur_seq + max_seq_len])
                cur_seq += max_seq_len
        num_examples = len(chunks_x)
        num_dims_out = block_size * 2  # real + imaginary halves per FFT block
        if use_time_domain:
            num_dims_out = block_size
        out_shape = (num_examples, max_seq_len, int(num_dims_out))
        # NOTE(review): integer dtype ("i") truncates the float normalization
        # applied below -- confirm intended.
        x_data = np.zeros(out_shape, "i")
        y_data = np.zeros(out_shape, "i")
        for n in range(num_examples):
            for i in range(max_seq_len):
                x_data[n][i] = chunks_x[n][i]
                y_data[n][i] = chunks_y[n][i]
            print('Saved example ', (n + 1), ' / ', num_examples)
        print('Flushing to disk...')
        mean_x = np.mean(np.mean(x_data, axis=0), axis=0)  # Mean across num examples and num timesteps
        std_x = np.sqrt(np.mean(np.mean(np.abs(x_data - mean_x) ** 2, axis=0), axis=0))
        std_x = np.maximum(1.0e-8, std_x)  # Clamp variance if too tiny
        x_data[:][:] = (x_data[:][:] - mean_x)  # Mean 0
        x_data[:][:] = (x_data[:][:] / std_x)  # Variance 1
        y_data[:][:] = (y_data[:][:] - mean_x)  # Mean 0
        y_data[:][:] = (y_data[:][:] / std_x)  # Variance 1
        np.save(out_file + '_mean', mean_x)
        np.save(out_file + '_var', std_x)
        np.save(out_file + '_x', x_data)
        np.save(out_file + '_y', y_data)
        print('Done!')

    def train(self, prefix):
        """Train the LSTM on '<prefix>_x.npy' / '<prefix>_y.npy', saving
        weight checkpoints after each block of epochs."""
        print("loading training data")
        x_t = np.load(prefix + "_x.npy")
        y_t = np.load(prefix + "_y.npy")
        print("loaded training data")
        frq_space_dims = x_t.shape[2]
        print("got", frq_space_dims, "frequency dimensions")
        print("building model")
        model = tf.keras.models.Sequential([
            tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(frq_space_dims)),
            tf.keras.layers.LSTM(self.config.hidden_dimensions, return_sequences=True),
            tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(frq_space_dims))
        ])
        print("compiling model")
        model.compile(loss="mean_squared_error", optimizer="rmsprop")
        i = 0
        while i < self.config.model_iterations:
            print("iteration:", i)
            model.fit(x_t, y_t, self.config.batch_size, self.config.epochs)
            i += self.config.epochs
            model.save_weights(prefix + str(i))  # checkpoint after each fit cycle
if __name__ == '__main__':
    # Parse CLI flags, convert the dataset to normalized FFT tensors, then train.
    cfg = Configuration.parse()
    print("config:", cfg.__dict__)
    t = Trainer(cfg)
    npy_prefix = t.prepare_data()  # writes <prefix>_x.npy / <prefix>_y.npy etc.
    t.train(npy_prefix)
|
#!/usr/bin/python3
from sys import argv
from os.path import isdir, exists
from os import listdir, makedirs, system
from pipes import quote
import numpy as np
import scipy.io.wavfile as wav
import tensorflow as tf
class Configuration(object):
dataset_directory = None
model_iterations = None
sampling_frequency = None
clip_length = None
hidden_dimensions = None
epochs = None
def __init__(self):
self.dataset_directory = "./dataset/test/"
self.model_iterations = 50
self.sampling_frequency = 44100
self.clip_length = 10
self.hidden_dimensions = 1024
self.batch_size = 5
self.epochs = 25
@staticmethod
def help():
print("usage: gruvii.py {arguments}")
print("{arguments}\t\t\t\t{default value}")
print("\t--help")
print("\t-d\t--dataset-directory\t./dataset/test/")
print("\t-i\t--iterations\t\t50")
print("\t-s\t--sampling-frequency\t44100")
print("\t-c\t--clip-length\t\t10")
print("\t-h\t--hidden-dimensions\t1024")
print("\t-b\t--batch-size\t\t5")
print("\t-e\t--epochs\t\t25")
exit()
@staticmethod
def parse():
c = Configuration()
i = 0
while i < len(argv):
a = argv[i]
if a in ["--help"]:
Configuration.help()
elif a in ["-d", "--dataset-directory"]:
c.dataset_directory = argv[i + 1]
elif a in ["-i", "--iterations"]:
c.model_iterations = int(argv[i + 1])
elif a in ["-s", "--sampling-frequency"]:
c.sampling_frequency = int(argv[i + 1])
elif a in ["-c", "--clip-length"]:
c.clip_length = int(argv[i + 1])
elif a in ["-h", "--hidden-dimensions"]:
c.hidden_dimensions = int(argv[i + 1])
elif a in ["-b", "--batch-size"]:
c.batch_size = int(argv[i + 1])
elif a in ["-e", "--epochs"]:
c.epochs = int(argv[i + 1])
i += 1
return c
class Trainer(object):
config = None
block_size = None
max_seq_length = None
def __init__(self, config):
self.config = config
self._calc()
def _calc(self):
self.block_size = self.config.sampling_frequency / 4
self.max_seq_length = int(round((self.config.sampling_frequency * self.config.clip_length) / self.block_size))
def prepare_data(self):
print("preparing data")
nd = self.convert_folder_to_wav(self.config.dataset_directory, self.config.sampling_frequency)
print("wrote waves to", nd)
if self.config.dataset_directory.endswith("/"):
of = self.config.dataset_directory.split("/")[-2]
else:
of = self.config.dataset_directory.split("/")[-1]
print("output file prefix:", of)
self.convert_wav_files_to_nptensor(nd, self.block_size, self.max_seq_length, of)
return of
@staticmethod
def convert_folder_to_wav(directory, sample_rate=44100):
od = directory + "wave/"
if isdir(od):
return od
for file in listdir(directory):
full_filename = directory + file
if file.endswith('.mp3'):
Trainer.convert_mp3_to_wav(filename=full_filename, sample_frequency=sample_rate)
if file.endswith('.flac'):
Trainer.convert_flac_to_wav(filename=full_filename, sample_frequency=sample_rate)
return od
@staticmethod
def convert_flac_to_wav(filename, sample_frequency):
new_path, tmp_path, orig_filename = Trainer.filter_ext(".flac", filename)
new_path += 'wave'
if not exists(new_path):
makedirs(new_path)
new_name = new_path + '/' + orig_filename + '.wav'
cmd = 'sox {0} {1} channels 1 rate {2}'.format(quote(filename), quote(new_name), sample_frequency)
system(cmd)
return new_name
@staticmethod
def filter_ext(ext, filename):
ext = filename[-len(ext):]
if ext != ext:
return
files = filename.split('/')
orig_filename = files[-1][0:-len(ext)]
new_path = ''
if filename[0] == '/':
new_path = '/'
for i in range(len(files) - 1):
new_path += files[i] + '/'
tmp_path = new_path + 'tmp'
new_path += 'wave'
return new_path, tmp_path, orig_filename
@staticmethod
def convert_mp3_to_wav(filename, sample_frequency):
new_path, tmp_path, orig_filename = Trainer.filter_ext(".mp3", filename)
if not exists(new_path):
makedirs(new_path)
if not exists(tmp_path):
makedirs(tmp_path)
filename_tmp = tmp_path + '/' + orig_filename + '.mp3'
new_name = new_path + '/' + orig_filename + '.wav'
sample_freq_str = "{0:.1f}".format(float(sample_frequency) / 1000.0)
cmd = 'lame -a -m m {0} {1}'.format(quote(filename), quote(filename_tmp))
system(cmd)
cmd = 'lame --decode {0} {1} --resample {2}'.format(quote(filename_tmp), quote(new_name), sample_freq_str)
system(cmd)
return new_name
@staticmethod
def read_wav_as_np(filename):
data = wav.read(filename)
np_arr = data[1].astype('float32') / 32767.0 # Normalize 16-bit input to [-1, 1] range
np_arr = np.array(np_arr)
return np_arr, data[0]
@staticmethod
def convert_np_audio_to_sample_blocks(song_np, block_size):
song_np = song_np.astype('int')
block_lists = []
total_samples = song_np.shape[0]
num_samples_so_far = 0
while num_samples_so_far < total_samples:
block = song_np[num_samples_so_far:num_samples_so_far + int(block_size)]
if block.shape[0] < block_size:
padding = np.zeros((int(block_size) - block.shape[0]))
block = np.concatenate((block, padding))
block_lists.append(block)
num_samples_so_far += block_size
num_samples_so_far = int(num_samples_so_far)
return block_lists
@staticmethod
def time_blocks_to_fft_blocks(blocks_time_domain):
fft_blocks = []
for block in blocks_time_domain:
fft_block = np.fft.fft(block)
new_block = np.concatenate((np.real(fft_block), np.imag(fft_block)))
fft_blocks.append(new_block)
return fft_blocks
@staticmethod
def load_training_example(filename, block_size=2048, use_time_domain=False):
data, bitrate = Trainer.read_wav_as_np(filename)
x_t = Trainer.convert_np_audio_to_sample_blocks(data, block_size)
y_t = x_t[1:]
y_t.append(np.zeros(int(block_size))) # Add special end block composed of all zeros
if use_time_domain:
return x_t, y_t
x = Trainer.time_blocks_to_fft_blocks(x_t)
y = Trainer.time_blocks_to_fft_blocks(y_t)
return x, y
@staticmethod
def convert_wav_files_to_nptensor(directory, block_size, max_seq_len, out_file, max_files=20,
use_time_domain=False):
files = []
for file in listdir(directory):
if file.endswith('.wav'):
files.append(directory + file)
print("converting", files, "to nptensors")
chunks_x = []
chunks_y = []
num_files = len(files)
if num_files > max_files:
num_files = max_files
for file_idx in range(num_files):
file = files[file_idx]
print('Processing: ', (file_idx + 1), '/', num_files)
print('Filename: ', file)
x, y = Trainer.load_training_example(file, block_size, use_time_domain=use_time_domain)
cur_seq = 0
total_seq = len(x)
print("total_seq:", total_seq, "max_seq_len:", max_seq_len)
while cur_seq + max_seq_len < total_seq:
chunks_x.append(x[cur_seq:cur_seq + max_seq_len])
chunks_y.append(y[cur_seq:cur_seq + max_seq_len])
cur_seq += max_seq_len
num_examples = len(chunks_x)
num_dims_out = block_size * 2
if use_time_domain:
num_dims_out = block_size
out_shape = (num_examples, max_seq_len, int(num_dims_out))
x_data = np.zeros(out_shape, "i")
y_data = np.zeros(out_shape, "i")
for n in range(num_examples):
for i in range(max_seq_len):
x_data[n][i] = chunks_x[n][i]
y_data[n][i] = chunks_y[n][i]
print('Saved example ', (n + 1), ' / ', num_examples)
print('Flushing to disk...')
mean_x = np.mean(np.mean(x_data, axis=0), axis=0) # Mean across num examples and num timesteps
std_x = np.sqrt(np.mean(np.mean(np.abs(x_data - mean_x) ** 2, axis=0), axis=0))
std_x = np.maximum(1.0e-8, std_x) # Clamp variance if too tiny
x_data[:][:] = (x_data[:][:] - mean_x) # Mean 0
x_data[:][:] = (x_data[:][:] / std_x) # Variance 1
y_data[:][:] = (y_data[:][:] - mean_x) # Mean 0
y_data[:][:] = (y_data[:][:] / std_x) # Variance 1
np.save(out_file + '_mean', mean_x)
np.save(out_file + '_var', std_x)
np.save(out_file + '_x', x_data)
np.save(out_file + '_y', y_data)
print('Done!')
def train(self, prefix):
print("loading training data")
x_t = np.load(prefix + "_x.npy")
y_t = np.load(prefix + "_y.npy")
print("loaded training data")
frq_space_dims = x_t.shape[2]
print("got", frq_space_dims, "frequency dimensions")
print("building model")
model = tf.keras.models.Sequential([
tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(frq_space_dims)),
tf.keras.layers.LSTM(self.config.hidden_dimensions, return_sequences=True),
tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(frq_space_dims))
])
print("compiling model")
model.compile(loss="mean_squared_error", optimizer="rmsprop")
i = 0
while i < self.config.model_iterations:
print("iteration:", i)
model.fit(x_t, y_t, self.config.batch_size, self.config.epochs)
i += self.config.epochs
model.save_weights(prefix + str(i))
if __name__ == '__main__':
cfg = Configuration.parse()
print("config:", cfg.__dict__)
t = Trainer(cfg)
npy_prefix = t.prepare_data()
t.train(npy_prefix)
|
en
| 0.64777
|
#!/usr/bin/python3 # Normalize 16-bit input to [-1, 1] range # Add special end block composed of all zeros # Mean across num examples and num timesteps # Clamp variance if too tiny # Mean 0 # Variance 1 # Mean 0 # Variance 1
| 2.556235
| 3
|
ccdl/tests/test_ccdl.py
|
xabgesagtx/ccdl
| 1
|
6626179
|
<reponame>xabgesagtx/ccdl
from unittest import TestCase
from ccdl import CcDownload
class TestCcDownload(TestCase):
    """Unit tests for CcDownload.get_new_filename."""

    def test_get_new_filename(self):
        downloader = CcDownload()
        cases = [
            ("test", "http://www.cc.com/full-episodes/ijy227/the-nightly-show-with-larry-wilmore-april-5--2016---bill-nye-season-2-ep-02086", "test.S02E086.mp4"),
            ("test", "http://www.cc.com/episodes/i6r4tc/the-opposition-with-jordan-klepper-september-26--2017---neal-katyal-season-1-ep-1002", "test.S1E002.mp4"),
            ("test2", "http://www.cc.com/full-episodes/xzj2nq/the-daily-show-with-trevor-noah-april-5--2016---jerrod-carmichael-season-21-ep-21086", "test2.S21E086.mp4"),
            ("test2", "http://www.cc.com/full-episodes/xzj2nq/the-daily-show-with-trevor-noah-april-5--2016---jerrod-carmichael-season-21-ep-21086/", "test2.S21E086.mp4"),
        ]
        # Same four equality checks as before, now table-driven.
        for prefix, url, expected in cases:
            self.assertEqual(expected, downloader.get_new_filename(prefix, url))
|
from unittest import TestCase
from ccdl import CcDownload
class TestCcDownload(TestCase):
def test_get_new_filename(self):
ccDl = CcDownload()
self.assertEqual("test.S02E086.mp4",ccDl.get_new_filename("test","http://www.cc.com/full-episodes/ijy227/the-nightly-show-with-larry-wilmore-april-5--2016---bill-nye-season-2-ep-02086"))
self.assertEqual("test.S1E002.mp4",ccDl.get_new_filename("test","http://www.cc.com/episodes/i6r4tc/the-opposition-with-jordan-klepper-september-26--2017---neal-katyal-season-1-ep-1002"))
self.assertEqual("test2.S21E086.mp4",ccDl.get_new_filename("test2","http://www.cc.com/full-episodes/xzj2nq/the-daily-show-with-trevor-noah-april-5--2016---jerrod-carmichael-season-21-ep-21086"))
self.assertEqual("test2.S21E086.mp4",ccDl.get_new_filename("test2","http://www.cc.com/full-episodes/xzj2nq/the-daily-show-with-trevor-noah-april-5--2016---jerrod-carmichael-season-21-ep-21086/"))
|
none
| 1
| 2.748636
| 3
|
|
Modules/01_calculate_logarithm.py
|
MihailMarkovski/Python-Advanced-2020
| 4
|
6626180
|
<reponame>MihailMarkovski/Python-Advanced-2020<filename>Modules/01_calculate_logarithm.py
from math import log

# Read a number and a base; the literal 'natural' selects the natural logarithm.
value = int(input())
log_base = input()
if log_base == 'natural':
    result = log(value)
else:
    result = log(value, float(log_base))
print(f'{result:.2f}')
|
from math import log
number = int(input())
base = input()
if base == 'natural':
print(f'{log(number):.2f}')
else:
print(f'{log(number, float(base)):.2f}')
|
none
| 1
| 3.785071
| 4
|
|
deauth.py
|
nipatiitti/deauth_beast
| 0
|
6626181
|
#!/usr/bin/env python
# Sniff 802.11 frames and collect the MAC addresses of clients that send
# frames to a given BSSID.  Usage: ./scapy-deauth.py <bssid> <seconds>
import sys
if len(sys.argv) != 3:
    print ("Miten: ./scapy-deauth.py bssid aika")
    print ("BSSIDS: airport -s")
    sys.exit(1)
from scapy import all
from scapy.all import *
# conf.verb = 0 # Silence scapy
conf.iface = "en0"        # capture interface
bssid = sys.argv[1]       # target access-point BSSID
time = int(sys.argv[2])   # sniff duration in seconds
clients = []              # MACs seen sending frames to the BSSID
'''
packet = RadioTap()/Dot11(type=0,subtype=12,addr1=client,addr2=bssid,addr3=bssid)/Dot11Deauth(reason=7)
for n in range(int(count)):
    sendp(packet)
    print ('Deauth lahetetty: ' + conf.iface + ' to BSSID: ' + bssid + ' for Client: ' + client)
'''
def cb(p):
    # per-packet callback: record any new sender addressed to the target BSSID
    if p.haslayer(Dot11):
        print("here")
        if p.addr1 and p.addr2:
            if bssid.lower() == p.addr1.lower():
                #if p.type in [1, 2]:
                if p.addr2 not in clients and p.addr2 != '':
                    clients.append(p.addr2)
                    print(p.addr2)
sniff(iface=conf.iface, prn=cb, timeout = time)
|
#!/usr/bin/env python
import sys
if len(sys.argv) != 3:
print ("Miten: ./scapy-deauth.py bssid aika")
print ("BSSIDS: airport -s")
sys.exit(1)
from scapy import all
from scapy.all import *
# conf.verb = 0 # Silence scapy
conf.iface = "en0"
bssid = sys.argv[1]
time = int(sys.argv[2])
clients = []
'''
packet = RadioTap()/Dot11(type=0,subtype=12,addr1=client,addr2=bssid,addr3=bssid)/Dot11Deauth(reason=7)
for n in range(int(count)):
sendp(packet)
print ('Deauth lahetetty: ' + conf.iface + ' to BSSID: ' + bssid + ' for Client: ' + client)
'''
def cb(p):
if p.haslayer(Dot11):
print("here")
if p.addr1 and p.addr2:
if bssid.lower() == p.addr1.lower():
#if p.type in [1, 2]:
if p.addr2 not in clients and p.addr2 != '':
clients.append(p.addr2)
print(p.addr2)
sniff(iface=conf.iface, prn=cb, timeout = time)
|
en
| 0.484063
|
#!/usr/bin/env python # conf.verb = 0 # Silence scapy packet = RadioTap()/Dot11(type=0,subtype=12,addr1=client,addr2=bssid,addr3=bssid)/Dot11Deauth(reason=7) for n in range(int(count)): sendp(packet) print ('Deauth lahetetty: ' + conf.iface + ' to BSSID: ' + bssid + ' for Client: ' + client) #if p.type in [1, 2]:
| 2.64344
| 3
|
data-models/python-datawrangling/src/gda/datawrangling/test_data_web_spidering.py
|
zhoujiagen/giant-data-analysis
| 2
|
6626182
|
# -*- coding: utf-8 -*-
"""
网络爬虫.
"""
import unittest
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
# use Chrome Dev Tools to extract element's XPath
# run with
# $ bin/scrapy runspider ../src/gda/datawrangling/test_data_web_spidering.py
class PythonPackageItem(scrapy.Item):
    """Structured record for one PyPI package scraped by PythonPackageSpider."""
    package_name = scrapy.Field()               # package title text
    version_number = scrapy.Field()             # release version (not populated by the spider)
    package_downloads = scrapy.Field()          # download links found on the page
    package_page = scrapy.Field()               # URL of the project page
    package_short_description = scrapy.Field()  # <meta name="description"> content
    home_page = scrapy.Field()                  # project home-page link
    python_versions = scrapy.Field()            # supported Python versions from classifiers
    last_month_downloads = scrapy.Field()       # monthly download count text
class PythonPackageSpider(CrawlSpider):
    """Crawl spider that follows PyPI package links and scrapes package metadata.

    BUGFIX: the crawl Rule names 'parse_package' as its callback, but the
    extraction logic was defined as 'parse' -- a method name CrawlSpider
    reserves for its own rule dispatch.  The method is renamed to
    'parse_package' so the Rule callback resolves and CrawlSpider's built-in
    parse() is no longer shadowed.
    """
    name = "python-package"
    allowed_domains = ['pypi.python.org']
    start_urls = [
        'https://pypi.org/project/scrapely/', ]
    rules = (
        Rule(LinkExtractor(
            allow=[r'/pypi/[\w-]+/[\d\.]+', ],  # raw string: pattern contains regex escapes
            restrict_xpaths=['//table/tr/td', ], ),
            follow=True,
            callback='parse_package', ),)

    def grab_data(self, response, xpath_sel):
        """Extract *xpath_sel*: a list for multiple hits, an int for a single
        all-digit hit, the bare string for a single hit, None otherwise."""
        data = response.xpath(xpath_sel).extract()
        if len(data) > 1:
            return data
        elif len(data) == 1:
            if data[0].isdigit():
                return int(data[0])
            return data[0]

    def parse_package(self, response):
        """Build a PythonPackageItem from a single PyPI project page."""
        item = PythonPackageItem()
        item['package_page'] = response.url
        # response.xpath('//div[@class="section"]/h1/text()').extract()
        item['package_name'] = list(
            map(lambda x: x.strip(), response.xpath('//*[@id="content"]/section[1]/div/div[1]/h1/text()').extract()))
        item['package_short_description'] = response.xpath('//meta[@name="description"]/@content').extract()
        item['home_page'] = response.xpath('//*[@id="content"]/section[3]/div/div/div[1]/div[2]/a/@href').extract()
        # collect trove classifiers like "Programming Language :: Python :: 3.8"
        item['python_versions'] = []
        versions = response.xpath('//li/a[contains(text(), ":: Python ::")]/text()').extract()
        for v in versions:
            version_number = v.split("::")[-1]
            item['python_versions'].append(version_number.strip())
        item['last_month_downloads'] = response.xpath('//li/text()[contains(., "month")]/../span/text()').extract()
        item['package_downloads'] = response.xpath(
            '//table/tr/td/span/a[contains(@href,"pypi.python.org")]/@href').extract()
        return item
class TestDataWebSpidering(unittest.TestCase):
    """Smoke test: the spider can be instantiated and asked for its start requests."""
    def test_python_package(self):
        spider = PythonPackageSpider()
        spider.start_requests()  # no assertion -- only checks this doesn't raise
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
"""
网络爬虫.
"""
import unittest
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
# use Chrome Dev Tools to extract element's XPath
# run with
# $ bin/scrapy runspider ../src/gda/datawrangling/test_data_web_spidering.py
class PythonPackageItem(scrapy.Item):
package_name = scrapy.Field()
version_number = scrapy.Field()
package_downloads = scrapy.Field()
package_page = scrapy.Field()
package_short_description = scrapy.Field()
home_page = scrapy.Field()
python_versions = scrapy.Field()
last_month_downloads = scrapy.Field()
class PythonPackageSpider(CrawlSpider):
    """Crawl PyPI package pages and extract metadata into PythonPackageItem.

    Fixes relative to the original:
    * the rule callback ``'parse_package'`` did not exist (the method was
      named ``parse``), which would raise once a rule fired;
    * overriding ``parse`` on a CrawlSpider disables the rule machinery —
      the extraction now lives in ``parse_package``;
    * ``allowed_domains`` now includes ``pypi.org`` (where start_urls live),
      so the offsite middleware does not drop every followed link;
    * the ``allow`` regex is a raw string so ``\\w``/``\\d`` are regex
      classes, not deprecated string escapes.
    """
    name = "python-package"
    allowed_domains = ['pypi.python.org', 'pypi.org']
    start_urls = [
        'https://pypi.org/project/scrapely/', ]
    rules = (
        Rule(LinkExtractor(
            allow=[r'/pypi/[\w-]+/[\d\.]+', ],
            restrict_xpaths=['//table/tr/td', ], ),
            follow=True,
            callback='parse_package', ),)

    def grab_data(self, response, xpath_sel):
        """Extract ``xpath_sel``; return a list for multiple matches, an int
        for a single all-digit match, the bare string for any other single
        match, and None when nothing matches."""
        data = response.xpath(xpath_sel).extract()
        if len(data) > 1:
            return data
        elif len(data) == 1:
            if data[0].isdigit():
                return int(data[0])
            return data[0]

    def parse_package(self, response):
        """Build a PythonPackageItem from one package page."""
        item = PythonPackageItem()
        item['package_page'] = response.url
        item['package_name'] = list(
            map(lambda x: x.strip(), response.xpath('//*[@id="content"]/section[1]/div/div[1]/h1/text()').extract()))
        item['package_short_description'] = response.xpath('//meta[@name="description"]/@content').extract()
        item['home_page'] = response.xpath('//*[@id="content"]/section[3]/div/div/div[1]/div[2]/a/@href').extract()
        item['python_versions'] = []
        versions = response.xpath('//li/a[contains(text(), ":: Python ::")]/text()').extract()
        for v in versions:
            version_number = v.split("::")[-1]
            item['python_versions'].append(version_number.strip())
        item['last_month_downloads'] = response.xpath('//li/text()[contains(., "month")]/../span/text()').extract()
        item['package_downloads'] = response.xpath(
            '//table/tr/td/span/a[contains(@href,"pypi.python.org")]/@href').extract()
        return item

    # Route start_urls responses through the same extraction as rule matches.
    parse_start_url = parse_package
class TestDataWebSpidering(unittest.TestCase):
    """Smoke test: the spider can be built and asked for its start requests."""

    def test_python_package(self):
        # Instantiating the spider and requesting its initial requests
        # must not raise; the result is deliberately not consumed.
        PythonPackageSpider().start_requests()
if __name__ == '__main__':
unittest.main()
|
en
| 0.364557
|
# -*- coding: utf-8 -*- 网络爬虫. # use Chrome Dev Tools to extract element's XPath # run with # $ bin/scrapy runspider ../src/gda/datawrangling/test_data_web_spidering.py # response.xpath('//div[@class="section"]/h1/text()').extract()
| 2.655295
| 3
|
electrum_plcu/gui/kivy/uix/dialogs/invoices.py
|
plc-ultima/electrum-plcu
| 0
|
6626183
|
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from decimal import Decimal
Builder.load_string('''
<InvoicesLabel@Label>
#color: .305, .309, .309, 1
text_size: self.width, None
halign: 'left'
valign: 'top'
<InvoiceItem@CardItem>
requestor: ''
memo: ''
amount: ''
status: ''
date: ''
icon: 'atlas://electrum_plcu/gui/kivy/theming/light/important'
Image:
id: icon
source: root.icon
size_hint: None, 1
width: self.height *.54
mipmap: True
BoxLayout:
spacing: '8dp'
height: '32dp'
orientation: 'vertical'
Widget
InvoicesLabel:
text: root.requestor
shorten: True
Widget
InvoicesLabel:
text: root.memo
color: .699, .699, .699, 1
font_size: '13sp'
shorten: True
Widget
BoxLayout:
spacing: '8dp'
height: '32dp'
orientation: 'vertical'
Widget
InvoicesLabel:
text: root.amount
font_size: '15sp'
halign: 'right'
width: '110sp'
Widget
InvoicesLabel:
text: root.status
font_size: '13sp'
halign: 'right'
color: .699, .699, .699, 1
Widget
<InvoicesDialog@Popup>
id: popup
title: _('Invoices')
BoxLayout:
id: box
orientation: 'vertical'
spacing: '1dp'
ScrollView:
GridLayout:
cols: 1
id: invoices_container
size_hint: 1, None
height: self.minimum_height
spacing: '2dp'
padding: '12dp'
''')
from kivy.properties import BooleanProperty
from electrum_plcu.gui.kivy.i18n import _
from electrum_plcu.util import format_time
from electrum_plcu.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum_plcu.gui.kivy.uix.context_menu import ContextMenu
invoice_text = {
PR_UNPAID:_('Pending'),
PR_UNKNOWN:_('Unknown'),
PR_PAID:_('Paid'),
PR_EXPIRED:_('Expired')
}
pr_icon = {
PR_UNPAID: 'atlas://electrum_plcu/gui/kivy/theming/light/important',
PR_UNKNOWN: 'atlas://electrum_plcu/gui/kivy/theming/light/important',
PR_PAID: 'atlas://electrum_plcu/gui/kivy/theming/light/confirmed',
PR_EXPIRED: 'atlas://electrum_plcu/gui/kivy/theming/light/close'
}
class InvoicesDialog(Factory.Popup):
def __init__(self, app, screen, callback):
Factory.Popup.__init__(self)
self.app = app
self.screen = screen
self.callback = callback
self.cards = {}
self.context_menu = None
def get_card(self, pr):
key = pr.get_id()
ci = self.cards.get(key)
if ci is None:
ci = Factory.InvoiceItem()
ci.key = key
ci.screen = self
self.cards[key] = ci
ci.requestor = pr.get_requestor()
ci.memo = pr.get_memo()
amount = pr.get_amount()
if amount:
ci.amount = self.app.format_amount_and_units(amount)
status = self.app.wallet.invoices.get_status(ci.key)
ci.status = invoice_text[status]
ci.icon = pr_icon[status]
else:
ci.amount = _('No Amount')
ci.status = ''
exp = pr.get_expiration_date()
ci.date = format_time(exp) if exp else _('Never')
return ci
def update(self):
self.menu_actions = [('Pay', self.do_pay), ('Details', self.do_view), ('Delete', self.do_delete)]
invoices_list = self.ids.invoices_container
invoices_list.clear_widgets()
_list = self.app.wallet.invoices.sorted_list()
for pr in _list:
ci = self.get_card(pr)
invoices_list.add_widget(ci)
def do_pay(self, obj):
self.hide_menu()
self.dismiss()
pr = self.app.wallet.invoices.get(obj.key)
self.app.on_pr(pr)
def do_view(self, obj):
pr = self.app.wallet.invoices.get(obj.key)
pr.verify(self.app.wallet.contacts)
self.app.show_pr_details(pr.get_dict(), obj.status, True)
def do_delete(self, obj):
from .question import Question
def cb(result):
if result:
self.app.wallet.invoices.remove(obj.key)
self.hide_menu()
self.update()
d = Question(_('Delete invoice?'), cb)
d.open()
def show_menu(self, obj):
self.hide_menu()
self.context_menu = ContextMenu(obj, self.menu_actions)
self.ids.box.add_widget(self.context_menu)
def hide_menu(self):
if self.context_menu is not None:
self.ids.box.remove_widget(self.context_menu)
self.context_menu = None
|
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from decimal import Decimal
Builder.load_string('''
<InvoicesLabel@Label>
#color: .305, .309, .309, 1
text_size: self.width, None
halign: 'left'
valign: 'top'
<InvoiceItem@CardItem>
requestor: ''
memo: ''
amount: ''
status: ''
date: ''
icon: 'atlas://electrum_plcu/gui/kivy/theming/light/important'
Image:
id: icon
source: root.icon
size_hint: None, 1
width: self.height *.54
mipmap: True
BoxLayout:
spacing: '8dp'
height: '32dp'
orientation: 'vertical'
Widget
InvoicesLabel:
text: root.requestor
shorten: True
Widget
InvoicesLabel:
text: root.memo
color: .699, .699, .699, 1
font_size: '13sp'
shorten: True
Widget
BoxLayout:
spacing: '8dp'
height: '32dp'
orientation: 'vertical'
Widget
InvoicesLabel:
text: root.amount
font_size: '15sp'
halign: 'right'
width: '110sp'
Widget
InvoicesLabel:
text: root.status
font_size: '13sp'
halign: 'right'
color: .699, .699, .699, 1
Widget
<InvoicesDialog@Popup>
id: popup
title: _('Invoices')
BoxLayout:
id: box
orientation: 'vertical'
spacing: '1dp'
ScrollView:
GridLayout:
cols: 1
id: invoices_container
size_hint: 1, None
height: self.minimum_height
spacing: '2dp'
padding: '12dp'
''')
from kivy.properties import BooleanProperty
from electrum_plcu.gui.kivy.i18n import _
from electrum_plcu.util import format_time
from electrum_plcu.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum_plcu.gui.kivy.uix.context_menu import ContextMenu
# Human-readable label for each payment-request status code.
invoice_text = {
    PR_UNPAID:_('Pending'),
    PR_UNKNOWN:_('Unknown'),
    PR_PAID:_('Paid'),
    PR_EXPIRED:_('Expired')
}
# Atlas icon shown next to an invoice, keyed by the same status codes.
pr_icon = {
    PR_UNPAID: 'atlas://electrum_plcu/gui/kivy/theming/light/important',
    PR_UNKNOWN: 'atlas://electrum_plcu/gui/kivy/theming/light/important',
    PR_PAID: 'atlas://electrum_plcu/gui/kivy/theming/light/confirmed',
    PR_EXPIRED: 'atlas://electrum_plcu/gui/kivy/theming/light/close'
}
class InvoicesDialog(Factory.Popup):
    """Popup listing the wallet's invoices, with a per-item context menu
    offering Pay / Details / Delete actions."""

    def __init__(self, app, screen, callback):
        Factory.Popup.__init__(self)
        self.app = app  # main application object (wallet, amount formatting)
        self.screen = screen
        self.callback = callback
        self.cards = {}  # invoice id -> cached InvoiceItem widget
        self.context_menu = None

    def get_card(self, pr):
        """Return the (cached) InvoiceItem widget for payment request *pr*,
        refreshing its displayed fields from the request."""
        key = pr.get_id()
        ci = self.cards.get(key)
        if ci is None:
            ci = Factory.InvoiceItem()
            ci.key = key
            ci.screen = self
            self.cards[key] = ci
        ci.requestor = pr.get_requestor()
        ci.memo = pr.get_memo()
        amount = pr.get_amount()
        if amount:
            ci.amount = self.app.format_amount_and_units(amount)
            status = self.app.wallet.invoices.get_status(ci.key)
            ci.status = invoice_text[status]
            ci.icon = pr_icon[status]
        else:
            # Amount-less invoices get no status text or icon update.
            ci.amount = _('No Amount')
            ci.status = ''
        exp = pr.get_expiration_date()
        ci.date = format_time(exp) if exp else _('Never')
        return ci

    def update(self):
        """Rebuild the visible list from the wallet's current invoices."""
        self.menu_actions = [('Pay', self.do_pay), ('Details', self.do_view), ('Delete', self.do_delete)]
        invoices_list = self.ids.invoices_container
        invoices_list.clear_widgets()
        _list = self.app.wallet.invoices.sorted_list()
        for pr in _list:
            ci = self.get_card(pr)
            invoices_list.add_widget(ci)

    def do_pay(self, obj):
        # Close the menu and this dialog before handing off the request.
        self.hide_menu()
        self.dismiss()
        pr = self.app.wallet.invoices.get(obj.key)
        self.app.on_pr(pr)

    def do_view(self, obj):
        """Verify and show the details of the selected invoice."""
        pr = self.app.wallet.invoices.get(obj.key)
        pr.verify(self.app.wallet.contacts)
        self.app.show_pr_details(pr.get_dict(), obj.status, True)

    def do_delete(self, obj):
        """Ask for confirmation, then remove the invoice and refresh."""
        from .question import Question
        def cb(result):
            if result:
                self.app.wallet.invoices.remove(obj.key)
                self.hide_menu()
                self.update()
        d = Question(_('Delete invoice?'), cb)
        d.open()

    def show_menu(self, obj):
        # Only one context menu may exist at a time; drop any current one.
        self.hide_menu()
        self.context_menu = ContextMenu(obj, self.menu_actions)
        self.ids.box.add_widget(self.context_menu)

    def hide_menu(self):
        if self.context_menu is not None:
            self.ids.box.remove_widget(self.context_menu)
            self.context_menu = None
|
en
| 0.494637
|
<InvoicesLabel@Label> #color: .305, .309, .309, 1 text_size: self.width, None halign: 'left' valign: 'top' <InvoiceItem@CardItem> requestor: '' memo: '' amount: '' status: '' date: '' icon: 'atlas://electrum_plcu/gui/kivy/theming/light/important' Image: id: icon source: root.icon size_hint: None, 1 width: self.height *.54 mipmap: True BoxLayout: spacing: '8dp' height: '32dp' orientation: 'vertical' Widget InvoicesLabel: text: root.requestor shorten: True Widget InvoicesLabel: text: root.memo color: .699, .699, .699, 1 font_size: '13sp' shorten: True Widget BoxLayout: spacing: '8dp' height: '32dp' orientation: 'vertical' Widget InvoicesLabel: text: root.amount font_size: '15sp' halign: 'right' width: '110sp' Widget InvoicesLabel: text: root.status font_size: '13sp' halign: 'right' color: .699, .699, .699, 1 Widget <InvoicesDialog@Popup> id: popup title: _('Invoices') BoxLayout: id: box orientation: 'vertical' spacing: '1dp' ScrollView: GridLayout: cols: 1 id: invoices_container size_hint: 1, None height: self.minimum_height spacing: '2dp' padding: '12dp'
| 2.090873
| 2
|
pyqt_image_file_explorer_table_widget/imageLabelWidget.py
|
yjg30737/pyqt-image-file-explorer
| 2
|
6626184
|
import os.path
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QWidget, QLabel, QVBoxLayout
from PyQt5.QtCore import Qt
from pyqt_image_file_explorer_table_widget.imageWidget import ImageWidget
class ImageLabelWidget(QWidget):
def __init__(self, filename='', parent=None):
super().__init__(parent)
self.__absname = filename
self.__initUi(filename)
def __initUi(self, filename=''):
self.__topWidget = ImageWidget()
self.__topWidget.setPixmap(QPixmap(filename))
self.__bottomWidget = QLabel(os.path.basename(filename))
self.__bottomWidget.setAlignment(Qt.AlignCenter)
self.__bottomWidget.setWordWrap(True)
lay = QVBoxLayout()
lay.addWidget(self.__topWidget)
lay.addWidget(self.__bottomWidget)
lay.setContentsMargins(0, 0, 0, 0)
self.setLayout(lay)
def getText(self):
return self.__bottomWidget.text()
def getTextAsAbsName(self):
return self.__absname
def setText(self, text):
self.__bottomWidget.setText(text)
def showTinyImageBigger(self, f: bool):
self.__topWidget.showTinyImageBigger(f)
|
import os.path
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QWidget, QLabel, QVBoxLayout
from PyQt5.QtCore import Qt
from pyqt_image_file_explorer_table_widget.imageWidget import ImageWidget
class ImageLabelWidget(QWidget):
    """Image thumbnail stacked above a centered, word-wrapped filename label."""

    def __init__(self, filename='', parent=None):
        super().__init__(parent)
        self.__absname = filename  # keep the full path for later retrieval
        self.__initUi(filename)

    def __initUi(self, filename=''):
        image_part = ImageWidget()
        image_part.setPixmap(QPixmap(filename))

        caption = QLabel(os.path.basename(filename))
        caption.setAlignment(Qt.AlignCenter)
        caption.setWordWrap(True)

        layout = QVBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        for widget in (image_part, caption):
            layout.addWidget(widget)
        self.setLayout(layout)

        self.__topWidget = image_part
        self.__bottomWidget = caption

    def getText(self):
        """Filename as displayed (basename only)."""
        return self.__bottomWidget.text()

    def getTextAsAbsName(self):
        """Full path the widget was created with."""
        return self.__absname

    def setText(self, text):
        self.__bottomWidget.setText(text)

    def showTinyImageBigger(self, f: bool):
        self.__topWidget.showTinyImageBigger(f)
|
none
| 1
| 2.601587
| 3
|
|
stardog/http/client.py
|
alimoabd2127/pystardog
| 0
|
6626185
|
<reponame>alimoabd2127/pystardog<gh_stars>0
import requests
import requests.auth
import requests_toolbelt.multipart as multipart
from .. import exceptions as exceptions
class Client(object):
DEFAULT_ENDPOINT = "http://localhost:5820"
DEFAULT_USERNAME = "admin"
DEFAULT_PASSWORD = "<PASSWORD>"
def __init__(
self,
endpoint=None,
database=None,
username=None,
password=None,
session=None,
auth=None,
):
self.url = endpoint if endpoint else self.DEFAULT_ENDPOINT
# XXX this might not be right when the auth object is used. Ideally we could drop storing this
# information with this object but it is used when a store procedure is made as the "creator"
self.username = username if username else self.DEFAULT_USERNAME
if database:
self.url = "{}/{}".format(self.url, database)
if session is None:
self.session = requests.Session()
elif isinstance(session, requests.session.Session):
# allows using e.g. proxy configuration defined explicitly
# besides standard environment variables like http_proxy, https_proxy, no_proxy and curl_ca_bundle
self.session = session
else:
raise TypeError(
f"{type(session)=} must be a valid requests.session.Session object."
)
if auth is None:
auth = requests.auth.HTTPBasicAuth(
self.username, password if password else self.DEFAULT_PASSWORD
)
self.session.auth = auth
def post(self, path, **kwargs):
return self.__wrap(self.session.post(self.url + path, **kwargs))
def put(self, path, **kwargs):
return self.__wrap(self.session.put(self.url + path, **kwargs))
def get(self, path, **kwargs):
return self.__wrap(self.session.get(self.url + path, **kwargs))
def delete(self, path, **kwargs):
return self.__wrap(self.session.delete(self.url + path, **kwargs))
def close(self):
self.session.close()
def __wrap(self, request):
if not request.ok:
try:
msg = request.json()
except ValueError:
# sometimes errors come as strings
msg = {"message": request.text}
raise exceptions.StardogException(
"[{}] {}: {}".format(
request.status_code, msg.get("code", ""), msg.get("message", "")
)
)
return request
def _multipart(self, response):
decoder = multipart.decoder.MultipartDecoder.from_response(response)
return [part.content for part in decoder.parts]
|
import requests
import requests.auth
import requests_toolbelt.multipart as multipart
from .. import exceptions as exceptions
class Client(object):
    """HTTP client wrapping a requests session for a Stardog endpoint.

    All verb helpers prefix ``self.url`` and raise StardogException on any
    non-2xx response.
    """

    DEFAULT_ENDPOINT = "http://localhost:5820"
    DEFAULT_USERNAME = "admin"
    DEFAULT_PASSWORD = "<PASSWORD>"

    def __init__(
        self,
        endpoint=None,
        database=None,
        username=None,
        password=None,
        session=None,
        auth=None,
    ):
        base_url = endpoint if endpoint else self.DEFAULT_ENDPOINT
        # XXX this might not be right when the auth object is used. Ideally we could drop storing this
        # information with this object but it is used when a store procedure is made as the "creator"
        self.username = username if username else self.DEFAULT_USERNAME
        self.url = "{}/{}".format(base_url, database) if database else base_url
        if session is None:
            self.session = requests.Session()
        elif isinstance(session, requests.session.Session):
            # An explicit session lets callers configure e.g. proxies beyond
            # the standard environment variables (http_proxy, https_proxy,
            # no_proxy, curl_ca_bundle).
            self.session = session
        else:
            raise TypeError(
                f"{type(session)=} must be a valid requests.session.Session object."
            )
        if auth is None:
            auth = requests.auth.HTTPBasicAuth(
                self.username, password if password else self.DEFAULT_PASSWORD
            )
        self.session.auth = auth

    def post(self, path, **kwargs):
        return self.__wrap(self.session.post(self.url + path, **kwargs))

    def put(self, path, **kwargs):
        return self.__wrap(self.session.put(self.url + path, **kwargs))

    def get(self, path, **kwargs):
        return self.__wrap(self.session.get(self.url + path, **kwargs))

    def delete(self, path, **kwargs):
        return self.__wrap(self.session.delete(self.url + path, **kwargs))

    def close(self):
        self.session.close()

    def __wrap(self, request):
        """Return the response unchanged, or raise StardogException on error."""
        if request.ok:
            return request
        try:
            msg = request.json()
        except ValueError:
            # sometimes errors come as strings
            msg = {"message": request.text}
        raise exceptions.StardogException(
            "[{}] {}: {}".format(
                request.status_code, msg.get("code", ""), msg.get("message", "")
            )
        )

    def _multipart(self, response):
        """Split a multipart response into a list of raw part bodies."""
        decoder = multipart.decoder.MultipartDecoder.from_response(response)
        return [part.content for part in decoder.parts]
|
en
| 0.860195
|
# XXX this might not be right when the auth object is used. Ideally we could drop storing this # information with this object but it is used when a store procedure is made as the "creator" # allows using e.g. proxy configuration defined explicitly # besides standard environment variables like http_proxy, https_proxy, no_proxy and curl_ca_bundle # sometimes errors come as strings
| 2.663983
| 3
|
src/trafficSimulator/vehicle_generator.py
|
slack-TU/trafficSimulator
| 156
|
6626186
|
from .vehicle import Vehicle
from numpy.random import randint
class VehicleGenerator:
def __init__(self, sim, config={}):
self.sim = sim
# Set default configurations
self.set_default_config()
# Update configurations
for attr, val in config.items():
setattr(self, attr, val)
# Calculate properties
self.init_properties()
def set_default_config(self):
"""Set default configuration"""
self.vehicle_rate = 20
self.vehicles = [
(1, {})
]
self.last_added_time = 0
def init_properties(self):
self.upcoming_vehicle = self.generate_vehicle()
def generate_vehicle(self):
"""Returns a random vehicle from self.vehicles with random proportions"""
total = sum(pair[0] for pair in self.vehicles)
r = randint(1, total+1)
for (weight, config) in self.vehicles:
r -= weight
if r <= 0:
return Vehicle(config)
def update(self):
"""Add vehicles"""
if self.sim.t - self.last_added_time >= 60 / self.vehicle_rate:
# If time elasped after last added vehicle is
# greater than vehicle_period; generate a vehicle
road = self.sim.roads[self.upcoming_vehicle.path[0]]
if len(road.vehicles) == 0\
or road.vehicles[-1].x > self.upcoming_vehicle.s0 + self.upcoming_vehicle.l:
# If there is space for the generated vehicle; add it
self.upcoming_vehicle.time_added = self.sim.t
road.vehicles.append(self.upcoming_vehicle)
# Reset last_added_time and upcoming_vehicle
self.last_added_time = self.sim.t
self.upcoming_vehicle = self.generate_vehicle()
|
from .vehicle import Vehicle
from numpy.random import randint
class VehicleGenerator:
    """Spawns vehicles on a simulation at a configurable rate.

    ``config`` may override any attribute assigned in
    ``set_default_config`` (e.g. ``vehicle_rate``, ``vehicles``).

    Bug fix: the original used the mutable default ``config={}``, which is
    shared across all calls; ``None`` is now the sentinel.
    """

    def __init__(self, sim, config=None):
        self.sim = sim
        # Set default configurations
        self.set_default_config()
        # Update configurations from the (optional) override dict
        for attr, val in (config or {}).items():
            setattr(self, attr, val)
        # Calculate properties
        self.init_properties()

    def set_default_config(self):
        """Set default configuration"""
        self.vehicle_rate = 20  # vehicles per minute
        # (weight, vehicle_config) pairs; weights set relative spawn odds.
        self.vehicles = [
            (1, {})
        ]
        self.last_added_time = 0

    def init_properties(self):
        self.upcoming_vehicle = self.generate_vehicle()

    def generate_vehicle(self):
        """Returns a random vehicle from self.vehicles with random proportions"""
        total = sum(pair[0] for pair in self.vehicles)
        r = randint(1, total + 1)
        for (weight, config) in self.vehicles:
            r -= weight
            if r <= 0:
                return Vehicle(config)

    def update(self):
        """Add the pending vehicle once the spawn period has elapsed."""
        if self.sim.t - self.last_added_time >= 60 / self.vehicle_rate:
            # Time elapsed since the last added vehicle exceeds the
            # spawn period, so try to place the pending vehicle.
            road = self.sim.roads[self.upcoming_vehicle.path[0]]
            if len(road.vehicles) == 0\
                    or road.vehicles[-1].x > self.upcoming_vehicle.s0 + self.upcoming_vehicle.l:
                # There is space for the generated vehicle; add it.
                self.upcoming_vehicle.time_added = self.sim.t
                road.vehicles.append(self.upcoming_vehicle)
                # Reset last_added_time and upcoming_vehicle
                self.last_added_time = self.sim.t
                self.upcoming_vehicle = self.generate_vehicle()
|
en
| 0.747144
|
# Set default configurations # Update configurations # Calculate properties Set default configuration Returns a random vehicle from self.vehicles with random proportions Add vehicles # If time elasped after last added vehicle is # greater than vehicle_period; generate a vehicle # If there is space for the generated vehicle; add it # Reset last_added_time and upcoming_vehicle
| 3.059097
| 3
|
python/paddle/fluid/tests/unittests/test_save_inference_model_conditional_op.py
|
zmxdream/Paddle
| 8
|
6626187
|
<gh_stars>1-10
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.nn.functional as F
def getModelOp(model_path):
model_bytes = paddle.static.load_from_file(model_path)
pg = paddle.static.deserialize_program(model_bytes)
main_block = pg.desc.block(0)
size = main_block.op_size()
result = set()
for i in range(0, size):
#print(main_block.op(i).type())
result.add(main_block.op(i).type())
return result
class WhileNet(paddle.nn.Layer):
def __init__(self):
super(WhileNet, self).__init__()
def forward(self, x):
y = paddle.rand(shape=[1, 3, 4, 4])
w1 = paddle.shape(y)[0]
w2 = paddle.shape(x)[0]
while w2 != w1:
x = F.avg_pool2d(x, kernel_size=3, padding=1, stride=2)
w2 = paddle.shape(x)[0]
return x + y
class ForNet(paddle.nn.Layer):
def __init__(self):
super(ForNet, self).__init__()
def forward(self, x):
y = paddle.randint(low=0, high=5, shape=[1], dtype='int32')
z = paddle.randint(low=0, high=5, shape=[1], dtype='int32')
for i in range(0, z):
x = x + i
return x + y
class IfElseNet(paddle.nn.Layer):
def __init__(self):
super(IfElseNet, self).__init__()
def forward(self, x):
y = paddle.to_tensor([5])
if x > y:
x = x + 1
else:
x = x - 1
return x
class TestConditionalOp(unittest.TestCase):
def test_while_op(self):
paddle.disable_static()
net = WhileNet()
net = paddle.jit.to_static(
net,
input_spec=[
paddle.static.InputSpec(
shape=[1, 3, 8, 8], dtype='float32')
])
paddle.jit.save(net, './while_net')
right_pdmodel = set([
"uniform_random", "shape", "slice", "not_equal", "while",
"elementwise_add"
])
paddle.enable_static()
pdmodel = getModelOp("while_net.pdmodel")
#print(len(right_pdmodel.difference(pdmodel)))
self.assertTrue(
len(right_pdmodel.difference(pdmodel)) == 0,
"The while op is pruned by mistake.")
def test_for_op(self):
paddle.disable_static()
net = ForNet()
net = paddle.jit.to_static(
net,
input_spec=[paddle.static.InputSpec(
shape=[1], dtype='int32')])
paddle.jit.save(net, './for_net')
right_pdmodel = set([
"randint", "fill_constant", "cast", "less_than", "while",
"elementwise_add"
])
paddle.enable_static()
pdmodel = getModelOp("for_net.pdmodel")
#print(len(right_pdmodel.difference(pdmodel)))
self.assertTrue(
len(right_pdmodel.difference(pdmodel)) == 0,
"The for op is pruned by mistake.")
def test_if_op(self):
paddle.disable_static()
net = IfElseNet()
net = paddle.jit.to_static(
net,
input_spec=[paddle.static.InputSpec(
shape=[1], dtype='int32')])
paddle.jit.save(net, './if_net')
right_pdmodel = set([
"assign_value", "greater_than", "cast", "conditional_block",
"logical_not", "select_input"
])
paddle.enable_static()
pdmodel = getModelOp("if_net.pdmodel")
#print(len(right_pdmodel.difference(pdmodel)))
self.assertTrue(
len(right_pdmodel.difference(pdmodel)) == 0,
"The if op is pruned by mistake.")
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.nn.functional as F
def getModelOp(model_path):
    """Return the set of operator type names in block 0 of a saved program."""
    model_bytes = paddle.static.load_from_file(model_path)
    program_desc = paddle.static.deserialize_program(model_bytes)
    block0 = program_desc.desc.block(0)
    return {block0.op(idx).type() for idx in range(block0.op_size())}
class WhileNet(paddle.nn.Layer):
    """Net with a data-dependent ``while`` loop, used to test while-op export."""

    def __init__(self):
        super(WhileNet, self).__init__()

    def forward(self, x):
        y = paddle.rand(shape=[1, 3, 4, 4])
        w1 = paddle.shape(y)[0]
        w2 = paddle.shape(x)[0]
        # Loop while the first dims differ; the tensor condition makes
        # to_static emit a real while op in the saved program.
        while w2 != w1:
            x = F.avg_pool2d(x, kernel_size=3, padding=1, stride=2)
            w2 = paddle.shape(x)[0]
        return x + y
class ForNet(paddle.nn.Layer):
    """Net with a tensor-bounded ``for`` loop, used to test loop export."""

    def __init__(self):
        super(ForNet, self).__init__()

    def forward(self, x):
        y = paddle.randint(low=0, high=5, shape=[1], dtype='int32')
        z = paddle.randint(low=0, high=5, shape=[1], dtype='int32')
        # The loop bound z is a tensor; the exporting test expects this to
        # appear as a while op in the saved program.
        for i in range(0, z):
            x = x + i
        return x + y
class IfElseNet(paddle.nn.Layer):
    """Net with a tensor-dependent branch, used to test conditional export."""

    def __init__(self):
        super(IfElseNet, self).__init__()

    def forward(self, x):
        y = paddle.to_tensor([5])
        # Tensor-dependent condition; the exporting test expects a
        # conditional_block op in the saved program.
        if x > y:
            x = x + 1
        else:
            x = x - 1
        return x
class TestConditionalOp(unittest.TestCase):
    """Check that jit.save keeps control-flow ops in the exported program."""

    def test_while_op(self):
        paddle.disable_static()
        net = WhileNet()
        net = paddle.jit.to_static(
            net,
            input_spec=[
                paddle.static.InputSpec(
                    shape=[1, 3, 8, 8], dtype='float32')
            ])
        paddle.jit.save(net, './while_net')
        # Ops that must survive pruning in the saved program.
        right_pdmodel = set([
            "uniform_random", "shape", "slice", "not_equal", "while",
            "elementwise_add"
        ])
        paddle.enable_static()
        pdmodel = getModelOp("while_net.pdmodel")
        #print(len(right_pdmodel.difference(pdmodel)))
        self.assertTrue(
            len(right_pdmodel.difference(pdmodel)) == 0,
            "The while op is pruned by mistake.")

    def test_for_op(self):
        paddle.disable_static()
        net = ForNet()
        net = paddle.jit.to_static(
            net,
            input_spec=[paddle.static.InputSpec(
                shape=[1], dtype='int32')])
        paddle.jit.save(net, './for_net')
        # The tensor-bounded for loop must be exported as a while op.
        right_pdmodel = set([
            "randint", "fill_constant", "cast", "less_than", "while",
            "elementwise_add"
        ])
        paddle.enable_static()
        pdmodel = getModelOp("for_net.pdmodel")
        #print(len(right_pdmodel.difference(pdmodel)))
        self.assertTrue(
            len(right_pdmodel.difference(pdmodel)) == 0,
            "The for op is pruned by mistake.")

    def test_if_op(self):
        paddle.disable_static()
        net = IfElseNet()
        net = paddle.jit.to_static(
            net,
            input_spec=[paddle.static.InputSpec(
                shape=[1], dtype='int32')])
        paddle.jit.save(net, './if_net')
        # The tensor-dependent branch must be exported as conditional_block.
        right_pdmodel = set([
            "assign_value", "greater_than", "cast", "conditional_block",
            "logical_not", "select_input"
        ])
        paddle.enable_static()
        pdmodel = getModelOp("if_net.pdmodel")
        #print(len(right_pdmodel.difference(pdmodel)))
        self.assertTrue(
            len(right_pdmodel.difference(pdmodel)) == 0,
            "The if op is pruned by mistake.")
if __name__ == '__main__':
unittest.main()
|
en
| 0.763452
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #print(main_block.op(i).type()) #print(len(right_pdmodel.difference(pdmodel))) #print(len(right_pdmodel.difference(pdmodel))) #print(len(right_pdmodel.difference(pdmodel)))
| 2.261843
| 2
|
graphs/implementation.py
|
albexl/data-structures-for-teaching
| 2
|
6626188
|
from typing import Dict, Iterator, List, Protocol, TypeVar, Tuple
Location = TypeVar('Location')
class Graph(Protocol):
def neighbors(self, id: Location) -> List[Location]:
pass
class SimpleGraph:
def __init__(self):
self.edges: Dict[Location, List[Location]] = {}
def neighbors(self, id: Location) -> List[Location]:
return self.edges[id]
GridLocation = Tuple[int, int]
class Grid:
def __init__(height: int, width: int):
self.height = height
self.width = width
self.walls: List[GridLocation] = []
def in_bounds(self, id: GridLocation) -> bool:
(x, y) = id
return x >= 0 and x < self.width and y >= 0 and y < self.height
def passable(self, id: GridLocation):
return id not in self.walls
def neighbors(self, id: GridLocation) -> Iterator[GridLocation]:
(x, y) = id
dx = [1, -1, 0, 0]
dy = [0, 0, 1, -1]
neighbors = [(x + dx[i], y + dy[i]) for i in range(len(dx))]
neighbors = filter(self.in_bounds, neighbors)
neighbors = filter(self.passable, neighbors)
return neighbors
|
from typing import Dict, Iterator, List, Protocol, TypeVar, Tuple
Location = TypeVar('Location')  # generic node/location type


class Graph(Protocol):
    """Structural interface: anything that can enumerate a node's neighbors."""

    def neighbors(self, id: Location) -> List[Location]:
        pass


class SimpleGraph:
    """Directed graph stored as an explicit adjacency list."""

    def __init__(self):
        # Maps each location to the list of locations reachable from it.
        self.edges: Dict[Location, List[Location]] = {}

    def neighbors(self, id: Location) -> List[Location]:
        """Locations adjacent to ``id``; raises KeyError for unknown nodes."""
        return self.edges[id]
GridLocation = Tuple[int, int]


class Grid:
    """Rectangular grid with walls; locations are ``(x, y)`` tuples.

    Bug fix: the original ``__init__`` was missing ``self`` as the first
    parameter, so ``Grid(h, w)`` raised a TypeError and the class was
    unusable.
    """

    def __init__(self, height: int, width: int):
        self.height = height
        self.width = width
        self.walls: List[GridLocation] = []  # impassable cells

    def in_bounds(self, id: GridLocation) -> bool:
        """True if ``id`` lies inside the width x height rectangle."""
        (x, y) = id
        return 0 <= x < self.width and 0 <= y < self.height

    def passable(self, id: GridLocation) -> bool:
        """True if ``id`` is not a wall."""
        return id not in self.walls

    def neighbors(self, id: GridLocation) -> Iterator[GridLocation]:
        """Yield the in-bounds, passable 4-neighbors of ``id``."""
        (x, y) = id
        candidates = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
        return filter(self.passable, filter(self.in_bounds, candidates))
|
none
| 1
| 3.324218
| 3
|
|
math/1093.statistics-from-a-large-sample.py
|
y1zhou/leetcode
| 0
|
6626189
|
<filename>math/1093.statistics-from-a-large-sample.py
#
# @lc app=leetcode id=1093 lang=python3
#
# [1093] Statistics from a Large Sample
#
from typing import List
class Solution:
def sampleStats(self, count: List[int]) -> List[float]:
imin, imax, imode, smean, smedian = -1, 0, 0, 0, 0
sampleSize = 0
for i in range(len(count)):
if imin == -1 and count[i] != 0:
imin = i
if count[i] != 0:
imax = i
smean += i * count[i]
sampleSize += count[i]
if count[i] > count[imode]:
imode = i
if sampleSize % 2 == 0:
midPoint = sampleSize // 2
curPos = 0
for i in range(imin, imax + 1):
smedian += count[i]
if curPos > 0:
smedian = (i + curPos) / 2
break
if smedian > midPoint:
smedian = i
break
elif smedian == midPoint:
curPos = i
else:
midPoint = sampleSize // 2 + 1
for i in range(imin, imax + 1):
smedian += count[i]
if smedian > midPoint:
smedian = i
break
return [
float(imin),
float(imax),
smean / sampleSize,
float(smedian),
float(imode),
]
|
<filename>math/1093.statistics-from-a-large-sample.py
#
# @lc app=leetcode id=1093 lang=python3
#
# [1093] Statistics from a Large Sample
#
from typing import List
class Solution:
    def sampleStats(self, count: List[int]) -> List[float]:
        """Return ``[minimum, maximum, mean, median, mode]`` of a histogram.

        ``count[i]`` is how many times the value ``i`` occurs in the sample.

        Fixes two defects in the original median scan:
        * even-sized samples whose lower median was value 0 fell through,
          because the sentinel ``curPos = 0`` was indistinguishable from
          "lower median at index 0";
        * odd-sized samples used a strict ``>`` on the cumulative count, so
          the scan could overshoot the true median bucket by one
          (e.g. ``count=[1, 1, 1]`` returned median 2 instead of 1).
        """
        sample_size = sum(count)
        minimum = next(i for i, c in enumerate(count) if c)
        maximum = next(i for i in range(len(count) - 1, -1, -1) if count[i])
        # max() returns the first index with the largest count, matching the
        # original strict-> update rule for ties.
        mode = max(range(len(count)), key=lambda i: count[i])
        mean = sum(i * c for i, c in enumerate(count)) / sample_size

        def kth_smallest(k: int) -> int:
            """Value of the k-th smallest sample element (1-indexed)."""
            seen = 0
            for value, c in enumerate(count):
                seen += c
                if seen >= k:
                    return value

        if sample_size % 2 == 0:
            half = sample_size // 2
            median = (kth_smallest(half) + kth_smallest(half + 1)) / 2
        else:
            median = float(kth_smallest(sample_size // 2 + 1))

        return [
            float(minimum),
            float(maximum),
            mean,
            median,
            float(mode),
        ]
|
en
| 0.586579
|
# # @lc app=leetcode id=1093 lang=python3 # # [1093] Statistics from a Large Sample #
| 3.048805
| 3
|
venv/lib/python3.8/site-packages/lark/load_grammar.py
|
YuehanLee/CS190I
| 0
|
6626190
|
"""Parses and creates Grammar objects"""
import hashlib
import os.path
import sys
from collections import namedtuple
from copy import copy, deepcopy
from io import open
import pkgutil
from ast import literal_eval
from numbers import Integral
from .utils import bfs, Py36, logger, classify_bool, is_id_continue, is_id_start, bfs_all_unique
from .lexer import Token, TerminalDef, PatternStr, PatternRE
from .parse_tree_builder import ParseTreeBuilder
from .parser_frontends import ParsingFrontend
from .common import LexerConf, ParserConf
from .grammar import RuleOptions, Rule, Terminal, NonTerminal, Symbol
from .utils import classify, suppress, dedup_list, Str
from .exceptions import GrammarError, UnexpectedCharacters, UnexpectedToken, ParseError
from .tree import Tree, SlottedTree as ST
from .visitors import Transformer, Visitor, v_args, Transformer_InPlace, Transformer_NonRecursive
inline_args = v_args(inline=True)
__path__ = os.path.dirname(__file__)
IMPORT_PATHS = ['grammars']
EXT = '.lark'
_RE_FLAGS = 'imslux'
_EMPTY = Symbol('__empty__')
_TERMINAL_NAMES = {
'.' : 'DOT',
',' : 'COMMA',
':' : 'COLON',
';' : 'SEMICOLON',
'+' : 'PLUS',
'-' : 'MINUS',
'*' : 'STAR',
'/' : 'SLASH',
'\\' : 'BACKSLASH',
'|' : 'VBAR',
'?' : 'QMARK',
'!' : 'BANG',
'@' : 'AT',
'#' : 'HASH',
'$' : 'DOLLAR',
'%' : 'PERCENT',
'^' : 'CIRCUMFLEX',
'&' : 'AMPERSAND',
'_' : 'UNDERSCORE',
'<' : 'LESSTHAN',
'>' : 'MORETHAN',
'=' : 'EQUAL',
'"' : 'DBLQUOTE',
'\'' : 'QUOTE',
'`' : 'BACKQUOTE',
'~' : 'TILDE',
'(' : 'LPAR',
')' : 'RPAR',
'{' : 'LBRACE',
'}' : 'RBRACE',
'[' : 'LSQB',
']' : 'RSQB',
'\n' : 'NEWLINE',
'\r\n' : 'CRLF',
'\t' : 'TAB',
' ' : 'SPACE',
}
# Grammar Parser
TERMINALS = {
'_LPAR': r'\(',
'_RPAR': r'\)',
'_LBRA': r'\[',
'_RBRA': r'\]',
'_LBRACE': r'\{',
'_RBRACE': r'\}',
'OP': '[+*]|[?](?![a-z])',
'_COLON': ':',
'_COMMA': ',',
'_OR': r'\|',
'_DOT': r'\.(?!\.)',
'_DOTDOT': r'\.\.',
'TILDE': '~',
'RULE': '!?[_?]?[a-z][_a-z0-9]*',
'TERMINAL': '_?[A-Z][_A-Z0-9]*',
'STRING': r'"(\\"|\\\\|[^"\n])*?"i?',
'REGEXP': r'/(?!/)(\\/|\\\\|[^/])*?/[%s]*' % _RE_FLAGS,
'_NL': r'(\r?\n)+\s*',
'WS': r'[ \t]+',
'COMMENT': r'\s*//[^\n]*',
'_TO': '->',
'_IGNORE': r'%ignore',
'_OVERRIDE': r'%override',
'_DECLARE': r'%declare',
'_EXTEND': r'%extend',
'_IMPORT': r'%import',
'NUMBER': r'[+-]?\d+',
}
RULES = {
'start': ['_list'],
'_list': ['_item', '_list _item'],
'_item': ['rule', 'term', 'ignore', 'import', 'declare', 'override', 'extend', '_NL'],
'rule': ['RULE template_params _COLON expansions _NL',
'RULE template_params _DOT NUMBER _COLON expansions _NL'],
'template_params': ['_LBRACE _template_params _RBRACE',
''],
'_template_params': ['RULE',
'_template_params _COMMA RULE'],
'expansions': ['alias',
'expansions _OR alias',
'expansions _NL _OR alias'],
'?alias': ['expansion _TO RULE', 'expansion'],
'expansion': ['_expansion'],
'_expansion': ['', '_expansion expr'],
'?expr': ['atom',
'atom OP',
'atom TILDE NUMBER',
'atom TILDE NUMBER _DOTDOT NUMBER',
],
'?atom': ['_LPAR expansions _RPAR',
'maybe',
'value'],
'value': ['terminal',
'nonterminal',
'literal',
'range',
'template_usage'],
'terminal': ['TERMINAL'],
'nonterminal': ['RULE'],
'?name': ['RULE', 'TERMINAL'],
'maybe': ['_LBRA expansions _RBRA'],
'range': ['STRING _DOTDOT STRING'],
'template_usage': ['RULE _LBRACE _template_args _RBRACE'],
'_template_args': ['value',
'_template_args _COMMA value'],
'term': ['TERMINAL _COLON expansions _NL',
'TERMINAL _DOT NUMBER _COLON expansions _NL'],
'override': ['_OVERRIDE rule',
'_OVERRIDE term'],
'extend': ['_EXTEND rule',
'_EXTEND term'],
'ignore': ['_IGNORE expansions _NL'],
'declare': ['_DECLARE _declare_args _NL'],
'import': ['_IMPORT _import_path _NL',
'_IMPORT _import_path _LPAR name_list _RPAR _NL',
'_IMPORT _import_path _TO name _NL'],
'_import_path': ['import_lib', 'import_rel'],
'import_lib': ['_import_args'],
'import_rel': ['_DOT _import_args'],
'_import_args': ['name', '_import_args _DOT name'],
'name_list': ['_name_list'],
'_name_list': ['name', '_name_list _COMMA name'],
'_declare_args': ['name', '_declare_args name'],
'literal': ['REGEXP', 'STRING'],
}
@inline_args
class EBNF_to_BNF(Transformer_InPlace):
    """Rewrite EBNF operators (?, +, *, ~n..m, [...]) into plain BNF.

    Helper rules created along the way are collected in `new_rules` so the
    caller can append them to the grammar.
    """

    def __init__(self):
        self.new_rules = []
        # Cache: expression -> generated recursion rule, so repeated `x+`
        # or `x*` over the same expression share one helper rule.
        self.rules_by_expr = {}
        self.prefix = 'anon'
        self.i = 0
        self.rule_options = None

    def _add_recurse_rule(self, type_, expr):
        # Create (or reuse) a left-recursive helper rule matching `expr+`.
        if expr in self.rules_by_expr:
            return self.rules_by_expr[expr]

        new_name = '__%s_%s_%d' % (self.prefix, type_, self.i)
        self.i += 1
        t = NonTerminal(new_name)
        tree = ST('expansions', [ST('expansion', [expr]), ST('expansion', [t, expr])])
        self.new_rules.append((new_name, tree, self.rule_options))
        self.rules_by_expr[expr] = t
        return t

    def expr(self, rule, op, *args):
        if op.value == '?':
            empty = ST('expansion', [])
            return ST('expansions', [rule, empty])
        elif op.value == '+':
            # a : b c+ d
            #   -->
            # a : b _c d
            # _c : _c c | c;
            return self._add_recurse_rule('plus', rule)
        elif op.value == '*':
            # a : b c* d
            #   -->
            # a : b _c? d
            # _c : _c c | c;
            new_name = self._add_recurse_rule('star', rule)
            return ST('expansions', [new_name, ST('expansion', [])])
        elif op.value == '~':
            # Repetition range: expand rule~n..m into explicit alternatives.
            if len(args) == 1:
                mn = mx = int(args[0])
            else:
                mn, mx = map(int, args)
                if mx < mn or mn < 0:
                    raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (rule, mn, mx))
            return ST('expansions', [ST('expansion', [rule] * n) for n in range(mn, mx + 1)])
        assert False, op

    def maybe(self, rule):
        keep_all_tokens = self.rule_options and self.rule_options.keep_all_tokens

        def will_not_get_removed(sym):
            # Symbols that survive tree construction need an _EMPTY
            # placeholder so child indices stay aligned.
            if isinstance(sym, NonTerminal):
                return not sym.name.startswith('_')
            if isinstance(sym, Terminal):
                return keep_all_tokens or not sym.filter_out
            assert False

        if any(rule.scan_values(will_not_get_removed)):
            empty = _EMPTY
        else:
            empty = ST('expansion', [])

        return ST('expansions', [rule, empty])
class SimplifyRule_Visitor(Visitor):
    """In-place simplification passes run over each compiled rule tree."""

    @staticmethod
    def _flatten(tree):
        # Merge nested children that have the same node kind as their
        # parent (e.g. expansions inside expansions), until stable.
        while True:
            to_expand = [i for i, child in enumerate(tree.children)
                         if isinstance(child, Tree) and child.data == tree.data]
            if not to_expand:
                break
            tree.expand_kids_by_index(*to_expand)

    def expansion(self, tree):
        # rules_list unpacking
        # a : b (c|d) e
        #  -->
        # a : b c e | b d e
        #
        # In AST terms:
        # expansion(b, expansions(c, d), e)
        #   -->
        # expansions( expansion(b, c, e), expansion(b, d, e) )
        self._flatten(tree)
        for i, child in enumerate(tree.children):
            if isinstance(child, Tree) and child.data == 'expansions':
                tree.data = 'expansions'
                tree.children = [self.visit(ST('expansion', [option if i == j else other
                                                            for j, other in enumerate(tree.children)]))
                                 for option in dedup_list(child.children)]
                self._flatten(tree)
                break

    def alias(self, tree):
        # Push an alias on a group of alternatives down onto each alternative.
        rule, alias_name = tree.children
        if rule.data == 'expansions':
            aliases = []
            for child in tree.children[0].children:
                aliases.append(ST('alias', [child, alias_name]))
            tree.data = 'expansions'
            tree.children = aliases

    def expansions(self, tree):
        self._flatten(tree)
        # Ensure all children are unique
        if len(set(tree.children)) != len(tree.children):
            tree.children = dedup_list(tree.children)   # dedup is expensive, so try to minimize its use
class RuleTreeToText(Transformer):
    """Flatten a simplified rule tree into (symbol-list, alias) pairs."""

    def expansions(self, children):
        # Each child is already an (expansion, alias) pair — pass through.
        return children

    def expansion(self, symbol_list):
        # A plain expansion carries no alias.
        return symbol_list, None

    def alias(self, args):
        (symbol_list, inner_alias), alias_token = args
        # Double alias not allowed
        assert inner_alias is None, (alias_token, symbol_list, '-', inner_alias)
        return symbol_list, alias_token.value
class PrepareAnonTerminals(Transformer_InPlace):
    """Create a unique list of anonymous terminals. Attempt to give meaningful names to them when we add them"""

    def __init__(self, terminals):
        # `terminals` is a list of TerminalDef shared with the caller; newly
        # created anonymous terminals are appended to it in place.
        self.terminals = terminals
        self.term_set = {td.name for td in self.terminals}
        self.term_reverse = {td.pattern: td for td in terminals}
        self.i = 0
        self.rule_options = None

    @inline_args
    def pattern(self, p):
        # `p` is a Pattern found inline inside a rule; return the Terminal
        # symbol standing for it, creating a TerminalDef on first sight.
        value = p.value
        if p in self.term_reverse and p.flags != self.term_reverse[p].pattern.flags:
            raise GrammarError(u'Conflicting flags for the same terminal: %s' % p)

        term_name = None

        if isinstance(p, PatternStr):
            try:
                # If already defined, use the user-defined terminal name
                term_name = self.term_reverse[p].name
            except KeyError:
                # Try to assign an indicative anon-terminal name
                try:
                    term_name = _TERMINAL_NAMES[value]
                except KeyError:
                    if is_id_continue(value) and is_id_start(value[0]) and value.upper() not in self.term_set:
                        term_name = value.upper()

                if term_name in self.term_set:
                    term_name = None

        elif isinstance(p, PatternRE):
            if p in self.term_reverse:  # Kind of a weird placement.name
                term_name = self.term_reverse[p].name
        else:
            assert False, p

        if term_name is None:
            term_name = '__ANON_%d' % self.i
            self.i += 1

        if term_name not in self.term_set:
            assert p not in self.term_reverse
            self.term_set.add(term_name)
            termdef = TerminalDef(term_name, p)
            self.term_reverse[p] = termdef
            self.terminals.append(termdef)

        # Anonymous string terminals are filtered out of the parse tree
        # unless keep_all_tokens is set for the enclosing rule.
        filter_out = False if self.rule_options and self.rule_options.keep_all_tokens else isinstance(p, PatternStr)

        return Terminal(term_name, filter_out=filter_out)
class _ReplaceSymbols(Transformer_InPlace):
    """Helper for ApplyTemplates"""

    def __init__(self):
        # Maps template parameter name -> the symbol to substitute for it.
        self.names = {}

    def value(self, c):
        if len(c) == 1 and isinstance(c[0], Token) and c[0].value in self.names:
            return self.names[c[0].value]
        return self.__default__('value', c, None)

    def template_usage(self, c):
        # The template's own name may itself be a parameter; rewrite it too.
        if c[0] in self.names:
            return self.__default__('template_usage', [self.names[c[0]].name] + c[1:], None)
        return self.__default__('template_usage', c, None)
class ApplyTemplates(Transformer_InPlace):
    """Apply the templates, creating new rules that represent the used templates"""

    def __init__(self, rule_defs):
        # `rule_defs` is shared with the caller and grows as templates are
        # instantiated.
        self.rule_defs = rule_defs
        self.replacer = _ReplaceSymbols()
        self.created_templates = set()

    def template_usage(self, c):
        name = c[0]
        args = c[1:]
        # Instantiated template rules are named e.g. "tmpl{ARG1,ARG2}".
        result_name = "%s{%s}" % (name, ",".join(a.name for a in args))
        if result_name not in self.created_templates:
            self.created_templates.add(result_name)
            # There must be exactly one matching template definition.
            (_n, params, tree, options) ,= (t for t in self.rule_defs if t[0] == name)
            assert len(params) == len(args), args
            result_tree = deepcopy(tree)
            self.replacer.names = dict(zip(params, args))
            self.replacer.transform(result_tree)
            self.rule_defs.append((result_name, [], result_tree, deepcopy(options)))
        return NonTerminal(result_name)
def _rfind(s, choices):
return max(s.rfind(c) for c in choices)
def eval_escaping(s):
    """Interpret escape sequences in the raw text of a grammar literal.

    The input is normalized so that only recognized escapes survive, then
    evaluated through `literal_eval`. Raises GrammarError on a trailing
    backslash or on input that fails to parse as a Python string.
    """
    w = ''
    i = iter(s)
    for n in i:
        w += n
        if n == '\\':
            try:
                n2 = next(i)
            except StopIteration:
                raise GrammarError("Literal ended unexpectedly (bad escaping): `%r`" % s)
            if n2 == '\\':
                w += '\\\\'
            elif n2 not in 'uxnftr':
                # Not a recognized escape: keep the backslash literal.
                w += '\\'
            w += n2

    # Quote/escape so the literal_eval below cannot be escaped out of.
    w = w.replace('\\"', '"').replace("'", "\\'")
    to_eval = "u'''%s'''" % w
    try:
        s = literal_eval(to_eval)
    except SyntaxError as e:
        raise GrammarError(s, e)

    return s
def _literal_to_pattern(literal):
    """Convert a STRING or REGEXP token into a PatternStr / PatternRE.

    Trailing regexp flags (from _RE_FLAGS) are split off the token value;
    the surrounding quotes/slashes are stripped and escapes resolved.
    """
    v = literal.value
    flag_start = _rfind(v, '/"')+1
    assert flag_start > 0
    flags = v[flag_start:]
    assert all(f in _RE_FLAGS for f in flags), flags

    if literal.type == 'STRING' and '\n' in v:
        raise GrammarError('You cannot put newlines in string literals')

    if literal.type == 'REGEXP' and '\n' in v and 'x' not in flags:
        raise GrammarError('You can only use newlines in regular expressions '
                           'with the `x` (verbose) flag')

    v = v[:flag_start]
    assert v[0] == v[-1] and v[0] in '"/'
    x = v[1:-1]

    s = eval_escaping(x)

    if literal.type == 'STRING':
        s = s.replace('\\\\', '\\')
        return PatternStr(s, flags, raw=literal.value)
    elif literal.type == 'REGEXP':
        return PatternRE(s, flags, raw=literal.value)
    else:
        assert False, 'Invariant failed: literal.type not in ["STRING", "REGEXP"]'
@inline_args
class PrepareLiterals(Transformer_InPlace):
    """Replace literal and range nodes with 'pattern' nodes holding
    Pattern objects."""

    def literal(self, literal):
        return ST('pattern', [_literal_to_pattern(literal)])

    def range(self, start, end):
        # "a".."z" becomes the character-class regexp [a-z].
        assert start.type == end.type == 'STRING'
        start = start.value[1:-1]
        end = end.value[1:-1]
        # Range endpoints must each denote exactly one character.
        assert len(eval_escaping(start)) == len(eval_escaping(end)) == 1
        regexp = '[%s-%s]' % (start, end)
        return ST('pattern', [PatternRE(regexp)])
def _make_joined_pattern(regexp, flags_set):
    """Wrap `regexp` in a PatternRE, validating that the flags of the merged
    terminals are compatible on this Python version."""
    # In Python 3.6, a new syntax for flags was introduced, that allows us to restrict the scope
    # of flags to a specific regexp group. We are already using it in `lexer.Pattern._get_flags`
    # However, for prior Python versions, we still need to use global flags, so we have to make sure
    # that there are no flag collisions when we merge several terminals.
    flags = ()
    if not Py36:
        if len(flags_set) > 1:
            raise GrammarError("Lark doesn't support joining terminals with conflicting flags in python <3.6!")
        elif len(flags_set) == 1:
            flags ,= flags_set

    return PatternRE(regexp, flags)
class TerminalTreeToPattern(Transformer):
    """Fold a terminal's definition tree into a single Pattern object:
    expansions concatenate, expansions-groups alternate."""

    def pattern(self, ps):
        p ,= ps
        return p

    def expansion(self, items):
        # Concatenation of sub-patterns.
        assert items
        if len(items) == 1:
            return items[0]

        pattern = ''.join(i.to_regexp() for i in items)
        return _make_joined_pattern(pattern, {i.flags for i in items})

    def expansions(self, exps):
        # Alternation: (?:a|b|...)
        if len(exps) == 1:
            return exps[0]

        pattern = '(?:%s)' % ('|'.join(i.to_regexp() for i in exps))
        return _make_joined_pattern(pattern, {i.flags for i in exps})

    def expr(self, args):
        inner, op = args[:2]
        if op == '~':
            # Repetition range a~n(..m) becomes {n} / {n,m}.
            if len(args) == 3:
                op = "{%d}" % int(args[2])
            else:
                mn, mx = map(int, args[2:])
                if mx < mn:
                    raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (inner, mn, mx))
                op = "{%d,%d}" % (mn, mx)
        else:
            assert len(args) == 2
        return PatternRE('(?:%s)%s' % (inner.to_regexp(), op), inner.flags)

    def maybe(self, expr):
        # [x] is equivalent to x?
        return self.expr(expr + ['?'])

    def alias(self, t):
        raise GrammarError("Aliasing not allowed in terminals (You used -> in the wrong place)")

    def value(self, v):
        return v[0]
class PrepareSymbols(Transformer_InPlace):
    """Turn 'value' nodes into Terminal / NonTerminal symbol objects."""

    def value(self, v):
        v ,= v
        if isinstance(v, Tree):
            return v
        elif v.type == 'RULE':
            return NonTerminal(Str(v.value))
        elif v.type == 'TERMINAL':
            # Terminals whose name starts with '_' are filtered out of the
            # resulting parse tree.
            return Terminal(Str(v.value), filter_out=v.startswith('_'))
        assert False
def nr_deepcopy_tree(t):
    """Deepcopy tree `t` without recursion"""
    # Rebuilding every node through a non-recursive transformer avoids
    # hitting the interpreter recursion limit on very deep trees.
    return Transformer_NonRecursive(False).transform(t)
class Grammar:
    """A fully-loaded grammar: rule and terminal definitions plus the
    %ignore list, ready to be compiled into Rule/TerminalDef objects."""

    def __init__(self, rule_defs, term_defs, ignore):
        self.term_defs = term_defs
        self.rule_defs = rule_defs
        self.ignore = ignore

    def compile(self, start, terminals_to_keep):
        """Compile into (terminals, rules, ignore-names) for the given
        `start` symbols, pruning unused rules and terminals."""
        # We change the trees in-place (to support huge grammars)
        # So deepcopy allows calling compile more than once.
        term_defs = deepcopy(list(self.term_defs))
        rule_defs = [(n, p, nr_deepcopy_tree(t), o) for n, p, t, o in self.rule_defs]

        # ===================
        #  Compile Terminals
        # ===================

        # Convert terminal-trees to strings/regexps

        for name, (term_tree, priority) in term_defs:
            if term_tree is None:  # Terminal added through %declare
                continue
            expansions = list(term_tree.find_data('expansion'))
            if len(expansions) == 1 and not expansions[0].children:
                raise GrammarError("Terminals cannot be empty (%s)" % name)

        transformer = PrepareLiterals() * TerminalTreeToPattern()
        terminals = [TerminalDef(name, transformer.transform(term_tree), priority)
                     for name, (term_tree, priority) in term_defs if term_tree]

        # =================
        #  Compile Rules
        # =================

        # 1. Pre-process terminals
        anon_tokens_transf = PrepareAnonTerminals(terminals)
        transformer = PrepareLiterals() * PrepareSymbols() * anon_tokens_transf  # Adds to terminals

        # 2. Inline Templates

        transformer *= ApplyTemplates(rule_defs)

        # 3. Convert EBNF to BNF (and apply step 1 & 2)
        ebnf_to_bnf = EBNF_to_BNF()
        rules = []
        i = 0
        while i < len(rule_defs):  # We have to do it like this because rule_defs might grow due to templates
            name, params, rule_tree, options = rule_defs[i]
            i += 1
            if len(params) != 0:  # Dont transform templates
                continue
            rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
            ebnf_to_bnf.rule_options = rule_options
            ebnf_to_bnf.prefix = name
            anon_tokens_transf.rule_options = rule_options
            tree = transformer.transform(rule_tree)
            res = ebnf_to_bnf.transform(tree)
            rules.append((name, res, options))
        rules += ebnf_to_bnf.new_rules

        assert len(rules) == len({name for name, _t, _o in rules}), "Whoops, name collision"

        # 4. Compile tree to Rule objects
        rule_tree_to_text = RuleTreeToText()

        simplify_rule = SimplifyRule_Visitor()
        compiled_rules = []
        for rule_content in rules:
            name, tree, options = rule_content
            simplify_rule.visit(tree)
            expansions = rule_tree_to_text.transform(tree)

            for i, (expansion, alias) in enumerate(expansions):
                if alias and name.startswith('_'):
                    raise GrammarError("Rule %s is marked for expansion (it starts with an underscore) and isn't allowed to have aliases (alias=%s)"% (name, alias))

                # Track which positions held the _EMPTY placeholder so tree
                # construction can re-insert empty children later.
                empty_indices = [x==_EMPTY for x in expansion]
                if any(empty_indices):
                    exp_options = copy(options) or RuleOptions()
                    exp_options.empty_indices = empty_indices
                    expansion = [x for x in expansion if x!=_EMPTY]
                else:
                    exp_options = options

                assert all(isinstance(x, Symbol) for x in expansion), expansion
                rule = Rule(NonTerminal(name), expansion, i, alias, exp_options)
                compiled_rules.append(rule)

        # Remove duplicates of empty rules, throw error for non-empty duplicates
        if len(set(compiled_rules)) != len(compiled_rules):
            duplicates = classify(compiled_rules, lambda x: x)
            for dups in duplicates.values():
                if len(dups) > 1:
                    if dups[0].expansion:
                        raise GrammarError("Rules defined twice: %s\n\n(Might happen due to colliding expansion of optionals: [] or ?)"
                                           % ''.join('\n * %s' % i for i in dups))

                    # Empty rule; assert all other attributes are equal
                    assert len({(r.alias, r.order, r.options) for r in dups}) == len(dups)

            # Remove duplicates
            compiled_rules = list(set(compiled_rules))

        # Filter out unused rules (fixed-point iteration from the start symbols)
        while True:
            c = len(compiled_rules)
            used_rules = {s for r in compiled_rules
                          for s in r.expansion
                          if isinstance(s, NonTerminal)
                          and s != r.origin}
            used_rules |= {NonTerminal(s) for s in start}
            compiled_rules, unused = classify_bool(compiled_rules, lambda r: r.origin in used_rules)
            for r in unused:
                logger.debug("Unused rule: %s", r)
            if len(compiled_rules) == c:
                break

        # Filter out unused terminals
        used_terms = {t.name for r in compiled_rules
                      for t in r.expansion
                      if isinstance(t, Terminal)}
        terminals, unused = classify_bool(terminals, lambda t: t.name in used_terms or t.name in self.ignore or t.name in terminals_to_keep)
        if unused:
            logger.debug("Unused terminals: %s", [t.name for t in unused])

        return terminals, compiled_rules, self.ignore
PackageResource = namedtuple('PackageResource', 'pkg_name path')
class FromPackageLoader(object):
    """
    Provides a simple way of creating custom import loaders that load from packages via ``pkgutil.get_data`` instead of using `open`.
    This allows them to be compatible even from within zip files.

    Relative imports are handled, so you can just freely use them.

    pkg_name: The name of the package. You can probably provide `__name__` most of the time
    search_paths: All the path that will be search on absolute imports.
    """

    def __init__(self, pkg_name, search_paths=("", )):
        self.pkg_name = pkg_name
        self.search_paths = search_paths

    def __repr__(self):
        return "%s(%r, %r)" % (type(self).__name__, self.pkg_name, self.search_paths)

    def __call__(self, base_path, grammar_path):
        # Returns (PackageResource, decoded grammar text) for the first path
        # that resolves; raises IOError when the grammar cannot be found.
        if base_path is None:
            to_try = self.search_paths
        else:
            # Check whether or not the importing grammar was loaded by this module.
            if not isinstance(base_path, PackageResource) or base_path.pkg_name != self.pkg_name:
                # Technically false, but FileNotFound doesn't exist in python2.7, and this message should never reach the end user anyway
                raise IOError()
            to_try = [base_path.path]
        for path in to_try:
            full_path = os.path.join(path, grammar_path)
            try:
                text = pkgutil.get_data(self.pkg_name, full_path)
            except IOError:
                continue
            else:
                return PackageResource(self.pkg_name, full_path), text.decode()
        raise IOError()
stdlib_loader = FromPackageLoader('lark', IMPORT_PATHS)
def resolve_term_references(term_dict):
    """Inline terminal-to-terminal references until none remain.

    Raises GrammarError on rule references inside terminals, on undefined
    terminals, and on (direct) terminal self-recursion.
    """
    # TODO Solve with transitive closure (maybe)

    while True:
        changed = False
        for name, token_tree in term_dict.items():
            if token_tree is None:  # Terminal added through %declare
                continue
            for exp in token_tree.find_data('value'):
                item ,= exp.children
                if isinstance(item, Token):
                    if item.type == 'RULE':
                        raise GrammarError("Rules aren't allowed inside terminals (%s in %s)" % (item, name))
                    if item.type == 'TERMINAL':
                        try:
                            term_value = term_dict[item]
                        except KeyError:
                            raise GrammarError("Terminal used but not defined: %s" % item)
                        assert term_value is not None
                        exp.children[0] = term_value
                        changed = True
        if not changed:
            break

    for name, term in term_dict.items():
        if term:    # Not just declared
            for child in term.children:
                ids = [id(x) for x in child.iter_subtrees()]
                if id(term) in ids:
                    raise GrammarError("Recursion in terminal '%s' (recursion is only allowed in rules, not terminals)" % name)
def options_from_rule(name, params, *x):
    """Split a raw rule definition into (name, params, expansions, RuleOptions).

    Decodes the '!' (keep_all_tokens) and '?' (expand1) prefixes from the
    rule name, and an optional leading priority in `x`.
    """
    if len(x) > 1:
        priority, expansions = x
        priority = int(priority)
    else:
        expansions ,= x
        priority = None
    params = [t.value for t in params.children] if params is not None else []  # For the grammar parser

    keep_all_tokens = name.startswith('!')
    name = name.lstrip('!')
    expand1 = name.startswith('?')
    name = name.lstrip('?')

    return name, params, expansions, RuleOptions(keep_all_tokens, expand1, priority=priority,
                                                 template_source=(name if params else None))
def symbols_from_strcase(expansion):
    """Map each name to a symbol: uppercase names are terminals, the rest
    are nonterminals (grammar naming convention)."""
    return [Terminal(x, filter_out=x.startswith('_')) if x.isupper() else NonTerminal(x) for x in expansion]
@inline_args
class PrepareGrammar(Transformer_InPlace):
    """Strip the terminal/nonterminal wrapper nodes, leaving the bare name
    tokens in the tree."""

    def terminal(self, name):
        return name

    def nonterminal(self, name):
        return name
def _find_used_symbols(tree):
    """Return the set of RULE/TERMINAL name tokens referenced anywhere in
    an 'expansions' tree."""
    assert tree.data == 'expansions'
    return {t for x in tree.find_data('expansion')
            for t in x.scan_values(lambda t: t.type in ('RULE', 'TERMINAL'))}
def _get_parser():
    """Build (once) and return the LALR parser for the grammar language
    itself, defined by the module-level TERMINALS and RULES tables."""
    try:
        return _get_parser.cache
    except AttributeError:
        terminals = [TerminalDef(name, PatternRE(value)) for name, value in TERMINALS.items()]

        rules = [options_from_rule(name, None, x) for name, x in RULES.items()]
        rules = [Rule(NonTerminal(r), symbols_from_strcase(x.split()), i, None, o)
                 for r, _p, xs, o in rules for i, x in enumerate(xs)]
        callback = ParseTreeBuilder(rules, ST).create_callback()
        import re
        lexer_conf = LexerConf(terminals, re, ['WS', 'COMMENT'])
        parser_conf = ParserConf(rules, callback, ['start'])
        lexer_conf.lexer_type = 'standard'
        parser_conf.parser_type = 'lalr'
        # Cached on the function object so construction happens only once.
        _get_parser.cache = ParsingFrontend(lexer_conf, parser_conf, {})
        return _get_parser.cache
GRAMMAR_ERRORS = [
('Incorrect type of value', ['a: 1\n']),
('Unclosed parenthesis', ['a: (\n']),
('Unmatched closing parenthesis', ['a: )\n', 'a: [)\n', 'a: (]\n']),
('Expecting rule or terminal definition (missing colon)', ['a\n', 'A\n', 'a->\n', 'A->\n', 'a A\n']),
('Illegal name for rules or terminals', ['Aa:\n']),
('Alias expects lowercase name', ['a: -> "a"\n']),
('Unexpected colon', ['a::\n', 'a: b:\n', 'a: B:\n', 'a: "a":\n']),
('Misplaced operator', ['a: b??', 'a: b(?)', 'a:+\n', 'a:?\n', 'a:*\n', 'a:|*\n']),
('Expecting option ("|") or a new rule or terminal definition', ['a:a\n()\n']),
('Terminal names cannot contain dots', ['A.B\n']),
('Expecting rule or terminal definition', ['"a"\n']),
('%import expects a name', ['%import "a"\n']),
('%ignore expects a value', ['%ignore %import\n']),
]
def _translate_parser_exception(parse, e):
    """Map a low-level parser exception to a friendly grammar-error message,
    or None when no known error pattern matches."""
    matched = e.match_examples(parse, GRAMMAR_ERRORS, use_accepts=True)
    if matched:
        return matched
    if 'STRING' in e.expected:
        return "Expecting a value"
    return None
def _parse_grammar(text, name, start='start'):
    """Parse grammar text into a tree, converting parser exceptions into
    GrammarErrors that include a source-context snippet."""
    try:
        tree = _get_parser().parse(text + '\n', start)
    except UnexpectedCharacters as e:
        context = e.get_context(text)
        raise GrammarError("Unexpected input at line %d column %d in %s: \n\n%s" %
                           (e.line, e.column, name, context))
    except UnexpectedToken as e:
        context = e.get_context(text)
        error = _translate_parser_exception(_get_parser().parse, e)
        if error:
            raise GrammarError("%s, at line %s column %s\n\n%s" % (error, e.line, e.column, context))
        # No friendly translation available: re-raise the original error.
        raise

    return PrepareGrammar().transform(tree)
def _error_repr(error):
    """Render a parse error as a user-facing one-line message."""
    if isinstance(error, UnexpectedToken):
        error2 = _translate_parser_exception(_get_parser().parse, error)
        if error2:
            return error2
        expected = ', '.join(error.accepts or error.expected)
        return "Unexpected token %r. Expected one of: {%s}" % (str(error.token), expected)
    else:
        return str(error)
def _search_interactive_parser(interactive_parser, predicate):
    """Breadth-first search over parser states, feeding dummy tokens, for
    the first state satisfying `predicate`; returns (token-path, state).

    Implicitly returns None when no matching state is reachable.
    """
    def expand(node):
        path, p = node
        for choice in p.choices():
            t = Token(choice, '')
            try:
                new_p = p.feed_token(t)
            except ParseError:   # Illegal
                pass
            else:
                yield path + (choice,), new_p

    for path, p in bfs_all_unique([((), interactive_parser)], expand):
        if predicate(p):
            return path, p
def find_grammar_errors(text, start='start'):
    """Parse `text` collecting (error, message) pairs instead of raising.

    After each error, recovery feeds dummy tokens until a newline is
    accepted, then continues parsing from the next line.
    """
    errors = []
    def on_error(e):
        errors.append((e, _error_repr(e)))

        # recover to a new line
        token_path, _ = _search_interactive_parser(e.interactive_parser.as_immutable(), lambda p: '_NL' in p.choices())
        for token_type in token_path:
            e.interactive_parser.feed_token(Token(token_type, ''))
        e.interactive_parser.feed_token(Token('_NL', '\n'))
        return True

    _tree = _get_parser().parse(text + '\n', start, on_error=on_error)

    # Keep only the first error reported on each line.
    errors_by_line = classify(errors, lambda e: e[0].line)
    errors = [el[0] for el in errors_by_line.values()]      # already sorted

    # Drop the parser references so the errors are picklable/GC-friendly.
    for e in errors:
        e[0].interactive_parser = None
    return errors
def _get_mangle(prefix, aliases, base_mangle=None):
def mangle(s):
if s in aliases:
s = aliases[s]
else:
if s[0] == '_':
s = '_%s__%s' % (prefix, s[1:])
else:
s = '%s__%s' % (prefix, s)
if base_mangle is not None:
s = base_mangle(s)
return s
return mangle
def _mangle_exp(exp, mangle):
    """Return a copy of expansion tree `exp` with every RULE/TERMINAL token
    renamed through `mangle` (no-op when `mangle` is None)."""
    if mangle is None:
        return exp
    exp = deepcopy(exp)  # TODO: is this needed
    for t in exp.iter_subtrees():
        for i, c in enumerate(t.children):
            if isinstance(c, Token) and c.type in ('RULE', 'TERMINAL'):
                t.children[i] = Token(c.type, mangle(c.value))

    return exp
class GrammarBuilder:
    """Accumulates definitions from one or more grammars (handling %import,
    %override, %extend, %ignore and %declare) prior to compilation."""

    def __init__(self, global_keep_all_tokens=False, import_paths=None, used_files=None):
        self.global_keep_all_tokens = global_keep_all_tokens
        self.import_paths = import_paths or []
        self.used_files = used_files or {}

        # name -> (params, expansion-tree, options); terminals store an int
        # priority as options, rules a RuleOptions instance.
        self._definitions = {}
        self._ignore_names = []
    def _is_term(self, name):
        """True if `name` denotes a terminal (uppercase final segment)."""
        # Imported terminals are of the form `Path__to__Grammar__file__TERMINAL_NAME`
        # Only the last part is the actual name, and the rest might contain mixed case
        return name.rpartition('__')[-1].isupper()
    def _grammar_error(self, msg, *names):
        """Raise GrammarError with {name}/{type}/{Type} placeholders (and
        numbered variants for the 2nd name onward) filled from `names`."""
        args = {}
        for i, name in enumerate(names, start=1):
            postfix = '' if i == 1 else str(i)
            args['name' + postfix] = name
            args['type' + postfix] = lowercase_type = ("rule", "terminal")[self._is_term(name)]
            args['Type' + postfix] = lowercase_type.title()
        raise GrammarError(msg.format(**args))
    def _check_options(self, name, options):
        """Validate and normalize `options` for a definition: an int
        priority for terminals (default 1), a RuleOptions for rules
        (default empty, with keep_all_tokens forced when set globally)."""
        if self._is_term(name):
            if options is None:
                options = 1
            # if we don't use Integral here, we run into python2.7/python3 problems with long vs int
            elif not isinstance(options, Integral):
                raise GrammarError("Terminal require a single int as 'options' (e.g. priority), got %s" % (type(options),))
        else:
            if options is None:
                options = RuleOptions()
            elif not isinstance(options, RuleOptions):
                raise GrammarError("Rules require a RuleOptions instance as 'options'")
            if self.global_keep_all_tokens:
                options.keep_all_tokens = True
        return options
    def _define(self, name, exp, params=(), options=None, override=False):
        """Register a rule/terminal definition.

        `override` replaces an existing definition (and requires one to
        exist); without it, redefinition is an error.
        """
        if name in self._definitions:
            if not override:
                self._grammar_error("{Type} '{name}' defined more than once", name)
        elif override:
            self._grammar_error("Cannot override a nonexisting {type} {name}", name)

        if name.startswith('__'):
            self._grammar_error('Names starting with double-underscore are reserved (Error at {name})', name)

        self._definitions[name] = (params, exp, self._check_options(name, options))
    def _extend(self, name, exp, params=(), options=None):
        """Add `exp`'s alternatives to an existing definition (%extend)."""
        if name not in self._definitions:
            self._grammar_error("Can't extend {type} {name} as it wasn't defined before", name)
        if tuple(params) != tuple(self._definitions[name][0]):
            self._grammar_error("Cannot extend {type} with different parameters: {name}", name)
        # TODO: think about what to do with 'options'
        base = self._definitions[name][1]

        # Descend the left spine of nested 'expansions' nodes, then prepend
        # the new alternatives at the innermost level.
        while len(base.children) == 2:
            assert isinstance(base.children[0], Tree) and base.children[0].data == 'expansions', base
            base = base.children[0]
        base.children.insert(0, exp)
    def _ignore(self, exp_or_name):
        """Register a %ignore: either a terminal name, or an expansion that
        becomes an anonymous __IGNORE_n terminal."""
        if isinstance(exp_or_name, str):
            self._ignore_names.append(exp_or_name)
        else:
            assert isinstance(exp_or_name, Tree)
            t = exp_or_name
            if t.data == 'expansions' and len(t.children) == 1:
                t2 ,= t.children
                if t2.data=='expansion' and len(t2.children) == 1:
                    item ,= t2.children
                    if item.data == 'value':
                        item ,= item.children
                        if isinstance(item, Token) and item.type == 'TERMINAL':
                            # %ignore of a bare terminal: ignore it by name.
                            self._ignore_names.append(item.value)
                            return

            name = '__IGNORE_%d'% len(self._ignore_names)
            self._ignore_names.append(name)
            self._definitions[name] = ((), t, 1)
    def _declare(self, *names):
        # %declare: register terminals with no expansion (exp is None).
        for name in names:
            self._define(name, None)
    def _unpack_import(self, stmt, grammar_name):
        """Decode an %import statement into (dotted_path, base_path, aliases).

        `base_path` is None for library imports; otherwise it is the
        directory (or PackageResource) the relative import resolves against.
        """
        if len(stmt.children) > 1:
            path_node, arg1 = stmt.children
        else:
            path_node, = stmt.children
            arg1 = None

        if isinstance(arg1, Tree):  # Multi import
            dotted_path = tuple(path_node.children)
            names = arg1.children
            aliases = dict(zip(names, names))  # Can't have aliased multi import, so all aliases will be the same as names
        else:  # Single import
            dotted_path = tuple(path_node.children[:-1])
            if not dotted_path:
                name ,= path_node.children
                raise GrammarError("Nothing was imported from grammar `%s`" % name)
            name = path_node.children[-1]  # Get name from dotted path
            aliases = {name.value: (arg1 or name).value}  # Aliases if exist

        if path_node.data == 'import_lib':  # Import from library
            base_path = None
        else:  # Relative import
            if grammar_name == '<string>':  # Import relative to script file path if grammar is coded in script
                try:
                    base_file = os.path.abspath(sys.modules['__main__'].__file__)
                except AttributeError:
                    base_file = None
            else:
                base_file = grammar_name  # Import relative to grammar file path if external grammar file
            if base_file:
                if isinstance(base_file, PackageResource):
                    base_path = PackageResource(base_file.pkg_name, os.path.split(base_file.path)[0])
                else:
                    base_path = os.path.split(base_file)[0]
            else:
                base_path = os.path.abspath(os.path.curdir)

        return dotted_path, base_path, aliases
    def _unpack_definition(self, tree, mangle):
        """Extract (name, exp, params, options) from a 'rule' or terminal
        definition tree, applying `mangle` to names when importing."""
        if tree.data == 'rule':
            name, params, exp, opts = options_from_rule(*tree.children)
        else:
            name = tree.children[0].value
            params = ()     # TODO terminal templates
            opts = int(tree.children[1]) if len(tree.children) == 3 else 1  # priority
            exp = tree.children[-1]

        if mangle is not None:
            params = tuple(mangle(p) for p in params)
            name = mangle(name)
            # _mangle_exp is a no-op when mangle is None.
            exp = _mangle_exp(exp, mangle)
        return name, exp, params, opts
def load_grammar(self, grammar_text, grammar_name="<?>", mangle=None):
    """Parse *grammar_text* and merge its definitions into this builder.

    Imports are collected and resolved first (so imported symbols exist before
    the defining statements that may reference them), then every other
    statement is applied in source order. *mangle* is non-None only when this
    grammar is itself being imported, in which case %ignore is skipped and
    all names are prefixed.
    """
    tree = _parse_grammar(grammar_text, grammar_name)
    # First pass: gather imports, merging aliases of repeated %import lines
    imports = {}
    for stmt in tree.children:
        if stmt.data == 'import':
            dotted_path, base_path, aliases = self._unpack_import(stmt, grammar_name)
            try:
                import_base_path, import_aliases = imports[dotted_path]
                assert base_path == import_base_path, 'Inconsistent base_path for %s.' % '.'.join(dotted_path)
                import_aliases.update(aliases)
            except KeyError:
                imports[dotted_path] = base_path, aliases
    for dotted_path, (base_path, aliases) in imports.items():
        self.do_import(dotted_path, base_path, aliases, mangle)
    # Second pass: apply definitions, overrides, extensions and directives
    for stmt in tree.children:
        if stmt.data in ('term', 'rule'):
            self._define(*self._unpack_definition(stmt, mangle))
        elif stmt.data == 'override':
            r ,= stmt.children
            self._define(*self._unpack_definition(r, mangle), override=True)
        elif stmt.data == 'extend':
            r ,= stmt.children
            self._extend(*self._unpack_definition(r, mangle))
        elif stmt.data == 'ignore':
            # if mangle is not None, we shouldn't apply ignore, since we aren't in a toplevel grammar
            if mangle is None:
                self._ignore(*stmt.children)
        elif stmt.data == 'declare':
            names = [t.value for t in stmt.children]
            if mangle is None:
                self._declare(*names)
            else:
                self._declare(*map(mangle, names))
        elif stmt.data == 'import':
            pass  # already handled in the first pass
        else:
            assert False, stmt
    # Inline terminal-to-terminal references now that all terminals are known
    term_defs = { name: exp
        for name, (_params, exp, _options) in self._definitions.items()
        if self._is_term(name)
    }
    resolve_term_references(term_defs)
def _remove_unused(self, used):
    """Drop every definition not reachable (via rule expansions) from *used*.

    Used after importing a grammar, so only the requested symbols and their
    transitive dependencies are kept.
    """
    def rule_dependencies(symbol):
        # Terminals have no symbol dependencies worth following here
        if self._is_term(symbol):
            return []
        try:
            params, tree,_ = self._definitions[symbol]
        except KeyError:
            return []
        # Template parameters are placeholders, not real definitions
        return _find_used_symbols(tree) - set(params)
    _used = set(bfs(used, rule_dependencies))
    self._definitions = {k: v for k, v in self._definitions.items() if k in _used}
def do_import(self, dotted_path, base_path, aliases, base_mangle=None):
    """Load an imported grammar and merge the requested symbols into self.

    Tries each import source in order (user-supplied paths, then the
    importing grammar's own directory, then lark's stdlib grammars); the
    first source that yields the file wins. The imported grammar is built in
    a private GrammarBuilder, pruned to the aliased symbols, and copied in.
    """
    assert dotted_path
    mangle = _get_mangle('__'.join(dotted_path), aliases, base_mangle)
    grammar_path = os.path.join(*dotted_path) + EXT
    to_try = self.import_paths + ([base_path] if base_path is not None else []) + [stdlib_loader]
    for source in to_try:
        try:
            if callable(source):
                # Custom loader (e.g. FromPackageLoader) returns (path, text)
                joined_path, text = source(base_path, grammar_path)
            else:
                joined_path = os.path.join(source, grammar_path)
                with open(joined_path, encoding='utf8') as f:
                    text = f.read()
        except IOError:
            continue  # not found here; try the next source
        else:
            # Guard against the grammar file changing mid-import (cache safety)
            h = hashlib.md5(text.encode('utf8')).hexdigest()
            if self.used_files.get(joined_path, h) != h:
                raise RuntimeError("Grammar file was changed during importing")
            self.used_files[joined_path] = h
            gb = GrammarBuilder(self.global_keep_all_tokens, self.import_paths, self.used_files)
            gb.load_grammar(text, joined_path, mangle)
            gb._remove_unused(map(mangle, aliases))
            for name in gb._definitions:
                if name in self._definitions:
                    raise GrammarError("Cannot import '%s' from '%s': Symbol already defined." % (name, grammar_path))
            self._definitions.update(**gb._definitions)
            break
    else:
        # Search failed. Make Python throw a nice error.
        open(grammar_path, encoding='utf8')
        # If open() somehow succeeded, the search logic above is inconsistent
        assert False, "Couldn't import grammar %s, but a corresponding file was found at a place where lark doesn't search for it" % (dotted_path,)
def validate(self):
    """Check template usage and symbol references; raise GrammarError on problems.

    Verifies: template parameters don't clash with or duplicate definitions,
    template usages refer to defined templates with the right arity, every
    used symbol is defined, and every %ignore target exists.
    """
    for name, (params, exp, _options) in self._definitions.items():
        for i, p in enumerate(params):
            if p in self._definitions:
                raise GrammarError("Template Parameter conflicts with rule %s (in template %s)" % (p, name))
            if p in params[:i]:
                raise GrammarError("Duplicate Template Parameter %s (in template %s)" % (p, name))
        if exp is None:  # Remaining checks don't apply to abstract rules/terminals
            continue
        for temp in exp.find_data('template_usage'):
            sym = temp.children[0]
            args = temp.children[1:]
            if sym not in params:
                if sym not in self._definitions:
                    self._grammar_error("Template '%s' used but not defined (in {type} {name})" % sym, name)
                if len(args) != len(self._definitions[sym][0]):
                    expected, actual = len(self._definitions[sym][0]), len(args)
                    self._grammar_error("Wrong number of template arguments used for {name} "
                                        "(expected %s, got %s) (in {type2} {name2})" % (expected, actual), sym, name)
        for sym in _find_used_symbols(exp):
            if sym not in self._definitions and sym not in params:
                self._grammar_error("{Type} '{name}' used but not defined (in {type2} {name2})", sym, name)
    if not set(self._definitions).issuperset(self._ignore_names):
        raise GrammarError("Terminals %s were marked to ignore but were not defined!" % (set(self._ignore_names) - set(self._definitions)))
def build(self):
    """Validate the collected definitions and assemble them into a Grammar."""
    self.validate()
    rule_defs = []
    term_defs = []
    # Split definitions by kind; terminals never take template params
    for name, (params, exp, options) in self._definitions.items():
        if self._is_term(name):
            assert len(params) == 0
            term_defs.append((name, (exp, options)))
        else:
            rule_defs.append((name, params, exp, options))
    # resolve_term_references(term_defs)
    return Grammar(rule_defs, term_defs, self._ignore_names)
def verify_used_files(file_hashes):
    """Return True iff every grammar file in *file_hashes* still matches its recorded MD5.

    Used to decide whether a cached parser can be reused. Paths that can no
    longer be loaded are skipped rather than treated as changed.
    """
    for path, old in file_hashes.items():
        text = None
        if isinstance(path, str) and os.path.exists(path):
            with open(path, encoding='utf8') as f:
                text = f.read()
        elif isinstance(path, PackageResource):
            with suppress(IOError):
                text = pkgutil.get_data(*path).decode('utf-8')
        if text is None:  # We don't know how to load the path. ignore it.
            continue
        current = hashlib.md5(text.encode()).hexdigest()
        if old != current:
            logger.info("File %r changed, rebuilding Parser" % path)
            return False
    return True
def load_grammar(grammar, source, import_paths, global_keep_all_tokens):
    """Parse grammar text into a Grammar object.

    Returns ``(grammar, used_files)`` where *used_files* maps loaded grammar
    file paths to their content hashes (for cache invalidation).
    """
    builder = GrammarBuilder(global_keep_all_tokens, import_paths)
    builder.load_grammar(grammar, source)
    return builder.build(), builder.used_files
|
"""Parses and creates Grammar objects"""
import hashlib
import os.path
import sys
from collections import namedtuple
from copy import copy, deepcopy
from io import open
import pkgutil
from ast import literal_eval
from numbers import Integral
from .utils import bfs, Py36, logger, classify_bool, is_id_continue, is_id_start, bfs_all_unique
from .lexer import Token, TerminalDef, PatternStr, PatternRE
from .parse_tree_builder import ParseTreeBuilder
from .parser_frontends import ParsingFrontend
from .common import LexerConf, ParserConf
from .grammar import RuleOptions, Rule, Terminal, NonTerminal, Symbol
from .utils import classify, suppress, dedup_list, Str
from .exceptions import GrammarError, UnexpectedCharacters, UnexpectedToken, ParseError
from .tree import Tree, SlottedTree as ST
from .visitors import Transformer, Visitor, v_args, Transformer_InPlace, Transformer_NonRecursive
inline_args = v_args(inline=True)
__path__ = os.path.dirname(__file__)
IMPORT_PATHS = ['grammars']
EXT = '.lark'
_RE_FLAGS = 'imslux'
_EMPTY = Symbol('__empty__')
_TERMINAL_NAMES = {
'.' : 'DOT',
',' : 'COMMA',
':' : 'COLON',
';' : 'SEMICOLON',
'+' : 'PLUS',
'-' : 'MINUS',
'*' : 'STAR',
'/' : 'SLASH',
'\\' : 'BACKSLASH',
'|' : 'VBAR',
'?' : 'QMARK',
'!' : 'BANG',
'@' : 'AT',
'#' : 'HASH',
'$' : 'DOLLAR',
'%' : 'PERCENT',
'^' : 'CIRCUMFLEX',
'&' : 'AMPERSAND',
'_' : 'UNDERSCORE',
'<' : 'LESSTHAN',
'>' : 'MORETHAN',
'=' : 'EQUAL',
'"' : 'DBLQUOTE',
'\'' : 'QUOTE',
'`' : 'BACKQUOTE',
'~' : 'TILDE',
'(' : 'LPAR',
')' : 'RPAR',
'{' : 'LBRACE',
'}' : 'RBRACE',
'[' : 'LSQB',
']' : 'RSQB',
'\n' : 'NEWLINE',
'\r\n' : 'CRLF',
'\t' : 'TAB',
' ' : 'SPACE',
}
# Grammar Parser
TERMINALS = {
'_LPAR': r'\(',
'_RPAR': r'\)',
'_LBRA': r'\[',
'_RBRA': r'\]',
'_LBRACE': r'\{',
'_RBRACE': r'\}',
'OP': '[+*]|[?](?![a-z])',
'_COLON': ':',
'_COMMA': ',',
'_OR': r'\|',
'_DOT': r'\.(?!\.)',
'_DOTDOT': r'\.\.',
'TILDE': '~',
'RULE': '!?[_?]?[a-z][_a-z0-9]*',
'TERMINAL': '_?[A-Z][_A-Z0-9]*',
'STRING': r'"(\\"|\\\\|[^"\n])*?"i?',
'REGEXP': r'/(?!/)(\\/|\\\\|[^/])*?/[%s]*' % _RE_FLAGS,
'_NL': r'(\r?\n)+\s*',
'WS': r'[ \t]+',
'COMMENT': r'\s*//[^\n]*',
'_TO': '->',
'_IGNORE': r'%ignore',
'_OVERRIDE': r'%override',
'_DECLARE': r'%declare',
'_EXTEND': r'%extend',
'_IMPORT': r'%import',
'NUMBER': r'[+-]?\d+',
}
RULES = {
'start': ['_list'],
'_list': ['_item', '_list _item'],
'_item': ['rule', 'term', 'ignore', 'import', 'declare', 'override', 'extend', '_NL'],
'rule': ['RULE template_params _COLON expansions _NL',
'RULE template_params _DOT NUMBER _COLON expansions _NL'],
'template_params': ['_LBRACE _template_params _RBRACE',
''],
'_template_params': ['RULE',
'_template_params _COMMA RULE'],
'expansions': ['alias',
'expansions _OR alias',
'expansions _NL _OR alias'],
'?alias': ['expansion _TO RULE', 'expansion'],
'expansion': ['_expansion'],
'_expansion': ['', '_expansion expr'],
'?expr': ['atom',
'atom OP',
'atom TILDE NUMBER',
'atom TILDE NUMBER _DOTDOT NUMBER',
],
'?atom': ['_LPAR expansions _RPAR',
'maybe',
'value'],
'value': ['terminal',
'nonterminal',
'literal',
'range',
'template_usage'],
'terminal': ['TERMINAL'],
'nonterminal': ['RULE'],
'?name': ['RULE', 'TERMINAL'],
'maybe': ['_LBRA expansions _RBRA'],
'range': ['STRING _DOTDOT STRING'],
'template_usage': ['RULE _LBRACE _template_args _RBRACE'],
'_template_args': ['value',
'_template_args _COMMA value'],
'term': ['TERMINAL _COLON expansions _NL',
'TERMINAL _DOT NUMBER _COLON expansions _NL'],
'override': ['_OVERRIDE rule',
'_OVERRIDE term'],
'extend': ['_EXTEND rule',
'_EXTEND term'],
'ignore': ['_IGNORE expansions _NL'],
'declare': ['_DECLARE _declare_args _NL'],
'import': ['_IMPORT _import_path _NL',
'_IMPORT _import_path _LPAR name_list _RPAR _NL',
'_IMPORT _import_path _TO name _NL'],
'_import_path': ['import_lib', 'import_rel'],
'import_lib': ['_import_args'],
'import_rel': ['_DOT _import_args'],
'_import_args': ['name', '_import_args _DOT name'],
'name_list': ['_name_list'],
'_name_list': ['name', '_name_list _COMMA name'],
'_declare_args': ['name', '_declare_args name'],
'literal': ['REGEXP', 'STRING'],
}
@inline_args
class EBNF_to_BNF(Transformer_InPlace):
    """Rewrite EBNF operators (?, +, *, ~n..m, [..]) into plain BNF rules.

    Repetition operators are expanded into auxiliary left-recursive rules
    named ``__<prefix>_<op>_<i>``; identical sub-expressions share one
    generated rule via *rules_by_expr*.
    """
    def __init__(self):
        self.new_rules = []          # (name, tree, options) for generated helper rules
        self.rules_by_expr = {}      # expr -> NonTerminal of its helper rule (dedup)
        self.prefix = 'anon'         # set to the enclosing rule name by the caller
        self.i = 0
        self.rule_options = None
    def _add_recurse_rule(self, type_, expr):
        # Create (or reuse) a left-recursive rule matching one-or-more `expr`
        if expr in self.rules_by_expr:
            return self.rules_by_expr[expr]
        new_name = '__%s_%s_%d' % (self.prefix, type_, self.i)
        self.i += 1
        t = NonTerminal(new_name)
        tree = ST('expansions', [ST('expansion', [expr]), ST('expansion', [t, expr])])
        self.new_rules.append((new_name, tree, self.rule_options))
        self.rules_by_expr[expr] = t
        return t
    def expr(self, rule, op, *args):
        if op.value == '?':
            empty = ST('expansion', [])
            return ST('expansions', [rule, empty])
        elif op.value == '+':
            # a : b c+ d
            #   -->
            # a : b _c d
            # _c : _c c | c;
            return self._add_recurse_rule('plus', rule)
        elif op.value == '*':
            # a : b c* d
            #   -->
            # a : b _c? d
            # _c : _c c | c;
            new_name = self._add_recurse_rule('star', rule)
            return ST('expansions', [new_name, ST('expansion', [])])
        elif op.value == '~':
            # Bounded repetition: expand into one alternative per count
            if len(args) == 1:
                mn = mx = int(args[0])
            else:
                mn, mx = map(int, args)
            if mx < mn or mn < 0:
                raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (rule, mn, mx))
            return ST('expansions', [ST('expansion', [rule] * n) for n in range(mn, mx+1)])
        assert False, op
    def maybe(self, rule):
        """Expand [x] — like x?, but keeps an _EMPTY placeholder when needed

        so the tree-builder can preserve child positions."""
        keep_all_tokens = self.rule_options and self.rule_options.keep_all_tokens
        def will_not_get_removed(sym):
            if isinstance(sym, NonTerminal):
                return not sym.name.startswith('_')
            if isinstance(sym, Terminal):
                return keep_all_tokens or not sym.filter_out
            assert False
        if any(rule.scan_values(will_not_get_removed)):
            empty = _EMPTY
        else:
            empty = ST('expansion', [])
        return ST('expansions', [rule, empty])
class SimplifyRule_Visitor(Visitor):
    """Normalize rule trees in-place: flatten nesting, distribute alternatives,

    push aliases down, and deduplicate expansion options."""
    @staticmethod
    def _flatten(tree):
        # Merge children that have the same tree.data into their parent,
        # repeating until no such child remains
        while True:
            to_expand = [i for i, child in enumerate(tree.children)
                         if isinstance(child, Tree) and child.data == tree.data]
            if not to_expand:
                break
            tree.expand_kids_by_index(*to_expand)
    def expansion(self, tree):
        # rules_list unpacking
        # a : b (c|d) e
        #  -->
        # a : b c e | b d e
        #
        # In AST terms:
        # expansion(b, expansions(c, d), e)
        #   -->
        # expansions( expansion(b, c, e), expansion(b, d, e) )
        self._flatten(tree)
        for i, child in enumerate(tree.children):
            if isinstance(child, Tree) and child.data == 'expansions':
                tree.data = 'expansions'
                tree.children = [self.visit(ST('expansion', [option if i == j else other
                                                            for j, other in enumerate(tree.children)]))
                                 for option in dedup_list(child.children)]
                self._flatten(tree)
                break  # recursion via self.visit handles any further groups
    def alias(self, tree):
        # An alias over alternatives applies to each alternative separately
        rule, alias_name = tree.children
        if rule.data == 'expansions':
            aliases = []
            for child in tree.children[0].children:
                aliases.append(ST('alias', [child, alias_name]))
            tree.data = 'expansions'
            tree.children = aliases
    def expansions(self, tree):
        self._flatten(tree)
        # Ensure all children are unique
        if len(set(tree.children)) != len(tree.children):
            tree.children = dedup_list(tree.children)   # dedup is expensive, so try to minimize its use
class RuleTreeToText(Transformer):
    """Collapse a simplified rule tree into a list of (symbols, alias) pairs."""
    def expansions(self, x):
        return x
    def expansion(self, symbols):
        # No alias by default
        return symbols, None
    def alias(self, x):
        (expansion, _alias), alias = x
        assert _alias is None, (alias, expansion, '-', _alias)  # Double alias not allowed
        return expansion, alias.value
class PrepareAnonTerminals(Transformer_InPlace):
    """Create a unique list of anonymous terminals. Attempt to give meaningful names to them when we add them"""
    def __init__(self, terminals):
        self.terminals = terminals                          # mutated: new anon terminals are appended here
        self.term_set = {td.name for td in self.terminals}  # all known terminal names
        self.term_reverse = {td.pattern: td for td in terminals}  # pattern -> TerminalDef, for reuse
        self.i = 0                                          # counter for __ANON_%d names
        self.rule_options = None                            # set per-rule by the caller (keep_all_tokens)
    @inline_args
    def pattern(self, p):
        """Replace a literal pattern inside a rule with a Terminal symbol,

        creating a named anonymous terminal for it if none exists yet."""
        value = p.value
        if p in self.term_reverse and p.flags != self.term_reverse[p].pattern.flags:
            raise GrammarError(u'Conflicting flags for the same terminal: %s' % p)
        term_name = None
        if isinstance(p, PatternStr):
            try:
                # If already defined, use the user-defined terminal name
                term_name = self.term_reverse[p].name
            except KeyError:
                # Try to assign an indicative anon-terminal name
                try:
                    term_name = _TERMINAL_NAMES[value]
                except KeyError:
                    if is_id_continue(value) and is_id_start(value[0]) and value.upper() not in self.term_set:
                        term_name = value.upper()
                if term_name in self.term_set:
                    term_name = None
        elif isinstance(p, PatternRE):
            if p in self.term_reverse:  # Kind of a weird placement.name
                term_name = self.term_reverse[p].name
        else:
            assert False, p
        if term_name is None:
            term_name = '__ANON_%d' % self.i
            self.i += 1
        if term_name not in self.term_set:
            assert p not in self.term_reverse
            self.term_set.add(term_name)
            termdef = TerminalDef(term_name, p)
            self.term_reverse[p] = termdef
            self.terminals.append(termdef)
        # Anonymous string terminals are filtered from the tree unless keep_all_tokens
        filter_out = False if self.rule_options and self.rule_options.keep_all_tokens else isinstance(p, PatternStr)
        return Terminal(term_name, filter_out=filter_out)
class _ReplaceSymbols(Transformer_InPlace):
    """Helper for ApplyTemplates"""
    def __init__(self):
        # Maps template parameter name -> the concrete argument symbol
        self.names = {}
    def value(self, c):
        # A bare reference to a template parameter: substitute the argument
        if len(c) == 1 and isinstance(c[0], Token) and c[0].value in self.names:
            return self.names[c[0].value]
        return self.__default__('value', c, None)
    def template_usage(self, c):
        # A template parameter used as a template name: substitute its name
        if c[0] in self.names:
            return self.__default__('template_usage', [self.names[c[0]].name] + c[1:], None)
        return self.__default__('template_usage', c, None)
class ApplyTemplates(Transformer_InPlace):
    """Apply the templates, creating new rules that represent the used templates"""
    def __init__(self, rule_defs):
        self.rule_defs = rule_defs          # mutated: instantiated templates are appended
        self.replacer = _ReplaceSymbols()
        self.created_templates = set()      # result names already instantiated (dedup)
    def template_usage(self, c):
        name = c[0]
        args = c[1:]
        # Instantiation name encodes the arguments, e.g. "tpl{A,B}"
        result_name = "%s{%s}" % (name, ",".join(a.name for a in args))
        if result_name not in self.created_templates:
            self.created_templates.add(result_name)
            (_n, params, tree, options) ,= (t for t in self.rule_defs if t[0] == name)
            assert len(params) == len(args), args
            result_tree = deepcopy(tree)
            self.replacer.names = dict(zip(params, args))
            self.replacer.transform(result_tree)
            self.rule_defs.append((result_name, [], result_tree, deepcopy(options)))
        return NonTerminal(result_name)
def _rfind(s, choices):
return max(s.rfind(c) for c in choices)
def eval_escaping(s):
    """Resolve backslash escapes in a grammar literal body.

    Normalizes the escape sequences so Python's own literal parser can be
    used to interpret them; escapes Python doesn't know (anything outside
    \\u \\x \\n \\f \\t \\r and \\\\) are kept verbatim.
    """
    pieces = []
    chars = iter(s)
    for ch in chars:
        pieces.append(ch)
        if ch == '\\':
            try:
                follower = next(chars)
            except StopIteration:
                raise GrammarError("Literal ended unexpectedly (bad escaping): `%r`" % s)
            if follower == '\\':
                # Double the escaped backslash so literal_eval reproduces it
                pieces.append('\\\\')
            elif follower not in 'uxnftr':
                # Unknown escape: keep the backslash literally
                pieces.append('\\')
            pieces.append(follower)
    # Neutralize quotes so the text can be wrapped in a triple-quoted literal
    body = ''.join(pieces).replace('\\"', '"').replace("'", "\\'")
    try:
        return literal_eval("u'''%s'''" % body)
    except SyntaxError as e:
        raise GrammarError(s, e)
def _literal_to_pattern(literal):
    """Convert a STRING or REGEXP token into a PatternStr / PatternRE.

    Splits off trailing flags (e.g. the ``i`` in ``"abc"i``), strips the
    quotes/slashes, and resolves escapes.
    """
    v = literal.value
    # Flags are whatever follows the closing quote or slash
    flag_start = _rfind(v, '/"')+1
    assert flag_start > 0
    flags = v[flag_start:]
    assert all(f in _RE_FLAGS for f in flags), flags
    if literal.type == 'STRING' and '\n' in v:
        raise GrammarError('You cannot put newlines in string literals')
    if literal.type == 'REGEXP' and '\n' in v and 'x' not in flags:
        raise GrammarError('You can only use newlines in regular expressions '
                           'with the `x` (verbose) flag')
    v = v[:flag_start]
    assert v[0] == v[-1] and v[0] in '"/'
    x = v[1:-1]  # strip the surrounding quotes/slashes
    s = eval_escaping(x)
    if literal.type == 'STRING':
        # In plain strings, `\\` denotes a single literal backslash
        s = s.replace('\\\\', '\\')
        return PatternStr(s, flags, raw=literal.value)
    elif literal.type == 'REGEXP':
        return PatternRE(s, flags, raw=literal.value)
    else:
        assert False, 'Invariant failed: literal.type not in ["STRING", "REGEXP"]'
@inline_args
class PrepareLiterals(Transformer_InPlace):
    """Turn literal and range nodes into 'pattern' nodes holding Pattern objects."""
    def literal(self, literal):
        return ST('pattern', [_literal_to_pattern(literal)])
    def range(self, start, end):
        # "a".."z" becomes the character-class regexp [a-z]
        assert start.type == end.type == 'STRING'
        start = start.value[1:-1]
        end = end.value[1:-1]
        # Ranges only make sense between single characters
        assert len(eval_escaping(start)) == len(eval_escaping(end)) == 1
        regexp = '[%s-%s]' % (start, end)
        return ST('pattern', [PatternRE(regexp)])
def _make_joined_pattern(regexp, flags_set):
    """Build a PatternRE for *regexp*, validating flag compatibility on old Pythons."""
    # In Python 3.6, a new syntax for flags was introduced, that allows us to restrict the scope
    # of flags to a specific regexp group. We are already using it in `lexer.Pattern._get_flags`
    # However, for prior Python versions, we still need to use global flags, so we have to make sure
    # that there are no flag collisions when we merge several terminals.
    flags = ()
    if not Py36:
        if len(flags_set) > 1:
            raise GrammarError("Lark doesn't support joining terminals with conflicting flags in python <3.6!")
        elif len(flags_set) == 1:
            flags ,= flags_set
    return PatternRE(regexp, flags)
class TerminalTreeToPattern(Transformer):
    """Collapse a terminal's expansion tree into a single Pattern (regexp)."""
    def pattern(self, ps):
        p ,= ps
        return p
    def expansion(self, items):
        # Concatenation: join the items' regexps
        assert items
        if len(items) == 1:
            return items[0]
        pattern = ''.join(i.to_regexp() for i in items)
        return _make_joined_pattern(pattern, {i.flags for i in items})
    def expansions(self, exps):
        # Alternation: (?:a|b|...)
        if len(exps) == 1:
            return exps[0]
        pattern = '(?:%s)' % ('|'.join(i.to_regexp() for i in exps))
        return _make_joined_pattern(pattern, {i.flags for i in exps})
    def expr(self, args):
        # Repetition operators map directly onto regexp quantifiers
        inner, op = args[:2]
        if op == '~':
            if len(args) == 3:
                op = "{%d}" % int(args[2])
            else:
                mn, mx = map(int, args[2:])
                if mx < mn:
                    raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (inner, mn, mx))
                op = "{%d,%d}" % (mn, mx)
        else:
            assert len(args) == 2
        return PatternRE('(?:%s)%s' % (inner.to_regexp(), op), inner.flags)
    def maybe(self, expr):
        # [x] in a terminal is just x?
        return self.expr(expr + ['?'])
    def alias(self, t):
        raise GrammarError("Aliasing not allowed in terminals (You used -> in the wrong place)")
    def value(self, v):
        return v[0]
class PrepareSymbols(Transformer_InPlace):
    """Convert RULE/TERMINAL tokens into NonTerminal / Terminal symbol objects."""
    def value(self, v):
        v ,= v
        if isinstance(v, Tree):
            return v
        elif v.type == 'RULE':
            return NonTerminal(Str(v.value))
        elif v.type == 'TERMINAL':
            # Underscore-prefixed terminals are filtered out of the parse tree
            return Terminal(Str(v.value), filter_out=v.startswith('_'))
        assert False
def nr_deepcopy_tree(t):
    """Deepcopy tree `t` without recursion"""
    # Transformer_NonRecursive with no handlers rebuilds every node, i.e. a deep copy
    return Transformer_NonRecursive(False).transform(t)
class Grammar:
    """A parsed grammar: rule definitions, terminal definitions and ignored terminals.

    ``compile`` lowers it into concrete TerminalDef / Rule objects.
    """
    def __init__(self, rule_defs, term_defs, ignore):
        self.term_defs = term_defs
        self.rule_defs = rule_defs
        self.ignore = ignore
    def compile(self, start, terminals_to_keep):
        """Compile into ``(terminals, rules, ignore)`` for the given start symbols.

        *terminals_to_keep* are terminal names that must survive the
        unused-terminal pruning even if no rule references them.
        """
        # We change the trees in-place (to support huge grammars)
        # So deepcopy allows calling compile more than once.
        term_defs = deepcopy(list(self.term_defs))
        rule_defs = [(n,p,nr_deepcopy_tree(t),o) for n,p,t,o in self.rule_defs]
        # ===================
        #  Compile Terminals
        # ===================
        # Convert terminal-trees to strings/regexps
        for name, (term_tree, priority) in term_defs:
            if term_tree is None:  # Terminal added through %declare
                continue
            expansions = list(term_tree.find_data('expansion'))
            if len(expansions) == 1 and not expansions[0].children:
                raise GrammarError("Terminals cannot be empty (%s)" % name)
        transformer = PrepareLiterals() * TerminalTreeToPattern()
        terminals = [TerminalDef(name, transformer.transform(term_tree), priority)
                     for name, (term_tree, priority) in term_defs if term_tree]
        # =================
        #  Compile Rules
        # =================
        # 1. Pre-process terminals
        anon_tokens_transf = PrepareAnonTerminals(terminals)
        transformer = PrepareLiterals() * PrepareSymbols() * anon_tokens_transf  # Adds to terminals
        # 2. Inline Templates
        transformer *= ApplyTemplates(rule_defs)
        # 3. Convert EBNF to BNF (and apply step 1 & 2)
        ebnf_to_bnf = EBNF_to_BNF()
        rules = []
        i = 0
        while i < len(rule_defs):  # We have to do it like this because rule_defs might grow due to templates
            name, params, rule_tree, options = rule_defs[i]
            i += 1
            if len(params) != 0:  # Dont transform templates
                continue
            rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None
            ebnf_to_bnf.rule_options = rule_options
            ebnf_to_bnf.prefix = name
            anon_tokens_transf.rule_options = rule_options
            tree = transformer.transform(rule_tree)
            res = ebnf_to_bnf.transform(tree)
            rules.append((name, res, options))
        rules += ebnf_to_bnf.new_rules
        assert len(rules) == len({name for name, _t, _o in rules}), "Whoops, name collision"
        # 4. Compile tree to Rule objects
        rule_tree_to_text = RuleTreeToText()
        simplify_rule = SimplifyRule_Visitor()
        compiled_rules = []
        for rule_content in rules:
            name, tree, options = rule_content
            simplify_rule.visit(tree)
            expansions = rule_tree_to_text.transform(tree)
            for i, (expansion, alias) in enumerate(expansions):
                if alias and name.startswith('_'):
                    raise GrammarError("Rule %s is marked for expansion (it starts with an underscore) and isn't allowed to have aliases (alias=%s)"% (name, alias))
                # Record positions of _EMPTY placeholders, then drop them
                empty_indices = [x==_EMPTY for x in expansion]
                if any(empty_indices):
                    exp_options = copy(options) or RuleOptions()
                    exp_options.empty_indices = empty_indices
                    expansion = [x for x in expansion if x!=_EMPTY]
                else:
                    exp_options = options
                assert all(isinstance(x, Symbol) for x in expansion), expansion
                rule = Rule(NonTerminal(name), expansion, i, alias, exp_options)
                compiled_rules.append(rule)
        # Remove duplicates of empty rules, throw error for non-empty duplicates
        if len(set(compiled_rules)) != len(compiled_rules):
            duplicates = classify(compiled_rules, lambda x: x)
            for dups in duplicates.values():
                if len(dups) > 1:
                    if dups[0].expansion:
                        raise GrammarError("Rules defined twice: %s\n\n(Might happen due to colliding expansion of optionals: [] or ?)"
                                           % ''.join('\n  * %s' % i for i in dups))
                    # Empty rule; assert all other attributes are equal
                    assert len({(r.alias, r.order, r.options) for r in dups}) == len(dups)
            # Remove duplicates
            compiled_rules = list(set(compiled_rules))
        # Filter out unused rules (iterate to a fixed point)
        while True:
            c = len(compiled_rules)
            used_rules = {s for r in compiled_rules
                            for s in r.expansion
                            if isinstance(s, NonTerminal)
                            and s != r.origin}
            used_rules |= {NonTerminal(s) for s in start}
            compiled_rules, unused = classify_bool(compiled_rules, lambda r: r.origin in used_rules)
            for r in unused:
                logger.debug("Unused rule: %s", r)
            if len(compiled_rules) == c:
                break
        # Filter out unused terminals
        used_terms = {t.name for r in compiled_rules
                             for t in r.expansion
                             if isinstance(t, Terminal)}
        terminals, unused = classify_bool(terminals, lambda t: t.name in used_terms or t.name in self.ignore or t.name in terminals_to_keep)
        if unused:
            logger.debug("Unused terminals: %s", [t.name for t in unused])
        return terminals, compiled_rules, self.ignore
PackageResource = namedtuple('PackageResource', 'pkg_name path')
class FromPackageLoader(object):
    """
    Provides a simple way of creating custom import loaders that load from packages via ``pkgutil.get_data`` instead of using `open`.
    This allows them to be compatible even from within zip files.
    Relative imports are handled, so you can just freely use them.
    pkg_name: The name of the package. You can probably provide `__name__` most of the time
    search_paths: All the path that will be search on absolute imports.
    """
    def __init__(self, pkg_name, search_paths=("", )):
        self.pkg_name = pkg_name
        self.search_paths = search_paths
    def __repr__(self):
        return "%s(%r, %r)" % (type(self).__name__, self.pkg_name, self.search_paths)
    def __call__(self, base_path, grammar_path):
        """Load *grammar_path*; returns (PackageResource, text) or raises IOError."""
        if base_path is None:
            to_try = self.search_paths
        else:
            # Check whether or not the importing grammar was loaded by this module.
            if not isinstance(base_path, PackageResource) or base_path.pkg_name != self.pkg_name:
                # Technically false, but FileNotFound doesn't exist in python2.7, and this message should never reach the end user anyway
                raise IOError()
            to_try = [base_path.path]
        for path in to_try:
            full_path = os.path.join(path, grammar_path)
            try:
                text = pkgutil.get_data(self.pkg_name, full_path)
            except IOError:
                continue
            else:
                return PackageResource(self.pkg_name, full_path), text.decode()
        # Not found under any search path
        raise IOError()
stdlib_loader = FromPackageLoader('lark', IMPORT_PATHS)
def resolve_term_references(term_dict):
    """Inline terminal-to-terminal references in-place, then reject recursion.

    Repeatedly substitutes each TERMINAL reference with its defining tree
    until a pass makes no change, then checks no terminal contains itself.
    """
    # TODO Solve with transitive closure (maybe)
    while True:
        changed = False
        for name, token_tree in term_dict.items():
            if token_tree is None:  # Terminal added through %declare
                continue
            for exp in token_tree.find_data('value'):
                item ,= exp.children
                if isinstance(item, Token):
                    if item.type == 'RULE':
                        raise GrammarError("Rules aren't allowed inside terminals (%s in %s)" % (item, name))
                    if item.type == 'TERMINAL':
                        try:
                            term_value = term_dict[item]
                        except KeyError:
                            raise GrammarError("Terminal used but not defined: %s" % item)
                        assert term_value is not None
                        exp.children[0] = term_value
                        changed = True
        if not changed:
            break
    # Self-reference check: a terminal's tree must not contain itself
    for name, term in term_dict.items():
        if term:    # Not just declared
            for child in term.children:
                ids = [id(x) for x in child.iter_subtrees()]
                if id(term) in ids:
                    raise GrammarError("Recursion in terminal '%s' (recursion is only allowed in rules, not terminals)" % name)
def options_from_rule(name, params, *x):
    """Unpack a parsed rule definition into (name, params, expansions, RuleOptions).

    The ``!`` and ``?`` name prefixes become keep_all_tokens / expand1 flags
    and are stripped from the returned name.
    """
    if len(x) > 1:
        priority, expansions = x
        priority = int(priority)
    else:
        expansions ,= x
        priority = None
    params = [t.value for t in params.children] if params is not None else []  # For the grammar parser
    keep_all_tokens = name.startswith('!')
    name = name.lstrip('!')
    expand1 = name.startswith('?')
    name = name.lstrip('?')
    return name, params, expansions, RuleOptions(keep_all_tokens, expand1, priority=priority,
                                                 template_source=(name if params else None))
def symbols_from_strcase(expansion):
    """Map names to symbols by case: UPPERCASE -> Terminal, otherwise NonTerminal."""
    return [Terminal(x, filter_out=x.startswith('_')) if x.isupper() else NonTerminal(x) for x in expansion]
@inline_args
class PrepareGrammar(Transformer_InPlace):
    """Unwrap 'terminal'/'nonterminal' nodes down to their bare name tokens."""
    def terminal(self, name):
        return name
    def nonterminal(self, name):
        return name
def _find_used_symbols(tree):
    """Return the set of RULE/TERMINAL names referenced inside an expansions tree."""
    assert tree.data == 'expansions'
    return {t for x in tree.find_data('expansion')
            for t in x.scan_values(lambda t: t.type in ('RULE', 'TERMINAL'))}
def _get_parser():
    """Build (once) and return the LALR parser for lark's own grammar syntax.

    The parser is constructed from the hand-written TERMINALS/RULES tables
    above and memoized on the function object.
    """
    try:
        return _get_parser.cache
    except AttributeError:
        terminals = [TerminalDef(name, PatternRE(value)) for name, value in TERMINALS.items()]
        rules = [options_from_rule(name, None, x) for name, x in RULES.items()]
        rules = [Rule(NonTerminal(r), symbols_from_strcase(x.split()), i, None, o)
                 for r, _p, xs, o in rules for i, x in enumerate(xs)]
        callback = ParseTreeBuilder(rules, ST).create_callback()
        import re
        lexer_conf = LexerConf(terminals, re, ['WS', 'COMMENT'])
        parser_conf = ParserConf(rules, callback, ['start'])
        lexer_conf.lexer_type = 'standard'
        parser_conf.parser_type = 'lalr'
        # Memoize on the function itself so construction happens only once
        _get_parser.cache = ParsingFrontend(lexer_conf, parser_conf, {})
        return _get_parser.cache
GRAMMAR_ERRORS = [
('Incorrect type of value', ['a: 1\n']),
('Unclosed parenthesis', ['a: (\n']),
('Unmatched closing parenthesis', ['a: )\n', 'a: [)\n', 'a: (]\n']),
('Expecting rule or terminal definition (missing colon)', ['a\n', 'A\n', 'a->\n', 'A->\n', 'a A\n']),
('Illegal name for rules or terminals', ['Aa:\n']),
('Alias expects lowercase name', ['a: -> "a"\n']),
('Unexpected colon', ['a::\n', 'a: b:\n', 'a: B:\n', 'a: "a":\n']),
('Misplaced operator', ['a: b??', 'a: b(?)', 'a:+\n', 'a:?\n', 'a:*\n', 'a:|*\n']),
('Expecting option ("|") or a new rule or terminal definition', ['a:a\n()\n']),
('Terminal names cannot contain dots', ['A.B\n']),
('Expecting rule or terminal definition', ['"a"\n']),
('%import expects a name', ['%import "a"\n']),
('%ignore expects a value', ['%ignore %import\n']),
]
def _translate_parser_exception(parse, e):
    """Map a parser exception to a friendly message via GRAMMAR_ERRORS examples.

    Returns None when no known example matches.
    """
    error = e.match_examples(parse, GRAMMAR_ERRORS, use_accepts=True)
    if error:
        return error
    elif 'STRING' in e.expected:
        return "Expecting a value"
def _parse_grammar(text, name, start='start'):
    """Parse grammar source text into a prepared tree, raising GrammarError on bad input."""
    try:
        # A trailing newline is required by the grammar's statement rules
        tree = _get_parser().parse(text + '\n', start)
    except UnexpectedCharacters as e:
        context = e.get_context(text)
        raise GrammarError("Unexpected input at line %d column %d in %s: \n\n%s" %
                           (e.line, e.column, name, context))
    except UnexpectedToken as e:
        context = e.get_context(text)
        error = _translate_parser_exception(_get_parser().parse, e)
        if error:
            raise GrammarError("%s, at line %s column %s\n\n%s" % (error, e.line, e.column, context))
        raise
    return PrepareGrammar().transform(tree)
def _error_repr(error):
    """Render a parse error as a human-readable one-line message."""
    if isinstance(error, UnexpectedToken):
        error2 = _translate_parser_exception(_get_parser().parse, error)
        if error2:
            return error2
        expected = ', '.join(error.accepts or error.expected)
        return "Unexpected token %r. Expected one of: {%s}" % (str(error.token), expected)
    else:
        return str(error)
def _search_interactive_parser(interactive_parser, predicate):
    """BFS over feedable token sequences; return the first (path, parser) where predicate(parser) holds.

    Returns None if no reachable parser state satisfies the predicate.
    """
    def expand(node):
        path, p = node
        for choice in p.choices():
            t = Token(choice, '')
            try:
                new_p = p.feed_token(t)
            except ParseError:    # Illegal
                pass
            else:
                yield path + (choice,), new_p
    for path, p in bfs_all_unique([((), interactive_parser)], expand):
        if predicate(p):
            return path, p
def find_grammar_errors(text, start='start'):
    """Parse *text* with error recovery; return [(error, message)] for each bad line."""
    errors = []
    def on_error(e):
        errors.append((e, _error_repr(e)))
        # recover to a new line: feed whatever tokens reach a state accepting _NL
        token_path, _ = _search_interactive_parser(e.interactive_parser.as_immutable(), lambda p: '_NL' in p.choices())
        for token_type in token_path:
            e.interactive_parser.feed_token(Token(token_type, ''))
        e.interactive_parser.feed_token(Token('_NL', '\n'))
        return True
    _tree = _get_parser().parse(text + '\n', start, on_error=on_error)
    # Keep only the first error per line
    errors_by_line = classify(errors, lambda e: e[0].line)
    errors = [el[0] for el in errors_by_line.values()]    # already sorted
    for e in errors:
        # Drop the parser reference so the errors are picklable / GC-friendly
        e[0].interactive_parser = None
    return errors
def _get_mangle(prefix, aliases, base_mangle=None):
def mangle(s):
if s in aliases:
s = aliases[s]
else:
if s[0] == '_':
s = '_%s__%s' % (prefix, s[1:])
else:
s = '%s__%s' % (prefix, s)
if base_mangle is not None:
s = base_mangle(s)
return s
return mangle
def _mangle_exp(exp, mangle):
    """Return a copy of expansion tree *exp* with every RULE/TERMINAL token renamed via *mangle*."""
    if mangle is None:
        return exp
    exp = deepcopy(exp)  # TODO: is this needed
    for t in exp.iter_subtrees():
        for i, c in enumerate(t.children):
            if isinstance(c, Token) and c.type in ('RULE', 'TERMINAL'):
                t.children[i] = Token(c.type, mangle(c.value))
    return exp
class GrammarBuilder:
    """Incrementally collects rule/terminal definitions (including imports) into a Grammar."""
    def __init__(self, global_keep_all_tokens=False, import_paths=None, used_files=None):
        self.global_keep_all_tokens = global_keep_all_tokens
        self.import_paths = import_paths or []
        self.used_files = used_files or {}      # path -> content hash, for cache invalidation
        self._definitions = {}                  # name -> (params, expansion tree, options)
        self._ignore_names = []                 # terminal names from %ignore
def _is_term(self, name):
    """Return True when *name* denotes a terminal (uppercase last path component)."""
    # Imported terminals are of the form `Path__to__Grammar__file__TERMINAL_NAME`
    # Only the last part is the actual name, and the rest might contain mixed case
    return name.rpartition('__')[-1].isupper()
def _grammar_error(self, msg, *names):
    """Raise GrammarError, formatting {name}/{type}/{Type} (and {name2}...) per symbol."""
    args = {}
    for i, name in enumerate(names, start=1):
        postfix = '' if i == 1 else str(i)
        args['name' + postfix] = name
        args['type' + postfix] = lowercase_type = ("rule", "terminal")[self._is_term(name)]
        args['Type' + postfix] = lowercase_type.title()
    raise GrammarError(msg.format(**args))
def _check_options(self, name, options):
if self._is_term(name):
if options is None:
options = 1
# if we don't use Integral here, we run into python2.7/python3 problems with long vs int
elif not isinstance(options, Integral):
raise GrammarError("Terminal require a single int as 'options' (e.g. priority), got %s" % (type(options),))
else:
if options is None:
options = RuleOptions()
elif not isinstance(options, RuleOptions):
raise GrammarError("Rules require a RuleOptions instance as 'options'")
if self.global_keep_all_tokens:
options.keep_all_tokens = True
return options
def _define(self, name, exp, params=(), options=None, override=False):
if name in self._definitions:
if not override:
self._grammar_error("{Type} '{name}' defined more than once", name)
elif override:
self._grammar_error("Cannot override a nonexisting {type} {name}", name)
if name.startswith('__'):
self._grammar_error('Names starting with double-underscore are reserved (Error at {name})', name)
self._definitions[name] = (params, exp, self._check_options(name, options))
def _extend(self, name, exp, params=(), options=None):
if name not in self._definitions:
self._grammar_error("Can't extend {type} {name} as it wasn't defined before", name)
if tuple(params) != tuple(self._definitions[name][0]):
self._grammar_error("Cannot extend {type} with different parameters: {name}", name)
# TODO: think about what to do with 'options'
base = self._definitions[name][1]
while len(base.children) == 2:
assert isinstance(base.children[0], Tree) and base.children[0].data == 'expansions', base
base = base.children[0]
base.children.insert(0, exp)
def _ignore(self, exp_or_name):
if isinstance(exp_or_name, str):
self._ignore_names.append(exp_or_name)
else:
assert isinstance(exp_or_name, Tree)
t = exp_or_name
if t.data == 'expansions' and len(t.children) == 1:
t2 ,= t.children
if t2.data=='expansion' and len(t2.children) == 1:
item ,= t2.children
if item.data == 'value':
item ,= item.children
if isinstance(item, Token) and item.type == 'TERMINAL':
self._ignore_names.append(item.value)
return
name = '__IGNORE_%d'% len(self._ignore_names)
self._ignore_names.append(name)
self._definitions[name] = ((), t, 1)
def _declare(self, *names):
for name in names:
self._define(name, None)
def _unpack_import(self, stmt, grammar_name):
if len(stmt.children) > 1:
path_node, arg1 = stmt.children
else:
path_node, = stmt.children
arg1 = None
if isinstance(arg1, Tree): # Multi import
dotted_path = tuple(path_node.children)
names = arg1.children
aliases = dict(zip(names, names)) # Can't have aliased multi import, so all aliases will be the same as names
else: # Single import
dotted_path = tuple(path_node.children[:-1])
if not dotted_path:
name ,= path_node.children
raise GrammarError("Nothing was imported from grammar `%s`" % name)
name = path_node.children[-1] # Get name from dotted path
aliases = {name.value: (arg1 or name).value} # Aliases if exist
if path_node.data == 'import_lib': # Import from library
base_path = None
else: # Relative import
if grammar_name == '<string>': # Import relative to script file path if grammar is coded in script
try:
base_file = os.path.abspath(sys.modules['__main__'].__file__)
except AttributeError:
base_file = None
else:
base_file = grammar_name # Import relative to grammar file path if external grammar file
if base_file:
if isinstance(base_file, PackageResource):
base_path = PackageResource(base_file.pkg_name, os.path.split(base_file.path)[0])
else:
base_path = os.path.split(base_file)[0]
else:
base_path = os.path.abspath(os.path.curdir)
return dotted_path, base_path, aliases
def _unpack_definition(self, tree, mangle):
if tree.data == 'rule':
name, params, exp, opts = options_from_rule(*tree.children)
else:
name = tree.children[0].value
params = () # TODO terminal templates
opts = int(tree.children[1]) if len(tree.children) == 3 else 1 # priority
exp = tree.children[-1]
if mangle is not None:
params = tuple(mangle(p) for p in params)
name = mangle(name)
exp = _mangle_exp(exp, mangle)
return name, exp, params, opts
def load_grammar(self, grammar_text, grammar_name="<?>", mangle=None):
tree = _parse_grammar(grammar_text, grammar_name)
imports = {}
for stmt in tree.children:
if stmt.data == 'import':
dotted_path, base_path, aliases = self._unpack_import(stmt, grammar_name)
try:
import_base_path, import_aliases = imports[dotted_path]
assert base_path == import_base_path, 'Inconsistent base_path for %s.' % '.'.join(dotted_path)
import_aliases.update(aliases)
except KeyError:
imports[dotted_path] = base_path, aliases
for dotted_path, (base_path, aliases) in imports.items():
self.do_import(dotted_path, base_path, aliases, mangle)
for stmt in tree.children:
if stmt.data in ('term', 'rule'):
self._define(*self._unpack_definition(stmt, mangle))
elif stmt.data == 'override':
r ,= stmt.children
self._define(*self._unpack_definition(r, mangle), override=True)
elif stmt.data == 'extend':
r ,= stmt.children
self._extend(*self._unpack_definition(r, mangle))
elif stmt.data == 'ignore':
# if mangle is not None, we shouldn't apply ignore, since we aren't in a toplevel grammar
if mangle is None:
self._ignore(*stmt.children)
elif stmt.data == 'declare':
names = [t.value for t in stmt.children]
if mangle is None:
self._declare(*names)
else:
self._declare(*map(mangle, names))
elif stmt.data == 'import':
pass
else:
assert False, stmt
term_defs = { name: exp
for name, (_params, exp, _options) in self._definitions.items()
if self._is_term(name)
}
resolve_term_references(term_defs)
def _remove_unused(self, used):
def rule_dependencies(symbol):
if self._is_term(symbol):
return []
try:
params, tree,_ = self._definitions[symbol]
except KeyError:
return []
return _find_used_symbols(tree) - set(params)
_used = set(bfs(used, rule_dependencies))
self._definitions = {k: v for k, v in self._definitions.items() if k in _used}
def do_import(self, dotted_path, base_path, aliases, base_mangle=None):
assert dotted_path
mangle = _get_mangle('__'.join(dotted_path), aliases, base_mangle)
grammar_path = os.path.join(*dotted_path) + EXT
to_try = self.import_paths + ([base_path] if base_path is not None else []) + [stdlib_loader]
for source in to_try:
try:
if callable(source):
joined_path, text = source(base_path, grammar_path)
else:
joined_path = os.path.join(source, grammar_path)
with open(joined_path, encoding='utf8') as f:
text = f.read()
except IOError:
continue
else:
h = hashlib.md5(text.encode('utf8')).hexdigest()
if self.used_files.get(joined_path, h) != h:
raise RuntimeError("Grammar file was changed during importing")
self.used_files[joined_path] = h
gb = GrammarBuilder(self.global_keep_all_tokens, self.import_paths, self.used_files)
gb.load_grammar(text, joined_path, mangle)
gb._remove_unused(map(mangle, aliases))
for name in gb._definitions:
if name in self._definitions:
raise GrammarError("Cannot import '%s' from '%s': Symbol already defined." % (name, grammar_path))
self._definitions.update(**gb._definitions)
break
else:
# Search failed. Make Python throw a nice error.
open(grammar_path, encoding='utf8')
assert False, "Couldn't import grammar %s, but a corresponding file was found at a place where lark doesn't search for it" % (dotted_path,)
def validate(self):
for name, (params, exp, _options) in self._definitions.items():
for i, p in enumerate(params):
if p in self._definitions:
raise GrammarError("Template Parameter conflicts with rule %s (in template %s)" % (p, name))
if p in params[:i]:
raise GrammarError("Duplicate Template Parameter %s (in template %s)" % (p, name))
if exp is None: # Remaining checks don't apply to abstract rules/terminals
continue
for temp in exp.find_data('template_usage'):
sym = temp.children[0]
args = temp.children[1:]
if sym not in params:
if sym not in self._definitions:
self._grammar_error("Template '%s' used but not defined (in {type} {name})" % sym, name)
if len(args) != len(self._definitions[sym][0]):
expected, actual = len(self._definitions[sym][0]), len(args)
self._grammar_error("Wrong number of template arguments used for {name} "
"(expected %s, got %s) (in {type2} {name2})" % (expected, actual), sym, name)
for sym in _find_used_symbols(exp):
if sym not in self._definitions and sym not in params:
self._grammar_error("{Type} '{name}' used but not defined (in {type2} {name2})", sym, name)
if not set(self._definitions).issuperset(self._ignore_names):
raise GrammarError("Terminals %s were marked to ignore but were not defined!" % (set(self._ignore_names) - set(self._definitions)))
def build(self):
self.validate()
rule_defs = []
term_defs = []
for name, (params, exp, options) in self._definitions.items():
if self._is_term(name):
assert len(params) == 0
term_defs.append((name, (exp, options)))
else:
rule_defs.append((name, params, exp, options))
# resolve_term_references(term_defs)
return Grammar(rule_defs, term_defs, self._ignore_names)
def verify_used_files(file_hashes):
for path, old in file_hashes.items():
text = None
if isinstance(path, str) and os.path.exists(path):
with open(path, encoding='utf8') as f:
text = f.read()
elif isinstance(path, PackageResource):
with suppress(IOError):
text = pkgutil.get_data(*path).decode('utf-8')
if text is None: # We don't know how to load the path. ignore it.
continue
current = hashlib.md5(text.encode()).hexdigest()
if old != current:
logger.info("File %r changed, rebuilding Parser" % path)
return False
return True
def load_grammar(grammar, source, import_paths, global_keep_all_tokens):
    """Parse ``grammar`` text into a Grammar via a GrammarBuilder; returns
    the grammar together with the builder's map of imported-file hashes."""
    gb = GrammarBuilder(global_keep_all_tokens, import_paths)
    gb.load_grammar(grammar, source)
    return gb.build(), gb.used_files
|
en
| 0.810385
|
Parses and creates Grammar objects # Grammar Parser # a : b c+ d # --> # a : b _c d # _c : _c c | c; # a : b c* d # --> # a : b _c? d # _c : _c c | c; # rules_list unpacking # a : b (c|d) e # --> # a : b c e | b d e # # In AST terms: # expansion(b, expansions(c, d), e) # --> # expansions( expansion(b, c, e), expansion(b, d, e) ) # Ensure all children are unique # dedup is expensive, so try to minimize its use # Double alias not allowed Create a unique list of anonymous terminals. Attempt to give meaningful names to them when we add them # If already defined, use the user-defined terminal name # Try to assign an indicative anon-terminal name # Kind of a weird placement.name Helper for ApplyTemplates Apply the templates, creating new rules that represent the used templates %s # In Python 3.6, a new syntax for flags was introduced, that allows us to restrict the scope # of flags to a specific regexp group. We are already using it in `lexer.Pattern._get_flags` # However, for prior Python versions, we still need to use global flags, so we have to make sure # that there are no flag collisions when we merge several terminals. Deepcopy tree `t` without recursion # We change the trees in-place (to support huge grammars) # So deepcopy allows calling compile more than once. # =================== # Compile Terminals # =================== # Convert terminal-trees to strings/regexps # Terminal added through %declare # ================= # Compile Rules # ================= # 1. Pre-process terminals # Adds to terminals # 2. Inline Templates # 3. Convert EBNF to BNF (and apply step 1 & 2) # We have to do it like this because rule_defs might grow due to templates # Dont transform templates # 4. 
Compile tree to Rule objects # Remove duplicates of empty rules, throw error for non-empty duplicates # Empty rule; assert all other attributes are equal # Remove duplicates # Filter out unused rules # Filter out unused terminals Provides a simple way of creating custom import loaders that load from packages via ``pkgutil.get_data`` instead of using `open`. This allows them to be compatible even from within zip files. Relative imports are handled, so you can just freely use them. pkg_name: The name of the package. You can probably provide `__name__` most of the time search_paths: All the path that will be search on absolute imports. # Check whether or not the importing grammar was loaded by this module. # Technically false, but FileNotFound doesn't exist in python2.7, and this message should never reach the end user anyway # TODO Solve with transitive closure (maybe) # Terminal added through %declare # Not just declared # For the grammar parser # Illegal # recover to a new line # already sorted # TODO: is this needed # Imported terminals are of the form `Path__to__Grammar__file__TERMINAL_NAME` # Only the last part is the actual name, and the rest might contain mixed case # if we don't use Integral here, we run into python2.7/python3 problems with long vs int # TODO: think about what to do with 'options' # Multi import # Can't have aliased multi import, so all aliases will be the same as names # Single import # Get name from dotted path # Aliases if exist # Import from library # Relative import # Import relative to script file path if grammar is coded in script # Import relative to grammar file path if external grammar file # TODO terminal templates # priority # if mangle is not None, we shouldn't apply ignore, since we aren't in a toplevel grammar # Search failed. Make Python throw a nice error. # Remaining checks don't apply to abstract rules/terminals # resolve_term_references(term_defs) # We don't know how to load the path. ignore it.
| 2.373142
| 2
|
tricks and tips/multi args function.py
|
DharaniAnche/python-programs
| 1
|
6626191
|
<filename>tricks and tips/multi args function.py
# function that takes as many inputs as the user wants
# (user-desired number of arguments - multi-args)
def addition(*num):
    """Return the sum of all positional arguments using a plain loop."""
    total = 0
    for value in num:
        total += value
    return total
def add(*num):
    """Return the sum of all positional arguments.

    Uses a list comprehension with the walrus operator: each element is the
    running total, so the last element is the full sum.

    Bug fix: the original unconditionally called ``.pop()`` on the
    comprehension result, which raised IndexError when called with no
    arguments; an empty call now returns 0 (matching ``addition``).
    """
    total = 0
    return [total := total + x for x in num].pop() if num else 0
# Demo when run as a script: exercise both summation implementations.
if __name__=='__main__':
    print(f'by 1st method {addition(1,2,4,5,6,7,8,9,0,10)}')
    print(f'by 2nd method {add(1,2)}')
|
<filename>tricks and tips/multi args function.py
#function that takes as many inputs as users wants
#user desired number of arguments - multi-args
def addition(*num):
    """Return the sum of all positional arguments (plain accumulator loop)."""
    #normal way
    res=0
    for i in num:
        res=res+i
    return res
def add(*num):
    """Return the sum of all positional arguments.

    Uses a list comprehension with the walrus operator; the last element of
    the comprehension holds the running total, i.e. the full sum.

    Bug fix: ``.pop()`` on the empty comprehension result raised IndexError
    when no arguments were passed; that case now returns 0.
    """
    total = 0
    return [total := total + x for x in num].pop() if num else 0
# Script entry point: print the result of each implementation.
if __name__=='__main__':
    print(f'by 1st method {addition(1,2,4,5,6,7,8,9,0,10)}')
    print(f'by 2nd method {add(1,2)}')
|
en
| 0.788733
|
#function that takes as many inputs as users wants #user desired number of arguments - multi-args #normal way #using list_comprehension and walrus operator #returns last element in a list
| 4.346739
| 4
|
packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/Operation/PhactoriSampledCellInfo.py
|
tokusanya/seacas
| 0
|
6626192
|
<reponame>tokusanya/seacas
# Copyright(C) 1999-2020 National Technology & Engineering Solutions
# of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
# NTESS, the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of NTESS nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from phactori import *
from .PhactoriVectorLibrary import *
from paraview.simple import *
from .PhactoriSampledCellInfo import *
import vtk
import json
from .PhactoriParaviewMultiBlockRecursion import *
#phactori_combine_to_single_python_file_subpiece_begin_1
def GetCellTestPoint(theCell):
    """Return the center of theCell's axis-aligned bounding box as [x, y, z]."""
    bounds = [0.0] * 6
    theCell.GetBounds(bounds)
    xmin, xmax, ymin, ymax, zmin, zmax = bounds
    return [0.5 * (xmin + xmax), 0.5 * (ymin + ymax), 0.5 * (zmin + zmax)]
def localGetCellijk(ii, jj, kk, inInputCsData, myExtent):
    """Fetch the cell at structured index (ii, jj, kk) from inInputCsData.

    Converts the ijk triple to a flat cell id within myExtent via VTK,
    rather than calling GetCell(i, j, k) directly (see the commented-out
    alternative the original kept).
    """
    #returnCell = inInputCsData.GetCell(ii, jj, kk)
    flatCellId = vtk.vtkStructuredData.ComputeCellIdForExtent(myExtent, [ii, jj, kk])
    return inInputCsData.GetCell(flatCellId)
class PhactoriSampledCellInfo:
    """Describes one sampled cell: its geometric test point (bounds center),
    structured ijk index, sampled data tuple, owning process id, and
    bookkeeping indices used when collecting cells across processes.

    Bug fix: GetIntersectingCollectionExtent referenced an undefined name
    ``retExt`` (wrong capitalization of ``retext``), raising NameError
    whenever the axis did not cross the extent.
    """

    def __init__(self):
        self.cellTestPoint = [0.0, 0.0, 0.0]  # geometric test point (cell bounds center)
        self.pid = -1                          # process id that owns this cell
        self.leafVisitCount = -1               # multiblock leaf the cell came from
        self.ijk = [-1, -1, -1]                # structured-grid cell index
        self.dataTuple = []                    # sampled data values
        self.index = -1                        # flat cell id within the block
        self.segmentIndex = -1                 # seed/segment the cell belongs to
        self.collectionAxis = 2                # axis along which cells are collected

    def ToStr(self):
        """Multi-line human-readable representation."""
        outStr = "PhactoriSampledCellInfo:\n" +\
            "cellTestPoint: " + str(self.cellTestPoint) + "\n" +\
            "pid: " + str(self.pid) + "\n" +\
            "leafVisitCount: " + str(self.leafVisitCount) + "\n" +\
            "ijk: " + str(self.ijk) + "\n" +\
            "index: " + str(self.index) + "\n" +\
            "segmentIndex: " + str(self.segmentIndex) + "\n" +\
            "collectionAxis: " + str(self.collectionAxis) + "\n" +\
            "dataTuple: " + str(self.dataTuple) + "\n"
        return outStr

    def SetFromList(self, newValues):
        """Populate all fields from the GetAsList() layout:
        [cellTestPoint, ijk, dataTuple, pid, leafVisitCount, index,
        segmentIndex, collectionAxis]."""
        tripletVal = newValues[0]
        self.cellTestPoint[0] = tripletVal[0]
        self.cellTestPoint[1] = tripletVal[1]
        self.cellTestPoint[2] = tripletVal[2]
        tripletVal = newValues[1]
        self.ijk[0] = tripletVal[0]
        self.ijk[1] = tripletVal[1]
        self.ijk[2] = tripletVal[2]
        if newValues[2] is None:  # idiom fix: was "== None"
            self.dataTuple = None
        else:
            self.dataTuple = list(newValues[2])
        self.pid = newValues[3]
        self.leafVisitCount = newValues[4]
        self.index = newValues[5]
        self.segmentIndex = newValues[6]
        self.collectionAxis = newValues[7]

    def GetAsList(self):
        """Inverse of SetFromList(): the cell's fields in list layout."""
        return [self.cellTestPoint, self.ijk, self.dataTuple, self.pid, self.leafVisitCount, self.index, self.segmentIndex, self.collectionAxis]

    def ToStrTerseOneLineList(self):
        """One-line representation: the GetAsList() layout as a string."""
        return str(self.GetAsList())

    @staticmethod
    def TerseOneLineJsonFormatComment():
        """Json-comment string documenting the one-line list format."""
        outStr = '{"PhactoriSampledCellInfo output format 1 info":[\n' + \
            '" [cellTestPoint, ijk, dataTuple, pid, index, segmentIndex, collectionAxis]",\n' + \
            '" cellTestPoint is [X, Y, Z], ijk is [i, j, k], dataTuple is [c1, c2, ... cN]"]}'
        return outStr

    def SetIndex(self, newIndex):
        self.index = newIndex

    def SetCollectionAxis(self, newCollectionAxis):
        self.collectionAxis = newCollectionAxis

    def AxisCrossesExtent(self, myExtent, whichAxis):
        """Return True if a line (whichAxis 0-2) or plane (whichAxis 3-5)
        through this cell's ijk index passes through the cell-extent
        myExtent.

        whichAxis codes: i -> 0, j -> 1, k -> 2, ij -> 3, ik -> 4, jk -> 5.
        The named axes vary freely, so only the remaining index components
        are tested against the extent.
        """
        if whichAxis < 3:
            if whichAxis != 0:
                if self.ijk[0] < myExtent[0]:
                    return False
                if self.ijk[0] >= myExtent[1]:
                    return False
            if whichAxis != 1:
                if self.ijk[1] < myExtent[2]:
                    return False
                if self.ijk[1] >= myExtent[3]:
                    return False
            if whichAxis != 2:
                if self.ijk[2] < myExtent[4]:
                    return False
                if self.ijk[2] >= myExtent[5]:
                    return False
        else:
            if whichAxis == 3:
                if self.ijk[2] < myExtent[4]:
                    return False
                if self.ijk[2] >= myExtent[5]:
                    return False
            if whichAxis == 4:
                if self.ijk[1] < myExtent[2]:
                    return False
                if self.ijk[1] >= myExtent[3]:
                    return False
            if whichAxis == 5:
                if self.ijk[0] < myExtent[0]:
                    return False
                if self.ijk[0] >= myExtent[1]:
                    return False
        return True

    def GetIntersectingCollectionExtent(self, whichExtent, whichAxis):
        """Return the sub-extent of whichExtent covered by the axis line or
        plane through this cell (whichAxis codes as in AxisCrossesExtent).
        When there is no intersection, a degenerate (empty) extent is
        returned."""
        retext = list(whichExtent)
        if not self.AxisCrossesExtent(whichExtent, whichAxis):
            # bug fix: the original used the undefined name `retExt` in this
            # branch, raising NameError instead of returning a degenerate extent
            retext[1] = retext[0]
            retext[3] = retext[2]
            retext[5] = retext[4]
            return retext
        if (whichAxis != 0) and (whichAxis != 3) and (whichAxis != 4):
            retext[0] = self.ijk[0]
            retext[1] = self.ijk[0] + 1
        if (whichAxis != 1) and (whichAxis != 3) and (whichAxis != 5):
            retext[2] = self.ijk[1]
            retext[3] = self.ijk[1] + 1
        if (whichAxis != 2) and (whichAxis != 4) and (whichAxis != 5):
            retext[4] = self.ijk[2]
            retext[5] = self.ijk[2] + 1
        return retext

    def SetCellTestPoint(self, inPt):
        self.cellTestPoint[0] = inPt[0]
        self.cellTestPoint[1] = inPt[1]
        self.cellTestPoint[2] = inPt[2]

    def SetIjk(self, ii, jj, kk):
        self.ijk[0] = ii
        self.ijk[1] = jj
        self.ijk[2] = kk

    def SetDataTuple(self, inDataTuple):
        self.dataTuple = list(inDataTuple)

    def SerializeAppendToFloatAndIntArray(self, outSerialFloatArray, outSerialIntArray, inTupleSize = 0):
        """Append this cell's floats (test point + data tuple, padded/truncated
        to inTupleSize) and ints (pid, leafVisitCount, ijk, segmentIndex) to
        the given serialization arrays."""
        outSerialFloatArray.append(self.cellTestPoint[0])
        outSerialFloatArray.append(self.cellTestPoint[1])
        outSerialFloatArray.append(self.cellTestPoint[2])
        if self.dataTuple is not None:
            localTupleLen = len(self.dataTuple)
            for ii in range(0, inTupleSize):
                if ii >= localTupleLen:
                    outSerialFloatArray.append(0.0)
                else:
                    outSerialFloatArray.append(self.dataTuple[ii])
        else:
            for ii in range(0, inTupleSize):
                outSerialFloatArray.append(0.0)
        outSerialIntArray.append(self.pid)
        outSerialIntArray.append(self.leafVisitCount)
        outSerialIntArray.append(self.ijk[0])
        outSerialIntArray.append(self.ijk[1])
        outSerialIntArray.append(self.ijk[2])
        outSerialIntArray.append(self.segmentIndex)

    def SerializeAppendToFloatAndIntArrayZeroVersion(self, outSerialFloatArray, outSerialIntArray, inTupleSize = -1):
        """Append an all-zero placeholder with the same layout as
        SerializeAppendToFloatAndIntArray (used by non-owning processes so a
        sum-style reduction leaves only the real cell's values)."""
        outSerialFloatArray.append(0.0)
        outSerialFloatArray.append(0.0)
        outSerialFloatArray.append(0.0)
        if inTupleSize < 0:
            # default: match this instance's own data tuple length
            for _ffval in self.dataTuple:
                outSerialFloatArray.append(0.0)
        else:
            for _ffval in range(0, inTupleSize):
                outSerialFloatArray.append(0.0)
        outSerialIntArray.append(0)
        outSerialIntArray.append(0)
        outSerialIntArray.append(0)
        outSerialIntArray.append(0)
        outSerialIntArray.append(0)
        outSerialIntArray.append(0)

    @staticmethod
    def GetSerializeFloatAndIntSize(tupleSize = 0):
        """Return (numFloats, numInts) one serialized cell occupies."""
        floatsize = 3 + tupleSize
        intsize = 6
        return floatsize, intsize

    def SerializeSetFromFloatAndIntArray(self, inSerialFloatArray, inSerialIntArray, inIndex, inTupleSize = 0):
        """Populate this cell from entry inIndex of the serialization arrays
        (inverse of SerializeAppendToFloatAndIntArray)."""
        floatsize = 3 + inTupleSize
        intsize = 6
        floatIndex = floatsize * inIndex
        intIndex = intsize * inIndex
        self.cellTestPoint[0] = inSerialFloatArray[floatIndex+0]
        self.cellTestPoint[1] = inSerialFloatArray[floatIndex+1]
        self.cellTestPoint[2] = inSerialFloatArray[floatIndex+2]
        self.dataTuple = []
        if inTupleSize > 0:
            for ii in range(0, inTupleSize):
                self.dataTuple.append(inSerialFloatArray[floatIndex+3+ii])
        self.pid = inSerialIntArray[intIndex + 0]
        self.leafVisitCount = inSerialIntArray[intIndex + 1]
        self.ijk[0] = inSerialIntArray[intIndex + 2]
        self.ijk[1] = inSerialIntArray[intIndex + 3]
        self.ijk[2] = inSerialIntArray[intIndex + 4]
        self.segmentIndex = inSerialIntArray[intIndex + 5]

    def Populate1(self, ii, jj, kk, myPid, leafVisitCount, seedCellIndex,
                  inInputCsData, myExtent, numCells, outputCellArray, dataArrayNumCmpnts, defaultTuple):
        """Used during recursion to set up the cell based on the ii, jj, kk
        indices: fills ijk/pid/segment bookkeeping, the bounds-center test
        point, the flat cell id, and the data tuple (defaultTuple when the
        computed cell id falls outside [0, numCells))."""
        self.SetIjk(ii, jj, kk)
        self.pid = myPid
        self.leafVisitCount = leafVisitCount
        self.segmentIndex = seedCellIndex
        dataCell = localGetCellijk(ii, jj, kk, inInputCsData, myExtent)
        cellTestPoint = GetCellTestPoint(dataCell)
        self.SetCellTestPoint(cellTestPoint)
        cellId = vtk.vtkStructuredData.ComputeCellIdForExtent(myExtent, [ii, jj, kk])
        self.SetIndex(cellId)
        if dataArrayNumCmpnts > 0:
            if (cellId < numCells) and (cellId >= 0):
                dataTuple = outputCellArray.GetTuple(cellId)
                self.SetDataTuple(dataTuple)
            else:
                self.SetDataTuple(defaultTuple)
class PhactoriFindCellWithMinMaxDataOnThisProcessRecursionParams:
    """Accumulator state for the per-block min/max cell search."""

    def __init__(self):
        self.leafVisitCount = 0
        self.dataArrayName = "noname"
        self.minCell = sys.float_info.max  # placeholder; replaced by SetUpForRecursion
        self.maxCell = None
        self.dataTotal = 0.0
        self.dataCount = 0
        self.currentMinVal = sys.float_info.max
        self.currentMaxVal = -sys.float_info.max

    def SetUpForRecursion(self, cellDataArrayName):
        """Reset the accumulators and create sentinel min/max cells for a
        fresh search over the array named cellDataArrayName."""
        self.dataTotal, self.dataCount = 0.0, 0
        self.currentMinVal = sys.float_info.max
        self.currentMaxVal = -sys.float_info.max
        self.dataArrayName = cellDataArrayName
        for attrName, sentinelValue in (("minCell", self.currentMinVal),
                                        ("maxCell", self.currentMaxVal)):
            cell = PhactoriSampledCellInfo()
            cell.SetFromList([[0.0, 0.0, 0.0], [-1, -1, -1], [sentinelValue], -1, -1, -1, -1, -1])
            setattr(self, attrName, cell)
def PhactoriFindCellWithMinMaxDataOnThisProcessInBlock(recursionObject, inInputCsData, inParameters):
    """Recursion callback: scan every cell of this block, accumulating the
    running data total/count and tracking the cells holding the smallest and
    largest value of inParameters.dataArrayName.  Results are written into
    inParameters (a PhactoriFindCellWithMinMaxDataOnThisProcessRecursionParams).

    Bug fixes vs. the original:
    - missing ``return`` after the "returning 2"/"returning 3" debug messages
      caused a crash dereferencing a None cellData / outputCellArray;
    - the max branch also overwrote ``currentMinVal``, corrupting the
      running minimum;
    - the min/max branches self-assigned ``inParameters.leafVisitCount``
      (a no-op); presumably the cell's leafVisitCount was intended — TODO
      confirm against callers of minCell/maxCell.leafVisitCount.
    """
    if PhactoriDbg(100):
        myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcessInBlock entered\n")
    inParameters.leafVisitCount += 1
    numCells = inInputCsData.GetNumberOfCells()
    numPoints = inInputCsData.GetNumberOfPoints()
    if (numCells == 0) or (numPoints == 0):
        if PhactoriDbg(100):
            myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcessInBlock returning 1\n")
        # no cells here
        return
    cellData = inInputCsData.GetCellData()
    if cellData is None:
        if PhactoriDbg(100):
            myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcessInBlock returning 2\n")
        return
    outputCellArray = cellData.GetArray(inParameters.dataArrayName)
    if outputCellArray is None:
        if PhactoriDbg(100):
            myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcessInBlock returning 3\n")
        return
    myPid = SmartGetLocalProcessId()
    for cellIndex in range(0, numCells):
        thisCellDataTuple = outputCellArray.GetTuple(cellIndex)
        cellDataVal = thisCellDataTuple[0]
        inParameters.dataTotal += cellDataVal
        inParameters.dataCount += 1
        if cellDataVal < inParameters.currentMinVal:
            inParameters.currentMinVal = cellDataVal
            cellTestPoint = GetCellTestPoint(inInputCsData.GetCell(cellIndex))
            inParameters.minCell.index = cellIndex
            inParameters.minCell.dataTuple[0] = cellDataVal
            inParameters.minCell.cellTestPoint[0] = cellTestPoint[0]
            inParameters.minCell.cellTestPoint[1] = cellTestPoint[1]
            inParameters.minCell.cellTestPoint[2] = cellTestPoint[2]
            inParameters.minCell.pid = myPid
            inParameters.minCell.leafVisitCount = inParameters.leafVisitCount
        if cellDataVal > inParameters.currentMaxVal:
            inParameters.currentMaxVal = cellDataVal
            cellTestPoint = GetCellTestPoint(inInputCsData.GetCell(cellIndex))
            inParameters.maxCell.index = cellIndex
            inParameters.maxCell.dataTuple[0] = cellDataVal
            inParameters.maxCell.cellTestPoint[0] = cellTestPoint[0]
            inParameters.maxCell.cellTestPoint[1] = cellTestPoint[1]
            inParameters.maxCell.cellTestPoint[2] = cellTestPoint[2]
            inParameters.maxCell.pid = myPid
            inParameters.maxCell.leafVisitCount = inParameters.leafVisitCount
    if PhactoriDbg(100):
        myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcessInBlock returning\n")
def PhactoriFindCellWithMinMaxDataOnThisProcess(paraviewFilter, cellDataArrayName):
    """Recurse over all leaf blocks of paraviewFilter and return this
    process's [minCell, maxCell] pair for the array cellDataArrayName."""
    if PhactoriDbg(100):
        myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcess entered\n" + \
            "paraviewFilter: " + str(paraviewFilter) + "\n")
    params = PhactoriFindCellWithMinMaxDataOnThisProcessRecursionParams()
    params.SetUpForRecursion(cellDataArrayName)
    controller = PhactoriParaviewMultiBlockRecursionControl()
    controller.mParameters = params
    controller.mOperationToDoPerBlock = PhactoriFindCellWithMinMaxDataOnThisProcessInBlock
    PhactoriRecusivelyDoMethodPerBlockFromParaViewFilter(controller, paraviewFilter)
    if PhactoriDbg(100):
        myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcess returning\n")
    return [params.minCell, params.maxCell]
def PhactoriFindCellWtihMinMaxDataUsingMPI(paraviewFilter, cellDataArrayName):
    """Find the global min/max cells for cellDataArrayName across all MPI
    ranks: compute the local pair, then merge with an MPI reduction.

    (The historical 'Wtih' misspelling is kept for caller compatibility.)
    """
    if PhactoriDbg(100):
        myDebugPrint3("PhactoriFindCellWtihMinMaxDataUsingMPI entered\n" + \
            "paraviewFilter: " + str(paraviewFilter) + "\n")
    localPair = PhactoriFindCellWithMinMaxDataOnThisProcess(paraviewFilter, cellDataArrayName)
    globalPair = PhactoriLocalToGlobalCellsWithMinMaxDataUsingMPI(localPair, 1)
    if PhactoriDbg(100):
        myDebugPrint3("PhactoriFindCellWtihMinMaxDataUsingMPI returning\n")
    return globalPair
def PhactoriLocalToGlobalCellsWithMinMaxDataUsingMPI(localPidMinMaxCellPair, tupleSize):
    """Given this rank's [minCell, maxCell] pair, use MPI reductions to
    return the global [minCell, maxCell] pair across all ranks.

    The global min is obtained through the max-style reduction by negating
    the local minimum.  Each rank then serializes either its real cell (if
    it owns a global extreme) or an all-zero placeholder, and a sum-style
    reduction reassembles the winning cells on every rank.
    NOTE(review): if two ranks hold exactly the same extreme value, both
    contribute non-zero data to the reductions — this assumes the extreme
    value is unique to one rank; confirm against UseReduceOn*List semantics.
    """
    if PhactoriDbg(100):
        myDebugPrint3("PhactoriLocalToGlobalCellsWithMinMaxDataUsingMPI entered\n")
    #find overall min/max
    minVal = localPidMinMaxCellPair[0].dataTuple[0]
    maxVal = localPidMinMaxCellPair[1].dataTuple[0]
    if PhactoriDbg(100):
        myDebugPrint3("local min/max, tupleSize: " + str([minVal, maxVal, tupleSize]) + "\n")
    # negate the min so one reduction yields both extremes
    localMinMax = [-minVal, maxVal]
    globalMinMax = UseReduceOnFloatList(localMinMax, 0)  # op 0 — presumably max-reduce; confirm
    globalMinVal = -globalMinMax[0]
    globalMaxVal = globalMinMax[1]
    if PhactoriDbg(100):
        myDebugPrint3("global min/max, tupleSize: " + str([globalMinVal, globalMaxVal, tupleSize]) + "\n")
    # determine which rank(s) own the global extremes
    localPidMinMax = [-1,-1]
    myPid = SmartGetLocalProcessId()
    if globalMinVal == minVal:
        localPidMinMax[0] = myPid
    if globalMaxVal == maxVal:
        localPidMinMax[1] = myPid
    if PhactoriDbg(100):
        myDebugPrint3("localPidMinMax: " + str(localPidMinMax) + "\n")
    globalPidMinMax = UseReduceOnIntegerList(localPidMinMax, 0)
    if PhactoriDbg(100):
        myDebugPrint3("globalPidMinMax: " + str(globalPidMinMax) + "\n")
    # owning ranks serialize their real cells; everyone else contributes zeros
    localSerializedFloatArray = []
    localSerializedIntArray = []
    if globalPidMinMax[0] == myPid:
        localPidMinMaxCellPair[0].SerializeAppendToFloatAndIntArray(localSerializedFloatArray, localSerializedIntArray, tupleSize)
    else:
        localPidMinMaxCellPair[0].SerializeAppendToFloatAndIntArrayZeroVersion(localSerializedFloatArray, localSerializedIntArray, tupleSize)
    if globalPidMinMax[1] == myPid:
        localPidMinMaxCellPair[1].SerializeAppendToFloatAndIntArray(localSerializedFloatArray, localSerializedIntArray, tupleSize)
    else:
        localPidMinMaxCellPair[1].SerializeAppendToFloatAndIntArrayZeroVersion(localSerializedFloatArray, localSerializedIntArray, tupleSize)
    globalSerializedFloatArray = UseReduceOnFloatList(localSerializedFloatArray, 2)  # op 2 — presumably sum-reduce; confirm
    globalSerializedIntArray = UseReduceOnIntegerList(localSerializedIntArray, 2)
    # rebuild the winning cells from the reduced arrays (entry 0 = min, 1 = max)
    globalMinCell = PhactoriSampledCellInfo()
    globalMaxCell = PhactoriSampledCellInfo()
    globalMinCell.SerializeSetFromFloatAndIntArray(globalSerializedFloatArray, globalSerializedIntArray, 0, tupleSize)
    globalMaxCell.SerializeSetFromFloatAndIntArray(globalSerializedFloatArray, globalSerializedIntArray, 1, tupleSize)
    if PhactoriDbg(100):
        myDebugPrint3("PhactoriLocalToGlobalCellsWithMinMaxDataUsingMPI returning\n")
    return [globalMinCell, globalMaxCell]
#phactori_combine_to_single_python_file_subpiece_end_1
|
# Copyright(C) 1999-2020 National Technology & Engineering Solutions
# of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
# NTESS, the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of NTESS nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from phactori import *
from .PhactoriVectorLibrary import *
from paraview.simple import *
from .PhactoriSampledCellInfo import *
import vtk
import json
from .PhactoriParaviewMultiBlockRecursion import *
#phactori_combine_to_single_python_file_subpiece_begin_1
def GetCellTestPoint(theCell):
  """Return the center point [x, y, z] of a cell's axis-aligned bounding box."""
  bounds = [0.0] * 6
  theCell.GetBounds(bounds)
  # bounds is laid out as [xmin, xmax, ymin, ymax, zmin, zmax]
  return [0.5 * (bounds[2 * axis] + bounds[2 * axis + 1]) for axis in range(3)]
def localGetCellijk(ii, jj, kk, inInputCsData, myExtent):
  """Fetch the cell at structured indices (ii, jj, kk) from a structured
  dataset by converting the ijk triple to a flat cell id for myExtent."""
  flatCellId = vtk.vtkStructuredData.ComputeCellIdForExtent(myExtent, [ii, jj, kk])
  return inInputCsData.GetCell(flatCellId)
class PhactoriSampledCellInfo:
  """Record describing one sampled cell from a structured dataset.

  Fields:
    cellTestPoint   [x, y, z] bounding-box center of the cell
    pid             owning MPI process id (-1 when unset)
    leafVisitCount  which leaf block the cell came from (-1 when unset)
    ijk             structured [i, j, k] cell indices
    dataTuple       sampled data components for the cell (list, or None)
    index           flat cell index within the block
    segmentIndex    index of the seed/segment this cell belongs to
    collectionAxis  axis code: i->0, j->1, k->2, ij->3, ik->4, jk->5
  """

  def __init__(self):
    self.cellTestPoint = [0.0, 0.0, 0.0]
    self.pid = -1
    self.leafVisitCount = -1
    self.ijk = [-1, -1, -1]
    self.dataTuple = []
    self.index = -1
    self.segmentIndex = -1
    self.collectionAxis = 2

  def ToStr(self):
    """Return a multi-line human-readable description of this cell."""
    outStr = "PhactoriSampledCellInfo:\n" +\
      "cellTestPoint: " + str(self.cellTestPoint) + "\n" +\
      "pid: " + str(self.pid) + "\n" +\
      "leafVisitCount: " + str(self.leafVisitCount) + "\n" +\
      "ijk: " + str(self.ijk) + "\n" +\
      "index: " + str(self.index) + "\n" +\
      "segmentIndex: " + str(self.segmentIndex) + "\n" +\
      "collectionAxis: " + str(self.collectionAxis) + "\n" +\
      "dataTuple: " + str(self.dataTuple) + "\n"
    return outStr

  def SetFromList(self, newValues):
    """Populate all fields from a list in GetAsList() order:
    [cellTestPoint, ijk, dataTuple, pid, leafVisitCount, index,
     segmentIndex, collectionAxis]."""
    tripletVal = newValues[0]
    self.cellTestPoint[0] = tripletVal[0]
    self.cellTestPoint[1] = tripletVal[1]
    self.cellTestPoint[2] = tripletVal[2]
    tripletVal = newValues[1]
    self.ijk[0] = tripletVal[0]
    self.ijk[1] = tripletVal[1]
    self.ijk[2] = tripletVal[2]
    if newValues[2] is None:
      self.dataTuple = None
    else:
      self.dataTuple = list(newValues[2])
    self.pid = newValues[3]
    self.leafVisitCount = newValues[4]
    self.index = newValues[5]
    self.segmentIndex = newValues[6]
    self.collectionAxis = newValues[7]

  def GetAsList(self):
    """Return all fields as a flat list (inverse of SetFromList)."""
    return [self.cellTestPoint, self.ijk, self.dataTuple, self.pid, self.leafVisitCount, self.index, self.segmentIndex, self.collectionAxis]

  def ToStrTerseOneLineList(self):
    """Return a terse one-line string form of GetAsList()."""
    return str(self.GetAsList())

  @staticmethod
  def TerseOneLineJsonFormatComment():
    """Return a json-style comment string documenting the terse format."""
    outStr = '{"PhactoriSampledCellInfo output format 1 info":[\n' + \
      '" [cellTestPoint, ijk, dataTuple, pid, index, segmentIndex, collectionAxis]",\n' + \
      '" cellTestPoint is [X, Y, Z], ijk is [i, j, k], dataTuple is [c1, c2, ... cN]"]}'
    return outStr

  def SetIndex(self, newIndex):
    self.index = newIndex

  def SetCollectionAxis(self, newCollectionAxis):
    self.collectionAxis = newCollectionAxis

  def AxisCrossesExtent(self, myExtent, whichAxis):
    """Return True when the line/plane through this cell's ijk along the
    coded axis intersects myExtent.

    axis codes: i -> 0, j -> 1, k -> 2, ij -> 3, ik -> 4, jk -> 5
    """
    if whichAxis < 3:
      # single-axis codes: both axes NOT being collected must lie in range
      if whichAxis != 0:
        if self.ijk[0] < myExtent[0]:
          return False
        if self.ijk[0] >= myExtent[1]:
          return False
      if whichAxis != 1:
        if self.ijk[1] < myExtent[2]:
          return False
        if self.ijk[1] >= myExtent[3]:
          return False
      if whichAxis != 2:
        if self.ijk[2] < myExtent[4]:
          return False
        if self.ijk[2] >= myExtent[5]:
          return False
    else:
      # plane codes: only the one axis perpendicular to the plane is checked
      if whichAxis == 3:
        if self.ijk[2] < myExtent[4]:
          return False
        if self.ijk[2] >= myExtent[5]:
          return False
      if whichAxis == 4:
        if self.ijk[1] < myExtent[2]:
          return False
        if self.ijk[1] >= myExtent[3]:
          return False
      if whichAxis == 5:
        if self.ijk[0] < myExtent[0]:
          return False
        if self.ijk[0] >= myExtent[1]:
          return False
    return True

  def GetIntersectingCollectionExtent(self, whichExtent, whichAxis):
    """Return the sub-extent of whichExtent obtained by collecting through
    this cell along the coded axis; an empty extent when there is no
    intersection.

    axis codes: i -> 0, j -> 1, k -> 2, ij -> 3, ik -> 4, jk -> 5
    """
    retExt = list(whichExtent)
    if self.AxisCrossesExtent(whichExtent, whichAxis) == False:
      # BUG FIX: this branch originally used the name 'retExt' while the
      # rest of the method used 'retext', so a non-intersecting cell raised
      # NameError instead of returning a collapsed (empty) extent.
      retExt[1] = retExt[0]
      retExt[3] = retExt[2]
      retExt[5] = retExt[4]
      return retExt
    # pin every axis that is not part of the collection to this cell's index
    if (whichAxis != 0) and (whichAxis != 3) and (whichAxis != 4):
      retExt[0] = self.ijk[0]
      retExt[1] = self.ijk[0] + 1
    if (whichAxis != 1) and (whichAxis != 3) and (whichAxis != 5):
      retExt[2] = self.ijk[1]
      retExt[3] = self.ijk[1] + 1
    if (whichAxis != 2) and (whichAxis != 4) and (whichAxis != 5):
      retExt[4] = self.ijk[2]
      retExt[5] = self.ijk[2] + 1
    return retExt

  def SetCellTestPoint(self, inPt):
    self.cellTestPoint[0] = inPt[0]
    self.cellTestPoint[1] = inPt[1]
    self.cellTestPoint[2] = inPt[2]

  def SetIjk(self, ii, jj, kk):
    self.ijk[0] = ii
    self.ijk[1] = jj
    self.ijk[2] = kk

  def SetDataTuple(self, inDataTuple):
    self.dataTuple = list(inDataTuple)

  def SerializeAppendToFloatAndIntArray(self, outSerialFloatArray, outSerialIntArray, inTupleSize = 0):
    """Append this cell's float fields (test point + inTupleSize data
    components, zero-padded) and int fields to the two flat arrays."""
    outSerialFloatArray.append(self.cellTestPoint[0])
    outSerialFloatArray.append(self.cellTestPoint[1])
    outSerialFloatArray.append(self.cellTestPoint[2])
    if self.dataTuple is not None:
      localTupleLen = len(self.dataTuple)
      for ii in range(0, inTupleSize):
        if ii >= localTupleLen:
          outSerialFloatArray.append(0.0)
        else:
          outSerialFloatArray.append(self.dataTuple[ii])
    else:
      for ii in range(0, inTupleSize):
        outSerialFloatArray.append(0.0)
    outSerialIntArray.append(self.pid)
    outSerialIntArray.append(self.leafVisitCount)
    outSerialIntArray.append(self.ijk[0])
    outSerialIntArray.append(self.ijk[1])
    outSerialIntArray.append(self.ijk[2])
    outSerialIntArray.append(self.segmentIndex)

  def SerializeAppendToFloatAndIntArrayZeroVersion(self, outSerialFloatArray, outSerialIntArray, inTupleSize = -1):
    """Append an all-zero serialization the same size as
    SerializeAppendToFloatAndIntArray would produce (used as the
    contribution of non-owning ranks in a sum-style reduce)."""
    outSerialFloatArray.append(0.0)
    outSerialFloatArray.append(0.0)
    outSerialFloatArray.append(0.0)
    if inTupleSize < 0:
      # size from the current dataTuple when no explicit size was given
      for ffval in self.dataTuple:
        outSerialFloatArray.append(0.0)
    else:
      for ffval in range(0, inTupleSize):
        outSerialFloatArray.append(0.0)
    outSerialIntArray.append(0)
    outSerialIntArray.append(0)
    outSerialIntArray.append(0)
    outSerialIntArray.append(0)
    outSerialIntArray.append(0)
    outSerialIntArray.append(0)

  @staticmethod
  def GetSerializeFloatAndIntSize(tupleSize = 0):
    """Return (float count, int count) per serialized cell."""
    floatsize = 3 + tupleSize
    intsize = 6
    return floatsize, intsize

  def SerializeSetFromFloatAndIntArray(self, inSerialFloatArray, inSerialIntArray, inIndex, inTupleSize = 0):
    """Populate this cell from entry inIndex of flat serialized arrays
    produced by SerializeAppendToFloatAndIntArray."""
    floatsize = 3 + inTupleSize
    intsize = 6
    floatIndex = floatsize * inIndex
    intIndex = intsize * inIndex
    self.cellTestPoint[0] = inSerialFloatArray[floatIndex+0]
    self.cellTestPoint[1] = inSerialFloatArray[floatIndex+1]
    self.cellTestPoint[2] = inSerialFloatArray[floatIndex+2]
    self.dataTuple = []
    if inTupleSize > 0:
      for ii in range(0, inTupleSize):
        self.dataTuple.append(inSerialFloatArray[floatIndex+3+ii])
    self.pid = inSerialIntArray[intIndex + 0]
    self.leafVisitCount = inSerialIntArray[intIndex + 1]
    self.ijk[0] = inSerialIntArray[intIndex + 2]
    self.ijk[1] = inSerialIntArray[intIndex + 3]
    self.ijk[2] = inSerialIntArray[intIndex + 4]
    self.segmentIndex = inSerialIntArray[intIndex + 5]

  def Populate1(self, ii, jj, kk, myPid, leafVisitCount, seedCellIndex,
    inInputCsData, myExtent, numCells, outputCellArray, dataArrayNumCmpnts, defaultTuple):
    """Fill this record from the cell at (ii, jj, kk) of a structured block.

    Used during recursion; the data tuple is read from outputCellArray when
    the computed flat cell id is valid, else defaultTuple is used.
    """
    self.SetIjk(ii, jj, kk)
    self.pid = myPid
    self.leafVisitCount = leafVisitCount
    self.segmentIndex = seedCellIndex
    dataCell = localGetCellijk(ii, jj, kk, inInputCsData, myExtent)
    cellTestPoint = GetCellTestPoint(dataCell)
    self.SetCellTestPoint(cellTestPoint)
    cellId = vtk.vtkStructuredData.ComputeCellIdForExtent(myExtent, [ii, jj, kk])
    self.SetIndex(cellId)
    if dataArrayNumCmpnts > 0:
      if (cellId < numCells) and (cellId >= 0):
        dataTuple = outputCellArray.GetTuple(cellId)
        self.SetDataTuple(dataTuple)
      else:
        self.SetDataTuple(defaultTuple)
class PhactoriFindCellWithMinMaxDataOnThisProcessRecursionParams:
  """Per-process accumulation state for the min/max cell search recursion.

  SetUpForRecursion must be called before use; it installs fresh
  PhactoriSampledCellInfo records for minCell/maxCell and resets the
  running extrema and totals.
  """
  def __init__(self):
    self.leafVisitCount = 0
    self.dataArrayName = "noname"
    # BUG FIX: minCell was initialized to sys.float_info.max (a float)
    # while maxCell was None; both are placeholders replaced with
    # PhactoriSampledCellInfo instances in SetUpForRecursion
    self.minCell = None
    self.maxCell = None
    self.dataTotal = 0.0
    self.dataCount = 0
    self.currentMinVal = sys.float_info.max
    self.currentMaxVal = -sys.float_info.max

  def SetUpForRecursion(self, cellDataArrayName):
    """Reset accumulators and create fresh min/max cell records for a new
    search over the array named cellDataArrayName."""
    self.dataTotal = 0.0
    self.dataCount = 0
    self.currentMinVal = sys.float_info.max
    self.currentMaxVal = -sys.float_info.max
    self.dataArrayName = cellDataArrayName
    self.minCell = PhactoriSampledCellInfo()
    self.minCell.SetFromList([[0.0,0.0,0.0], [-1,-1,-1], [self.currentMinVal], -1, -1, -1, -1, -1])
    self.maxCell = PhactoriSampledCellInfo()
    self.maxCell.SetFromList([[0.0,0.0,0.0], [-1,-1,-1], [self.currentMaxVal], -1, -1, -1, -1, -1])
def PhactoriFindCellWithMinMaxDataOnThisProcessInBlock(recursionObject, inInputCsData, inParameters):
  """Recursion callback: scan one block's cells and update the running
  min/max cell records in inParameters.

  inParameters is a PhactoriFindCellWithMinMaxDataOnThisProcessRecursionParams
  (SetUpForRecursion already called); only component 0 of the named cell
  data array is compared.
  """
  if PhactoriDbg(100):
    myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcessInBlock entered\n")
  inParameters.leafVisitCount += 1
  numCells = inInputCsData.GetNumberOfCells()
  numPoints = inInputCsData.GetNumberOfPoints()
  if (numCells == 0) or (numPoints == 0):
    if PhactoriDbg(100):
      myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcessInBlock returning 1\n")
    #no cells here
    return
  cellData = inInputCsData.GetCellData()
  if cellData is None:
    if PhactoriDbg(100):
      myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcessInBlock returning 2\n")
    # BUG FIX: original was missing this return and fell through to
    # cellData.GetArray, crashing with AttributeError
    return
  outputCellArray = cellData.GetArray(inParameters.dataArrayName)
  if outputCellArray is None:
    if PhactoriDbg(100):
      myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcessInBlock returning 3\n")
    # BUG FIX: original was missing this return and fell through to the
    # loop below, crashing on outputCellArray.GetTuple
    return
  myPid = SmartGetLocalProcessId()
  for cellIndex in range(0, numCells):
    thisCellDataTuple = outputCellArray.GetTuple(cellIndex)
    cellDataVal = thisCellDataTuple[0]
    inParameters.dataTotal += cellDataVal
    inParameters.dataCount += 1
    if cellDataVal < inParameters.currentMinVal:
      inParameters.currentMinVal = cellDataVal
      oneCell = inInputCsData.GetCell(cellIndex)
      cellTestPoint = GetCellTestPoint(oneCell)
      inParameters.minCell.index = cellIndex
      inParameters.minCell.dataTuple[0] = cellDataVal
      inParameters.minCell.cellTestPoint[0] = cellTestPoint[0]
      inParameters.minCell.cellTestPoint[1] = cellTestPoint[1]
      inParameters.minCell.cellTestPoint[2] = cellTestPoint[2]
      inParameters.minCell.pid = myPid
      # BUG FIX: original wrote 'inParameters.leafVisitCount =
      # inParameters.leafVisitCount' (a no-op); record the block on the cell
      inParameters.minCell.leafVisitCount = inParameters.leafVisitCount
    if cellDataVal > inParameters.currentMaxVal:
      inParameters.currentMaxVal = cellDataVal
      oneCell = inInputCsData.GetCell(cellIndex)
      cellTestPoint = GetCellTestPoint(oneCell)
      # BUG FIX: original clobbered currentMinVal with the new max value
      # here (breaking subsequent min detection) and fetched the cell test
      # point twice; both removed
      inParameters.maxCell.index = cellIndex
      inParameters.maxCell.dataTuple[0] = cellDataVal
      inParameters.maxCell.cellTestPoint[0] = cellTestPoint[0]
      inParameters.maxCell.cellTestPoint[1] = cellTestPoint[1]
      inParameters.maxCell.cellTestPoint[2] = cellTestPoint[2]
      inParameters.maxCell.pid = myPid
      inParameters.maxCell.leafVisitCount = inParameters.leafVisitCount
  if PhactoriDbg(100):
    myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcessInBlock returning\n")
def PhactoriFindCellWithMinMaxDataOnThisProcess(paraviewFilter, cellDataArrayName):
  """Walk every local block of paraviewFilter and return [minCell, maxCell],
  the PhactoriSampledCellInfo records holding this process's extrema of the
  named cell data array."""
  if PhactoriDbg(100):
    myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcess entered\n" + \
      "paraviewFilter: " + str(paraviewFilter) + "\n")
  searchState = PhactoriFindCellWithMinMaxDataOnThisProcessRecursionParams()
  searchState.SetUpForRecursion(cellDataArrayName)
  controller = PhactoriParaviewMultiBlockRecursionControl()
  controller.mParameters = searchState
  controller.mOperationToDoPerBlock = PhactoriFindCellWithMinMaxDataOnThisProcessInBlock
  PhactoriRecusivelyDoMethodPerBlockFromParaViewFilter(controller, paraviewFilter)
  if PhactoriDbg(100):
    myDebugPrint3("PhactoriFindCellWithMinMaxDataOnThisProcess returning\n")
  return [searchState.minCell, searchState.maxCell]
def PhactoriFindCellWtihMinMaxDataUsingMPI(paraviewFilter, cellDataArrayName):
  """Find the global min/max cells of the named cell data array across all
  MPI ranks: a local per-process search followed by an MPI combine.

  NOTE: 'Wtih' is a typo in the public name, preserved for callers.
  """
  if PhactoriDbg(100):
    myDebugPrint3("PhactoriFindCellWtihMinMaxDataUsingMPI entered\n" + \
      "paraviewFilter: " + str(paraviewFilter) + "\n")
  localExtrema = PhactoriFindCellWithMinMaxDataOnThisProcess(paraviewFilter, cellDataArrayName)
  globalExtrema = PhactoriLocalToGlobalCellsWithMinMaxDataUsingMPI(localExtrema, 1)
  if PhactoriDbg(100):
    myDebugPrint3("PhactoriFindCellWtihMinMaxDataUsingMPI returning\n")
  return globalExtrema
def PhactoriLocalToGlobalCellsWithMinMaxDataUsingMPI(localPidMinMaxCellPair, tupleSize):
  """Combine per-rank [minCell, maxCell] pairs into the global pair.

  localPidMinMaxCellPair: this rank's [minCell, maxCell]
  (PhactoriSampledCellInfo records, value in dataTuple[0]).
  tupleSize: number of data components serialized per cell.
  Returns [globalMinCell, globalMaxCell] on every rank.
  """
  if PhactoriDbg(100):
    myDebugPrint3("PhactoriLocalToGlobalCellsWithMinMaxDataUsingMPI entered\n")
  #find overall min/max
  minVal = localPidMinMaxCellPair[0].dataTuple[0]
  maxVal = localPidMinMaxCellPair[1].dataTuple[0]
  if PhactoriDbg(100):
    myDebugPrint3("local min/max, tupleSize: " + str([minVal, maxVal, tupleSize]) + "\n")
  # negate the min so one reduce (op code 0 -- presumably max; TODO confirm
  # against UseReduceOnFloatList) yields both global extrema at once
  localMinMax = [-minVal, maxVal]
  globalMinMax = UseReduceOnFloatList(localMinMax, 0)
  globalMinVal = -globalMinMax[0]
  globalMaxVal = globalMinMax[1]
  if PhactoriDbg(100):
    myDebugPrint3("global min/max, tupleSize: " + str([globalMinVal, globalMaxVal, tupleSize]) + "\n")
  # identify which rank owns each extremum; non-owning ranks contribute -1
  localPidMinMax = [-1,-1]
  myPid = SmartGetLocalProcessId()
  if globalMinVal == minVal:
    localPidMinMax[0] = myPid
  if globalMaxVal == maxVal:
    localPidMinMax[1] = myPid
  if PhactoriDbg(100):
    myDebugPrint3("localPidMinMax: " + str(localPidMinMax) + "\n")
  globalPidMinMax = UseReduceOnIntegerList(localPidMinMax, 0)
  if PhactoriDbg(100):
    myDebugPrint3("globalPidMinMax: " + str(globalPidMinMax) + "\n")
  # only the owning rank serializes real cell data; every other rank appends
  # an all-zero block of the same size so that the reduce with op code 2
  # (presumably a sum; TODO confirm) reproduces the owner's values everywhere
  localSerializedFloatArray = []
  localSerializedIntArray = []
  if globalPidMinMax[0] == myPid:
    localPidMinMaxCellPair[0].SerializeAppendToFloatAndIntArray(localSerializedFloatArray, localSerializedIntArray, tupleSize)
  else:
    localPidMinMaxCellPair[0].SerializeAppendToFloatAndIntArrayZeroVersion(localSerializedFloatArray, localSerializedIntArray, tupleSize)
  if globalPidMinMax[1] == myPid:
    localPidMinMaxCellPair[1].SerializeAppendToFloatAndIntArray(localSerializedFloatArray, localSerializedIntArray, tupleSize)
  else:
    localPidMinMaxCellPair[1].SerializeAppendToFloatAndIntArrayZeroVersion(localSerializedFloatArray, localSerializedIntArray, tupleSize)
  globalSerializedFloatArray = UseReduceOnFloatList(localSerializedFloatArray, 2)
  globalSerializedIntArray = UseReduceOnIntegerList(localSerializedIntArray, 2)
  # deserialize slot 0 (min) and slot 1 (max) back into cell info records
  globalMinCell = PhactoriSampledCellInfo()
  globalMaxCell = PhactoriSampledCellInfo()
  globalMinCell.SerializeSetFromFloatAndIntArray(globalSerializedFloatArray, globalSerializedIntArray, 0, tupleSize)
  globalMaxCell.SerializeSetFromFloatAndIntArray(globalSerializedFloatArray, globalSerializedIntArray, 1, tupleSize)
  if PhactoriDbg(100):
    myDebugPrint3("PhactoriLocalToGlobalCellsWithMinMaxDataUsingMPI returning\n")
  return [globalMinCell, globalMaxCell]
#phactori_combine_to_single_python_file_subpiece_end_1
|
en
| 0.669321
|
# Copyright(C) 1999-2020 National Technology & Engineering Solutions # of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with # NTESS, the U.S. Government retains certain rights in this software. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # * Neither the name of NTESS nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#phactori_combine_to_single_python_file_subpiece_begin_1 #returnCell = inInputCsData.GetCell(ii, jj, kk) #outStr = "[" + \ # str(self.cellTestPoint) + "," + \ # str(self.ijk) + "," + \ # str(self.dataTuple) + "," + \ # str(self.pid) + "," + \ # str(self.index) + "," + \ # str(self.segmentIndex) + "," + \ # str(self.collectionAxis) + \ # "]" #return outStr # i -> 0 # j -> 1 # k -> 2 # ij -> 3 # ik -> 4 # jk -> 5 # i -> 0 # j -> 1 # k -> 2 # ij -> 3 # ik -> 4 # jk -> 5 #floatsize, intsize = GetSerializeFloatAndIntSize() #used during recursion to set up the cell based on the ii, jj, kk indices #dataCell = inInputCsData.GetCell(ii, jj, kk) #no cells here #find overall min/max #phactori_combine_to_single_python_file_subpiece_end_1
| 0.822171
| 1
|
daal4py/sklearn/model_selection/_split.py
|
yumorozov/scikit-learn-intelex
| 1
|
6626193
|
#!/usr/bin/env python
#===============================================================================
# Copyright 2014 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from sklearn.utils import indexable
from sklearn.utils.validation import _num_samples
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
from sklearn.model_selection._split import _validate_shuffle_split
import daal4py as d4p
import numpy as np
from daal4py.sklearn._utils import PatchingConditionsChain
import platform
from .._device_offload import support_usm_ndarray
try:
from sklearn.utils import _safe_indexing as safe_indexing
except ImportError:
from sklearn.utils import safe_indexing
try:
import mkl_random
mkl_random_is_imported = True
except (ImportError, ModuleNotFoundError):
mkl_random_is_imported = False
try:
import pandas as pd
pandas_is_imported = True
except ImportError:
pandas_is_imported = False
def get_dtypes(data):
    """Return the dtype(s) of *data* as a list, or None when no known
    dtype-bearing attribute is present.

    Checks, in order: 'dtype' (ndarray/Series), 'dtypes' (DataFrame),
    'values' (generic pandas-like container).
    """
    for attr_name, to_dtype_list in (
        ('dtype', lambda v: [v]),
        ('dtypes', list),
        ('values', lambda v: [v.dtype]),
    ):
        if hasattr(data, attr_name):
            return to_dtype_list(getattr(data, attr_name))
    return None
@support_usm_ndarray(freefunc=True)
def _daal_train_test_split(*arrays, **options):
    """daal4py-accelerated drop-in for sklearn's train_test_split.

    Splits each array in *arrays* into train/test pairs and returns them
    interleaved: [a_train, a_test, b_train, b_test, ...].

    Extra keyword 'rng' selects the random number generator engine
    (default 'OPTIMIZED_MT19937'); remaining keywords mirror sklearn's
    test_size/train_size/random_state/stratify/shuffle.
    """
    n_arrays = len(arrays)
    if n_arrays == 0:
        raise ValueError("At least one array required as input")
    test_size = options.pop('test_size', None)
    train_size = options.pop('train_size', None)
    random_state = options.pop('random_state', None)
    stratify = options.pop('stratify', None)
    shuffle = options.pop('shuffle', True)
    rng = options.pop('rng', 'OPTIMIZED_MT19937')
    available_rngs = ['default', 'MT19937', 'SFMT19937', 'MT2203', 'R250',
                      'WH', 'MCG31', 'MCG59', 'MRG32K3A', 'PHILOX4X32X10',
                      'NONDETERM', 'OPTIMIZED_MT19937']
    if rng not in available_rngs:
        raise ValueError(
            "Wrong random numbers generator is chosen. "
            "Available generators: %s" % str(available_rngs)[1:-1])
    # any leftover keywords are unknown -- reject them like sklearn does
    if options:
        raise TypeError("Invalid parameters passed: %s" % str(options))
    arrays = indexable(*arrays)
    n_samples = _num_samples(arrays[0])
    n_train, n_test = _validate_shuffle_split(
        n_samples, test_size, train_size, default_test_size=0.25
    )
    if shuffle is False:
        if stratify is not None:
            raise ValueError(
                "Stratified train/test split is not implemented for shuffle=False")
        # deterministic contiguous split: first n_train rows train, next n_test test
        train = np.arange(n_train)
        test = np.arange(n_train, n_train + n_test)
    else:
        if stratify is not None:
            # stratified splitting is delegated entirely to sklearn
            cv = StratifiedShuffleSplit(
                test_size=n_test,
                train_size=n_train,
                random_state=random_state
            )
            train, test = next(cv.split(X=arrays[0], y=stratify))
        else:
            if mkl_random_is_imported and \
                    rng not in ['default', 'OPTIMIZED_MT19937'] and \
                    (isinstance(random_state, int) or random_state is None):
                # explicit MKL engine requested and mkl_random is available
                random_state = mkl_random.RandomState(random_state, rng)
                indexes = random_state.permutation(n_samples)
                test, train = indexes[:n_test], indexes[n_test:(
                    n_test + n_train)]
            elif rng == 'OPTIMIZED_MT19937' and \
                    (isinstance(random_state, int) or random_state is None) and \
                    platform.system() != 'Windows':
                # fast path: daal4py generates shuffled indices natively
                # (not available on Windows)
                indexes = np.empty(
                    shape=(n_samples,),
                    dtype=np.int64 if n_train + n_test > 2 ** 31 - 1 else np.int32
                )
                # seed the daal shuffle from numpy's MT19937 state vector
                random_state = np.random.RandomState(random_state)
                random_state = random_state.get_state()[1]
                d4p.daal_generate_shuffled_indices([indexes], [random_state])
                test, train = indexes[:n_test], indexes[n_test:(
                    n_test + n_train)]
            else:
                # fallback: plain sklearn shuffle split
                cv = ShuffleSplit(
                    test_size=n_test,
                    train_size=n_train,
                    random_state=random_state
                )
                train, test = next(cv.split(X=arrays[0], y=stratify))
    res = []
    for arr in arrays:
        _patching_status = PatchingConditionsChain(
            "sklearn.model_selection.train_test_split")

        # input format check
        _patching_status.and_conditions([
            (isinstance(arr, np.ndarray), "The input is not a np.ndarray object.")])
        if pandas_is_imported:
            _patching_status.or_conditions([
                (isinstance(arr, pd.core.frame.DataFrame),
                    "The input is not a pd.DataFrame object."),
                (isinstance(arr, pd.core.series.Series),
                    "The input is not a pd.Series object.")
            ], conditions_merging=any)

        # dimensions check
        _dal_ready = _patching_status.and_conditions([
            (hasattr(arr, 'ndim'), "The input does not have 'ndim' attribute.")])
        if hasattr(arr, 'ndim'):
            _patching_status.and_conditions([
                (arr.ndim <= 2, "The input has more than 2 dimensions.")])

        # data types check
        dtypes = get_dtypes(arr)
        _dal_ready = _patching_status.and_conditions([
            (dtypes is not None, "Unable to parse input data types.")])
        if dtypes is not None:
            incorrect_dtype = None
            for i, dtype in enumerate(dtypes):
                if 'float' not in str(dtype) and 'int' not in str(dtype):
                    incorrect_dtype = str(dtype)
                    break
            _dal_ready = _patching_status.and_conditions([
                (incorrect_dtype is None,
                    f"Input has incorrect data type '{incorrect_dtype}'. "
                    "Only integer and floating point types are supported.")])
        _patching_status.write_log()
        if not _dal_ready:
            # conditions failed -> fall back to sklearn-style indexing
            res.append(safe_indexing(arr, train))
            res.append(safe_indexing(arr, test))
        else:
            # daal4py fast path: copy rows into preallocated train/test buffers
            if len(arr.shape) == 2:
                n_cols = arr.shape[1]
                reshape_later = False
            else:
                n_cols = 1
                reshape_later = True  # restore 1-D shape after the 2-D split

            arr_copy = d4p.get_data(arr)
            if not isinstance(arr_copy, list):
                arr_copy = arr_copy.reshape(
                    (arr_copy.shape[0], n_cols),
                    order='A',
                )
            if isinstance(arr_copy, np.ndarray):
                # preserve the input's memory layout in the output buffers
                order = 'C' if arr_copy.flags['C_CONTIGUOUS'] else 'F'
                train_arr = np.empty(
                    shape=(n_train, n_cols),
                    dtype=arr_copy.dtype,
                    order=order,
                )
                test_arr = np.empty(
                    shape=(n_test, n_cols),
                    dtype=arr_copy.dtype,
                    order=order,
                )
                d4p.daal_train_test_split(
                    arr_copy, train_arr, test_arr, [train], [test]
                )
                if reshape_later:
                    train_arr, test_arr = train_arr.reshape(
                        (n_train,)), test_arr.reshape((n_test,))
            elif isinstance(arr_copy, list):
                # heterogeneous columns (e.g. mixed-dtype DataFrame):
                # split each column separately, then rebuild a dict of columns
                train_arr = [
                    np.empty(
                        shape=(n_train,),
                        dtype=el.dtype,
                        order='C' if el.flags['C_CONTIGUOUS'] else 'F',
                    ) for el in arr_copy
                ]
                test_arr = [
                    np.empty(
                        shape=(n_test,),
                        dtype=el.dtype,
                        order='C' if el.flags['C_CONTIGUOUS'] else 'F'
                    ) for el in arr_copy
                ]
                d4p.daal_train_test_split(
                    arr_copy, train_arr, test_arr, [train], [test])
                train_arr = {col: train_arr[i]
                             for i, col in enumerate(arr.columns)}
                test_arr = {col: test_arr[i]
                            for i, col in enumerate(arr.columns)}
            else:
                raise ValueError('Array can\'t be converted to needed format')

            if pandas_is_imported:
                # rebuild pandas containers with original column/series names
                # and the selected row indices
                if isinstance(arr, pd.core.frame.DataFrame):
                    train_arr, test_arr = pd.DataFrame(train_arr, columns=arr.columns), \
                        pd.DataFrame(test_arr, columns=arr.columns)
                if isinstance(arr, pd.core.series.Series):
                    train_arr, test_arr = \
                        train_arr.reshape(n_train), test_arr.reshape(n_test)
                    train_arr, test_arr = pd.Series(train_arr, name=arr.name), \
                        pd.Series(test_arr, name=arr.name)
                if hasattr(arr, 'index'):
                    train_arr.index = train
                    test_arr.index = test

            res.append(train_arr)
            res.append(test_arr)
    return res
|
#!/usr/bin/env python
#===============================================================================
# Copyright 2014 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from sklearn.utils import indexable
from sklearn.utils.validation import _num_samples
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
from sklearn.model_selection._split import _validate_shuffle_split
import daal4py as d4p
import numpy as np
from daal4py.sklearn._utils import PatchingConditionsChain
import platform
from .._device_offload import support_usm_ndarray
try:
from sklearn.utils import _safe_indexing as safe_indexing
except ImportError:
from sklearn.utils import safe_indexing
try:
import mkl_random
mkl_random_is_imported = True
except (ImportError, ModuleNotFoundError):
mkl_random_is_imported = False
try:
import pandas as pd
pandas_is_imported = True
except ImportError:
pandas_is_imported = False
def get_dtypes(data):
if hasattr(data, 'dtype'):
return [data.dtype]
if hasattr(data, 'dtypes'):
return list(data.dtypes)
if hasattr(data, 'values'):
return [data.values.dtype]
return None
@support_usm_ndarray(freefunc=True)
def _daal_train_test_split(*arrays, **options):
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
shuffle = options.pop('shuffle', True)
rng = options.pop('rng', 'OPTIMIZED_MT19937')
available_rngs = ['default', 'MT19937', 'SFMT19937', 'MT2203', 'R250',
'WH', 'MCG31', 'MCG59', 'MRG32K3A', 'PHILOX4X32X10',
'NONDETERM', 'OPTIMIZED_MT19937']
if rng not in available_rngs:
raise ValueError(
"Wrong random numbers generator is chosen. "
"Available generators: %s" % str(available_rngs)[1:-1])
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
n_train, n_test = _validate_shuffle_split(
n_samples, test_size, train_size, default_test_size=0.25
)
if shuffle is False:
if stratify is not None:
raise ValueError(
"Stratified train/test split is not implemented for shuffle=False")
train = np.arange(n_train)
test = np.arange(n_train, n_train + n_test)
else:
if stratify is not None:
cv = StratifiedShuffleSplit(
test_size=n_test,
train_size=n_train,
random_state=random_state
)
train, test = next(cv.split(X=arrays[0], y=stratify))
else:
if mkl_random_is_imported and \
rng not in ['default', 'OPTIMIZED_MT19937'] and \
(isinstance(random_state, int) or random_state is None):
random_state = mkl_random.RandomState(random_state, rng)
indexes = random_state.permutation(n_samples)
test, train = indexes[:n_test], indexes[n_test:(
n_test + n_train)]
elif rng == 'OPTIMIZED_MT19937' and \
(isinstance(random_state, int) or random_state is None) and \
platform.system() != 'Windows':
indexes = np.empty(
shape=(n_samples,),
dtype=np.int64 if n_train + n_test > 2 ** 31 - 1 else np.int32
)
random_state = np.random.RandomState(random_state)
random_state = random_state.get_state()[1]
d4p.daal_generate_shuffled_indices([indexes], [random_state])
test, train = indexes[:n_test], indexes[n_test:(
n_test + n_train)]
else:
cv = ShuffleSplit(
test_size=n_test,
train_size=n_train,
random_state=random_state
)
train, test = next(cv.split(X=arrays[0], y=stratify))
res = []
for arr in arrays:
_patching_status = PatchingConditionsChain(
"sklearn.model_selection.train_test_split")
# input format check
_patching_status.and_conditions([
(isinstance(arr, np.ndarray), "The input is not a np.ndarray object.")])
if pandas_is_imported:
_patching_status.or_conditions([
(isinstance(arr, pd.core.frame.DataFrame),
"The input is not a pd.DataFrame object."),
(isinstance(arr, pd.core.series.Series),
"The input is not a pd.Series object.")
], conditions_merging=any)
# dimensions check
_dal_ready = _patching_status.and_conditions([
(hasattr(arr, 'ndim'), "The input does not have 'ndim' attribute.")])
if hasattr(arr, 'ndim'):
_patching_status.and_conditions([
(arr.ndim <= 2, "The input has more than 2 dimensions.")])
# data types check
dtypes = get_dtypes(arr)
_dal_ready = _patching_status.and_conditions([
(dtypes is not None, "Unable to parse input data types.")])
if dtypes is not None:
incorrect_dtype = None
for i, dtype in enumerate(dtypes):
if 'float' not in str(dtype) and 'int' not in str(dtype):
incorrect_dtype = str(dtype)
break
_dal_ready = _patching_status.and_conditions([
(incorrect_dtype is None,
f"Input has incorrect data type '{incorrect_dtype}'. "
"Only integer and floating point types are supported.")])
_patching_status.write_log()
if not _dal_ready:
res.append(safe_indexing(arr, train))
res.append(safe_indexing(arr, test))
else:
if len(arr.shape) == 2:
n_cols = arr.shape[1]
reshape_later = False
else:
n_cols = 1
reshape_later = True
arr_copy = d4p.get_data(arr)
if not isinstance(arr_copy, list):
arr_copy = arr_copy.reshape(
(arr_copy.shape[0], n_cols),
order='A',
)
if isinstance(arr_copy, np.ndarray):
order = 'C' if arr_copy.flags['C_CONTIGUOUS'] else 'F'
train_arr = np.empty(
shape=(n_train, n_cols),
dtype=arr_copy.dtype,
order=order,
)
test_arr = np.empty(
shape=(n_test, n_cols),
dtype=arr_copy.dtype,
order=order,
)
d4p.daal_train_test_split(
arr_copy, train_arr, test_arr, [train], [test]
)
if reshape_later:
train_arr, test_arr = train_arr.reshape(
(n_train,)), test_arr.reshape((n_test,))
elif isinstance(arr_copy, list):
train_arr = [
np.empty(
shape=(n_train,),
dtype=el.dtype,
order='C' if el.flags['C_CONTIGUOUS'] else 'F',
) for el in arr_copy
]
test_arr = [
np.empty(
shape=(n_test,),
dtype=el.dtype,
order='C' if el.flags['C_CONTIGUOUS'] else 'F'
) for el in arr_copy
]
d4p.daal_train_test_split(
arr_copy, train_arr, test_arr, [train], [test])
train_arr = {col: train_arr[i]
for i, col in enumerate(arr.columns)}
test_arr = {col: test_arr[i]
for i, col in enumerate(arr.columns)}
else:
raise ValueError('Array can\'t be converted to needed format')
if pandas_is_imported:
if isinstance(arr, pd.core.frame.DataFrame):
train_arr, test_arr = pd.DataFrame(train_arr, columns=arr.columns), \
pd.DataFrame(test_arr, columns=arr.columns)
if isinstance(arr, pd.core.series.Series):
train_arr, test_arr = \
train_arr.reshape(n_train), test_arr.reshape(n_test)
train_arr, test_arr = pd.Series(train_arr, name=arr.name), \
pd.Series(test_arr, name=arr.name)
if hasattr(arr, 'index'):
train_arr.index = train
test_arr.index = test
res.append(train_arr)
res.append(test_arr)
return res
|
en
| 0.706353
|
#!/usr/bin/env python #=============================================================================== # Copyright 2014 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #=============================================================================== # input format check # dimensions check # data types check
| 2.067617
| 2
|
progressivis/table/tablechanges_base.py
|
jdfekete/progressivis
| 51
|
6626194
|
"""
Base class for object keeping track of changes in a Table/Column
"""
from abc import ABCMeta, abstractmethod
class BaseChanges(metaclass=ABCMeta):
    """Abstract base class for objects keeping track of changes in a Table."""

    def __str__(self):
        # Bug fix: the original returned type(self) (the class object itself),
        # which makes str(obj) raise TypeError because __str__ must return a
        # string. Return the concrete class name instead.
        return type(self).__name__

    @abstractmethod
    def add_created(self, locs):
        "Add ids of items created in the Table"

    @abstractmethod
    def add_updated(self, locs):
        "Add ids of items updated in the Table"

    @abstractmethod
    def add_deleted(self, locs):
        "Add ids of items deleted from the Table"

    @abstractmethod
    def compute_updates(self, last, now, mid, cleanup=True):
        "Compute and return the list of changes as an IndexUpdate or None"
        return None
|
"""
Base class for object keeping track of changes in a Table/Column
"""
from abc import ABCMeta, abstractmethod
class BaseChanges(metaclass=ABCMeta):
    """Abstract interface for objects that record changes made to a Table."""

    def __str__(self):
        # NOTE(review): this returns the class object rather than a string,
        # so calling str() on an instance raises TypeError. Preserved as-is
        # to keep behavior identical.
        return type(self)

    @abstractmethod
    def add_created(self, locs):
        """Record ids of items created in the Table."""

    @abstractmethod
    def add_updated(self, locs):
        """Record ids of items updated in the Table."""

    @abstractmethod
    def add_deleted(self, locs):
        """Record ids of items deleted from the Table."""

    @abstractmethod
    def compute_updates(self, last, now, mid, cleanup=True):
        """Compute and return the changes as an IndexUpdate, or None."""
        return None
|
en
| 0.872343
|
Base class for object keeping track of changes in a Table/Column
| 3.593965
| 4
|
dataloader.py
|
Rhcsky/cifar100-classification
| 0
|
6626195
|
import os
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
# import albumentations as A
# from albumentations.pytorch import ToTensorV2
def get_dataloader(args):
    """Build (train_loader, test_loader) DataLoaders for CIFAR-100.

    On first use the dataset is downloaded to ./data and the constructed
    loaders are serialized with torch.save to ./data/train_loader and
    ./data/test_loader; subsequent calls reload them from disk.

    Args:
        args: object providing ``batch_size`` and ``workers`` attributes.

    Returns:
        Tuple ``(train_loader, test_loader)`` of DataLoader objects.
    """
    train_dir = './data/train_loader'
    test_dir = './data/test_loader'
    # Fast path: reuse previously serialized loaders.
    # NOTE(review): the cache ignores args, so a changed batch_size or
    # workers value is silently not applied once the cache files exist;
    # pickling whole DataLoader objects is also fragile across torch
    # versions — confirm this caching is intended.
    if os.path.exists(train_dir) and os.path.exists(test_dir):
        print("Data already exist.")
        train_loader = torch.load(train_dir)
        test_loader = torch.load(test_dir)
        return train_loader, test_loader
    # Per-channel mean/std applied to both the train and test pipelines.
    normalize = transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2761))
    # Training augmentation: padded random crop plus horizontal flip.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    # transform_albumentations = A.Compose([
    #     A.Resize(36, 36),
    #     A.RandomCrop(32, 32),
    #     A.OneOf([
    #         A.HorizontalFlip(p=1),
    #         A.RandomRotate90(p=1),
    #         A.VerticalFlip(p=1)
    #     ], p=1),
    #     A.OneOf([
    #         A.MotionBlur(p=1),
    #         A.OpticalDistortion(p=1),
    #         A.GaussNoise(p=1)
    #     ], p=1),
    #     normalize,
    #     ToTensorV2(),
    # ])
    # Test pipeline: no augmentation, only tensor conversion + normalization.
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])
    train_dataset = datasets.CIFAR100('./data', train=True, download=True, transform=transform_train)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=True, num_workers=args.workers, pin_memory=True)
    test_loader = DataLoader(datasets.CIFAR100('./data', train=False, transform=transform_test),
                             batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    # Persist the loaders so the fast path above can skip reconstruction.
    torch.save(train_loader, train_dir)
    torch.save(test_loader, test_dir)
    print("Save train loader and test loader in './data/'")
    return train_loader, test_loader
class CustomDataset(Dataset):
    """Minimal map-style dataset over parallel image/label sequences."""

    def __init__(self, images, labels):
        # Keep references to the parallel sequences and cache the size
        # so __len__ stays O(1).
        self.images = images
        self.labels = labels
        self.length = len(self.images)

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        # Return the (image, label) pair at the given position.
        return self.images[index], self.labels[index]
|
import os
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
# import albumentations as A
# from albumentations.pytorch import ToTensorV2
def get_dataloader(args):
    """Build (train_loader, test_loader) DataLoaders for CIFAR-100.

    On first use the dataset is downloaded to ./data and the constructed
    loaders are serialized with torch.save to ./data/train_loader and
    ./data/test_loader; subsequent calls reload them from disk.

    Args:
        args: object providing ``batch_size`` and ``workers`` attributes.

    Returns:
        Tuple ``(train_loader, test_loader)`` of DataLoader objects.
    """
    train_dir = './data/train_loader'
    test_dir = './data/test_loader'
    # Fast path: reuse previously serialized loaders.
    # NOTE(review): the cache ignores args, so a changed batch_size or
    # workers value is silently not applied once the cache files exist —
    # confirm this caching is intended.
    if os.path.exists(train_dir) and os.path.exists(test_dir):
        print("Data already exist.")
        train_loader = torch.load(train_dir)
        test_loader = torch.load(test_dir)
        return train_loader, test_loader
    # Per-channel mean/std applied to both the train and test pipelines.
    normalize = transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2761))
    # Training augmentation: padded random crop plus horizontal flip.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    # transform_albumentations = A.Compose([
    #     A.Resize(36, 36),
    #     A.RandomCrop(32, 32),
    #     A.OneOf([
    #         A.HorizontalFlip(p=1),
    #         A.RandomRotate90(p=1),
    #         A.VerticalFlip(p=1)
    #     ], p=1),
    #     A.OneOf([
    #         A.MotionBlur(p=1),
    #         A.OpticalDistortion(p=1),
    #         A.GaussNoise(p=1)
    #     ], p=1),
    #     normalize,
    #     ToTensorV2(),
    # ])
    # Test pipeline: no augmentation, only tensor conversion + normalization.
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])
    train_dataset = datasets.CIFAR100('./data', train=True, download=True, transform=transform_train)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=True, num_workers=args.workers, pin_memory=True)
    test_loader = DataLoader(datasets.CIFAR100('./data', train=False, transform=transform_test),
                             batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    # Persist the loaders so the fast path above can skip reconstruction.
    torch.save(train_loader, train_dir)
    torch.save(test_loader, test_dir)
    print("Save train loader and test loader in './data/'")
    return train_loader, test_loader
class CustomDataset(Dataset):
    """Map-style dataset wrapping pre-loaded parallel image/label sequences."""

    def __init__(self, images, labels):
        # images and labels are parallel sequences indexed together in
        # __getitem__; labels must be at least as long as images.
        self.images = images
        self.labels = labels
        self.length = len(self.images)  # cached so __len__ is O(1)

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        image = self.images[index]
        label = self.labels[index]
        return image, label
|
en
| 0.337882
|
# import albumentations as A # from albumentations.pytorch import ToTensorV2 # transform_albumentations = A.Compose([ # A.Resize(36, 36), # A.RandomCrop(32, 32), # A.OneOf([ # A.HorizontalFlip(p=1), # A.RandomRotate90(p=1), # A.VerticalFlip(p=1) # ], p=1), # A.OneOf([ # A.MotionBlur(p=1), # A.OpticalDistortion(p=1), # A.GaussNoise(p=1) # ], p=1), # normalize, # ToTensorV2(), # ])
| 2.43962
| 2
|
tests/extractcode/test_archive.py
|
TechnicallyMay/scancode-toolkit
| 0
|
6626196
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import io
import ntpath
import os
import posixpath
from unittest.case import expectedFailure
from unittest.case import skipIf
import commoncode.date
from commoncode.testcase import FileBasedTesting
from commoncode import filetype
from commoncode import fileutils
from commoncode.system import on_linux
from commoncode.system import on_mac
from commoncode.system import on_windows
import typecode.contenttype
from extractcode_assert_utils import check_files
from extractcode_assert_utils import check_size
import extractcode
from extractcode import archive
from extractcode.archive import get_best_handler
from extractcode import ExtractErrorFailedToExtract
from extractcode import libarchive2
from extractcode import sevenzip
"""
For each archive type --when possible-- we are testing extraction of:
- basic, plain archive, no tricks
- with trailing data appended to archive
- broken, either truncated or with extra junk inserted
- with hardlinks and symlinks, either valid or broken when supported
- with hardlinks and symlinks loops (aka. tarbomb) when supported
- with FIFO, character, sparse and other special files when supported
- with relative paths pointing outside of the archive when supported
- with absolute paths when supported
- with invalid paths or mixed slash paths when supported
- with unicode or binary path names
- with duplicate names or paths when case is ignored
- password-protected when supported
"""
class TestSmokeTest(FileBasedTesting):
    """Smoke tests for extractor/handler selection in extractcode.archive."""

    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def check_get_extractors(self, test_file, expected, kinds=()):
        """Assert that archive.get_extractors returns `expected` for `test_file`,
        optionally restricted to archive `kinds`. The failure message includes
        the detected filetype/mimetype/extension to help diagnose mismatches.
        """
        test_loc = self.get_test_loc(test_file)
        if kinds:
            extractors = archive.get_extractors(test_loc, kinds)
        else:
            extractors = archive.get_extractors(test_loc)
        # Collect diagnostics for the assertion message only.
        ft = typecode.contenttype.get_type(test_loc).filetype_file
        mt = typecode.contenttype.get_type(test_loc).mimetype_file
        fe = fileutils.file_extension(test_loc).lower()
        em = ', '.join(e.__module__ + '.' + e.__name__ for e in extractors)
        msg = ('%(expected)r == %(extractors)r for %(test_file)s\n'
               'with ft:%(ft)r, mt:%(mt)r, fe:%(fe)r, em:%(em)s' % locals())
        assert expected == extractors, msg

    def test_get_extractors_1(self):
        test_file = 'archive/zip/basic.zip'
        expected = [archive.extract_zip]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_2(self):
        test_file = 'archive/rar/basic.rar'
        expected = [archive.extract_rar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_3(self):
        test_file = 'archive/deb/adduser_3.112ubuntu1_all.deb'
        expected = [archive.extract_ar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_4(self):
        test_file = 'archive/cpio/elfinfo-1.0-1.fc9.src.cpio'
        expected = [archive.extract_cpio]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_5(self):
        # RPMs yield a two-step extraction: rpm payload, then inner cpio.
        test_file = 'archive/rpm/elfinfo-1.0-1.fc9.src.rpm'
        expected = [archive.extract_rpm, archive.extract_cpio]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_6(self):
        test_file = 'archive/gzip/file_4.26-1.diff.gz'
        expected = [archive.uncompress_gzip]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_7(self):
        test_file = 'archive/ar/liby.a'
        expected = [archive.extract_ar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_8(self):
        test_file = 'archive/bz2/single_file_not_tarred.bz2'
        expected = [archive.uncompress_bzip2]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_9(self):
        test_file = 'archive/tar/tarred.tar'
        expected = [archive.extract_tar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_10(self):
        test_file = 'archive/tbz/tarred_bzipped.bz'
        expected = [archive.uncompress_bzip2]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_11(self):
        test_file = 'archive/tbz/tarred_bzipped.tar.bz2'
        expected = [archive.extract_tar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_12(self):
        test_file = 'archive/tbz/tarred_bzipped.tbz'
        expected = [archive.extract_tar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_13(self):
        test_file = 'archive/tgz/tarred_gzipped.gz'
        expected = [archive.uncompress_gzip]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_14(self):
        test_file = 'archive/tgz/tarred_gzipped.tar.gz'
        expected = [archive.extract_tar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_15(self):
        test_file = 'archive/tgz/tarred_gzipped.tgz'
        expected = [archive.extract_tar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_16(self):
        test_file = 'archive/7z/z.7z'
        expected = [archive.extract_7z]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_17(self):
        test_file = 'archive/Z/tr2tex.Z'
        expected = [archive.extract_Z, ]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_18(self):
        # .tar.Z: uncompress first, then untar.
        test_file = 'archive/Z/tkWWW-0.11.tar.Z'
        expected = [archive.extract_Z, archive.extract_tar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_19(self):
        test_file = 'archive/xar/xar-1.4.xar'
        expected = [archive.extract_xarpkg]
        self.check_get_extractors(test_file, expected)

    def test_get_extractor_with_kinds_deb(self):
        test_file = 'archive/deb/adduser_3.112ubuntu1_all.deb'
        expected = [archive.extract_deb]
        self.check_get_extractors(test_file, expected, (archive.package,))

    def test_get_extractor_with_kinds_rpm(self):
        # Without the `package` kind an RPM matches no extractor.
        test_file = 'archive/rpm/elfinfo-1.0-1.fc9.src.rpm'
        kinds = (archive.regular, archive.file_system, archive.docs)
        expected = []
        self.check_get_extractors(test_file, expected, kinds)

    def test_get_extractor_with_kinds_rpm_2(self):
        test_file = 'archive/rpm/elfinfo-1.0-1.fc9.src.rpm'
        kinds = (archive.regular, archive.file_system, archive.docs, archive.package)
        expected = [sevenzip.extract, libarchive2.extract]
        self.check_get_extractors(test_file, expected, kinds)

    def test_get_extractor_with_kinds_deb2(self):
        test_file = 'archive/deb/adduser_3.112ubuntu1_all.deb'
        expected = []
        self.check_get_extractors(test_file, expected, (archive.regular,))

    def test_get_extractor_with_kinds_ar(self):
        test_file = 'archive/ar/liby.a'
        kinds = (archive.regular, archive.file_system, archive.docs)
        expected = []
        self.check_get_extractors(test_file, expected, kinds)

    def test_get_extractor_with_kinds_bzip(self):
        test_file = 'archive/tbz/tarred_bzipped.tar.bz2'
        expected = []
        self.check_get_extractors(test_file, expected, (archive.package,))

    def test_get_extractor_with_kinds_plain_tar(self):
        # A plain tar is a `regular` archive, not a `package`.
        test_file = 'archive/tar/tarred.tar'
        expected = []
        self.check_get_extractors(test_file, expected, (archive.package,))
        expected = [archive.extract_tar]
        self.check_get_extractors(test_file, expected, (archive.regular,))

    def test_get_extractor_for_graffle_docs(self):
        # Doc formats are only extractible when the `docs` kind is requested.
        test_file = 'archive/graffle/example.graffle'
        expected = [archive.uncompress_gzip]
        self.check_get_extractors(test_file, expected, (archive.docs,))
        expected = []
        self.check_get_extractors(test_file, expected, kinds=extractcode.default_kinds)

    def test_get_extractor_for_compressed_svgz_docs(self):
        test_file = 'archive/svgz/insert-emptyframe.svgz'
        expected = [archive.uncompress_gzip]
        self.check_get_extractors(test_file, expected, (archive.docs,))
        expected = []
        self.check_get_extractors(test_file, expected, kinds=extractcode.default_kinds)

    def test_get_extractor_for_dia(self):
        # NOTE(review): unlike the other tests this resolves the test location
        # before calling check_get_extractors, which calls get_test_loc again —
        # presumably get_test_loc is a no-op on an already-resolved path; verify.
        test_file = self.get_test_loc('archive/dia/dia.dia')
        expected = [archive.uncompress_gzip]
        self.check_get_extractors(test_file, expected, kinds=extractcode.all_kinds)
        expected = []
        self.check_get_extractors(test_file, expected, kinds=extractcode.default_kinds)

    def test_get_handlers(self):
        # Each entry maps a test archive to the names of matching handlers.
        test_data = [
            ('archive/deb/adduser_3.112ubuntu1_all.deb', ['Debian package']),
            ('archive/rpm/elfinfo-1.0-1.fc9.src.rpm', ['RPM package']),
            ('archive/ar/liby.a', ['ar archive', 'Static Library']),
            ('archive/tar/tarred.tar', ['Tar', 'Ruby Gem package']),
            ('archive/tbz/tarred_bzipped.tar.bz2', ['bzip2', 'Tar bzip2']),
            ('archive/tbz/tarred_bzipped.bz', ['bzip2', 'Tar bzip2']),
            ('archive/tgz/tarred_gzipped.gz', ['Tar gzip', 'Gzip']),
            ('archive/gzip/mysql-arch.ARZ', ['Tar gzip', 'Gzip']),
        ]
        for test_file, expected in test_data:
            test_loc = self.get_test_loc(test_file)
            handlers = archive.get_handlers(test_loc)
            assert expected == [h[0].name for h in handlers]

    def test_score_handlers(self):
        # Each entry maps a test archive to (score, handler-name) pairs,
        # highest score first.
        test_data = [
            ('archive/deb/adduser_3.112ubuntu1_all.deb', [(31, 'Debian package')]),
            ('archive/rpm/elfinfo-1.0-1.fc9.src.rpm', [(32, 'RPM package')]),
            ('archive/ar/liby.a', [(31, 'Static Library'), (17, 'ar archive')]),
            ('archive/tar/tarred.tar', [(29, 'Tar'), (19, 'Ruby Gem package')]),
            ('archive/tbz/tarred_bzipped.tar.bz2', [(30, 'Tar bzip2'), (29, 'bzip2')]),
            ('archive/tbz/tarred_bzipped.bz', [(29, 'bzip2'), (18, 'Tar bzip2')]),
            ('archive/tgz/tarred_gzipped.gz', [(29, 'Gzip'), (18, 'Tar gzip')]),
            ('archive/gzip/mysql-arch.ARZ', [(29, 'Gzip'), (18, 'Tar gzip')]),
        ]
        for test_file, expected in test_data:
            test_loc = self.get_test_loc(test_file)
            handlers = archive.get_handlers(test_loc)
            scored = archive.score_handlers(handlers)
            assert expected == sorted([(h[0], h[1].name) for h in scored], reverse=True)

    def test_no_handler_is_selected_for_a_non_archive(self):
        # failed because of libmagic bug: http://bugs.gw.com/view.php?id=467
        # passing by introducing strict flag for handlers
        test_loc = self.get_test_loc('archive/not_archive/hashfile')
        assert [] == list(archive.get_handlers(test_loc))
        assert None == archive.get_extractor(test_loc)
        assert None == archive.get_extractor(test_loc, kinds=extractcode.all_kinds)
        assert not archive.should_extract(test_loc, kinds=extractcode.default_kinds)

    def test_no_handler_is_selected_for_a_non_archive2(self):
        # FWIW there is a related libmagic bug: http://bugs.gw.com/view.php?id=473
        test_loc = self.get_test_loc('archive/not_archive/wildtest.txt')
        assert [] == list(archive.get_handlers(test_loc))
        assert None == archive.get_extractor(test_loc)
        assert None == archive.get_extractor(test_loc, kinds=extractcode.all_kinds)
        assert not archive.should_extract(test_loc, kinds=extractcode.default_kinds)

    def test_no_handler_is_selected_for_a_non_archive3(self):
        test_loc = self.get_test_loc('archive/not_archive/savetransfer.c')
        assert [] == list(archive.get_handlers(test_loc))
        assert None == archive.get_extractor(test_loc)
        assert None == archive.get_extractor(test_loc, kinds=extractcode.all_kinds)
        assert not archive.should_extract(test_loc, kinds=extractcode.default_kinds)

    def test_7zip_extract_can_extract_to_relative_paths(self):
        # The setup is a tad complex because we want to have a relative dir
        # to the base dir where we run tests from, ie the scancode-toolkit/ dir
        # To use relative paths, we use our tmp dir at the root of the code tree
        from os.path import dirname, join, abspath
        import tempfile
        import shutil
        from extractcode.sevenzip import extract

        test_file = self.get_test_loc('archive/relative_path/basic.zip')
        scancode_root = dirname(dirname(dirname(__file__)))
        scancode_tmp = join(scancode_root, 'tmp')
        fileutils.create_dir(scancode_tmp)
        scancode_root_abs = abspath(scancode_root)
        # Strip the absolute prefix so both paths are relative to the tree root.
        test_src_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
        test_tgt_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
        shutil.copy(test_file, test_src_dir)
        test_src_file = join(test_src_dir, 'basic.zip')
        result = list(extract(test_src_file, test_tgt_dir))
        assert [] == result
        expected = ['c/a/a.txt', 'c/b/a.txt', 'c/c/a.txt']
        check_files(test_tgt_dir, expected)

    def test_libarchive_extract_can_extract_to_relative_paths(self):
        # The setup is a tad complex because we want to have a relative dir
        # to the base dir where we run tests from, ie the scancode-toolkit/ dir
        # To use relative paths, we use our tmp dir at the root of the code tree
        from os.path import dirname, join, abspath
        import tempfile
        import shutil
        from extractcode.libarchive2 import extract

        test_file = self.get_test_loc('archive/relative_path/basic.zip')
        scancode_root = dirname(dirname(dirname(__file__)))
        scancode_tmp = join(scancode_root, 'tmp')
        fileutils.create_dir(scancode_tmp)
        scancode_root_abs = abspath(scancode_root)
        test_src_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
        test_tgt_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
        shutil.copy(test_file, test_src_dir)
        test_src_file = join(test_src_dir, 'basic.zip')
        result = list(extract(test_src_file, test_tgt_dir))
        assert [] == result
        expected = ['c/a/a.txt', 'c/b/a.txt', 'c/c/a.txt']
        check_files(test_tgt_dir, expected)

    def test_windows_media_player_skins_are_zip(self):
        test_file = self.get_test_loc('archive/wmz/Go.wmz')
        extractors = archive.get_extractors(test_file)
        assert [archive.extract_zip] == extractors

    def test_windows_ntfs_wmz_are_sometimes_gzip(self):
        test_file = self.get_test_loc('archive/wmz/image003.wmz')
        extractors = archive.get_extractors(test_file)
        assert [archive.uncompress_gzip] == extractors
class BaseArchiveTestCase(FileBasedTesting):
    """Shared assertion helpers for the archive extraction test classes."""

    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def assertRaisesInstance(self, excInstance, callableObj,
                             *args, **kwargs):
        """
        This assertion accepts an instance instead of a class for refined
        exception testing.
        """
        excClass = excInstance.__class__
        try:
            callableObj(*args, **kwargs)
        except excClass as e:
            # Compare exception messages, not identities.
            self.assertEqual(str(excInstance), str(e))
        else:
            if hasattr(excClass, '__name__'):
                excName = excClass.__name__
            else:
                excName = str(excClass)
            raise self.failureException('%s not raised' % excName)

    def check_extract(self, test_function, test_file, expected, expected_warnings=None, check_all=False):
        """
        Run the extraction `test_function` on `test_file` checking that a map of
        expected paths --> size exist in the extracted target directory.
        Does not test the presence of all files unless `check_all` is True.
        """
        test_file = self.get_test_loc(test_file)
        test_dir = self.get_temp_dir()
        warnings = test_function(test_file, test_dir)
        if expected_warnings is not None:
            assert expected_warnings == warnings
        if check_all:
            # NOTE(review): `extracted` keys are test_dir-relative (prefix
            # sliced off) while `expected` keys are joined with test_dir,
            # i.e. absolute — these key sets look incomparable; confirm the
            # check_all branch behaves as intended.
            len_test_dir = len(test_dir)
            extracted = {path[len_test_dir:]: filetype.get_size(path) for path in fileutils.resource_iter(test_dir, with_dirs=False)}
            expected = {os.path.join(test_dir, exp_path): exp_size for exp_path, exp_size in expected.items()}
            assert sorted(expected.items()) == sorted(extracted.items())
        else:
            for exp_path, exp_size in expected.items():
                exp_loc = os.path.join(test_dir, exp_path)
                msg = '''When extracting: %(test_file)s
With function: %(test_function)r
Failed to find expected path: %(exp_loc)s'''
                assert os.path.exists(exp_loc), msg % locals()
                if exp_size is not None:
                    # A None size means "existence only"; otherwise sizes must match.
                    res_size = os.stat(exp_loc).st_size
                    msg = '''When extracting: %(test_file)s
With function: %(test_function)r
Failed to assert the correct size %(exp_size)d
Got instead: %(res_size)d
for expected path: %(exp_loc)s'''
                    assert exp_size == res_size, msg % locals()

    def collect_extracted_path(self, test_dir):
        """Return a sorted list of POSIX paths (dirs suffixed with '/')
        found under `test_dir`, relative to `test_dir`.
        """
        result = []
        td = fileutils.as_posixpath(test_dir)
        for t, dirs, files in os.walk(test_dir):
            t = fileutils.as_posixpath(t)
            for d in dirs:
                # NOTE(review): replace() removes `td` anywhere in the path,
                # not only as a leading prefix — fine for tmp dirs in practice.
                nd = posixpath.join(t, d).replace(td, '') + '/'
                result.append(nd)
            for f in files:
                nf = posixpath.join(t, f).replace(td, '')
                result.append(nf)
        result = sorted(result)
        return result

    def assertExceptionContains(self, text, callableObj, *args, **kwargs):
        """Assert that calling `callableObj` raises an exception whose
        message contains `text`.
        """
        try:
            callableObj(*args, **kwargs)
        except Exception as e:
            if text not in str(e):
                raise self.failureException(
                    'Exception %(e)r raised, '
                    'it should contain the text %(text)r '
                    'and does not' % locals())
        else:
            raise self.failureException(
                'Exception containing %(text)r not raised' % locals())
class TestTarGzip(BaseArchiveTestCase):
    """Extraction tests for .tar.gz / .tgz archives."""

    def test_extract_targz_basic(self):
        test_file = self.get_test_loc('archive/tgz/tarred_gzipped.tar.gz')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'e/a/b.txt')
        assert os.path.exists(result)

    def test_extract_targz_with_trailing_data(self):
        test_file = self.get_test_loc('archive/tgz/trailing.tar.gz')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'a.txt')
        assert os.path.exists(result)

    def test_extract_targz_broken(self):
        test_file = self.get_test_loc('archive/tgz/broken.tar.gz')
        test_dir = self.get_temp_dir()
        expected = Exception('Unrecognized archive format')
        self.assertRaisesInstance(expected, archive.extract_tar, test_file, test_dir)

    def test_extract_targz_with_absolute_path(self):
        # Absolute member paths must be re-rooted under test_dir,
        # never written to the filesystem root.
        non_result = '/tmp/subdir'
        assert not os.path.exists(non_result)
        test_dir = self.get_temp_dir()
        test_file = self.get_test_loc('archive/tgz/absolute_path.tar.gz')
        archive.extract_tar(test_file, test_dir)
        assert not os.path.exists(non_result)
        result = os.path.join(test_dir, 'tmp/subdir/a.txt')
        assert os.path.exists(result)

    def test_extract_targz_with_relative_path(self):
        test_file = self.get_test_loc('archive/tgz/relative.tar.gz')
        # NOTE(review): the string below is a no-op expression statement
        # (it follows the first statement, so it is not a docstring); it
        # only documents how the fixture was built.
        """
        This test file was created with:
            import tarfile
            tar = tarfile.open("TarTest.tar.gz", "w:gz")
            tar.add('a.txt', '../a_parent_folder.txt')
            tar.add('b.txt', '../../another_folder/b_two_root.txt')
            tar.add('b.txt', '../folder/subfolder/b_subfolder.txt')
            tar.close()
        """
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        # '..' segments are rewritten to 'dotdot' so nothing escapes test_dir.
        non_result = os.path.join(test_dir, '../a_parent_folder.txt')
        assert not os.path.exists(non_result)
        expected = [
            'dotdot/dotdot/another_folder/b_two_root.txt',
            'dotdot/a_parent_folder.txt',
            'dotdot/folder/subfolder/b_subfolder.txt'
        ]
        check_files(test_dir, expected)

    def test_extract_targz_with_trailing_data2(self):
        # An archive with trailing junk must extract identically to the
        # same archive without it.
        test_dir1 = self.get_temp_dir()
        test_file = self.get_test_loc('archive/tgz/trailing2.tar.gz')
        archive.extract_tar(test_file, test_dir1)

        test_dir2 = self.get_temp_dir()
        test_file2 = self.get_test_loc('archive/tgz/no_trailing.tar.gz')
        archive.extract_tar(test_file2, test_dir2)
        assert commoncode.testcase.is_same(test_dir1, test_dir2)

    def test_extract_targz_with_mixed_case_and_symlink(self):
        test_file = self.get_test_loc('archive/tgz/mixed_case_and_symlink.tgz')
        test_dir = self.get_temp_dir()
        result = archive.extract_tar(test_file, test_dir)
        assert [] == result
        import json
        exp_file = self.get_test_loc('archive/tgz/mixed_case_and_symlink.tgz.expected')
        with io.open(exp_file, encoding='utf-8') as ef:
            expected_files = json.load(ef)
        check_files(test_dir, map(str, expected_files))

    def test_extract_targz_symlinks(self):
        test_file = self.get_test_loc('archive/tgz/symlink.tar.gz')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        expected = [
            'z/x/a',
            # these are skipped
            # this is a link: a -> ../x/a
            # 'z/y/a',
            # this is a broken link: x.a -> ../x.a
            # 'z/y/x.a',
            # this is a broken link: broken -> ../x/broken
            # 'z/z/broken',
        ]
        check_files(test_dir, expected)

    def test_extract_targz_from_apache_should_not_return_errors(self):
        # from http://archive.apache.org/dist/commons/logging/source/commons-logging-1.1.2-src.tar.gz
        # failed with ReadError('not a bzip2 file',)
        test_file = self.get_test_loc('archive/tgz/commons-logging-1.1.2-src.tar.gz')
        test_dir = self.get_temp_dir()
        extractor = archive.get_extractor(test_file)
        assert archive.extract_tar == extractor
        result = archive.extract_tar(test_file, test_dir)
        assert [] == result
        assert os.listdir(test_dir)

    def test_extract_targz_with_unicode_path_should_extract_without_error(self):
        test_file = self.get_test_loc('archive/tgz/tgz_unicode.tgz')
        test_dir = self.get_temp_dir()
        extractor = archive.get_extractor(test_file)
        assert archive.extract_tar == extractor
        result = archive.extract_tar(test_file, test_dir)
        assert [] == result
        assert os.listdir(test_dir)
class TestGzip(BaseArchiveTestCase):
    """Tests for uncompressing plain (non-tar) gzip streams; the result is
    written to a single '<name>-extract' file in the target directory."""

    def test_uncompress_gzip_basic(self):
        test_file = self.get_test_loc('archive/gzip/file_4.26-1.diff.gz')
        test_dir = self.get_temp_dir()
        archive.uncompress_gzip(test_file, test_dir)
        result = os.path.join(test_dir, 'file_4.26-1.diff.gz-extract')
        assert os.path.exists(result)

    def test_uncompress_concatenated_gzip(self):
        # Archive created with:
        # echo "f1content" > f1
        # echo "f2content" > f2
        # gzip -k f1
        # gzip -k -c f2 >> twofiles.gz
        test_file = self.get_test_loc('archive/gzip/twofiles.gz')
        test_dir = self.get_temp_dir()
        warnings = archive.uncompress_gzip(test_file, test_dir)
        result = os.path.join(test_dir, 'twofiles.gz-extract')
        assert os.path.exists(result)
        # Both concatenated members must be decompressed back-to-back.
        assert b'f1content\nf2content\n' == open(result, 'rb').read()
        assert [] == warnings

    def test_uncompress_gzip_with_trailing_data(self):
        test_file = self.get_test_loc('archive/gzip/trailing_data.gz')
        test_dir = self.get_temp_dir()
        warnings = archive.uncompress_gzip(test_file, test_dir)
        result = os.path.join(test_dir, 'trailing_data.gz-extract')
        assert os.path.exists(result)
        assert [] == warnings

    def test_uncompress_gzip_with_leading_data(self):
        # even though we do not fail when there is invalid trailing data we
        # should still fail on invalid leading data
        test_file = self.get_test_loc('archive/gzip/leading_data.gz')
        test_dir = self.get_temp_dir()
        expected = Exception('Not a gzipped file')
        self.assertRaisesInstance(expected, archive.uncompress_gzip, test_file, test_dir)

    def test_uncompress_gzip_with_random_data(self):
        test_file = self.get_test_loc('archive/gzip/random_binary.data')
        test_dir = self.get_temp_dir()
        expected = Exception('Not a gzipped file')
        self.assertRaisesInstance(expected, archive.uncompress_gzip, test_file, test_dir)

    def test_uncompress_gzip_with_backslash_in_path(self):
        # weirdly enough, gzip keeps the original path/name
        test_file = self.get_test_loc('archive/gzip/backslash_path.gz')
        test_dir = self.get_temp_dir()
        archive.uncompress_gzip(test_file, test_dir)
        result = os.path.join(test_dir, 'backslash_path.gz-extract')
        assert os.path.exists(result)

    def test_uncompress_gzip_can_uncompress_windows_ntfs_wmz(self):
        test_file = self.get_test_loc('archive/wmz/image003.wmz')
        test_dir = self.get_temp_dir()
        archive.uncompress_gzip(test_file, test_dir)
        result = os.path.join(test_dir, 'image003.wmz-extract')
        assert os.path.exists(result)

    def test_uncompress_gzip_can_uncompress_mysql_arz(self):
        test_file = self.get_test_loc('archive/gzip/mysql-arch.ARZ')
        test_dir = self.get_temp_dir()
        archive.uncompress_gzip(test_file, test_dir)
        result = os.path.join(test_dir, 'mysql-arch.ARZ-extract')
        assert os.path.exists(result)
class TestTarBz2(BaseArchiveTestCase):
    """Extraction tests for .tar.bz2 / .tbz archives."""

    def test_extract_tar_bz2_basic(self):
        test_file = self.get_test_loc('archive/tbz/tarred_bzipped.tar.bz2')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'e/a/b.txt')
        assert os.path.exists(result)

    def test_extract_tar_bz2_basic_bz(self):
        test_file = self.get_test_loc('archive/tbz/tarred_bzipped.bz')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'e/a/b.txt')
        assert os.path.exists(result)

    def test_extract_tar_bz2_with_trailing_data__and_wrong_extension(self):
        # Extension is .tar.gz but the content is tar.bz2; extraction must
        # go by content, not extension.
        test_file = self.get_test_loc('archive/tbz/single_file_trailing_data.tar.gz')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'a.txt')
        assert os.path.exists(result)

    def test_extract_tar_bz2_broken(self):
        test_file = self.get_test_loc('archive/tbz/tarred_bzipped_broken.tar.bz2')
        test_dir = self.get_temp_dir()
        expected = Exception('bzip decompression failed')
        self.assertRaisesInstance(expected, archive.extract_tar, test_file, test_dir)

    def test_extract_tar_bz2_absolute_path(self):
        # Absolute member paths must be re-rooted under test_dir.
        assert not os.path.exists('/tmp/subdir')
        test_dir = self.get_temp_dir()
        test_file = self.get_test_loc('archive/tbz/absolute_path.tar.bz2')
        archive.extract_tar(test_file, test_dir)
        assert not os.path.exists('/tmp/subdir')
        result = os.path.join(test_dir, 'tmp/subdir/a.txt')
        assert os.path.exists(result)

    def test_extract_tar_bz2_relative_path(self):
        test_file = self.get_test_loc('archive/tbz/bz2withtar_relative.tar.bz2')
        # NOTE(review): the string below is a no-op expression statement
        # (not a docstring); it only documents how the fixture was built.
        """
        This test file was created with:
            import tarfile
            tar = tarfile.open("TarTest.tar.gz", "w:bz")
            tar.add('a.txt', '../a_parent_folder.txt')
            tar.add('b.txt', '../../another_folder/b_two_root.txt')
            tar.add('b.txt', '../folder/subfolder/b_subfolder.txt')
            tar.close()
        """
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        # '..' segments are rewritten to 'dotdot' so nothing escapes test_dir.
        non_result = os.path.join(test_dir, '../a_parent_folder.txt')
        assert not os.path.exists(non_result)
        result = os.path.join(test_dir, 'dotdot/folder/subfolder/b_subfolder.txt')
        assert os.path.exists(result)
        result = os.path.join(test_dir, 'dotdot', 'a_parent_folder.txt')
        assert os.path.exists(result)

    def test_extract_tar_bz2_iproute(self):
        test_file = self.get_test_loc('archive/tbz/iproute2.tar.bz2')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'iproute2/README')
        assert os.path.exists(result)

    def test_extract_tar_bz2_multistream(self):
        # Multi-stream bzip2 content must decompress to the full payload.
        test_file = self.get_test_loc('archive/tbz/bzip2_multistream/example-file.csv.tar.bz2')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        expected = self.get_test_loc('archive/tbz/bzip2_multistream/example-file.csv')
        result = os.path.join(test_dir, 'example-file.csv')
        assert open(expected, 'rb').read() == open(result, 'rb').read()
class TestBz2(BaseArchiveTestCase):
    """Tests for plain (non-tarred) bzip2 files handled by uncompress_bzip2."""

    def test_uncompress_bzip2_basic(self):
        test_file = self.get_test_loc('archive/bz2/single_file_not_tarred.bz2')
        test_dir = self.get_temp_dir()
        archive.uncompress_bzip2(test_file, test_dir)
        # uncompress_bzip2 writes its output to an "<archive name>-extract" file
        result = os.path.join(test_dir, 'single_file_not_tarred.bz2-extract')
        assert os.path.exists(result)

    def test_uncompress_bzip2_with_trailing_data(self):
        test_file = self.get_test_loc('archive/bz2/single_file_trailing_data.bz2')
        test_dir = self.get_temp_dir()
        archive.uncompress_bzip2(test_file, test_dir)
        result = os.path.join(test_dir, 'single_file_trailing_data.bz2-extract')
        assert os.path.exists(result)

    def test_uncompress_bzip2_broken(self):
        test_file = self.get_test_loc('archive/bz2/bz2_not_tarred_broken.bz2')
        test_dir = self.get_temp_dir()
        expected = Exception('invalid data stream')
        self.assertRaisesInstance(expected, archive.uncompress_bzip2,
                                  test_file, test_dir)

    def test_uncompress_bzip2_with_invalid_path(self):
        test_file = self.get_test_loc('archive/bz2/bz_invalidpath.bz2')
        test_dir = self.get_temp_dir()
        archive.uncompress_bzip2(test_file, test_dir)
        result = os.path.join(test_dir, 'bz_invalidpath.bz2-extract')
        assert os.path.exists(result)

    def test_uncompress_bzip2_multistream(self):
        test_file = self.get_test_loc('archive/bz2/bzip2_multistream/example-file.csv.bz2')
        test_dir = self.get_temp_dir()
        archive.uncompress_bzip2(test_file, test_dir)
        expected = self.get_test_loc('archive/bz2/bzip2_multistream/expected.csv')
        result = os.path.join(test_dir, 'example-file.csv.bz2-extract')
        # use context managers so file handles are closed, not leaked
        with open(expected, 'rb') as expected_file:
            with open(result, 'rb') as result_file:
                assert expected_file.read() == result_file.read()

    def test_sevenzip_extract_can_handle_bz2_multistream_differently(self):
        test_file = self.get_test_loc('archive/bz2/bzip2_multistream/example-file.csv.bz2')
        test_dir = self.get_temp_dir()
        sevenzip.extract(test_file, test_dir)
        expected = self.get_test_loc('archive/bz2/bzip2_multistream/expected.csv')
        # the extraction dir is not created with suffix by z7
        result = os.path.join(test_dir, 'example-file.csv')
        # close handles deterministically and name each side unambiguously
        # (the old local "expected_result" actually held the expected bytes)
        with open(expected, 'rb') as expected_file:
            expected_bytes = expected_file.read()
        with open(result, 'rb') as result_file:
            result_bytes = result_file.read()
        assert expected_bytes == result_bytes
class TestShellArchives(BaseArchiveTestCase):
    """Tests for self-executable Spring Boot jars: a zip with a shell prefix."""

    def test_extract_springboot(self):
        # a self executable springboot Jar is a zip with a shell script prefix
        source = self.get_test_loc('archive/shar/demo-spring-boot.jar')
        target = self.get_temp_dir()
        warnings = archive.extract_springboot(source, target)
        assert warnings == []
        check_files(target, ['META-INF/MANIFEST.MF', 'application.properties'])

    def test_springboot_is_not_recognized_without_jar_extension(self):
        source = self.get_test_loc('archive/shar/demo-spring-boot.sh')
        assert get_best_handler(source) is None

    def test_springboot_is_recognized_with_jar_extension(self):
        source = self.get_test_loc('archive/shar/demo-spring-boot.jar')
        assert 'Springboot Java Jar package' == get_best_handler(source).name
class TestZip(BaseArchiveTestCase):
    """Tests for zip extraction, including hostile relative/absolute paths."""

    # Expected extraction layouts for archive/zip/relative_nested.zip, shared
    # by several tests below via self.expected_... references.
    # FIX: these used to be locals inside
    # test_extract_zip_with_relative_path_simple, so the self.expected_...
    # lookups in other tests failed with AttributeError; they must be class
    # attributes.
    expected_deeply_nested_relative_path = [
        '/dotdot/',
        '/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/a_parent_folder.txt',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_1.txt',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_3.txt',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_2.txt'
    ]

    # somehow Windows fails randomly and only on certain windows machines at
    # Appveyor so we retest with a skinny expectation
    expected_deeply_nested_relative_path_alternative = [
        u'/a_parent_folder.txt',
        u'/sub/',
        u'/sub/sub/',
        u'/sub/sub/sub/',
        u'/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_1.txt',
        u'/sub/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_2.txt',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_3.txt']

    def test_extract_zip_basic(self):
        test_file = self.get_test_loc('archive/zip/basic.zip')
        test_dir = self.get_temp_dir()
        result = archive.extract_zip(test_file, test_dir)
        assert [] == result
        expected = ['c/a/a.txt', 'c/b/a.txt', 'c/c/a.txt']
        check_files(test_dir, expected)

    def test_extract_zip_broken(self):
        test_file = self.get_test_loc('archive/zip/zip_broken.zip')
        test_dir = self.get_temp_dir()
        self.assertRaises(Exception, archive.extract_zip, test_file, test_dir)
        # note: broken zip opens and extracts with 7z with exceptions sometimes
        # something is extracted in latest 7z
        # result = os.path.join(test_dir, 'a.txt')
        # print(test_dir)
        # assert os.path.exists(result)

    def test_extract_zip_with_invalid_path(self):
        test_file = self.get_test_loc('archive/zip/zip_invalidpath.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        result = os.path.join(test_dir, 'this/that')
        assert os.path.exists(result)

    def test_extract_zip_with_trailing_data(self):
        test_file = self.get_test_loc('archive/zip/zip_trailing_data.zip')
        test_dir = self.get_temp_dir()
        try:
            archive.extract_zip(test_file, test_dir)
        except libarchive2.ArchiveError as ae:
            assert 'Invalid central directory signature' in str(ae)
        # fails because of https://github.com/libarchive/libarchive/issues/545
        result = os.path.join(test_dir, 'a.txt')
        assert os.path.exists(result)

    def test_extract_zip_with_trailing_data2(self):
        # test archive created on cygwin with:
        # $ echo "test content" > f1
        # $ zip test f1
        # $ echo "some junk" >> test.zip
        test_file = self.get_test_loc('archive/zip/zip_trailing_data2.zip')
        test_dir = self.get_temp_dir()
        try:
            archive.extract_zip(test_file, test_dir)
        except libarchive2.ArchiveError as ae:
            assert 'Invalid central directory signature' in str(ae)
        # fails because of https://github.com/libarchive/libarchive/issues/545
        result = os.path.join(test_dir, 'f1')
        assert os.path.exists(result)

    def test_extract_zip_with_relative_path_simple(self):
        # The test files for this test and the next one were created with:
        # from zipfile import ZipFile
        # f = open('/tmp/a.txt', 'w')
        # f.write('some data')
        # f.close()
        # f = open('/tmp/b.txt', 'w')
        # f.write('some data')
        # f.close()
        # f = ZipFile(os.path.join(self.get_test_loc('archive'), 'relative_parent_folders.zip'), 'w')
        # f.write('/tmp/a.txt', '../a_parent_folder.txt')
        # f.write('/tmp/b.txt', '../../another_folder/b_two_root.txt')
        # f.write('/tmp/b.txt', '../folder/subfolder/b_subfolder.txt')
        # f.close()
        # f = ZipFile(os.path.join(self.get_test_loc('archive'), 'high_ancest.zip'), 'w')
        # f.write('/tmp/a.txt', ('../' * 12) + 'a_parent_folder.txt')
        # f.write('/tmp/a.txt', ('../' * 12) + ('sub/' * 6) + 'a_parent_folder_in_sub_1.txt')
        # f.write('/tmp/a.txt', ('../' * 6) + ('sub/' * 12) + 'a_parent_folder_in_sub_2.txt')
        # f.write('/tmp/a.txt', ('../' * 12) + ('sub/' * 12) + 'a_parent_folder_in_sub_3.txt')
        # f.close()
        test_file = self.get_test_loc('archive/zip/relative_parent_folders.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        abs_path = os.path.join(test_dir, '../a_parent_folder.txt')
        assert not os.path.exists(abs_path)
        result = self.collect_extracted_path(test_dir)
        expected = [
            '/dotdot/',
            '/dotdot/a_parent_folder.txt',
            '/dotdot/dotdot/',
            '/dotdot/dotdot/another_folder/',
            '/dotdot/dotdot/another_folder/b_two_root.txt',
            '/dotdot/folder/',
            '/dotdot/folder/subfolder/',
            '/dotdot/folder/subfolder/b_subfolder.txt'
        ]
        assert expected == result

    def test_extract_zip_with_relative_path_deeply_nested(self):
        test_file = self.get_test_loc('archive/zip/relative_nested.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        result = self.collect_extracted_path(test_dir)
        try:
            assert self.expected_deeply_nested_relative_path == result
        except AssertionError:
            # some Windows machines produce the flatter alternative layout;
            # a bare except here previously masked any error, not just a
            # failed assertion
            assert self.expected_deeply_nested_relative_path_alternative == result

    @skipIf(on_windows, 'Expectation are different on Windows')
    def test_extract_zip_with_relative_path_deeply_nested_with_7zip_posix(self):
        test_file = self.get_test_loc('archive/zip/relative_nested.zip')
        test_dir = self.get_temp_dir()
        try:
            sevenzip.extract(test_file, test_dir)
            self.fail('Should raise an exception')
        except ExtractErrorFailedToExtract as e:
            assert 'Unknown extraction error' == str(e)

    @skipIf(not on_windows, 'Expectation are different on Windows')
    def test_extract_zip_with_relative_path_deeply_nested_with_7zip_windows(self):
        test_file = self.get_test_loc('archive/zip/relative_nested.zip')
        test_dir = self.get_temp_dir()
        sevenzip.extract(test_file, test_dir)
        result = self.collect_extracted_path(test_dir)
        assert self.expected_deeply_nested_relative_path_alternative == result

    def test_list_zip_with_relative_path_deeply_nested_with_7zip(self):
        test_file = self.get_test_loc('archive/zip/relative_nested.zip')
        result = []
        for entry in sevenzip.list_entries(test_file):
            if on_windows:
                entry.path = entry.path.replace('\\', '/')
            result.append(entry.to_dict())
        expected = [
            {u'is_broken_link': False,
             u'is_dir': False,
             u'is_file': True,
             u'is_hardlink': False,
             u'is_special': False,
             u'is_symlink': False,
             u'link_target': None,
             u'path': '../../../../../../../../../../../../a_parent_folder.txt',
             u'size': '9'},
            {u'is_broken_link': False,
             u'is_dir': False,
             u'is_file': True,
             u'is_hardlink': False,
             u'is_special': False,
             u'is_symlink': False,
             u'link_target': None,
             u'path': '../../../../../../../../../../../../sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_1.txt',
             u'size': '9'},
            {u'is_broken_link': False,
             u'is_dir': False,
             u'is_file': True,
             u'is_hardlink': False,
             u'is_special': False,
             u'is_symlink': False,
             u'link_target': None,
             u'path': '../../../../../../sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_2.txt',
             u'size': '9'},
            {u'is_broken_link': False,
             u'is_dir': False,
             u'is_file': True,
             u'is_hardlink': False,
             u'is_special': False,
             u'is_symlink': False,
             u'link_target': None,
             u'path': '../../../../../../../../../../../../sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_3.txt',
             u'size': '9'}]
        assert expected == result

    def test_extract_zip_with_relative_path_deeply_nested_with_libarchive(self):
        test_file = self.get_test_loc('archive/zip/relative_nested.zip')
        test_dir = self.get_temp_dir()
        libarchive2.extract(test_file, test_dir)
        result = self.collect_extracted_path(test_dir)
        assert self.expected_deeply_nested_relative_path == result

    def test_extract_zip_with_password(self):
        test_file = self.get_test_loc('archive/zip/zip_password_nexb.zip')
        test_dir = self.get_temp_dir()
        try:
            archive.extract_zip(test_file, test_dir)
        except Exception as e:
            assert isinstance(e, ExtractErrorFailedToExtract)
            assert 'Password protected archive, unable to extract' in str(e)
        else:
            # the test used to pass silently when no exception was raised
            self.fail('extract_zip should fail on a password-protected zip')

    def test_extract_zip_java_jar(self):
        test_file = self.get_test_loc('archive/zip/jar/simple.jar')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        extracted = self.collect_extracted_path(test_dir)
        expected = [
            '/META-INF/',
            '/META-INF/MANIFEST.MF',
            '/org/',
            '/org/jvnet/',
            '/org/jvnet/glassfish/',
            '/org/jvnet/glassfish/comms/',
            '/org/jvnet/glassfish/comms/sipagent/',
            '/org/jvnet/glassfish/comms/sipagent/actions/',
            '/org/jvnet/glassfish/comms/sipagent/actions/Bundle.properties',
            '/org/jvnet/glassfish/comms/sipagent/actions/SipAgentCookieAction.class',
            '/org/jvnet/glassfish/comms/sipagent/actions/bd.png',
            '/org/jvnet/glassfish/comms/sipagent/actions/bd24.png',
            '/org/jvnet/glassfish/comms/sipagent/org-jvnet-glassfish-comms-sipagent-actions-SipAgentCookieAction.instance',
            '/org/jvnet/glassfish/comms/sipagent/org-jvnet-glassfish-comms-sipagent-actions-SipAgentCookieAction_1.instance'
        ]
        assert sorted(expected) == sorted(extracted)

    def test_extract_zip_with_duplicated_lowercase_paths(self):
        test_file = self.get_test_loc('archive/zip/dup_names.zip')
        expected = {'META-INF/license/': None,  # a directory
                    'META-INF/license/LICENSE.base64.txt': 1618,
                    'META-INF/LICENSE_1': 11366}
        self.check_extract(archive.extract_zip, test_file, expected)

    def test_extract_zip_with_timezone(self):
        test_file = self.get_test_loc('archive/zip/timezone/c.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        expected = [
            (os.path.join(test_dir, 'c/a/a.txt'), '2008-07-29'),
            (os.path.join(test_dir, 'c/b/a.txt'), '2008-07-29'),
            (os.path.join(test_dir, 'c/c/a.txt'), '2008-07-29'),
        ]
        # DST sends a monkey wrench.... so we only test the date, not the time
        for loc, expected_date in expected:
            result = commoncode.date.get_file_mtime(loc)
            assert result.startswith(expected_date)

    def test_extract_zip_with_timezone_2(self):
        test_file = self.get_test_loc('archive/zip/timezone/projecttest.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        # DST sends a monkey wrench.... so we only test the date, not the time
        # and we accept some varation in the date ...
        expected = [
            (os.path.join(test_dir, 'primes.txt'), ('2009-12-05', '2009-12-06',)),
            (os.path.join(test_dir, 'primes2.txt'), ('2009-12-05', '2009-12-06',))
        ]
        for loc, expected_date in expected:
            result = commoncode.date.get_file_mtime(loc)
            assert result.startswith(expected_date)

    def test_extract_zip_with_backslash_in_path_1(self):
        test_file = self.get_test_loc('archive/zip/backslash/backslash1.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        # Info-ZIP 'zip' displays:
        # warning: booxw-1202-bin.distribution.zip appears to use
        # backslashes as path separators (which is the right thing to do)
        expected = ['scripts/AutomaticClose.int']
        check_files(test_dir, expected)
        result = os.path.join(test_dir, 'scripts/AutomaticClose.int')
        assert os.path.exists(result)

    def test_extract_zip_with_backslash_in_path_2(self):
        test_file = self.get_test_loc('archive/zip/backslash/AspectJTest.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        expected = '''
            AspectJTest/.classpath
            AspectJTest/.project
            AspectJTest/src/META-INF/aop.xml
            AspectJTest/src/p3/ExpertFlyable.java
            AspectJTest/src/p3/MakeFlyableAspect.java
            AspectJTest/src/p3/Flyable.java
            AspectJTest/src/p3/MakeFlyable.java
            AspectJTest/src/p3/Main2.java
            AspectJTest/src/p3/p4/Person.java
            AspectJTest/src/p2/MyLoggingAspect.java
            AspectJTest/src/p1/MyService.java
            AspectJTest/src/p1/Main1.java
            AspectJTest/bin/META-INF/aop.xml
            AspectJTest/bin/p3/MakeFlyableAspect.class
            AspectJTest/bin/p3/ExpertFlyable.class
            AspectJTest/bin/p3/Flyable.class
            AspectJTest/bin/p3/Main2.class
            AspectJTest/bin/p3/MakeFlyable.class
            AspectJTest/bin/p3/p4/Person.class
            AspectJTest/bin/p2/MyLoggingAspect.class
            AspectJTest/bin/p1/Main1.class
            AspectJTest/bin/p1/MyService.class
        '''.split()
        check_files(test_dir, expected)

    def test_extract_zip_with_backslash_in_path_3(self):
        test_file = self.get_test_loc('archive/zip/backslash/boo-0.3-src.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        result = os.path.join(test_dir, 'src/Boo.Lang.Compiler/TypeSystem/InternalCallableType.cs')
        assert os.path.exists(result)

    def test_get_best_handler_nuget_is_selected_over_zip(self):
        test_file = self.get_test_loc('archive/zip/moq.4.2.1507.118.nupkg')
        handler = get_best_handler(test_file)
        assert archive.NugetHandler == handler

    def test_get_best_handler_nuget_is_selected_over_zip2(self):
        test_file = self.get_test_loc('archive/zip/exceptionhero.javascript.1.0.5.nupkg')
        handler = get_best_handler(test_file)
        assert archive.NugetHandler == handler

    def test_get_best_handler_nuget_is_selected_over_zip3(self):
        test_file = self.get_test_loc('archive/zip/javascript-fastclass.1.1.729.121805.nupkg')
        handler = get_best_handler(test_file)
        assert archive.NugetHandler == handler

    def test_extract_zip_can_extract_windows_media_player_skins(self):
        test_file = self.get_test_loc('archive/wmz/Go.wmz')
        test_dir = self.get_temp_dir()
        result = archive.extract_zip(test_file, test_dir)
        assert [] == result
        expected = ['32px.png', 'go.js', 'go.wms']
        check_files(test_dir, expected)

    def test_extract_zip_with_unicode_path_should_extract_without_error(self):
        test_file = self.get_test_loc('archive/zip/zip_unicode.zip')
        test_dir = self.get_temp_dir()
        result = archive.extract_zip(test_file, test_dir)
        assert [] == result
        assert os.listdir(test_dir)

    def test_extract_zip_can_extract_zip_with_directory_not_marked_with_trailing_slash(self):
        test_file = self.get_test_loc('archive/zip/directory-with-no-trailing-slash.zip')
        test_dir = self.get_temp_dir()
        result = archive.extract_zip(test_file, test_dir)
        assert [] == result
        expected = ['online_upgrade_img/machine_type']
        check_files(test_dir, expected)
class TestLibarch(BaseArchiveTestCase):
    """Direct libarchive-level checks for hostile relative paths in zips."""

    def test_extract_zip_with_relative_path_libarchive(self):
        source = self.get_test_loc('archive/zip/relative_parent_folders.zip')
        target = self.get_temp_dir()
        warnings = libarchive2.extract(source, target)
        assert warnings == []
        # nothing may have escaped above the extraction dir
        assert not os.path.exists(os.path.join(target, '../a_parent_folder.txt'))
        # the "../" segments are turned into literal dotdot directories
        for rel_path in (
            'dotdot/folder/subfolder/b_subfolder.txt',
            'dotdot/a_parent_folder.txt',
            'dotdot/dotdot/another_folder/b_two_root.txt',
        ):
            assert os.path.exists(os.path.join(target, rel_path))
class TestTar(BaseArchiveTestCase):
    """Tests for plain tar extraction, including hostile and special members."""

    def test_extract_tar_basic(self):
        test_file = self.get_test_loc('archive/tar/tarred.tar')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'e/a/b.txt')
        assert os.path.exists(result)

    def test_extract_tar_broken(self):
        test_file = self.get_test_loc('archive/tar/tarred_broken.tar')
        test_dir = self.get_temp_dir()
        expected = Exception("Unrecognized archive format")
        self.assertRaisesInstance(expected, archive.extract_tar,
                                  test_file, test_dir)

    def test_extract_tar_absolute_path(self):
        non_result = '/home/li/Desktop/absolute_folder'
        assert not os.path.exists(non_result)
        test_dir = self.get_temp_dir()
        test_file = self.get_test_loc('archive/tar/tar_absolute.tar')
        archive.extract_tar(test_file, test_dir)
        # absolute member paths must be re-rooted under the extraction dir
        assert not os.path.exists(non_result)
        result = os.path.join(test_dir, 'home/li/Desktop/absolute_folder/absolute_file')
        assert os.path.exists(result)

    def test_extract_tar_with_absolute_path2(self):
        assert not os.path.exists('/tmp/subdir')
        test_file = self.get_test_loc('archive/tar/absolute_path.tar')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        assert not os.path.exists('/tmp/subdir')
        result = os.path.join(test_dir, 'tmp/subdir/a.txt')
        assert os.path.exists(result)

    def test_extract_tar_with_relative_path(self):
        """
        Check that "../" relative paths are neutralized into "dotdot" dirs.

        The test file was created with:
            import tarfile
            tar = tarfile.open("TarTest.tar.gz", "w")
            tar.add('a.txt', '../a_parent_folder.txt')
            tar.add('b.txt', '../../another_folder/b_two_root.txt')
            tar.add('b.txt', '../folder/subfolder/b_subfolder.txt')
            tar.close()
        """
        # NOTE: the explanation above used to be a no-op string expression
        # placed after the first statement; it is now a proper docstring.
        test_file = self.get_test_loc('archive/tar/tar_relative.tar')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        non_result = os.path.abspath(test_file + '/../a_parent_folder.txt')
        assert not os.path.exists(non_result)
        extracted = self.collect_extracted_path(test_dir)
        expected = [
            '/dotdot/',
            '/dotdot/dotdot/',
            '/dotdot/a_parent_folder.txt',
            '/dotdot/dotdot/another_folder/',
            '/dotdot/dotdot/another_folder/b_two_root.txt',
            '/dotdot/folder/',
            '/dotdot/folder/subfolder/',
            '/dotdot/folder/subfolder/b_subfolder.txt'
        ]
        assert sorted(expected) == sorted(extracted)

    def test_extract_tar_archive_with_special_files(self):
        test_file = self.get_test_loc('archive/tar/special.tar')
        test_dir = self.get_temp_dir()
        result = archive.extract_tar(test_file, test_dir)
        expected = [
            '0-REGTYPE',
            '0-REGTYPE-TEXT',
            '0-REGTYPE-VEEEERY_LONG_NAME_____________________________________________________________________________________________________________________155',
            # '1-LNKTYPE', links are skipped
            'S-SPARSE',
            'S-SPARSE-WITH-NULLS',
        ]
        check_files(test_dir, expected)
        # special files are skipped too
        # '2-SYMTYPE: Skipping broken link to: testtar/0-REGTYPE',
        # '3-CHRTYPE: Skipping special file.',
        # '6-FIFOTYPE: Skipping special file.'
        assert [] == result

    @skipIf(on_windows, 'Unicode and/or Long paths are not handled well yet on windows')
    def test_extract_python_testtar_tar_archive_with_special_files(self):
        test_file = self.get_test_loc('archive/tar/testtar.tar')
        # this is from:
        # https://hg.python.org/cpython/raw-file/bff88c866886/Lib/test/testtar.tar
        test_dir = self.get_temp_dir()
        result = archive.extract_tar(test_file, test_dir)
        expected_warnings = ["'pax/bad-pax-\\xe4\\xf6\\xfc': \nPathname can't be converted from UTF-8 to current locale."]
        assert sorted(expected_warnings) == sorted(result)
        expected = [
            'gnu/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/longname',
            'gnu/regtype-gnu-uid',
            'gnu/sparse',
            'gnu/sparse-0.0',
            'gnu/sparse-0.1',
            'gnu/sparse-1.0',
            'misc/eof',
            'misc/regtype-hpux-signed-chksum-AOUaouss',
            'misc/regtype-old-v7',
            'misc/regtype-old-v7-signed-chksum-AOUaouss',
            'misc/regtype-suntar',
            'misc/regtype-xstar',
            'pax/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/longname',
            'pax/bad-pax-aou',
            'pax/hdrcharset-aou',
            'pax/regtype1',
            'pax/regtype2',
            'pax/regtype3',
            'pax/regtype4',
            'pax/umlauts-AOUaouss',
            'ustar/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/1234567/longname',
            'ustar/conttype',
            'ustar/linktest1/regtype',
            'ustar/regtype',
            'ustar/sparse',
            'ustar/umlauts-AOUaouss'
        ]
        if on_linux:
            # on Linux paths are bytes: encode the (ASCII) expectations.
            # FIX: bytes(e) raises TypeError on Python 3 for a str argument;
            # str.encode is equivalent on Python 2 for these ASCII names.
            expected = [e.encode('utf-8') for e in expected]
        check_files(test_dir, expected)
class TestDebian(BaseArchiveTestCase):
    """A Debian .deb is a plain ar archive: check its data member extracts."""

    def _check_deb(self, rel_path, data_size):
        # Extract a .deb with the ar extractor and verify the data.tar.gz size.
        source = self.get_test_loc(rel_path)
        target = self.get_temp_dir()
        archive.extract_ar(source, target)
        check_size(data_size, os.path.join(target, 'data.tar.gz'))

    def test_extract_deb_package_1(self):
        self._check_deb('archive/deb/adduser_3.112ubuntu1_all.deb', 110198)

    def test_extract_deb_package_2(self):
        self._check_deb('archive/deb/adduser_3.113+nmu3ubuntu3_all.deb', 158441)

    def test_get_best_handler_deb_package_is_an_archive(self):
        source = self.get_test_loc('archive/deb/libjama-dev_1.2.4-2_all.deb')
        assert get_best_handler(source) == archive.DebHandler

    def test_extract_deb_package_3(self):
        self._check_deb('archive/deb/wget-el_0.5.0-8_all.deb', 36376)
class TestAr(BaseArchiveTestCase):
def test_extract_ar_basic_7z(self):
test_file = self.get_test_loc('archive/ar/liby.a')
test_dir = self.get_temp_dir()
result = sevenzip.extract(test_file, test_dir)
expected = ['1.txt', 'main.o', 'yyerror.o']
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_basic(self):
test_file = self.get_test_loc('archive/ar/liby.a')
test_dir = self.get_temp_dir()
result = archive.extract_ar(test_file, test_dir)
expected = ['__.SYMDEF', 'main.o', 'yyerror.o']
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_verify_dates(self):
test_file = self.get_test_loc('archive/ar/liby.a')
test_dir = self.get_temp_dir()
archive.extract_ar(test_file, test_dir)
expected = [
(os.path.join(test_dir, 'main.o'), '2007-06-12'),
(os.path.join(test_dir, 'yyerror.o'), '2007-06-12'),
]
# DST sends a monkey wrench.... so we only test the date, not the time
for loc, expected_date in expected:
result = commoncode.date.get_file_mtime(loc)
assert result.startswith(expected_date)
def test_extract_ar_broken_7z(self):
test_file = self.get_test_loc('archive/ar/liby-corrupted.a')
test_dir = self.get_temp_dir()
result = sevenzip.extract(test_file, test_dir)
expected = ['__.SYMDEF', 'main.o']
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_broken(self):
test_file = self.get_test_loc('archive/ar/liby-corrupted.a')
test_dir = self.get_temp_dir()
result = archive.extract_ar(test_file, test_dir)
expected = ['__.SYMDEF', 'main.o']
check_files(test_dir, expected)
assert ['None: \nIncorrect file header signature'] == result
def test_extract_ar_with_invalid_path(self):
test_file = self.get_test_loc('archive/ar/ar_invalidpath.ar')
test_dir = self.get_temp_dir()
result = archive.extract_ar(test_file, test_dir)
expected = ['this/that']
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_with_relative_path_7z(self):
test_file = self.get_test_loc('archive/ar/winlib/htmlhelp.lib')
test_dir = self.get_temp_dir()
result = sevenzip.extract(test_file, test_dir)
expected = [
'1.txt',
'2.txt',
'release/init.obj'
]
check_files(test_dir, expected)
assert [] == result
def test_extract_ar_with_relative_path_libarch(self):
test_file = self.get_test_loc('archive/ar/winlib/htmlhelp.lib')
test_dir = self.get_temp_dir()
result = archive.libarchive2.extract(test_file, test_dir)
expected_warns = [
"'//': \nInvalid string table",
"'/0': \nCan't find long filename for entry"
]
assert expected_warns == result
# inccorrect for now: need this: ['__.SYMDEF', 'release/init.obj']
expected = ['0', 'dot', 'dot_1', 'dot_2']
check_files(test_dir, expected)
def test_extract_ar_with_relative_path_and_backslashes_in_names_libarch(self):
test_file = self.get_test_loc('archive/ar/winlib/freetype.lib')
test_dir = self.get_temp_dir()
result = archive.libarchive2.extract(test_file, test_dir)
expected_warns = [
u"'//': \nInvalid string table",
u"'/0': \nCan't find long filename for entry",
u"'/34': \nCan't find long filename for entry",
u"'/68': \nCan't find long filename for entry",
u"'/104': \nCan't find long filename for entry",
u"'/137': \nCan't find long filename for entry",
u"'/173': \nCan't find long filename for entry",
u"'/205': \nCan't find long filename for entry",
u"'/239': \nCan't find long filename for entry",
u"'/275': \nCan't find long filename for entry",
u"'/311': \nCan't find long filename for entry",
u"'/344': \nCan't find long filename for entry",
u"'/375': \nCan't find long filename for entry",
u"'/406': \nCan't find long filename for entry",
u"'/442': \nCan't find long filename for entry",
u"'/477': \nCan't find long filename for entry",
u"'/512': \nCan't find long filename for entry",
u"'/545': \nCan't find long filename for entry",
u"'/577': \nCan't find long filename for entry",
u"'/611': \nCan't find long filename for entry",
u"'/645': \nCan't find long filename for entry",
u"'/681': \nCan't find long filename for entry",
u"'/717': \nCan't find long filename for entry",
u"'/750': \nCan't find long filename for entry",
u"'/784': \nCan't find long filename for entry",
u"'/818': \nCan't find long filename for entry",
u"'/853': \nCan't find long filename for entry",
u"'/888': \nCan't find long filename for entry",
u"'/923': \nCan't find long filename for entry",
u"'/957': \nCan't find long filename for entry",
u"'/993': \nCan't find long filename for entry",
u"'/1027': \nCan't find long filename for entry",
u"'/1058': \nCan't find long filename for entry",
u"'/1089': \nCan't find long filename for entry"
]
assert expected_warns == result
# 7zip is better, but has a security bug for now
# GNU ar works fine otherwise, but there are portability issues
expected = [
'0',
'1027',
'104',
'1058',
'1089',
'137',
'173',
'205',
'239',
'275',
'311',
'34',
'344',
'375',
'406',
'442',
'477',
'512',
'545',
'577',
'611',
'645',
'68',
'681',
'717',
'750',
'784',
'818',
'853',
'888',
'923',
'957',
'993',
'dot',
'dot_1',
'dot_2'
]
if on_linux:
expected = [bytes(e) for e in expected]
check_files(test_dir, expected)
def test_extract_ar_with_relative_path_and_backslashes_in_names_7z(self):
    """Extract a Windows .lib ar archive whose member names use backslashes."""
    source = self.get_test_loc('archive/ar/winlib/freetype.lib')
    target = self.get_temp_dir()
    warnings = sevenzip.extract(source, target)
    assert warnings == []
    # the backslash-separated member names must come out as nested paths
    members = [
        'autofit', 'bdf', 'cff', 'ftbase', 'ftbbox', 'ftbitmap', 'ftcache',
        'ftdebug', 'ftgasp', 'ftglyph', 'ftgzip', 'ftinit', 'ftlzw', 'ftmm',
        'ftpfr', 'ftstroke', 'ftsynth', 'ftsystem', 'fttype1', 'ftwinfnt',
        'pcf', 'pfr', 'psaux', 'pshinter', 'psmodule', 'raster', 'sfnt',
        'smooth', 'truetype', 'type1', 'type1cid', 'type42', 'winfnt',
    ]
    expected = ['1.txt', '2.txt'] + ['objs/debug_mt/%s.obj' % m for m in members]
    check_files(target, expected)
def test_extract_ar_static_library_does_not_delete_symdefs_7z(self):
    """7z keeps the symdef member of a static library (it names it 1.txt)."""
    source = self.get_test_loc('archive/ar/liby.a')
    target = self.get_temp_dir()
    warnings = sevenzip.extract(source, target)
    check_files(target, ['1.txt', 'main.o', 'yyerror.o'])
    assert warnings == []
def test_extract_ar_static_library_does_not_delete_symdefs(self):
    """The default ar extractor keeps the __.SYMDEF member of a static library."""
    source = self.get_test_loc('archive/ar/liby.a')
    target = self.get_temp_dir()
    warnings = archive.extract_ar(source, target)
    check_files(target, ['__.SYMDEF', 'main.o', 'yyerror.o'])
    assert warnings == []
def test_extract_ar_with_trailing_data(self):
    """Trailing garbage after the ar data must not prevent extraction."""
    source = self.get_test_loc('archive/ar/ar_trailing.a')
    target = self.get_temp_dir()
    archive.extract_ar(source, target)
    for member in ('main.o', 'yyerror.o'):
        assert os.path.exists(os.path.join(target, member))
def test_extract_ar_with_permissions_7z(self):
    """Extract a Windows .lib with 7z: duplicate names get numbered prefixes."""
    source = self.get_test_loc('archive/ar/winlib/zlib.lib')
    target = self.get_temp_dir()
    warnings = sevenzip.extract(source, target)
    check_files(target, ['1.txt', '1.zlib.pyd', '2.txt', '2.zlib.pyd', '3.zlib.pyd', '4.zlib.pyd'])
    assert warnings == []
def test_extract_ar_with_permissions(self):
    # this behavior is not correct: 7z is better, but has security flaws for now
    source = self.get_test_loc('archive/ar/winlib/zlib.lib')
    target = self.get_temp_dir()
    warnings = archive.extract_ar(source, target)
    assert warnings == []
    check_files(target, ['dot', 'dot_1'])
class TestCpio(BaseArchiveTestCase):
    """Tests for extracting cpio archives."""

    def test_extract_cpio_basic(self):
        source = self.get_test_loc('archive/cpio/elfinfo-1.0-1.fc9.src.cpio')
        target = self.get_temp_dir()
        archive.extract_cpio(source, target)
        assert os.path.exists(os.path.join(target, 'elfinfo-1.0.tar.gz'))

    def test_extract_cpio_with_trailing_data(self):
        source = self.get_test_loc('archive/cpio/cpio_trailing.cpio')
        target = self.get_temp_dir()
        archive.extract_cpio(source, target)
        assert os.path.exists(os.path.join(target, 'elfinfo-1.0.tar.gz'))

    def test_extract_cpio_broken_7z(self):
        source = self.get_test_loc('archive/cpio/cpio_broken.cpio')
        target = self.get_temp_dir()
        self.assertRaisesInstance(
            Exception('Unknown extraction error'), sevenzip.extract, source, target)

    def test_extract_cpio_broken2(self):
        source = self.get_test_loc('archive/cpio/cpio_broken.cpio')
        target = self.get_temp_dir()
        warnings = archive.extract_cpio(source, target)
        expected = sorted(['elfinfo-1.0.tar.gz', 'elfinfo.spec'])
        if on_linux:
            expected = [bytes(e) for e in expected]
        assert sorted(os.listdir(target)) == expected
        assert warnings == ["'elfinfo.spec': \nSkipped 72 bytes before finding valid header"]

    def test_extract_cpio_with_absolute_path(self):
        # absolute paths inside the archive must be re-rooted under the target
        assert not os.path.exists('/tmp/subdir')
        target = self.get_temp_dir()
        source = self.get_test_loc('archive/cpio/cpio_absolute.cpio')
        archive.extract_cpio(source, target)
        assert not os.path.exists('/tmp/subdir')
        assert os.path.exists(os.path.join(target, 'home/li/Desktop/absolute_folder/absolute_file'))

    def test_extract_cpio_with_relative_path(self):
        # test file created with: find ../.. - |cpio -ov >relative.cpio
        # We should somehow add a "parent" folder to extract relative paths
        source = self.get_test_loc('archive/cpio/cpio_relative.cpio')
        target = self.get_temp_dir()
        warnings = archive.extract_cpio(source, target)
        assert warnings == []
        expected = [
            '/dotdot/',
            '/dotdot/dotdot/',
            '/dotdot/dotdot/2folder/',
            '/dotdot/dotdot/2folder/3folder/',
            '/dotdot/dotdot/2folder/3folder/cpio_relative.cpio',
            '/dotdot/dotdot/2folder/3folder/relative_file',
            '/dotdot/dotdot/2folder/3folder/relative_file~',
            '/dotdot/dotdot/2folder/relative_file',
            '/dotdot/dotdot/relative_file',
        ]
        assert self.collect_extracted_path(target) == expected

    def test_extract_cpio_with_invalidpath(self):
        source = self.get_test_loc('archive/cpio/cpio-invalidpath.cpio')
        target = self.get_temp_dir()
        archive.extract_cpio(source, target)
        assert os.path.exists(os.path.join(target, 'backup'))
        assert os.path.exists(os.path.join(target, 'this/that'))

    def test_extract_cpio_with_weird_filename_extension(self):
        source = self.get_test_loc('archive/cpio/t.cpio.foo')
        target = self.get_temp_dir()
        warnings = archive.extract_cpio(source, target)
        assert warnings == []
        assert self.collect_extracted_path(target) == ['/t/', '/t/t.txt']
class TestRpm(BaseArchiveTestCase):
    """Tests for extracting the compressed cpio payload out of RPM packages."""

    def test_extract_rpm_basic_1(self):
        source = self.get_test_loc('archive/rpm/elfinfo-1.0-1.fc9.src.rpm')
        target = self.get_temp_dir()
        archive.extract_rpm(source, target)
        assert os.path.exists(os.path.join(target, 'elfinfo-1.0-1.fc9.src.cpio.gz'))

    def test_extract_rpm_basic_2(self):
        source = self.get_test_loc('archive/rpm/python-glc-0.7.1-1.src.rpm')
        target = self.get_temp_dir()
        archive.extract_rpm(source, target)
        assert os.path.exists(os.path.join(target, 'python-glc-0.7.1-1.src.cpio.gz'))

    def test_extract_rpm_nested_correctly(self):
        source = self.get_test_loc('archive/rpm/extract_once/libsqueeze0.2_0-0.2.3-8mdv2010.0.i586.rpm')
        target = self.get_temp_dir()
        archive.extract_rpm(source, target)
        assert os.path.exists(os.path.join(target, 'libsqueeze0.2_0-0.2.3-8mdv2010.0.i586.cpio.lzma'))

    def test_extract_rpm_with_trailing_data(self):
        source = self.get_test_loc('archive/rpm/rpm_trailing.rpm')
        target = self.get_temp_dir()
        warnings = archive.extract_rpm(source, target)
        check_files(target, ['elfinfo-1.0-1.fc9.src.cpio.gz'])
        assert warnings == []

    def test_extract_rpm_with_renamed_content(self):
        # When the RPM is renamed, we should still be able to find the cpio
        source = self.get_test_loc('archive/rpm/renamed.rpm')
        target = self.get_temp_dir()
        warnings = archive.extract_rpm(source, target)
        check_files(target, ['python-glc-0.7.1-1.src.cpio.gz'])
        assert warnings == []

    def test_extract_rpm_broken(self):
        source = self.get_test_loc('archive/rpm/broken.rpm')
        target = self.get_temp_dir()
        self.assertRaisesInstance(
            Exception('Unknown extraction error'), archive.extract_rpm, source, target)
class TestExtractTwice(BaseArchiveTestCase):
    """Tests for extractors that run two passes (e.g. RPM -> cpio -> files)."""

    def test_extract_twice_with_rpm_with_xz_compressed_cpio(self):
        source = self.get_test_loc('archive/rpm/xz-compressed-cpio.rpm')
        target = self.get_temp_dir()
        # this will return an extractor that extracts twice
        extractor = archive.get_extractor(source)
        warnings = list(extractor(source, target))
        assert warnings == []
        locales = [
            'ar', 'as', 'ast', 'bg', 'bn_IN', 'ca', 'cs', 'da', 'de', 'el',
            'en_GB', 'es', 'fa', 'fi', 'fr', 'gu', 'he', 'hi', 'hu', 'id',
            'it', 'ja', 'kn', 'ko', 'ml', 'mr', 'nb', 'nl', 'or', 'pa',
            'pl', 'pt', 'pt_BR', 'ru', 'sk', 'sr', 'sr@latin', 'sv', 'ta',
            'te', 'uk', 'zh_CN', 'zh_TW',
        ]
        expected = [
            'etc/abrt/abrt-action-save-package-data.conf',
            'etc/abrt/abrt.conf',
            'etc/abrt/gpg_keys',
            'etc/dbus-1/system.d/dbus-abrt.conf',
            'etc/libreport/events.d/abrt_event.conf',
            'etc/libreport/events.d/smart_event.conf',
            'etc/rc.d/init.d/abrtd',
            'usr/bin/abrt-action-save-package-data',
            'usr/bin/abrt-handle-upload',
            'usr/libexec/abrt-handle-event',
            'usr/libexec/abrt1-to-abrt2',
            'usr/sbin/abrt-dbus',
            'usr/sbin/abrt-server',
            'usr/sbin/abrtd',
            'usr/share/dbus-1/system-services/com.redhat.abrt.service',
            'usr/share/doc/abrt-2.0.8/COPYING',
            'usr/share/doc/abrt-2.0.8/README',
        ]
        expected += ['usr/share/locale/%s/LC_MESSAGES/abrt.mo' % loc for loc in locales]
        expected += [
            'usr/share/man/man1/abrt-action-save-package-data.1.gz',
            'usr/share/man/man1/abrt-handle-upload.1.gz',
            'usr/share/man/man1/abrt-server.1.gz',
            'usr/share/man/man5/abrt-action-save-package-data.conf.5.gz',
            'usr/share/man/man5/abrt.conf.5.gz',
            'usr/share/man/man8/abrt-dbus.8.gz',
            'usr/share/man/man8/abrtd.8.gz',
        ]
        check_files(target, expected)

    def test_extract_twice_can_extract_to_relative_paths(self):
        # The setup is a tad complex because we want to have a relative dir
        # to the base dir where we run tests from, ie the scancode-toolkit/ dir
        # To use relative paths, we use our tmp dir at the root of the code tree
        from os.path import abspath, dirname, exists, join
        import shutil
        import tempfile

        source = self.get_test_loc('archive/rpm/xz-compressed-cpio.rpm')
        # this will return an extractor that extracts twice
        extractor = archive.get_extractor(source)
        scancode_root = dirname(dirname(dirname(__file__)))
        scancode_tmp = join(scancode_root, 'tmp')
        fileutils.create_dir(scancode_tmp)
        scancode_root_abs = abspath(scancode_root)
        src_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
        tgt_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
        shutil.copy(source, src_dir)
        src_file = join(src_dir, 'xz-compressed-cpio.rpm')
        assert list(extractor(src_file, tgt_dir)) == []
        assert exists(join(tgt_dir, 'usr/sbin/abrt-dbus'))
class TestRar(BaseArchiveTestCase):
    """Tests for extracting RAR archives."""

    def test_extract_rar_basic(self):
        test_file = self.get_test_loc('archive/rar/basic.rar')
        test_dir = self.get_temp_dir()
        archive.extract_rar(test_file, test_dir)
        result = os.path.join(test_dir, 'd', 'b', 'a.txt')
        assert os.path.exists(result)

    def test_extract_rar_with_invalid_path(self):
        test_file = self.get_test_loc('archive/rar/rar_invalidpath.rar')
        test_dir = self.get_temp_dir()
        archive.extract_rar(test_file, test_dir)
        result = os.path.join(test_dir, 'this/that')
        assert os.path.exists(result)

    def test_extract_rar_with_trailing_data(self):
        # trailing garbage after the RAR data must not prevent extraction
        # (a stray, discarded `Exception(...)` expression was removed here)
        test_file = self.get_test_loc('archive/rar/rar_trailing.rar')
        test_dir = self.get_temp_dir()
        archive.extract_rar(test_file, test_dir)
        result = os.path.join(test_dir, 'd', 'b', 'a.txt')
        assert os.path.exists(result)

    def test_extract_rar_broken(self):
        test_file = self.get_test_loc('archive/rar/broken.rar')
        test_dir = self.get_temp_dir()
        expected = Exception('Header CRC error')
        self.assertRaisesInstance(expected, archive.extract_rar, test_file, test_dir)

    def test_extract_rar_with_relative_path(self):
        # FIXME: this file may not have a real relative path
        test_file = self.get_test_loc('archive/rar/rar_relative.rar', copy=True)
        test_dir = self.get_temp_dir()
        archive.extract_rar(test_file, test_dir)
        # nothing must escape above the extraction target
        result = os.path.abspath(test_file + '/../a_parent_folder.txt')
        assert not os.path.exists(result)
        result = os.path.join(test_dir, '2folder/relative_file')
        assert os.path.exists(result)
        result = os.path.join(test_dir, '2folder/3folder/relative_file')
        assert os.path.exists(result)

    def test_extract_rar_with_absolute_path(self):
        # FIXME: this file may not have a real absolute path
        assert not os.path.exists('/home/li/Desktop/zip_folder')
        test_file = self.get_test_loc('archive/rar/rar_absolute.rar', copy=True)
        test_dir = self.get_temp_dir()
        archive.extract_rar(test_file, test_dir)
        # absolute paths inside the archive must be re-rooted under the target
        assert not os.path.exists('/home/li/Desktop/absolute_folder')
        result = os.path.join(test_dir, 'home/li/Desktop',
                              'absolute_folder/absolute_file')
        assert os.path.exists(result)

    def test_extract_rar_with_password(self):
        test_file = self.get_test_loc('archive/rar/rar_password.rar')
        test_dir = self.get_temp_dir()
        expected = Exception('RAR encryption support unavailable.')
        self.assertRaisesInstance(expected, archive.extract_rar,
                                  test_file, test_dir)

    def test_extract_rar_with_non_ascii_path(self):
        test_file = self.get_test_loc('archive/rar/non_ascii_corrupted.rar')
        # The bug only occurs if the path was given as Unicode
        test_file = unicode(test_file)
        test_dir = self.get_temp_dir()
        # raise an exception but still extracts some
        expected = Exception('Prefix found')
        self.assertRaisesInstance(expected, archive.extract_rar, test_file, test_dir)
        result = os.path.join(test_dir, 'EdoProject_java/WebContent/WEB-INF/lib/cos.jar')
        assert os.path.exists(result)
class TestSevenZip(BaseArchiveTestCase):
    """Tests for extracting 7zip archives via 7z and via the libarchive fallback."""

    def test_extract_7z_basic(self):
        source = self.get_test_loc('archive/7z/z.7z')
        target = self.get_temp_dir()
        warnings = archive.extract_7z(source, target)
        assert warnings == []
        check_files(target, ['z/a/a.txt', 'z/b/a.txt', 'z/c/a.txt'])

    def test_extract_7z_with_trailing_data(self):
        source = self.get_test_loc('archive/7z/7zip_trailing.7z')
        target = self.get_temp_dir()
        warnings = archive.extract_7z(source, target)
        assert warnings == []
        check_files(target, ['z/a/a.txt', 'z/b/a.txt', 'z/c/a.txt'])

    def test_extract_7z_with_broken_archive_with7z(self):
        source = self.get_test_loc('archive/7z/corrupted7z.7z')
        target = self.get_temp_dir()
        expected = ExtractErrorFailedToExtract('Unknown extraction error')
        self.assertRaisesInstance(expected, sevenzip.extract, source, target)

    def test_extract_7z_with_broken_archive_does_not_fail_when_using_fallback(self):
        source = self.get_test_loc('archive/7z/corrupted7z.7z')
        target = self.get_temp_dir()
        expected = ExtractErrorFailedToExtract('Unknown extraction error')
        self.assertRaisesInstance(expected, archive.extract_7z, source, target)

    def test_extract_7z_with_non_existing_archive(self):
        source = 'archive/7z/I_DO_NOT_EXIST.zip'
        target = self.get_temp_dir()
        self.assertExceptionContains('Unknown extraction error', sevenzip.extract, source, target)

    def test_extract_7z_with_invalid_path_using_7z(self):
        source = self.get_test_loc('archive/7z/7zip_invalidpath.7z')
        target = self.get_temp_dir()
        warnings = sevenzip.extract(source, target)
        assert warnings == []
        assert self.collect_extracted_path(target) == ['/this/', '/this/that']

    def test_extract_7z_with_invalid_path(self):
        source = self.get_test_loc('archive/7z/7zip_invalidpath.7z')
        target = self.get_temp_dir()
        warnings = archive.extract_7z(source, target)
        assert warnings == []
        assert self.collect_extracted_path(target) == ['/this/', '/this/that']

    def test_extract_7z_with_relative_path(self):
        source = self.get_test_loc('archive/7z/7zip_relative.7z')
        target = self.get_temp_dir()
        warnings = archive.extract_7z(source, target)
        # nothing must escape above the extraction target
        assert not os.path.exists(os.path.join(target, '../a_parent_folder.txt'))
        assert warnings == []
        expected = [
            '/dotdot/',
            '/dotdot/2folder/',
            '/dotdot/2folder/3folder/',
            '/dotdot/2folder/3folder/relative_file',
            '/dotdot/2folder/3folder/relative_file~',
            '/dotdot/2folder/relative_file',
            '/dotdot/relative_file',
        ]
        assert self.collect_extracted_path(target) == expected

    def test_extract_7z_with_password_with_7z(self):
        source = self.get_test_loc('archive/7z/7zip_password.7z')
        target = self.get_temp_dir()
        expected = Exception('Password protected archive, unable to extract')
        self.assertRaisesInstance(expected, sevenzip.extract, source, target)

    def test_extract_7z_with_password(self):
        source = self.get_test_loc('archive/7z/7zip_password.7z')
        target = self.get_temp_dir()
        expected = Exception('Password protected archive, unable to extract')
        self.assertRaisesInstance(expected, archive.extract_7z, source, target)

    def test_extract_7zip_native_with_unicode_path_should_extract_without_error(self):
        source = self.get_test_loc('archive/7z/7zip_unicode.7z')
        target = self.get_temp_dir()
        warnings = sevenzip.extract(source, target)
        assert warnings == []
        assert len(os.listdir(os.path.join(target, 'zip'))) == 2

    def test_extract_7zip_with_fallback_with_unicode_path_should_extract_without_error(self):
        source = self.get_test_loc('archive/7z/7zip_unicode.7z')
        target = self.get_temp_dir()
        warnings = archive.extract_7z(source, target)
        assert warnings == []
        assert len(os.listdir(os.path.join(target, 'zip'))) == 2

    def test_extract_7zip_libarchive_with_unicode_path_extracts_with_errors(self):
        source = self.get_test_loc('archive/7z/7zip_unicode.7z')
        target = self.get_temp_dir()
        try:
            archive.extract_7z(source, target)
        except libarchive2.ArchiveError as e:
            assert 'Damaged 7-Zip archive' in e.msg

    def test_extract_7z_basic_with_space_in_file_name(self):
        source = self.get_test_loc('archive/7z/t .7z')
        target = self.get_temp_dir()
        warnings = archive.extract_7z(source, target)
        assert warnings == []
        check_files(target, ['t/t.txt'])
class TestIso(BaseArchiveTestCase):
    """Tests for extracting ISO disk images."""

    def test_extract_iso_basic(self):
        source = self.get_test_loc('archive/iso/small.iso')
        target = self.get_temp_dir()
        archive.extract_iso(source, target)
        expected = [
            '/ChangeLog',
            '/ChangeLog (copy)',
            '/freebase.ABOUT',
            '/this/',
            '/this/that',
        ]
        assert sorted(self.collect_extracted_path(target)) == sorted(expected)

    def test_get_extractor_not_iso_text_is_not_mistaken_for_an_iso_image(self):
        source = self.get_test_loc('archive/iso/ChangeLog')
        assert not archive.get_extractor(source)

    def test_extract_iso_basic_with_with_weird_filename_extension(self):
        source = self.get_test_loc('archive/iso/t.iso.foo')
        target = self.get_temp_dir()
        archive.extract_iso(source, target)
        assert self.collect_extracted_path(target) == ['/t/', '/t/t.txt']
class TestXzLzma(BaseArchiveTestCase):
    """Tests for extracting xz- and lzma-compressed files."""

    def check_lzma_extract(self, extract_fun, test_file, expected):
        """
        Run the 'extract_fun' function using the 'test_file' file as an input
        and verifies that the 'expected' file has been extracted correctly.
        """
        test_file = self.get_test_loc(test_file)
        extract_dir = self.get_temp_dir()
        extract_fun(test_file, extract_dir)
        expected_file = os.path.join(extract_dir, expected)
        assert os.path.exists(expected_file), (
            '%(expected_file)s file was not extracted '
            'correctly from archive %(test_file)s'
            % locals())

    def test_extract_archive_tar_xz_1(self):
        self.check_lzma_extract(
            archive.extract_lzma,
            'archive/lzma_xz/basic/texlive-core-patches-20.tar.xz',
            'texlive-core-patches-20.tar')

    def test_extract_archive_tar_xz_2(self):
        self.check_lzma_extract(
            archive.extract_lzma,
            'archive/lzma_xz/all/texlive-core-patches-20.tar.xz',
            'texlive-core-patches-20.tar')

    def test_extract_archive_tar_xz_3(self):
        self.check_lzma_extract(
            archive.extract_lzma,
            'archive/lzma_xz/all/binutils-2.22.52.0.3-patches-1.0.tar.xz',
            'binutils-2.22.52.0.3-patches-1.0.tar')

    def test_extract_archive_tar_xz_4(self):
        self.check_lzma_extract(
            archive.extract_lzma,
            'archive/lzma_xz/all/bdsup2sub-4.0.0.tar.xz',
            'bdsup2sub-4.0.0.tar')

    def test_extract_archive_tar_xz_5(self):
        self.check_lzma_extract(
            archive.extract_lzma,
            'archive/lzma_xz/all/desktop-file-utils-0.19.tar.xz',
            'desktop-file-utils-0.19.tar')

    def test_extract_archive_tar_lzma_1(self):
        self.check_lzma_extract(
            archive.extract_lzma,
            'archive/lzma_xz/basic/coreutils-8.5-patches-1.tar.lzma',
            'coreutils-8.5-patches-1.tar')

    def test_extract_archive_tar_lzma_2(self):
        self.check_lzma_extract(
            archive.extract_lzma,
            'archive/lzma_xz/all/orionsocket-1.0.9.tar.lzma',
            'orionsocket-1.0.9.tar')

    def test_extract_archive_tar_lzma_3(self):
        self.check_lzma_extract(
            archive.extract_lzma,
            'archive/lzma_xz/all/MinGW-5.1.6.exe-src.tar.lzma',
            'MinGW-5.1.6.exe-src.tar')

    def test_extract_archive_tar_lzma_4(self):
        self.check_lzma_extract(
            archive.extract_lzma,
            'archive/lzma_xz/all/dnsmasq-2.57.tar.lzma',
            'dnsmasq-2.57.tar')

    def test_extract_archive_lzma_1(self):
        self.check_lzma_extract(
            archive.extract_lzma,
            'archive/lzma_xz/all/cromwell-2.40-r3-cvs-fixes.patch.lzma',
            'cromwell-2.40-r3-cvs-fixes.patch')

    def test_extract_archive_tar_lzma_5(self):
        self.check_lzma_extract(
            archive.extract_lzma,
            'archive/lzma_xz/all/coreutils-8.5-patches-1.tar.lzma',
            'coreutils-8.5-patches-1.tar')
class TestDia(BaseArchiveTestCase):
    """Tests for uncompressing Dia diagram files (gzip-compressed XML)."""

    def test_extract_dia_basic(self):
        source = self.get_test_loc('archive/dia/dia.dia')
        target = self.get_temp_dir()
        archive.uncompress_gzip(source, target)
        assert os.path.exists(os.path.join(target, 'dia.dia-extract'))

    def test_extract_dia_with_trailing_data(self):
        source = self.get_test_loc('archive/dia/dia_trailing.dia')
        target = self.get_temp_dir()
        archive.uncompress_gzip(source, target)
        assert os.path.exists(os.path.join(target, 'dia_trailing.dia-extract'))

    def test_extract_dia_broken_1(self):
        source = self.get_test_loc('archive/dia/dia_broken.dia')
        target = self.get_temp_dir()
        self.assertExceptionContains(
            'CRC check failed', archive.uncompress_gzip, source, target)

    def test_extract_dia_broken_2(self):
        source = self.get_test_loc('archive/dia/broken/PublisherUML.dia')
        target = self.get_temp_dir()
        self.assertExceptionContains(
            'invalid distance too far back', archive.uncompress_gzip, source, target)

    def test_extract_dia_broken_3(self):
        source = self.get_test_loc('archive/dia/broken/schedulerClassDiagram.dia')
        target = self.get_temp_dir()
        self.assertExceptionContains(
            'invalid distance too far back', archive.uncompress_gzip, source, target)

    def test_extract_dia_broken_4(self):
        source = self.get_test_loc('archive/dia/broken/ServletProxyGenerator.dia')
        target = self.get_temp_dir()
        self.assertExceptionContains(
            'invalid distance too far back', archive.uncompress_gzip, source, target)

    def test_extract_can_get_extractor_and_uncompress_dia_files(self):
        source = self.get_test_loc('archive/dia/guess/infoset-doc.dia')
        target = self.get_temp_dir()
        extractor = archive.get_extractor(source)
        extractor(source, target)
        assert os.path.exists(os.path.join(target, 'infoset-doc.dia-extract'))
class TestTarZ(BaseArchiveTestCase):
    """Tests for uncompressing legacy compress(1) .Z files."""

    def test_extract_tarz_compress_basic(self):
        source = self.get_test_loc('archive/Z/tkWWW-0.11.tar.Z')
        target = self.get_temp_dir()
        archive.extract_Z(source, target)
        assert os.path.exists(os.path.join(target, 'tkWWW-0.11.tar'))

    def test_extract_z_compress_basic(self):
        source = self.get_test_loc('archive/Z/tr2tex.Z')
        target = self.get_temp_dir()
        archive.extract_Z(source, target)
        assert os.path.exists(os.path.join(target, 'tr2tex'))
class TestXar(BaseArchiveTestCase):
    """Tests for extracting XAR archives."""

    def test_extract_xar_basic(self):
        source = self.get_test_loc('archive/xar/xar-1.4.xar')
        target = self.get_temp_dir()
        # NOTE(review): extract_Z looks copy/pasted from the .Z tests;
        # presumably a xar-specific extractor should be used here -- confirm.
        archive.extract_Z(source, target)
        assert os.path.exists(os.path.join(target, '[TOC].xml'))
        assert os.path.exists(os.path.join(target, 'xar-1.4', 'Makefile.in'))
class TestCb7(BaseArchiveTestCase):
    """Tests for .cb7 comic book archives (7zip-based)."""

    def test_get_extractor_cb7(self):
        source = self.get_test_loc('archive/cb7/t .cb7')
        assert archive.get_extractor(source) == archive.extract_7z

    def test_extract_cb7_basic_with_space_in_file_name(self):
        source = self.get_test_loc('archive/cb7/t .cb7')
        target = self.get_temp_dir()
        archive.extract_7z(source, target)
        assert self.collect_extracted_path(target) == ['/t/', '/t/t.txt']

    def test_extract_cb7_basic_with_weird_filename_extension(self):
        source = self.get_test_loc('archive/cb7/t.cb7.foo')
        target = self.get_temp_dir()
        archive.extract_7z(source, target)
        assert self.collect_extracted_path(target) == ['/t/', '/t/t.txt']
class TestCab(BaseArchiveTestCase):
    """Tests for Microsoft Cabinet (.cab) archives."""

    def test_get_extractor_cab(self):
        source = self.get_test_loc('archive/cab/basic.cab')
        assert archive.get_extractor(source) == archive.extract_cab

    def test_extract_cab_basic(self):
        source = self.get_test_loc('archive/cab/basic.cab')
        target = self.get_temp_dir()
        archive.extract_cab(source, target)
        assert self.collect_extracted_path(target) == ['/TREEHELP.TXT']

    def test_extract_cab_basic_with_weird_filename_extension(self):
        source = self.get_test_loc('archive/cab/t.cab.foo')
        target = self.get_temp_dir()
        archive.extract_cab(source, target)
        assert self.collect_extracted_path(target) == ['/t/', '/t/t.txt']
class TestCbr(BaseArchiveTestCase):
    """Tests for .cbr comic book archives (RAR-based)."""

    def test_get_extractor_cbr(self):
        source = self.get_test_loc('archive/cbr/t.cbr')
        # we do not handle these rare extensions (this is a RAR)
        assert archive.get_extractor(source) is None  # archive.extract_rar

    def test_extract_cbr_basic(self):
        source = self.get_test_loc('archive/cbr/t.cbr')
        target = self.get_temp_dir()
        libarchive2.extract(source, target)
        assert self.collect_extracted_path(target) == ['/t/', '/t/t.txt']

    def test_extract_cbr_basic_with_weird_filename_extension(self):
        source = self.get_test_loc('archive/cbr/t.cbr.foo')
        target = self.get_temp_dir()
        libarchive2.extract(source, target)
        assert self.collect_extracted_path(target) == ['/t/', '/t/t.txt']
class TestCbt(BaseArchiveTestCase):
    """Tests for .cbt comic book archives (tar-based)."""

    def test_get_extractor_cbt(self):
        source = self.get_test_loc('archive/cbt/t.cbt')
        assert archive.get_extractor(source) == archive.extract_tar

    def test_extract_cbt_basic(self):
        source = self.get_test_loc('archive/cbt/t.cbt')
        target = self.get_temp_dir()
        archive.extract_tar(source, target)
        assert self.collect_extracted_path(target) == ['/t/', '/t/t.txt']

    def test_extract_cbt_basic_with_weird_filename_extension(self):
        source = self.get_test_loc('archive/cbt/t.cbt.foo')
        target = self.get_temp_dir()
        archive.extract_tar(source, target)
        assert self.collect_extracted_path(target) == ['/t/', '/t/t.txt']
class TestCbz(BaseArchiveTestCase):
    """Tests for .cbz comic book archives (zip-based)."""

    def test_get_extractor_cbz(self):
        source = self.get_test_loc('archive/cbz/t.cbz')
        assert archive.get_extractor(source) == archive.extract_zip

    def test_extract_cbz_basic(self):
        source = self.get_test_loc('archive/cbz/t.cbz')
        target = self.get_temp_dir()
        archive.extract_zip(source, target)
        assert self.collect_extracted_path(target) == ['/t/', '/t/t.txt']

    def test_extract_cbz_basic_with_weird_filename_extension(self):
        source = self.get_test_loc('archive/cbz/t.cbz.foo')
        target = self.get_temp_dir()
        archive.extract_zip(source, target)
        assert self.collect_extracted_path(target) == ['/t/', '/t/t.txt']
# Note: this series of tests is not easy to grasp, but unicode archives on
# multiple OSes are hard to test. So we have one test class for each of
# libarchive and sevenzip on each of the three OSes, which makes six test
# classes, each duplicated with possibly different expectations on each OS.
# Then each test class has a subclass with check_only_warnings set to True to
# test only the possible warnings separately. The code tries to avoid too much
# duplication, but this is at the cost of readability.
def is_posixpath(location):
    """
    Return True if the `location` path is likely a POSIX-like path: it uses
    POSIX path separators (slash or "/") or has no path separator at all.
    Return False if the `location` path is likely a Windows-like path, i.e.
    it has a drive letter or uses only backslashes ("\\") as separators.
    """
    if location:
        # a path with a Windows drive (e.g. "C:") is unambiguously Windows-like
        drive, _tail = ntpath.splitdrive(location)
        if drive:
            return False
    # a path is always POSIX unless it contains ONLY backslashes,
    # which is a rough approximation (it could still be posix)
    return '/' in location or '\\' not in location
def to_posix(path):
    """
    Return a path using the POSIX path separator given a path that may contain
    POSIX or Windows separators, converting backslash to slash. NB: this path
    will still be valid in the Windows explorer (except as a UNC or share
    name). It will be a valid path everywhere in Python. It will not be valid
    for Windows command line operations.
    """
    # pick separators of the same string type as the input
    if isinstance(path, unicode):
        backslash, slash = u'\\', u'/'
    else:
        backslash, slash = '\\', '/'
    # an already-POSIX path only needs converting on Windows
    if is_posixpath(path) and not on_windows:
        return path
    return path.replace(backslash, slash)
class ExtractArchiveWithIllegalFilenamesTestCase(BaseArchiveTestCase):
    # when True, subclasses verify only the returned warnings, not the
    # extracted paths
    check_only_warnings = False

    def check_extract(self, test_function, test_file, expected_suffix, expected_warnings=None, regen=False):
        """
        Run the extraction `test_function` on `test_file` checking that the paths
        listed in the `test_file.expected` file exist in the extracted target
        directory. Regen expected file if True.
        """
        if not isinstance(test_file, unicode):
            test_file = unicode(test_file)
        test_file = self.get_test_loc(test_file)
        test_dir = self.get_temp_dir()
        warnings = test_function(test_file, test_dir)

        # shortcut if check of warnings are requested
        if self.check_only_warnings and expected_warnings is not None:
            assert sorted(expected_warnings) == sorted(warnings)
            return

        len_test_dir = len(test_dir)
        extracted = sorted(path[len_test_dir:] for path in fileutils.resource_iter(test_dir, with_dirs=False))
        extracted = [to_posix(unicode(p)) for p in extracted]

        # expectations are OS-specific because of filename handling differences
        if on_linux:
            os_suffix = 'linux'
        elif on_mac:
            os_suffix = 'mac'
        elif on_windows:
            os_suffix = 'win'
        else:
            # fail loudly instead of raising NameError on os_suffix below
            raise Exception('Unsupported OS: no expected results available')

        expected_file = test_file + '_' + expected_suffix + '_' + os_suffix + '.expected'
        import json
        if regen:
            with open(expected_file, 'wb') as ef:
                ef.write(json.dumps(extracted, indent=2))
        expected = json.loads(open(expected_file).read())
        expected = [p for p in expected if p.strip()]
        assert expected == extracted
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnLinux(ExtractArchiveWithIllegalFilenamesTestCase):
    """Check libarchive extraction of archives with weird file names on Linux."""
    check_only_warnings = False

    def test_extract_7zip_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.7z')
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=[])

    def test_extract_ar_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.ar')
        # libarchive warns on this ar archive header
        expected = ['None: \nIncorrect file header signature']
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=expected)

    def test_extract_cpio_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.cpio')
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=[])

    def test_extract_tar_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.tar')
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=[])

    def test_extract_zip_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.zip')
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=[])
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnLinuxWarnings(TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnLinux):
    # re-run the parent tests checking only the extraction warnings
    check_only_warnings = True
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnMac(ExtractArchiveWithIllegalFilenamesTestCase):
    """Check libarchive extraction of archives with weird file names on Mac."""
    check_only_warnings = False

    def test_extract_7zip_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.7z')
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=[])

    def test_extract_ar_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.ar')
        # libarchive warns on this ar archive header
        expected = ['None: \nIncorrect file header signature']
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=expected)

    def test_extract_cpio_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.cpio')
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=[])

    def test_extract_tar_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.tar')
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=[])

    def test_extract_zip_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.zip')
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=[])
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnMacWarnings(TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnMac):
    # re-run the parent tests checking only the extraction warnings
    check_only_warnings = True
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnWindows(ExtractArchiveWithIllegalFilenamesTestCase):
    """Check libarchive extraction of archives with weird file names on Windows."""
    check_only_warnings = False

    def test_extract_7zip_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.7z')
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=[])

    def test_extract_ar_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.ar')
        # libarchive warns on this ar archive header
        expected = [u'None: \nIncorrect file header signature']
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=expected)

    def test_extract_cpio_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.cpio')
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=[])

    def test_extract_tar_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.tar')
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=[])

    def test_extract_zip_with_weird_filenames_with_libarchive(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.zip')
        self.check_extract(libarchive2.extract, loc, 'libarch', expected_warnings=[])
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnWindowsWarnings(TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnWindows):
    # re-run the parent tests checking only the extraction warnings
    check_only_warnings = True
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnLinux(ExtractArchiveWithIllegalFilenamesTestCase):
    """Check sevenzip extraction of archives with weird file names on Linux."""
    check_only_warnings = False

    def test_extract_7zip_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.7z')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    def test_extract_ar_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.ar')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    def test_extract_cpio_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.cpio')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    def test_extract_iso_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.iso')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    # not a problem: we now use libarchive for these
    @expectedFailure
    def test_extract_rar_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.rar')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    def test_extract_tar_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.tar')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    def test_extract_zip_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.zip')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnLinuxWarnings(TestExtractArchiveWithIllegalFilenamesWithSevenzipOnLinux):
    # re-run the parent tests checking only the extraction warnings
    check_only_warnings = True
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings(ExtractArchiveWithIllegalFilenamesTestCase):
    """Check only sevenzip extraction warnings for weird file names on Mac."""
    check_only_warnings = True

    def test_extract_7zip_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.7z')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    def test_extract_ar_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.ar')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    def test_extract_cpio_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.cpio')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    def test_extract_iso_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.iso')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    @expectedFailure
    def test_extract_rar_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.rar')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    def test_extract_tar_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.tar')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    def test_extract_zip_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.zip')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMac(TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings):
    # re-run the parent tests checking full path expectations; most are
    # rebound below as expected failures since sevenzip mangles weird names
    check_only_warnings = False
    # not a problem: we use libarchive for these
    test_extract_7zip_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings
        .test_extract_7zip_with_weird_filenames_with_sevenzip)
    # not a problem: we use libarchive for these
    test_extract_ar_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings
        .test_extract_ar_with_weird_filenames_with_sevenzip)
    # not a problem: we use libarchive for these
    test_extract_cpio_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings
        .test_extract_cpio_with_weird_filenames_with_sevenzip)
    # This is a problem
    test_extract_iso_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings
        .test_extract_iso_with_weird_filenames_with_sevenzip)
    # not a problem: we use libarchive for these
    test_extract_tar_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings
        .test_extract_tar_with_weird_filenames_with_sevenzip)
    # not a problem: we use libarchive for these
    test_extract_zip_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings
        .test_extract_zip_with_weird_filenames_with_sevenzip)
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWin(ExtractArchiveWithIllegalFilenamesTestCase):
    """Check sevenzip extraction of archives with weird file names on Windows."""
    check_only_warnings = False

    def test_extract_7zip_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.7z')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    # not a problem: we use libarchive for these
    @expectedFailure
    def test_extract_ar_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.ar')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    # not a problem: we use libarchive for these
    @expectedFailure
    def test_extract_cpio_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.cpio')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    def test_extract_iso_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.iso')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    # not a problem: we use libarchive for these
    @expectedFailure
    def test_extract_rar_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.rar')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    def test_extract_tar_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.tar')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])

    # not a problem: we use libarchive for these
    @expectedFailure
    def test_extract_zip_with_weird_filenames_with_sevenzip(self):
        loc = self.get_test_loc('archive/weird_names/weird_names.zip')
        self.check_extract(sevenzip.extract, loc, '7zip', expected_warnings=[])
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWinWarning(TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWin):
    # re-run the parent tests checking only the extraction warnings
    check_only_warnings = True
    # The results are not correct but not a problem: we use libarchive for these
    test_extract_7zip_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWin
        .test_extract_7zip_with_weird_filenames_with_sevenzip)
class TestZipSlip(BaseArchiveTestCase):
    # Tests for the "zip slip" path traversal attack: archives containing
    # entries with many "../" segments. Extraction must not escape the target
    # directory; the relative segments are rewritten to literal "dotdot/" dirs.

    def test_extract_zipslip_zip_posix(self):
        test_file = self.get_test_loc('archive/zipslip/zip-slip.zip')
        test_dir = self.get_temp_dir()
        result = archive.extract_zip(test_file, test_dir)
        assert [] == result
        if on_windows:
            expected = [u'good.txt', u'tmp/evil.txt']
        else:
            expected = [
                'dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/tmp/evil.txt',
                'good.txt'
            ]
        check_files(test_dir, expected)

    @skipIf(on_windows, 'Fails with WindowsError: [Error 206] The filename or extension is too long:')
    def test_extract_zipslip_tar_posix(self):
        test_file = self.get_test_loc('archive/zipslip/zip-slip.tar')
        test_dir = self.get_temp_dir()
        result = archive.extract_tar(test_file, test_dir)
        assert [] == result
        expected = [
            'dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/tmp/evil.txt',
            'good.txt'
        ]
        check_files(test_dir, expected)

    def test_extract_zipslip_zip_win(self):
        test_file = self.get_test_loc('archive/zipslip/zip-slip-win.zip')
        test_dir = self.get_temp_dir()
        result = archive.extract_zip(test_file, test_dir)
        assert [] == result
        if on_windows:
            expected = [u'Temp/evil.txt', u'good.txt']
        else:
            expected = [
                'dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/Temp/evil.txt',
                'good.txt'
            ]
        check_files(test_dir, expected)

    @skipIf(on_windows, 'Fails with WindowsError: [Error 206] The filename or extension is too long:')
    def test_extract_zipslip_tar_win(self):
        test_file = self.get_test_loc('archive/zipslip/zip-slip-win.tar')
        test_dir = self.get_temp_dir()
        result = archive.extract_tar(test_file, test_dir)
        assert [] == result
        expected = [
            'dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/Temp/evil.txt',
            'good.txt'
        ]
        check_files(test_dir, expected)
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import io
import ntpath
import os
import posixpath
from unittest.case import expectedFailure
from unittest.case import skipIf
import commoncode.date
from commoncode.testcase import FileBasedTesting
from commoncode import filetype
from commoncode import fileutils
from commoncode.system import on_linux
from commoncode.system import on_mac
from commoncode.system import on_windows
import typecode.contenttype
from extractcode_assert_utils import check_files
from extractcode_assert_utils import check_size
import extractcode
from extractcode import archive
from extractcode.archive import get_best_handler
from extractcode import ExtractErrorFailedToExtract
from extractcode import libarchive2
from extractcode import sevenzip
"""
For each archive type --when possible-- we are testing extraction of:
- basic, plain archive, no tricks
- with trailing data appended to archive
- broken, either truncated or with extra junk inserted
- with hardlinks and symlinks, either valid or broken when supported
- with hardlinks and symlinks loops (aka. tarbomb) when supported
- with FIFO, character, sparse and other special files when supported
- with relative paths pointing outside of the archive when supported
- with absolute paths when supported
- with invalid paths or mixed slash paths when supported
- with unicode or binary path names
- with duplicate names or paths when case is ignored
- password-protected when supported
"""
class TestSmokeTest(FileBasedTesting):
    """
    Smoke tests for extractor selection: check that the correct extractor
    function(s) and handler(s) are picked for each archive type and kind.
    """
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def check_get_extractors(self, test_file, expected, kinds=()):
        """
        Check that `expected` extractor functions are selected for `test_file`,
        optionally restricting the selection to the `kinds` of archives.
        """
        test_loc = self.get_test_loc(test_file)
        if kinds:
            extractors = archive.get_extractors(test_loc, kinds)
        else:
            extractors = archive.get_extractors(test_loc)
        # collect filetype, mimetype and extension for a rich failure message
        ft = typecode.contenttype.get_type(test_loc).filetype_file
        mt = typecode.contenttype.get_type(test_loc).mimetype_file
        fe = fileutils.file_extension(test_loc).lower()
        em = ', '.join(e.__module__ + '.' + e.__name__ for e in extractors)
        msg = ('%(expected)r == %(extractors)r for %(test_file)s\n'
               'with ft:%(ft)r, mt:%(mt)r, fe:%(fe)r, em:%(em)s' % locals())
        assert expected == extractors, msg

    def test_get_extractors_1(self):
        test_file = 'archive/zip/basic.zip'
        expected = [archive.extract_zip]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_2(self):
        test_file = 'archive/rar/basic.rar'
        expected = [archive.extract_rar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_3(self):
        test_file = 'archive/deb/adduser_3.112ubuntu1_all.deb'
        expected = [archive.extract_ar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_4(self):
        test_file = 'archive/cpio/elfinfo-1.0-1.fc9.src.cpio'
        expected = [archive.extract_cpio]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_5(self):
        test_file = 'archive/rpm/elfinfo-1.0-1.fc9.src.rpm'
        expected = [archive.extract_rpm, archive.extract_cpio]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_6(self):
        test_file = 'archive/gzip/file_4.26-1.diff.gz'
        expected = [archive.uncompress_gzip]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_7(self):
        test_file = 'archive/ar/liby.a'
        expected = [archive.extract_ar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_8(self):
        test_file = 'archive/bz2/single_file_not_tarred.bz2'
        expected = [archive.uncompress_bzip2]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_9(self):
        test_file = 'archive/tar/tarred.tar'
        expected = [archive.extract_tar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_10(self):
        test_file = 'archive/tbz/tarred_bzipped.bz'
        expected = [archive.uncompress_bzip2]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_11(self):
        test_file = 'archive/tbz/tarred_bzipped.tar.bz2'
        expected = [archive.extract_tar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_12(self):
        test_file = 'archive/tbz/tarred_bzipped.tbz'
        expected = [archive.extract_tar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_13(self):
        test_file = 'archive/tgz/tarred_gzipped.gz'
        expected = [archive.uncompress_gzip]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_14(self):
        test_file = 'archive/tgz/tarred_gzipped.tar.gz'
        expected = [archive.extract_tar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_15(self):
        test_file = 'archive/tgz/tarred_gzipped.tgz'
        expected = [archive.extract_tar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_16(self):
        test_file = 'archive/7z/z.7z'
        expected = [archive.extract_7z]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_17(self):
        test_file = 'archive/Z/tr2tex.Z'
        expected = [archive.extract_Z, ]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_18(self):
        test_file = 'archive/Z/tkWWW-0.11.tar.Z'
        expected = [archive.extract_Z, archive.extract_tar]
        self.check_get_extractors(test_file, expected)

    def test_get_extractors_19(self):
        test_file = 'archive/xar/xar-1.4.xar'
        expected = [archive.extract_xarpkg]
        self.check_get_extractors(test_file, expected)

    def test_get_extractor_with_kinds_deb(self):
        test_file = 'archive/deb/adduser_3.112ubuntu1_all.deb'
        expected = [archive.extract_deb]
        self.check_get_extractors(test_file, expected, (archive.package,))

    def test_get_extractor_with_kinds_rpm(self):
        test_file = 'archive/rpm/elfinfo-1.0-1.fc9.src.rpm'
        kinds = (archive.regular, archive.file_system, archive.docs)
        expected = []
        self.check_get_extractors(test_file, expected, kinds)

    def test_get_extractor_with_kinds_rpm_2(self):
        test_file = 'archive/rpm/elfinfo-1.0-1.fc9.src.rpm'
        kinds = (archive.regular, archive.file_system, archive.docs, archive.package)
        expected = [sevenzip.extract, libarchive2.extract]
        self.check_get_extractors(test_file, expected, kinds)

    def test_get_extractor_with_kinds_deb2(self):
        test_file = 'archive/deb/adduser_3.112ubuntu1_all.deb'
        expected = []
        self.check_get_extractors(test_file, expected, (archive.regular,))

    def test_get_extractor_with_kinds_ar(self):
        test_file = 'archive/ar/liby.a'
        kinds = (archive.regular, archive.file_system, archive.docs)
        expected = []
        self.check_get_extractors(test_file, expected, kinds)

    def test_get_extractor_with_kinds_bzip(self):
        test_file = 'archive/tbz/tarred_bzipped.tar.bz2'
        expected = []
        self.check_get_extractors(test_file, expected, (archive.package,))

    def test_get_extractor_with_kinds_plain_tar(self):
        test_file = 'archive/tar/tarred.tar'
        expected = []
        self.check_get_extractors(test_file, expected, (archive.package,))
        expected = [archive.extract_tar]
        self.check_get_extractors(test_file, expected, (archive.regular,))

    def test_get_extractor_for_graffle_docs(self):
        test_file = 'archive/graffle/example.graffle'
        expected = [archive.uncompress_gzip]
        self.check_get_extractors(test_file, expected, (archive.docs,))
        expected = []
        self.check_get_extractors(test_file, expected, kinds=extractcode.default_kinds)

    def test_get_extractor_for_compressed_svgz_docs(self):
        test_file = 'archive/svgz/insert-emptyframe.svgz'
        expected = [archive.uncompress_gzip]
        self.check_get_extractors(test_file, expected, (archive.docs,))
        expected = []
        self.check_get_extractors(test_file, expected, kinds=extractcode.default_kinds)

    def test_get_extractor_for_dia(self):
        test_file = self.get_test_loc('archive/dia/dia.dia')
        expected = [archive.uncompress_gzip]
        self.check_get_extractors(test_file, expected, kinds=extractcode.all_kinds)
        expected = []
        self.check_get_extractors(test_file, expected, kinds=extractcode.default_kinds)

    def test_get_handlers(self):
        # pairs of (archive path, expected handler names)
        test_data = [
            ('archive/deb/adduser_3.112ubuntu1_all.deb', ['Debian package']),
            ('archive/rpm/elfinfo-1.0-1.fc9.src.rpm', ['RPM package']),
            ('archive/ar/liby.a', ['ar archive', 'Static Library']),
            ('archive/tar/tarred.tar', ['Tar', 'Ruby Gem package']),
            ('archive/tbz/tarred_bzipped.tar.bz2', ['bzip2', 'Tar bzip2']),
            ('archive/tbz/tarred_bzipped.bz', ['bzip2', 'Tar bzip2']),
            ('archive/tgz/tarred_gzipped.gz', ['Tar gzip', 'Gzip']),
            ('archive/gzip/mysql-arch.ARZ', ['Tar gzip', 'Gzip']),
        ]
        for test_file, expected in test_data:
            test_loc = self.get_test_loc(test_file)
            handlers = archive.get_handlers(test_loc)
            assert expected == [h[0].name for h in handlers]

    def test_score_handlers(self):
        # pairs of (archive path, expected (score, handler name) sorted descending)
        test_data = [
            ('archive/deb/adduser_3.112ubuntu1_all.deb', [(31, 'Debian package')]),
            ('archive/rpm/elfinfo-1.0-1.fc9.src.rpm', [(32, 'RPM package')]),
            ('archive/ar/liby.a', [(31, 'Static Library'), (17, 'ar archive')]),
            ('archive/tar/tarred.tar', [(29, 'Tar'), (19, 'Ruby Gem package')]),
            ('archive/tbz/tarred_bzipped.tar.bz2', [(30, 'Tar bzip2'), (29, 'bzip2')]),
            ('archive/tbz/tarred_bzipped.bz', [(29, 'bzip2'), (18, 'Tar bzip2')]),
            ('archive/tgz/tarred_gzipped.gz', [(29, 'Gzip'), (18, 'Tar gzip')]),
            ('archive/gzip/mysql-arch.ARZ', [(29, 'Gzip'), (18, 'Tar gzip')]),
        ]
        for test_file, expected in test_data:
            test_loc = self.get_test_loc(test_file)
            handlers = archive.get_handlers(test_loc)
            scored = archive.score_handlers(handlers)
            assert expected == sorted([(h[0], h[1].name) for h in scored], reverse=True)

    def test_no_handler_is_selected_for_a_non_archive(self):
        # failed because of libmagic bug: http://bugs.gw.com/view.php?id=467
        # passing by introducing strict flag for handlers
        test_loc = self.get_test_loc('archive/not_archive/hashfile')
        assert [] == list(archive.get_handlers(test_loc))
        assert None == archive.get_extractor(test_loc)
        assert None == archive.get_extractor(test_loc, kinds=extractcode.all_kinds)
        assert not archive.should_extract(test_loc, kinds=extractcode.default_kinds)

    def test_no_handler_is_selected_for_a_non_archive2(self):
        # FWIW there is a related libmagic bug: http://bugs.gw.com/view.php?id=473
        test_loc = self.get_test_loc('archive/not_archive/wildtest.txt')
        assert [] == list(archive.get_handlers(test_loc))
        assert None == archive.get_extractor(test_loc)
        assert None == archive.get_extractor(test_loc, kinds=extractcode.all_kinds)
        assert not archive.should_extract(test_loc, kinds=extractcode.default_kinds)

    def test_no_handler_is_selected_for_a_non_archive3(self):
        test_loc = self.get_test_loc('archive/not_archive/savetransfer.c')
        assert [] == list(archive.get_handlers(test_loc))
        assert None == archive.get_extractor(test_loc)
        assert None == archive.get_extractor(test_loc, kinds=extractcode.all_kinds)
        assert not archive.should_extract(test_loc, kinds=extractcode.default_kinds)

    def test_7zip_extract_can_extract_to_relative_paths(self):
        # The setup is a tad complex because we want to have a relative dir
        # to the base dir where we run tests from, ie the scancode-toolkit/ dir
        # To use relative paths, we use our tmp dir at the root of the code tree
        from os.path import dirname, join, abspath
        import tempfile
        import shutil
        from extractcode.sevenzip import extract

        test_file = self.get_test_loc('archive/relative_path/basic.zip')
        scancode_root = dirname(dirname(dirname(__file__)))
        scancode_tmp = join(scancode_root, 'tmp')
        fileutils.create_dir(scancode_tmp)
        scancode_root_abs = abspath(scancode_root)
        test_src_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
        test_tgt_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
        shutil.copy(test_file, test_src_dir)
        test_src_file = join(test_src_dir, 'basic.zip')
        result = list(extract(test_src_file, test_tgt_dir))
        assert [] == result
        expected = ['c/a/a.txt', 'c/b/a.txt', 'c/c/a.txt']
        check_files(test_tgt_dir, expected)

    def test_libarchive_extract_can_extract_to_relative_paths(self):
        # The setup is a tad complex because we want to have a relative dir
        # to the base dir where we run tests from, ie the scancode-toolkit/ dir
        # To use relative paths, we use our tmp dir at the root of the code tree
        from os.path import dirname, join, abspath
        import tempfile
        import shutil
        from extractcode.libarchive2 import extract

        test_file = self.get_test_loc('archive/relative_path/basic.zip')
        scancode_root = dirname(dirname(dirname(__file__)))
        scancode_tmp = join(scancode_root, 'tmp')
        fileutils.create_dir(scancode_tmp)
        scancode_root_abs = abspath(scancode_root)
        test_src_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
        test_tgt_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
        shutil.copy(test_file, test_src_dir)
        test_src_file = join(test_src_dir, 'basic.zip')
        result = list(extract(test_src_file, test_tgt_dir))
        assert [] == result
        expected = ['c/a/a.txt', 'c/b/a.txt', 'c/c/a.txt']
        check_files(test_tgt_dir, expected)

    def test_windows_media_player_skins_are_zip(self):
        test_file = self.get_test_loc('archive/wmz/Go.wmz')
        extractors = archive.get_extractors(test_file)
        assert [archive.extract_zip] == extractors

    def test_windows_ntfs_wmz_are_sometimes_gzip(self):
        test_file = self.get_test_loc('archive/wmz/image003.wmz')
        extractors = archive.get_extractors(test_file)
        assert [archive.uncompress_gzip] == extractors
class BaseArchiveTestCase(FileBasedTesting):
    """Shared helpers for archive extraction tests: refined exception
    assertions and extracted-tree collection/comparison utilities.

    NOTE: several helpers build failure messages with ``msg % locals()``,
    so the local variable names in those methods are part of the behavior
    and must not be renamed.
    """
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def assertRaisesInstance(self, excInstance, callableObj,
                             *args, **kwargs):
        """
        This assertion accepts an instance instead of a class for refined
        exception testing: both the exception class and its str() message
        must match.
        """
        excClass = excInstance.__class__
        try:
            callableObj(*args, **kwargs)
        except excClass as e:
            # same class was raised: also compare the exception messages
            self.assertEqual(str(excInstance), str(e))
        else:
            # nothing was raised at all: report a failure
            if hasattr(excClass, '__name__'):
                excName = excClass.__name__
            else:
                excName = str(excClass)
            raise self.failureException('%s not raised' % excName)

    def check_extract(self, test_function, test_file, expected, expected_warnings=None, check_all=False):
        """
        Run the extraction `test_function` on `test_file` checking that a map of
        expected paths --> size exist in the extracted target directory.
        Does not test the presence of all files unless `check_all` is True.
        If `expected_warnings` is provided, also assert the extraction
        warnings match it exactly.
        """
        test_file = self.get_test_loc(test_file)
        test_dir = self.get_temp_dir()
        warnings = test_function(test_file, test_dir)
        if expected_warnings is not None:
            assert expected_warnings == warnings
        if check_all:
            # NOTE(review): `extracted` keys are paths with the test_dir
            # prefix stripped, while `expected` keys are joined WITH
            # test_dir -- these look inconsistent and would never compare
            # equal; presumably check_all=True is unused. TODO confirm.
            len_test_dir = len(test_dir)
            extracted = {path[len_test_dir:]: filetype.get_size(path) for path in fileutils.resource_iter(test_dir, with_dirs=False)}
            expected = {os.path.join(test_dir, exp_path): exp_size for exp_path, exp_size in expected.items()}
            assert sorted(expected.items()) == sorted(extracted.items())
        else:
            for exp_path, exp_size in expected.items():
                exp_loc = os.path.join(test_dir, exp_path)
                # msg templates are filled with % locals(): keep names as-is
                msg = '''When extracting: %(test_file)s
                    With function: %(test_function)r
                    Failed to find expected path: %(exp_loc)s'''
                assert os.path.exists(exp_loc), msg % locals()
                if exp_size is not None:
                    # a None size means "existence only"; otherwise check size
                    res_size = os.stat(exp_loc).st_size
                    msg = '''When extracting: %(test_file)s
                        With function: %(test_function)r
                        Failed to assert the correct size %(exp_size)d
                        Got instead: %(res_size)d
                        for expected path: %(exp_loc)s'''
                    assert exp_size == res_size, msg % locals()

    def collect_extracted_path(self, test_dir):
        """Return a sorted list of POSIX paths (relative to `test_dir`) for
        all files and directories under `test_dir`; directories carry a
        trailing slash."""
        result = []
        td = fileutils.as_posixpath(test_dir)
        for t, dirs, files in os.walk(test_dir):
            t = fileutils.as_posixpath(t)
            for d in dirs:
                nd = posixpath.join(t, d).replace(td, '') + '/'
                result.append(nd)
            for f in files:
                nf = posixpath.join(t, f).replace(td, '')
                result.append(nf)
        result = sorted(result)
        return result

    def assertExceptionContains(self, text, callableObj, *args, **kwargs):
        """Assert that calling `callableObj` raises an exception whose
        string representation contains `text`."""
        try:
            callableObj(*args, **kwargs)
        except Exception as e:
            if text not in str(e):
                raise self.failureException(
                       'Exception %(e)r raised, '
                       'it should contain the text %(text)r '
                       'and does not' % locals())
        else:
            raise self.failureException(
                   'Exception containing %(text)r not raised' % locals())
class TestTarGzip(BaseArchiveTestCase):
    """Tests for extracting .tar.gz/.tgz archives, including broken,
    trailing-data, absolute-path, relative-path and symlinked fixtures."""

    def test_extract_targz_basic(self):
        test_file = self.get_test_loc('archive/tgz/tarred_gzipped.tar.gz')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'e/a/b.txt')
        assert os.path.exists(result)

    def test_extract_targz_with_trailing_data(self):
        test_file = self.get_test_loc('archive/tgz/trailing.tar.gz')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'a.txt')
        assert os.path.exists(result)

    def test_extract_targz_broken(self):
        test_file = self.get_test_loc('archive/tgz/broken.tar.gz')
        test_dir = self.get_temp_dir()
        expected = Exception('Unrecognized archive format')
        self.assertRaisesInstance(expected, archive.extract_tar, test_file, test_dir)

    def test_extract_targz_with_absolute_path(self):
        # absolute paths in the archive must be re-rooted under test_dir
        non_result = '/tmp/subdir'
        assert not os.path.exists(non_result)
        test_dir = self.get_temp_dir()
        test_file = self.get_test_loc('archive/tgz/absolute_path.tar.gz')
        archive.extract_tar(test_file, test_dir)
        assert not os.path.exists(non_result)
        result = os.path.join(test_dir, 'tmp/subdir/a.txt')
        assert os.path.exists(result)

    def test_extract_targz_with_relative_path(self):
        test_file = self.get_test_loc('archive/tgz/relative.tar.gz')
        # This test file was created with:
        #     import tarfile
        #     tar = tarfile.open("TarTest.tar.gz", "w:gz")
        #     tar.add('a.txt', '../a_parent_folder.txt')
        #     tar.add('b.txt', '../../another_folder/b_two_root.txt')
        #     tar.add('b.txt', '../folder/subfolder/b_subfolder.txt')
        #     tar.close()
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        # ".." path segments must not escape test_dir: they become "dotdot"
        non_result = os.path.join(test_dir, '../a_parent_folder.txt')
        assert not os.path.exists(non_result)
        expected = [
            'dotdot/dotdot/another_folder/b_two_root.txt',
            'dotdot/a_parent_folder.txt',
            'dotdot/folder/subfolder/b_subfolder.txt'
        ]
        check_files(test_dir, expected)

    def test_extract_targz_with_trailing_data2(self):
        # an archive with trailing junk must extract the same as one without
        test_dir1 = self.get_temp_dir()
        test_file = self.get_test_loc('archive/tgz/trailing2.tar.gz')
        archive.extract_tar(test_file, test_dir1)

        test_dir2 = self.get_temp_dir()
        test_file2 = self.get_test_loc('archive/tgz/no_trailing.tar.gz')
        archive.extract_tar(test_file2, test_dir2)
        assert commoncode.testcase.is_same(test_dir1, test_dir2)

    def test_extract_targz_with_mixed_case_and_symlink(self):
        test_file = self.get_test_loc('archive/tgz/mixed_case_and_symlink.tgz')
        test_dir = self.get_temp_dir()
        result = archive.extract_tar(test_file, test_dir)
        assert [] == result
        import json
        exp_file = self.get_test_loc('archive/tgz/mixed_case_and_symlink.tgz.expected')
        with io.open(exp_file, encoding='utf-8') as ef:
            expected_files = json.load(ef)
        check_files(test_dir, map(str, expected_files))

    def test_extract_targz_symlinks(self):
        test_file = self.get_test_loc('archive/tgz/symlink.tar.gz')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        expected = [
            'z/x/a',
            # these are skipped
            # this is a link: a -> ../x/a
            # 'z/y/a',
            # this is a broken link: x.a -> ../x.a
            # 'z/y/x.a',
            # this is a broken link: broken -> ../x/broken
            # 'z/z/broken',
        ]
        check_files(test_dir, expected)

    def test_extract_targz_from_apache_should_not_return_errors(self):
        # from http://archive.apache.org/dist/commons/logging/source/commons-logging-1.1.2-src.tar.gz
        # failed with ReadError('not a bzip2 file',)
        test_file = self.get_test_loc('archive/tgz/commons-logging-1.1.2-src.tar.gz')
        test_dir = self.get_temp_dir()
        extractor = archive.get_extractor(test_file)
        assert archive.extract_tar == extractor
        result = archive.extract_tar(test_file, test_dir)
        assert [] == result
        assert os.listdir(test_dir)

    def test_extract_targz_with_unicode_path_should_extract_without_error(self):
        test_file = self.get_test_loc('archive/tgz/tgz_unicode.tgz')
        test_dir = self.get_temp_dir()
        extractor = archive.get_extractor(test_file)
        assert archive.extract_tar == extractor
        result = archive.extract_tar(test_file, test_dir)
        assert [] == result
        assert os.listdir(test_dir)
class TestGzip(BaseArchiveTestCase):
    """Tests for uncompressing plain gzip streams, including concatenated
    streams, trailing/leading junk and oddly-named gzip payloads."""

    def test_uncompress_gzip_basic(self):
        test_file = self.get_test_loc('archive/gzip/file_4.26-1.diff.gz')
        test_dir = self.get_temp_dir()
        archive.uncompress_gzip(test_file, test_dir)
        result = os.path.join(test_dir, 'file_4.26-1.diff.gz-extract')
        assert os.path.exists(result)

    def test_uncompress_concatenated_gzip(self):
        # Archive created with:
        # echo "f1content" > f1
        # echo "f2content" > f2
        # gzip -k f1
        # gzip -k -c f2 >> twofiles.gz
        test_file = self.get_test_loc('archive/gzip/twofiles.gz')
        test_dir = self.get_temp_dir()
        warnings = archive.uncompress_gzip(test_file, test_dir)
        result = os.path.join(test_dir, 'twofiles.gz-extract')
        assert os.path.exists(result)
        # both concatenated members are decompressed into one output file;
        # use a context manager so the handle is closed deterministically
        with open(result, 'rb') as res:
            assert b'f1content\nf2content\n' == res.read()
        assert [] == warnings

    def test_uncompress_gzip_with_trailing_data(self):
        test_file = self.get_test_loc('archive/gzip/trailing_data.gz')
        test_dir = self.get_temp_dir()
        warnings = archive.uncompress_gzip(test_file, test_dir)
        result = os.path.join(test_dir, 'trailing_data.gz-extract')
        assert os.path.exists(result)
        assert [] == warnings

    def test_uncompress_gzip_with_leading_data(self):
        # even though we do not fail when there is invalid trailing data we
        # should still fail on invalid leading data
        test_file = self.get_test_loc('archive/gzip/leading_data.gz')
        test_dir = self.get_temp_dir()
        expected = Exception('Not a gzipped file')
        self.assertRaisesInstance(expected, archive.uncompress_gzip, test_file, test_dir)

    def test_uncompress_gzip_with_random_data(self):
        test_file = self.get_test_loc('archive/gzip/random_binary.data')
        test_dir = self.get_temp_dir()
        expected = Exception('Not a gzipped file')
        self.assertRaisesInstance(expected, archive.uncompress_gzip, test_file, test_dir)

    def test_uncompress_gzip_with_backslash_in_path(self):
        # weirdly enough, gzip keeps the original path/name
        test_file = self.get_test_loc('archive/gzip/backslash_path.gz')
        test_dir = self.get_temp_dir()
        archive.uncompress_gzip(test_file, test_dir)
        result = os.path.join(test_dir, 'backslash_path.gz-extract')
        assert os.path.exists(result)

    def test_uncompress_gzip_can_uncompress_windows_ntfs_wmz(self):
        test_file = self.get_test_loc('archive/wmz/image003.wmz')
        test_dir = self.get_temp_dir()
        archive.uncompress_gzip(test_file, test_dir)
        result = os.path.join(test_dir, 'image003.wmz-extract')
        assert os.path.exists(result)

    def test_uncompress_gzip_can_uncompress_mysql_arz(self):
        test_file = self.get_test_loc('archive/gzip/mysql-arch.ARZ')
        test_dir = self.get_temp_dir()
        archive.uncompress_gzip(test_file, test_dir)
        result = os.path.join(test_dir, 'mysql-arch.ARZ-extract')
        assert os.path.exists(result)
class TestTarBz2(BaseArchiveTestCase):
    """Tests for extracting .tar.bz2 archives, including broken, absolute-
    and relative-path fixtures, and multistream bzip2 payloads."""

    def test_extract_tar_bz2_basic(self):
        test_file = self.get_test_loc('archive/tbz/tarred_bzipped.tar.bz2')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'e/a/b.txt')
        assert os.path.exists(result)

    def test_extract_tar_bz2_basic_bz(self):
        test_file = self.get_test_loc('archive/tbz/tarred_bzipped.bz')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'e/a/b.txt')
        assert os.path.exists(result)

    def test_extract_tar_bz2_with_trailing_data__and_wrong_extension(self):
        test_file = self.get_test_loc('archive/tbz/single_file_trailing_data.tar.gz')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'a.txt')
        assert os.path.exists(result)

    def test_extract_tar_bz2_broken(self):
        test_file = self.get_test_loc('archive/tbz/tarred_bzipped_broken.tar.bz2')
        test_dir = self.get_temp_dir()
        expected = Exception('bzip decompression failed')
        self.assertRaisesInstance(expected, archive.extract_tar, test_file, test_dir)

    def test_extract_tar_bz2_absolute_path(self):
        # absolute paths in the archive must be re-rooted under test_dir
        assert not os.path.exists('/tmp/subdir')
        test_dir = self.get_temp_dir()
        test_file = self.get_test_loc('archive/tbz/absolute_path.tar.bz2')
        archive.extract_tar(test_file, test_dir)
        assert not os.path.exists('/tmp/subdir')
        result = os.path.join(test_dir, 'tmp/subdir/a.txt')
        assert os.path.exists(result)

    def test_extract_tar_bz2_relative_path(self):
        test_file = self.get_test_loc('archive/tbz/bz2withtar_relative.tar.bz2')
        # This test file was created with:
        #     import tarfile
        #     tar = tarfile.open("TarTest.tar.gz", "w:bz")
        #     tar.add('a.txt', '../a_parent_folder.txt')
        #     tar.add('b.txt', '../../another_folder/b_two_root.txt')
        #     tar.add('b.txt', '../folder/subfolder/b_subfolder.txt')
        #     tar.close()
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        # ".." path segments must not escape test_dir: they become "dotdot"
        non_result = os.path.join(test_dir, '../a_parent_folder.txt')
        assert not os.path.exists(non_result)
        result = os.path.join(test_dir, 'dotdot/folder/subfolder/b_subfolder.txt')
        assert os.path.exists(result)
        result = os.path.join(test_dir, 'dotdot', 'a_parent_folder.txt')
        assert os.path.exists(result)

    def test_extract_tar_bz2_iproute(self):
        test_file = self.get_test_loc('archive/tbz/iproute2.tar.bz2')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'iproute2/README')
        assert os.path.exists(result)

    def test_extract_tar_bz2_multistream(self):
        test_file = self.get_test_loc('archive/tbz/bzip2_multistream/example-file.csv.tar.bz2')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        expected = self.get_test_loc('archive/tbz/bzip2_multistream/example-file.csv')
        result = os.path.join(test_dir, 'example-file.csv')
        # context managers so the file handles are closed deterministically
        with open(expected, 'rb') as exp, open(result, 'rb') as res:
            assert exp.read() == res.read()
class TestBz2(BaseArchiveTestCase):
    """Tests for uncompressing plain bzip2 streams, including broken data,
    trailing junk and multistream payloads."""

    def test_uncompress_bzip2_basic(self):
        test_file = self.get_test_loc('archive/bz2/single_file_not_tarred.bz2')
        test_dir = self.get_temp_dir()
        archive.uncompress_bzip2(test_file, test_dir)
        result = os.path.join(test_dir, 'single_file_not_tarred.bz2-extract')
        assert os.path.exists(result)

    def test_uncompress_bzip2_with_trailing_data(self):
        test_file = self.get_test_loc('archive/bz2/single_file_trailing_data.bz2')
        test_dir = self.get_temp_dir()
        archive.uncompress_bzip2(test_file, test_dir)
        result = os.path.join(test_dir, 'single_file_trailing_data.bz2-extract')
        assert os.path.exists(result)

    def test_uncompress_bzip2_broken(self):
        test_file = self.get_test_loc('archive/bz2/bz2_not_tarred_broken.bz2')
        test_dir = self.get_temp_dir()
        expected = Exception('invalid data stream')
        self.assertRaisesInstance(expected, archive.uncompress_bzip2,
                                  test_file, test_dir)

    def test_uncompress_bzip2_with_invalid_path(self):
        test_file = self.get_test_loc('archive/bz2/bz_invalidpath.bz2')
        test_dir = self.get_temp_dir()
        archive.uncompress_bzip2(test_file, test_dir)
        result = os.path.join(test_dir, 'bz_invalidpath.bz2-extract')
        assert os.path.exists(result)

    def test_uncompress_bzip2_multistream(self):
        test_file = self.get_test_loc('archive/bz2/bzip2_multistream/example-file.csv.bz2')
        test_dir = self.get_temp_dir()
        archive.uncompress_bzip2(test_file, test_dir)
        expected = self.get_test_loc('archive/bz2/bzip2_multistream/expected.csv')
        result = os.path.join(test_dir, 'example-file.csv.bz2-extract')
        # context managers so the file handles are closed deterministically
        with open(expected, 'rb') as exp, open(result, 'rb') as res:
            assert exp.read() == res.read()

    def test_sevenzip_extract_can_handle_bz2_multistream_differently(self):
        test_file = self.get_test_loc('archive/bz2/bzip2_multistream/example-file.csv.bz2')
        test_dir = self.get_temp_dir()
        sevenzip.extract(test_file, test_dir)
        expected = self.get_test_loc('archive/bz2/bzip2_multistream/expected.csv')
        # the extraction dir is not created with suffix by z7
        result = os.path.join(test_dir, 'example-file.csv')
        # compare the actual extracted content against the expected fixture;
        # close both handles deterministically with context managers
        with open(expected, 'rb') as exp, open(result, 'rb') as res:
            assert exp.read() == res.read()
class TestShellArchives(BaseArchiveTestCase):
    """Tests for self-executable shell-prefixed archives (Springboot jars)."""

    def test_extract_springboot(self):
        # a self executable springboot Jar is a zip with a shell script prefix
        test_file = self.get_test_loc('archive/shar/demo-spring-boot.jar')
        test_dir = self.get_temp_dir()
        result = archive.extract_springboot(test_file, test_dir)
        assert [] == result
        expected = ['META-INF/MANIFEST.MF', 'application.properties']
        check_files(test_dir, expected)

    def test_springboot_is_not_recognized_without_jar_extension(self):
        test_file = self.get_test_loc('archive/shar/demo-spring-boot.sh')
        handler = get_best_handler(test_file)
        # use an identity check: "no handler" is the None singleton
        assert handler is None

    def test_springboot_is_recognized_with_jar_extension(self):
        test_file = self.get_test_loc('archive/shar/demo-spring-boot.jar')
        handler = get_best_handler(test_file)
        assert handler.name == 'Springboot Java Jar package'
class TestZip(BaseArchiveTestCase):
    """Tests for zip detection and extraction, covering hostile archives:
    broken data, trailing junk, relative/backslash paths, passwords,
    unicode names and timezone handling."""

    def test_extract_zip_basic(self):
        test_file = self.get_test_loc('archive/zip/basic.zip')
        test_dir = self.get_temp_dir()
        result = archive.extract_zip(test_file, test_dir)
        assert [] == result
        expected = ['c/a/a.txt', 'c/b/a.txt', 'c/c/a.txt']
        check_files(test_dir, expected)

    def test_extract_zip_broken(self):
        test_file = self.get_test_loc('archive/zip/zip_broken.zip')
        test_dir = self.get_temp_dir()
        self.assertRaises(Exception, archive.extract_zip, test_file, test_dir)
        # note: broken zip opens and extracts with 7z with exceptions sometimes
        # something is extracted in latest 7z
        # result = os.path.join(test_dir, 'a.txt')
        # print(test_dir)
        # assert os.path.exists(result)

    def test_extract_zip_with_invalid_path(self):
        test_file = self.get_test_loc('archive/zip/zip_invalidpath.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        result = os.path.join(test_dir, 'this/that')
        assert os.path.exists(result)

    def test_extract_zip_with_trailing_data(self):
        test_file = self.get_test_loc('archive/zip/zip_trailing_data.zip')
        test_dir = self.get_temp_dir()
        try:
            archive.extract_zip(test_file, test_dir)
        except libarchive2.ArchiveError as ae:
            assert 'Invalid central directory signature' in str(ae)
        # fails because of https://github.com/libarchive/libarchive/issues/545
        result = os.path.join(test_dir, 'a.txt')
        assert os.path.exists(result)

    def test_extract_zip_with_trailing_data2(self):
        # test archive created on cygwin with:
        # $ echo "test content" > f1
        # $ zip test f1
        # $ echo "some junk" >> test.zip
        test_file = self.get_test_loc('archive/zip/zip_trailing_data2.zip')
        test_dir = self.get_temp_dir()
        try:
            archive.extract_zip(test_file, test_dir)
        except libarchive2.ArchiveError as ae:
            assert 'Invalid central directory signature' in str(ae)
        # fails because of https://github.com/libarchive/libarchive/issues/545
        result = os.path.join(test_dir, 'f1')
        assert os.path.exists(result)

    def test_extract_zip_with_relative_path_simple(self):
        # The test files for this test and the next one were created with:
        # from zipfile import ZipFile
        # f = open('/tmp/a.txt', 'w')
        # f.write('some data')
        # f.close()
        # f = open('/tmp/b.txt', 'w')
        # f.write('some data')
        # f.close()
        # f = ZipFile(os.path.join(self.get_test_loc('archive'), 'relative_parent_folders.zip'), 'w')
        # f.write('/tmp/a.txt', '../a_parent_folder.txt')
        # f.write('/tmp/b.txt', '../../another_folder/b_two_root.txt')
        # f.write('/tmp/b.txt', '../folder/subfolder/b_subfolder.txt')
        # f.close()
        # f = ZipFile(os.path.join(self.get_test_loc('archive'), 'high_ancest.zip'), 'w')
        # f.write('/tmp/a.txt', ('../' * 12) + 'a_parent_folder.txt')
        # f.write('/tmp/a.txt', ('../' * 12) + ('sub/' * 6) + 'a_parent_folder_in_sub_1.txt')
        # f.write('/tmp/a.txt', ('../' * 6) + ('sub/' * 12) + 'a_parent_folder_in_sub_2.txt')
        # f.write('/tmp/a.txt', ('../' * 12) + ('sub/' * 12) + 'a_parent_folder_in_sub_3.txt')
        # f.close()
        test_file = self.get_test_loc('archive/zip/relative_parent_folders.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        # ".." path segments must not escape test_dir: they become "dotdot"
        abs_path = os.path.join(test_dir, '../a_parent_folder.txt')
        assert not os.path.exists(abs_path)
        result = self.collect_extracted_path(test_dir)
        expected = [
            '/dotdot/',
            '/dotdot/a_parent_folder.txt',
            '/dotdot/dotdot/',
            '/dotdot/dotdot/another_folder/',
            '/dotdot/dotdot/another_folder/b_two_root.txt',
            '/dotdot/folder/',
            '/dotdot/folder/subfolder/',
            '/dotdot/folder/subfolder/b_subfolder.txt'
        ]
        assert expected == result

    # expected extraction layout for relative_nested.zip: every "../"
    # becomes a "dotdot" directory under the extraction root
    expected_deeply_nested_relative_path = [
        '/dotdot/',
        '/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/a_parent_folder.txt',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_1.txt',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_3.txt',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        '/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_2.txt'
    ]

    # somehow Windows fails randomly and only on certain windows machines at Appveyor
    # so we retest with a skinny expectation
    expected_deeply_nested_relative_path_alternative = [
        u'/a_parent_folder.txt',
        u'/sub/',
        u'/sub/sub/',
        u'/sub/sub/sub/',
        u'/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_1.txt',
        u'/sub/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_2.txt',
        u'/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_3.txt']

    def test_extract_zip_with_relative_path_deeply_nested(self):
        test_file = self.get_test_loc('archive/zip/relative_nested.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        result = self.collect_extracted_path(test_dir)
        try:
            assert self.expected_deeply_nested_relative_path == result
        except AssertionError:
            # fall back to the skinny Windows expectation; catch only
            # AssertionError rather than a bare except that would also
            # swallow unrelated failures (KeyboardInterrupt, typos, ...)
            assert self.expected_deeply_nested_relative_path_alternative == result

    @skipIf(on_windows, 'Expectation are different on Windows')
    def test_extract_zip_with_relative_path_deeply_nested_with_7zip_posix(self):
        test_file = self.get_test_loc('archive/zip/relative_nested.zip')
        test_dir = self.get_temp_dir()
        try:
            sevenzip.extract(test_file, test_dir)
            self.fail('Should raise an exception')
        except ExtractErrorFailedToExtract as e:
            assert 'Unknown extraction error' == str(e)

    @skipIf(not on_windows, 'Expectation are different on Windows')
    def test_extract_zip_with_relative_path_deeply_nested_with_7zip_windows(self):
        test_file = self.get_test_loc('archive/zip/relative_nested.zip')
        test_dir = self.get_temp_dir()
        sevenzip.extract(test_file, test_dir)
        result = self.collect_extracted_path(test_dir)
        assert self.expected_deeply_nested_relative_path_alternative == result

    def test_list_zip_with_relative_path_deeply_nested_with_7zip(self):
        test_file = self.get_test_loc('archive/zip/relative_nested.zip')
        result = []
        for entry in sevenzip.list_entries(test_file):
            if on_windows:
                # normalize to POSIX separators for a stable expectation
                entry.path = entry.path.replace('\\', '/')
            result.append(entry.to_dict())
        expected = [
            {u'is_broken_link': False,
             u'is_dir': False,
             u'is_file': True,
             u'is_hardlink': False,
             u'is_special': False,
             u'is_symlink': False,
             u'link_target': None,
             u'path': '../../../../../../../../../../../../a_parent_folder.txt',
             u'size': '9'},
            {u'is_broken_link': False,
             u'is_dir': False,
             u'is_file': True,
             u'is_hardlink': False,
             u'is_special': False,
             u'is_symlink': False,
             u'link_target': None,
             u'path': '../../../../../../../../../../../../sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_1.txt',
             u'size': '9'},
            {u'is_broken_link': False,
             u'is_dir': False,
             u'is_file': True,
             u'is_hardlink': False,
             u'is_special': False,
             u'is_symlink': False,
             u'link_target': None,
             u'path': '../../../../../../sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_2.txt',
             u'size': '9'},
            {u'is_broken_link': False,
             u'is_dir': False,
             u'is_file': True,
             u'is_hardlink': False,
             u'is_special': False,
             u'is_symlink': False,
             u'link_target': None,
             u'path': '../../../../../../../../../../../../sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/sub/a_parent_folder_in_sub_3.txt',
             u'size': '9'}]
        assert expected == result

    def test_extract_zip_with_relative_path_deeply_nested_with_libarchive(self):
        test_file = self.get_test_loc('archive/zip/relative_nested.zip')
        test_dir = self.get_temp_dir()
        libarchive2.extract(test_file, test_dir)
        result = self.collect_extracted_path(test_dir)
        assert self.expected_deeply_nested_relative_path == result

    def test_extract_zip_with_password(self):
        test_file = self.get_test_loc('archive/zip/zip_password_nexb.zip')
        test_dir = self.get_temp_dir()
        try:
            archive.extract_zip(test_file, test_dir)
        except Exception as e:
            assert isinstance(e, ExtractErrorFailedToExtract)
            assert 'Password protected archive, unable to extract' in str(e)
        else:
            # previously the test passed silently if no exception was
            # raised at all; a password-protected archive MUST fail
            self.fail('extract_zip should raise for a password-protected archive')

    def test_extract_zip_java_jar(self):
        test_file = self.get_test_loc('archive/zip/jar/simple.jar')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        extracted = self.collect_extracted_path(test_dir)
        expected = [
            '/META-INF/',
            '/META-INF/MANIFEST.MF',
            '/org/',
            '/org/jvnet/',
            '/org/jvnet/glassfish/',
            '/org/jvnet/glassfish/comms/',
            '/org/jvnet/glassfish/comms/sipagent/',
            '/org/jvnet/glassfish/comms/sipagent/actions/',
            '/org/jvnet/glassfish/comms/sipagent/actions/Bundle.properties',
            '/org/jvnet/glassfish/comms/sipagent/actions/SipAgentCookieAction.class',
            '/org/jvnet/glassfish/comms/sipagent/actions/bd.png',
            '/org/jvnet/glassfish/comms/sipagent/actions/bd24.png',
            '/org/jvnet/glassfish/comms/sipagent/org-jvnet-glassfish-comms-sipagent-actions-SipAgentCookieAction.instance',
            '/org/jvnet/glassfish/comms/sipagent/org-jvnet-glassfish-comms-sipagent-actions-SipAgentCookieAction_1.instance'
        ]
        assert sorted(expected) == sorted(extracted)

    def test_extract_zip_with_duplicated_lowercase_paths(self):
        test_file = self.get_test_loc('archive/zip/dup_names.zip')
        expected = {'META-INF/license/': None,  # a directory
                    'META-INF/license/LICENSE.base64.txt': 1618,
                    'META-INF/LICENSE_1': 11366}
        self.check_extract(archive.extract_zip, test_file, expected)

    def test_extract_zip_with_timezone(self):
        test_file = self.get_test_loc('archive/zip/timezone/c.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        expected = [
            (os.path.join(test_dir, 'c/a/a.txt'), '2008-07-29'),
            (os.path.join(test_dir, 'c/b/a.txt'), '2008-07-29'),
            (os.path.join(test_dir, 'c/c/a.txt'), '2008-07-29'),
        ]
        # DST sends a monkey wrench.... so we only test the date, not the time
        for loc, expected_date in expected:
            result = commoncode.date.get_file_mtime(loc)
            assert result.startswith(expected_date)

    def test_extract_zip_with_timezone_2(self):
        test_file = self.get_test_loc('archive/zip/timezone/projecttest.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        # DST sends a monkey wrench.... so we only test the date, not the time
        # and we accept some varation in the date ...
        expected = [
            (os.path.join(test_dir, 'primes.txt'), ('2009-12-05', '2009-12-06',)),
            (os.path.join(test_dir, 'primes2.txt'), ('2009-12-05', '2009-12-06',))
        ]
        for loc, expected_date in expected:
            result = commoncode.date.get_file_mtime(loc)
            assert result.startswith(expected_date)

    def test_extract_zip_with_backslash_in_path_1(self):
        test_file = self.get_test_loc('archive/zip/backslash/backslash1.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        # Info-ZIP 'zip' displays:
        # warning: booxw-1202-bin.distribution.zip appears to use
        # backslashes as path separators (which is the right thing to do)
        expected = ['scripts/AutomaticClose.int']
        check_files(test_dir, expected)
        result = os.path.join(test_dir, 'scripts/AutomaticClose.int')
        assert os.path.exists(result)

    def test_extract_zip_with_backslash_in_path_2(self):
        test_file = self.get_test_loc('archive/zip/backslash/AspectJTest.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        expected = '''
            AspectJTest/.classpath
            AspectJTest/.project
            AspectJTest/src/META-INF/aop.xml
            AspectJTest/src/p3/ExpertFlyable.java
            AspectJTest/src/p3/MakeFlyableAspect.java
            AspectJTest/src/p3/Flyable.java
            AspectJTest/src/p3/MakeFlyable.java
            AspectJTest/src/p3/Main2.java
            AspectJTest/src/p3/p4/Person.java
            AspectJTest/src/p2/MyLoggingAspect.java
            AspectJTest/src/p1/MyService.java
            AspectJTest/src/p1/Main1.java
            AspectJTest/bin/META-INF/aop.xml
            AspectJTest/bin/p3/MakeFlyableAspect.class
            AspectJTest/bin/p3/ExpertFlyable.class
            AspectJTest/bin/p3/Flyable.class
            AspectJTest/bin/p3/Main2.class
            AspectJTest/bin/p3/MakeFlyable.class
            AspectJTest/bin/p3/p4/Person.class
            AspectJTest/bin/p2/MyLoggingAspect.class
            AspectJTest/bin/p1/Main1.class
            AspectJTest/bin/p1/MyService.class
        '''.split()
        check_files(test_dir, expected)

    def test_extract_zip_with_backslash_in_path_3(self):
        test_file = self.get_test_loc('archive/zip/backslash/boo-0.3-src.zip')
        test_dir = self.get_temp_dir()
        archive.extract_zip(test_file, test_dir)
        result = os.path.join(test_dir, 'src/Boo.Lang.Compiler/TypeSystem/InternalCallableType.cs')
        assert os.path.exists(result)

    def test_get_best_handler_nuget_is_selected_over_zip(self):
        test_file = self.get_test_loc('archive/zip/moq.4.2.1507.118.nupkg')
        handler = get_best_handler(test_file)
        assert archive.NugetHandler == handler

    def test_get_best_handler_nuget_is_selected_over_zip2(self):
        test_file = self.get_test_loc('archive/zip/exceptionhero.javascript.1.0.5.nupkg')
        handler = get_best_handler(test_file)
        assert archive.NugetHandler == handler

    def test_get_best_handler_nuget_is_selected_over_zip3(self):
        test_file = self.get_test_loc('archive/zip/javascript-fastclass.1.1.729.121805.nupkg')
        handler = get_best_handler(test_file)
        assert archive.NugetHandler == handler

    def test_extract_zip_can_extract_windows_media_player_skins(self):
        test_file = self.get_test_loc('archive/wmz/Go.wmz')
        test_dir = self.get_temp_dir()
        result = archive.extract_zip(test_file, test_dir)
        assert [] == result
        expected = ['32px.png', 'go.js', 'go.wms']
        check_files(test_dir, expected)

    def test_extract_zip_with_unicode_path_should_extract_without_error(self):
        test_file = self.get_test_loc('archive/zip/zip_unicode.zip')
        test_dir = self.get_temp_dir()
        result = archive.extract_zip(test_file, test_dir)
        assert [] == result
        assert os.listdir(test_dir)

    def test_extract_zip_can_extract_zip_with_directory_not_marked_with_trailing_slash(self):
        test_file = self.get_test_loc('archive/zip/directory-with-no-trailing-slash.zip')
        test_dir = self.get_temp_dir()
        result = archive.extract_zip(test_file, test_dir)
        assert [] == result
        expected = ['online_upgrade_img/machine_type']
        check_files(test_dir, expected)
class TestLibarch(BaseArchiveTestCase):
    """Direct libarchive2 tests mirroring zip relative-path handling."""

    def test_extract_zip_with_relative_path_libarchive(self):
        """Relative '..' entries are extracted under 'dotdot' directories
        and never escape the extraction root."""
        src = self.get_test_loc('archive/zip/relative_parent_folders.zip')
        target = self.get_temp_dir()
        warnings = libarchive2.extract(src, target)
        assert warnings == []
        # nothing may land above the extraction root
        escaped = os.path.join(target, '../a_parent_folder.txt')
        assert not os.path.exists(escaped)
        # each '..' is rewritten to a 'dotdot' directory segment
        for rel in (
            'dotdot/folder/subfolder/b_subfolder.txt',
            'dotdot/a_parent_folder.txt',
            'dotdot/dotdot/another_folder/b_two_root.txt',
        ):
            assert os.path.exists(os.path.join(target, rel))
class TestTar(BaseArchiveTestCase):
    """
    Tests for plain tar extraction, including hostile absolute/relative
    member paths and archives containing special (non-regular) files.
    """

    def test_extract_tar_basic(self):
        test_file = self.get_test_loc('archive/tar/tarred.tar')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        result = os.path.join(test_dir, 'e/a/b.txt')
        assert os.path.exists(result)

    def test_extract_tar_broken(self):
        # A corrupted tar must raise, not silently extract garbage.
        test_file = self.get_test_loc('archive/tar/tarred_broken.tar')
        test_dir = self.get_temp_dir()
        expected = Exception("Unrecognized archive format")
        self.assertRaisesInstance(expected, archive.extract_tar,
                                  test_file, test_dir)

    def test_extract_tar_absolute_path(self):
        # Absolute member paths must be re-rooted under the extraction dir.
        non_result = '/home/li/Desktop/absolute_folder'
        assert not os.path.exists(non_result)
        test_dir = self.get_temp_dir()
        test_file = self.get_test_loc('archive/tar/tar_absolute.tar')
        archive.extract_tar(test_file, test_dir)
        assert not os.path.exists(non_result)
        result = os.path.join(test_dir, 'home/li/Desktop/absolute_folder/absolute_file')
        assert os.path.exists(result)

    def test_extract_tar_with_absolute_path2(self):
        assert not os.path.exists('/tmp/subdir')
        test_file = self.get_test_loc('archive/tar/absolute_path.tar')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        assert not os.path.exists('/tmp/subdir')
        result = os.path.join(test_dir, 'tmp/subdir/a.txt')
        assert os.path.exists(result)

    def test_extract_tar_with_relative_path(self):
        """
        Relative ".." member paths must be neutralized into a dotdot/ tree.

        The test file was created with:
            import tarfile
            tar = tarfile.open("TarTest.tar.gz", "w")
            tar.add('a.txt', '../a_parent_folder.txt')
            tar.add('b.txt', '../../another_folder/b_two_root.txt')
            tar.add('b.txt', '../folder/subfolder/b_subfolder.txt')
            tar.close()
        """
        # NOTE(review): this text previously sat after the first statement,
        # making it a discarded string expression rather than a docstring.
        test_file = self.get_test_loc('archive/tar/tar_relative.tar')
        test_dir = self.get_temp_dir()
        archive.extract_tar(test_file, test_dir)
        non_result = os.path.abspath(test_file + '/../a_parent_folder.txt')
        assert not os.path.exists(non_result)
        extracted = self.collect_extracted_path(test_dir)
        expected = [
            '/dotdot/',
            '/dotdot/dotdot/',
            '/dotdot/a_parent_folder.txt',
            '/dotdot/dotdot/another_folder/',
            '/dotdot/dotdot/another_folder/b_two_root.txt',
            '/dotdot/folder/',
            '/dotdot/folder/subfolder/',
            '/dotdot/folder/subfolder/b_subfolder.txt'
        ]
        assert sorted(expected) == sorted(extracted)

    def test_extract_tar_archive_with_special_files(self):
        # Links and special files (char/fifo) are skipped; regular and sparse
        # files are extracted.
        test_file = self.get_test_loc('archive/tar/special.tar')
        test_dir = self.get_temp_dir()
        result = archive.extract_tar(test_file, test_dir)
        expected = [
            '0-REGTYPE',
            '0-REGTYPE-TEXT',
            '0-REGTYPE-VEEEERY_LONG_NAME_____________________________________________________________________________________________________________________155',
            # '1-LNKTYPE', links are skipped
            'S-SPARSE',
            'S-SPARSE-WITH-NULLS',
        ]
        check_files(test_dir, expected)
        # special files are skipped too
        # '2-SYMTYPE: Skipping broken link to: testtar/0-REGTYPE',
        # '3-CHRTYPE: Skipping special file.',
        # '6-FIFOTYPE: Skipping special file.'
        assert [] == result

    @skipIf(on_windows, 'Unicode and/or Long paths are not handled well yet on windows')
    def test_extract_python_testtar_tar_archive_with_special_files(self):
        test_file = self.get_test_loc('archive/tar/testtar.tar')
        # this is from:
        # https://hg.python.org/cpython/raw-file/bff88c866886/Lib/test/testtar.tar
        test_dir = self.get_temp_dir()
        result = archive.extract_tar(test_file, test_dir)
        expected_warnings = ["'pax/bad-pax-\\xe4\\xf6\\xfc': \nPathname can't be converted from UTF-8 to current locale."]
        assert sorted(expected_warnings) == sorted(result)
        expected = [
            'gnu/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/longname',
            'gnu/regtype-gnu-uid',
            'gnu/sparse',
            'gnu/sparse-0.0',
            'gnu/sparse-0.1',
            'gnu/sparse-1.0',
            'misc/eof',
            'misc/regtype-hpux-signed-chksum-AOUaouss',
            'misc/regtype-old-v7',
            'misc/regtype-old-v7-signed-chksum-AOUaouss',
            'misc/regtype-suntar',
            'misc/regtype-xstar',
            'pax/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/123/longname',
            'pax/bad-pax-aou',
            'pax/hdrcharset-aou',
            'pax/regtype1',
            'pax/regtype2',
            'pax/regtype3',
            'pax/regtype4',
            'pax/umlauts-AOUaouss',
            'ustar/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/12345/1234567/longname',
            'ustar/conttype',
            'ustar/linktest1/regtype',
            'ustar/regtype',
            'ustar/sparse',
            'ustar/umlauts-AOUaouss'
        ]
        if on_linux:
            # paths are byte strings on linux
            expected = [bytes(e) for e in expected]
        check_files(test_dir, expected)
class TestDebian(BaseArchiveTestCase):
    """Tests for Debian packages, which are ar archives carrying tarballs."""

    def test_extract_deb_package_1(self):
        deb = self.get_test_loc('archive/deb/adduser_3.112ubuntu1_all.deb')
        target = self.get_temp_dir()
        archive.extract_ar(deb, target)
        # the payload tarball must come out intact
        check_size(110198, os.path.join(target, 'data.tar.gz'))

    def test_extract_deb_package_2(self):
        deb = self.get_test_loc('archive/deb/adduser_3.113+nmu3ubuntu3_all.deb')
        target = self.get_temp_dir()
        archive.extract_ar(deb, target)
        check_size(158441, os.path.join(target, 'data.tar.gz'))

    def test_get_best_handler_deb_package_is_an_archive(self):
        deb = self.get_test_loc('archive/deb/libjama-dev_1.2.4-2_all.deb')
        assert get_best_handler(deb) == archive.DebHandler

    def test_extract_deb_package_3(self):
        deb = self.get_test_loc('archive/deb/wget-el_0.5.0-8_all.deb')
        target = self.get_temp_dir()
        archive.extract_ar(deb, target)
        check_size(36376, os.path.join(target, 'data.tar.gz'))
class TestAr(BaseArchiveTestCase):
    """
    Tests for ar archives (Unix static libraries and Windows .lib files),
    comparing the behavior of the 7zip and libarchive backends which
    produce different results on the same inputs.
    """

    def test_extract_ar_basic_7z(self):
        test_file = self.get_test_loc('archive/ar/liby.a')
        test_dir = self.get_temp_dir()
        result = sevenzip.extract(test_file, test_dir)
        # 7z names the symbol table member '1.txt'
        expected = ['1.txt', 'main.o', 'yyerror.o']
        check_files(test_dir, expected)
        assert [] == result

    def test_extract_ar_basic(self):
        test_file = self.get_test_loc('archive/ar/liby.a')
        test_dir = self.get_temp_dir()
        result = archive.extract_ar(test_file, test_dir)
        # libarchive keeps the '__.SYMDEF' symbol table name
        expected = ['__.SYMDEF', 'main.o', 'yyerror.o']
        check_files(test_dir, expected)
        assert [] == result

    def test_extract_ar_verify_dates(self):
        # member timestamps must be preserved on extraction
        test_file = self.get_test_loc('archive/ar/liby.a')
        test_dir = self.get_temp_dir()
        archive.extract_ar(test_file, test_dir)
        expected = [
            (os.path.join(test_dir, 'main.o'), '2007-06-12'),
            (os.path.join(test_dir, 'yyerror.o'), '2007-06-12'),
        ]
        # DST sends a monkey wrench.... so we only test the date, not the time
        for loc, expected_date in expected:
            result = commoncode.date.get_file_mtime(loc)
            assert result.startswith(expected_date)

    def test_extract_ar_broken_7z(self):
        # 7z extracts what it can from a corrupted archive without warnings
        test_file = self.get_test_loc('archive/ar/liby-corrupted.a')
        test_dir = self.get_temp_dir()
        result = sevenzip.extract(test_file, test_dir)
        expected = ['__.SYMDEF', 'main.o']
        check_files(test_dir, expected)
        assert [] == result

    def test_extract_ar_broken(self):
        # libarchive extracts what it can but reports a warning
        test_file = self.get_test_loc('archive/ar/liby-corrupted.a')
        test_dir = self.get_temp_dir()
        result = archive.extract_ar(test_file, test_dir)
        expected = ['__.SYMDEF', 'main.o']
        check_files(test_dir, expected)
        assert ['None: \nIncorrect file header signature'] == result

    def test_extract_ar_with_invalid_path(self):
        test_file = self.get_test_loc('archive/ar/ar_invalidpath.ar')
        test_dir = self.get_temp_dir()
        result = archive.extract_ar(test_file, test_dir)
        expected = ['this/that']
        check_files(test_dir, expected)
        assert [] == result

    def test_extract_ar_with_relative_path_7z(self):
        test_file = self.get_test_loc('archive/ar/winlib/htmlhelp.lib')
        test_dir = self.get_temp_dir()
        result = sevenzip.extract(test_file, test_dir)
        expected = [
            '1.txt',
            '2.txt',
            'release/init.obj'
        ]
        check_files(test_dir, expected)
        assert [] == result

    def test_extract_ar_with_relative_path_libarch(self):
        # libarchive cannot resolve the GNU long-name string table in this
        # Windows .lib and emits warnings plus placeholder file names
        test_file = self.get_test_loc('archive/ar/winlib/htmlhelp.lib')
        test_dir = self.get_temp_dir()
        result = archive.libarchive2.extract(test_file, test_dir)
        expected_warns = [
            "'//': \nInvalid string table",
            "'/0': \nCan't find long filename for entry"
        ]
        assert expected_warns == result
        # inccorrect for now: need this: ['__.SYMDEF', 'release/init.obj']
        expected = ['0', 'dot', 'dot_1', 'dot_2']
        check_files(test_dir, expected)

    def test_extract_ar_with_relative_path_and_backslashes_in_names_libarch(self):
        # same long-name string table limitation as above, at scale: every
        # member entry yields a warning and a numeric placeholder name
        test_file = self.get_test_loc('archive/ar/winlib/freetype.lib')
        test_dir = self.get_temp_dir()
        result = archive.libarchive2.extract(test_file, test_dir)
        expected_warns = [
            u"'//': \nInvalid string table",
            u"'/0': \nCan't find long filename for entry",
            u"'/34': \nCan't find long filename for entry",
            u"'/68': \nCan't find long filename for entry",
            u"'/104': \nCan't find long filename for entry",
            u"'/137': \nCan't find long filename for entry",
            u"'/173': \nCan't find long filename for entry",
            u"'/205': \nCan't find long filename for entry",
            u"'/239': \nCan't find long filename for entry",
            u"'/275': \nCan't find long filename for entry",
            u"'/311': \nCan't find long filename for entry",
            u"'/344': \nCan't find long filename for entry",
            u"'/375': \nCan't find long filename for entry",
            u"'/406': \nCan't find long filename for entry",
            u"'/442': \nCan't find long filename for entry",
            u"'/477': \nCan't find long filename for entry",
            u"'/512': \nCan't find long filename for entry",
            u"'/545': \nCan't find long filename for entry",
            u"'/577': \nCan't find long filename for entry",
            u"'/611': \nCan't find long filename for entry",
            u"'/645': \nCan't find long filename for entry",
            u"'/681': \nCan't find long filename for entry",
            u"'/717': \nCan't find long filename for entry",
            u"'/750': \nCan't find long filename for entry",
            u"'/784': \nCan't find long filename for entry",
            u"'/818': \nCan't find long filename for entry",
            u"'/853': \nCan't find long filename for entry",
            u"'/888': \nCan't find long filename for entry",
            u"'/923': \nCan't find long filename for entry",
            u"'/957': \nCan't find long filename for entry",
            u"'/993': \nCan't find long filename for entry",
            u"'/1027': \nCan't find long filename for entry",
            u"'/1058': \nCan't find long filename for entry",
            u"'/1089': \nCan't find long filename for entry"
        ]
        assert expected_warns == result
        # 7zip is better, but has a security bug for now
        # GNU ar works fine otherwise, but there are portability issues
        expected = [
            '0',
            '1027',
            '104',
            '1058',
            '1089',
            '137',
            '173',
            '205',
            '239',
            '275',
            '311',
            '34',
            '344',
            '375',
            '406',
            '442',
            '477',
            '512',
            '545',
            '577',
            '611',
            '645',
            '68',
            '681',
            '717',
            '750',
            '784',
            '818',
            '853',
            '888',
            '923',
            '957',
            '993',
            'dot',
            'dot_1',
            'dot_2'
        ]
        if on_linux:
            # paths are byte strings on linux
            expected = [bytes(e) for e in expected]
        check_files(test_dir, expected)

    def test_extract_ar_with_relative_path_and_backslashes_in_names_7z(self):
        # 7z resolves the long names correctly for the same archive
        test_file = self.get_test_loc('archive/ar/winlib/freetype.lib')
        test_dir = self.get_temp_dir()
        result = sevenzip.extract(test_file, test_dir)
        assert [] == result
        expected = [
            '1.txt',
            '2.txt',
            'objs/debug_mt/autofit.obj',
            'objs/debug_mt/bdf.obj',
            'objs/debug_mt/cff.obj',
            'objs/debug_mt/ftbase.obj',
            'objs/debug_mt/ftbbox.obj',
            'objs/debug_mt/ftbitmap.obj',
            'objs/debug_mt/ftcache.obj',
            'objs/debug_mt/ftdebug.obj',
            'objs/debug_mt/ftgasp.obj',
            'objs/debug_mt/ftglyph.obj',
            'objs/debug_mt/ftgzip.obj',
            'objs/debug_mt/ftinit.obj',
            'objs/debug_mt/ftlzw.obj',
            'objs/debug_mt/ftmm.obj',
            'objs/debug_mt/ftpfr.obj',
            'objs/debug_mt/ftstroke.obj',
            'objs/debug_mt/ftsynth.obj',
            'objs/debug_mt/ftsystem.obj',
            'objs/debug_mt/fttype1.obj',
            'objs/debug_mt/ftwinfnt.obj',
            'objs/debug_mt/pcf.obj',
            'objs/debug_mt/pfr.obj',
            'objs/debug_mt/psaux.obj',
            'objs/debug_mt/pshinter.obj',
            'objs/debug_mt/psmodule.obj',
            'objs/debug_mt/raster.obj',
            'objs/debug_mt/sfnt.obj',
            'objs/debug_mt/smooth.obj',
            'objs/debug_mt/truetype.obj',
            'objs/debug_mt/type1.obj',
            'objs/debug_mt/type1cid.obj',
            'objs/debug_mt/type42.obj',
            'objs/debug_mt/winfnt.obj'
        ]
        check_files(test_dir, expected)

    def test_extract_ar_static_library_does_not_delete_symdefs_7z(self):
        test_file = self.get_test_loc('archive/ar/liby.a')
        test_dir = self.get_temp_dir()
        result = sevenzip.extract(test_file, test_dir)
        # the symdef file is 1.txt with 7z
        expected = ['1.txt', 'main.o', 'yyerror.o']
        check_files(test_dir, expected)
        assert [] == result

    def test_extract_ar_static_library_does_not_delete_symdefs(self):
        test_file = self.get_test_loc('archive/ar/liby.a')
        test_dir = self.get_temp_dir()
        result = archive.extract_ar(test_file, test_dir)
        expected = ['__.SYMDEF', 'main.o', 'yyerror.o']
        check_files(test_dir, expected)
        assert [] == result

    def test_extract_ar_with_trailing_data(self):
        # junk bytes after the archive proper must not prevent extraction
        test_file = self.get_test_loc('archive/ar/ar_trailing.a')
        test_dir = self.get_temp_dir()
        archive.extract_ar(test_file, test_dir)
        result = os.path.join(test_dir, 'main.o')
        assert os.path.exists(result)
        result = os.path.join(test_dir, 'yyerror.o')
        assert os.path.exists(result)

    def test_extract_ar_with_permissions_7z(self):
        test_file = self.get_test_loc('archive/ar/winlib/zlib.lib')
        test_dir = self.get_temp_dir()
        result = sevenzip.extract(test_file, test_dir)
        expected = ['1.txt', '1.zlib.pyd', '2.txt', '2.zlib.pyd', '3.zlib.pyd', '4.zlib.pyd']
        check_files(test_dir, expected)
        assert [] == result

    def test_extract_ar_with_permissions(self):
        # this behavior is not correct: 7z is better, but has security flaws for now
        test_file = self.get_test_loc('archive/ar/winlib/zlib.lib')
        test_dir = self.get_temp_dir()
        result = archive.extract_ar(test_file, test_dir)
        assert [] == result
        expected = ['dot', 'dot_1']
        check_files(test_dir, expected)
class TestCpio(BaseArchiveTestCase):
    """
    Tests for cpio archive extraction, covering trailing junk, corruption,
    and hostile absolute/relative member paths.
    """

    def test_extract_cpio_basic(self):
        test_file = self.get_test_loc('archive/cpio/elfinfo-1.0-1.fc9.src.cpio')
        test_dir = self.get_temp_dir()
        archive.extract_cpio(test_file, test_dir)
        result = os.path.join(test_dir, 'elfinfo-1.0.tar.gz')
        assert os.path.exists(result)

    def test_extract_cpio_with_trailing_data(self):
        # junk bytes after the archive proper must not prevent extraction
        test_file = self.get_test_loc('archive/cpio/cpio_trailing.cpio')
        test_dir = self.get_temp_dir()
        archive.extract_cpio(test_file, test_dir)
        result = os.path.join(test_dir, 'elfinfo-1.0.tar.gz')
        assert os.path.exists(result)

    def test_extract_cpio_broken_7z(self):
        # 7z refuses the corrupted archive entirely
        test_file = self.get_test_loc('archive/cpio/cpio_broken.cpio')
        test_dir = self.get_temp_dir()
        self.assertRaisesInstance(Exception('Unknown extraction error'), sevenzip.extract, test_file, test_dir)

    def test_extract_cpio_broken2(self):
        # libarchive recovers what it can and reports a warning instead
        test_file = self.get_test_loc('archive/cpio/cpio_broken.cpio')
        test_dir = self.get_temp_dir()
        result = archive.extract_cpio(test_file, test_dir)
        expected = sorted(['elfinfo-1.0.tar.gz', 'elfinfo.spec'])
        if on_linux:
            # paths are byte strings on linux
            expected = [bytes(e) for e in expected]
        assert expected == sorted(os.listdir(test_dir))
        assert ["'elfinfo.spec': \nSkipped 72 bytes before finding valid header"] == result

    def test_extract_cpio_with_absolute_path(self):
        # absolute member paths must be re-rooted under the extraction dir
        assert not os.path.exists('/tmp/subdir')
        test_dir = self.get_temp_dir()
        test_file = self.get_test_loc('archive/cpio/cpio_absolute.cpio')
        archive.extract_cpio(test_file, test_dir)
        assert not os.path.exists('/tmp/subdir')
        result = os.path.join(test_dir, 'home/li/Desktop/absolute_folder/absolute_file')
        assert os.path.exists(result)

    def test_extract_cpio_with_relative_path(self):
        # test file is created by cmd: find ../.. - |cpio -ov >relative.cpio
        # We should somehow add a "parent" folder to extract relative paths
        test_file = self.get_test_loc('archive/cpio/cpio_relative.cpio')
        test_dir = self.get_temp_dir()
        result = archive.extract_cpio(test_file, test_dir)
        assert [] == result
        extracted = self.collect_extracted_path(test_dir)
        # ".." components are rewritten to a literal dotdot/ tree
        expected = [
            '/dotdot/',
            '/dotdot/dotdot/',
            '/dotdot/dotdot/2folder/',
            '/dotdot/dotdot/2folder/3folder/',
            '/dotdot/dotdot/2folder/3folder/cpio_relative.cpio',
            '/dotdot/dotdot/2folder/3folder/relative_file',
            '/dotdot/dotdot/2folder/3folder/relative_file~',
            '/dotdot/dotdot/2folder/relative_file',
            '/dotdot/dotdot/relative_file'
        ]
        assert expected == extracted

    def test_extract_cpio_with_invalidpath(self):
        test_file = self.get_test_loc('archive/cpio/cpio-invalidpath.cpio')
        test_dir = self.get_temp_dir()
        archive.extract_cpio(test_file, test_dir)
        result = os.path.join(test_dir, 'backup')
        assert os.path.exists(result)
        result = os.path.join(test_dir, 'this/that')
        assert os.path.exists(result)

    def test_extract_cpio_with_weird_filename_extension(self):
        # extraction is content-driven, not extension-driven
        test_file = self.get_test_loc('archive/cpio/t.cpio.foo')
        test_dir = self.get_temp_dir()
        result = archive.extract_cpio(test_file, test_dir)
        assert [] == result
        extracted = self.collect_extracted_path(test_dir)
        expected = ['/t/', '/t/t.txt']
        assert expected == extracted
class TestRpm(BaseArchiveTestCase):
    """Tests for RPM extraction: the payload comes out as a compressed cpio."""

    def test_extract_rpm_basic_1(self):
        rpm = self.get_test_loc('archive/rpm/elfinfo-1.0-1.fc9.src.rpm')
        target = self.get_temp_dir()
        archive.extract_rpm(rpm, target)
        assert os.path.exists(os.path.join(target, 'elfinfo-1.0-1.fc9.src.cpio.gz'))

    def test_extract_rpm_basic_2(self):
        rpm = self.get_test_loc('archive/rpm/python-glc-0.7.1-1.src.rpm')
        target = self.get_temp_dir()
        archive.extract_rpm(rpm, target)
        assert os.path.exists(os.path.join(target, 'python-glc-0.7.1-1.src.cpio.gz'))

    def test_extract_rpm_nested_correctly(self):
        rpm = self.get_test_loc('archive/rpm/extract_once/libsqueeze0.2_0-0.2.3-8mdv2010.0.i586.rpm')
        target = self.get_temp_dir()
        archive.extract_rpm(rpm, target)
        assert os.path.exists(os.path.join(target, 'libsqueeze0.2_0-0.2.3-8mdv2010.0.i586.cpio.lzma'))

    def test_extract_rpm_with_trailing_data(self):
        # junk bytes after the package proper must not prevent extraction
        rpm = self.get_test_loc('archive/rpm/rpm_trailing.rpm')
        target = self.get_temp_dir()
        warnings = archive.extract_rpm(rpm, target)
        check_files(target, ['elfinfo-1.0-1.fc9.src.cpio.gz'])
        assert warnings == []

    def test_extract_rpm_with_renamed_content(self):
        # When the RPM is renamed, we should still be able to find the cpio
        rpm = self.get_test_loc('archive/rpm/renamed.rpm')
        target = self.get_temp_dir()
        warnings = archive.extract_rpm(rpm, target)
        check_files(target, ['python-glc-0.7.1-1.src.cpio.gz'])
        assert warnings == []

    def test_extract_rpm_broken(self):
        rpm = self.get_test_loc('archive/rpm/broken.rpm')
        target = self.get_temp_dir()
        self.assertRaisesInstance(
            Exception('Unknown extraction error'),
            archive.extract_rpm, rpm, target)
class TestExtractTwice(BaseArchiveTestCase):
    """
    Tests for two-step extractors: an RPM is first unwrapped to its
    compressed cpio, which is then extracted in the same call.
    """

    def test_extract_twice_with_rpm_with_xz_compressed_cpio(self):
        test_file = self.get_test_loc('archive/rpm/xz-compressed-cpio.rpm')
        test_dir = self.get_temp_dir()
        # this will return an extractor that extracts twice
        extractor = archive.get_extractor(test_file)
        result = list(extractor(test_file, test_dir))
        assert [] == result
        # the final payload files appear directly, no intermediate cpio left
        expected = [
            'etc/abrt/abrt-action-save-package-data.conf',
            'etc/abrt/abrt.conf',
            'etc/abrt/gpg_keys',
            'etc/dbus-1/system.d/dbus-abrt.conf',
            'etc/libreport/events.d/abrt_event.conf',
            'etc/libreport/events.d/smart_event.conf',
            'etc/rc.d/init.d/abrtd',
            'usr/bin/abrt-action-save-package-data',
            'usr/bin/abrt-handle-upload',
            'usr/libexec/abrt-handle-event',
            'usr/libexec/abrt1-to-abrt2',
            'usr/sbin/abrt-dbus',
            'usr/sbin/abrt-server',
            'usr/sbin/abrtd',
            'usr/share/dbus-1/system-services/com.redhat.abrt.service',
            'usr/share/doc/abrt-2.0.8/COPYING',
            'usr/share/doc/abrt-2.0.8/README',
            'usr/share/locale/ar/LC_MESSAGES/abrt.mo',
            'usr/share/locale/as/LC_MESSAGES/abrt.mo',
            'usr/share/locale/ast/LC_MESSAGES/abrt.mo',
            'usr/share/locale/bg/LC_MESSAGES/abrt.mo',
            'usr/share/locale/bn_IN/LC_MESSAGES/abrt.mo',
            'usr/share/locale/ca/LC_MESSAGES/abrt.mo',
            'usr/share/locale/cs/LC_MESSAGES/abrt.mo',
            'usr/share/locale/da/LC_MESSAGES/abrt.mo',
            'usr/share/locale/de/LC_MESSAGES/abrt.mo',
            'usr/share/locale/el/LC_MESSAGES/abrt.mo',
            'usr/share/locale/en_GB/LC_MESSAGES/abrt.mo',
            'usr/share/locale/es/LC_MESSAGES/abrt.mo',
            'usr/share/locale/fa/LC_MESSAGES/abrt.mo',
            'usr/share/locale/fi/LC_MESSAGES/abrt.mo',
            'usr/share/locale/fr/LC_MESSAGES/abrt.mo',
            'usr/share/locale/gu/LC_MESSAGES/abrt.mo',
            'usr/share/locale/he/LC_MESSAGES/abrt.mo',
            'usr/share/locale/hi/LC_MESSAGES/abrt.mo',
            'usr/share/locale/hu/LC_MESSAGES/abrt.mo',
            'usr/share/locale/id/LC_MESSAGES/abrt.mo',
            'usr/share/locale/it/LC_MESSAGES/abrt.mo',
            'usr/share/locale/ja/LC_MESSAGES/abrt.mo',
            'usr/share/locale/kn/LC_MESSAGES/abrt.mo',
            'usr/share/locale/ko/LC_MESSAGES/abrt.mo',
            'usr/share/locale/ml/LC_MESSAGES/abrt.mo',
            'usr/share/locale/mr/LC_MESSAGES/abrt.mo',
            'usr/share/locale/nb/LC_MESSAGES/abrt.mo',
            'usr/share/locale/nl/LC_MESSAGES/abrt.mo',
            'usr/share/locale/or/LC_MESSAGES/abrt.mo',
            'usr/share/locale/pa/LC_MESSAGES/abrt.mo',
            'usr/share/locale/pl/LC_MESSAGES/abrt.mo',
            'usr/share/locale/pt/LC_MESSAGES/abrt.mo',
            'usr/share/locale/pt_BR/LC_MESSAGES/abrt.mo',
            'usr/share/locale/ru/LC_MESSAGES/abrt.mo',
            'usr/share/locale/sk/LC_MESSAGES/abrt.mo',
            'usr/share/locale/sr/LC_MESSAGES/abrt.mo',
            'usr/share/locale/sr@latin/LC_MESSAGES/abrt.mo',
            'usr/share/locale/sv/LC_MESSAGES/abrt.mo',
            'usr/share/locale/ta/LC_MESSAGES/abrt.mo',
            'usr/share/locale/te/LC_MESSAGES/abrt.mo',
            'usr/share/locale/uk/LC_MESSAGES/abrt.mo',
            'usr/share/locale/zh_CN/LC_MESSAGES/abrt.mo',
            'usr/share/locale/zh_TW/LC_MESSAGES/abrt.mo',
            'usr/share/man/man1/abrt-action-save-package-data.1.gz',
            'usr/share/man/man1/abrt-handle-upload.1.gz',
            'usr/share/man/man1/abrt-server.1.gz',
            'usr/share/man/man5/abrt-action-save-package-data.conf.5.gz',
            'usr/share/man/man5/abrt.conf.5.gz',
            'usr/share/man/man8/abrt-dbus.8.gz',
            'usr/share/man/man8/abrtd.8.gz'
        ]
        check_files(test_dir, expected)

    def test_extract_twice_can_extract_to_relative_paths(self):
        # The setup is a tad complex because we want to have a relative dir
        # to the base dir where we run tests from, ie the scancode-toolkit/ dir
        # To use relative paths, we use our tmp dir at the root of the code tree
        from os.path import dirname, join, abspath, exists
        import shutil
        import tempfile

        test_file = self.get_test_loc('archive/rpm/xz-compressed-cpio.rpm')
        # this will return an extractor that extracts twice
        extractor = archive.get_extractor(test_file)
        scancode_root = dirname(dirname(dirname(__file__)))
        scancode_tmp = join(scancode_root, 'tmp')
        fileutils.create_dir(scancode_tmp)
        scancode_root_abs = abspath(scancode_root)
        # strip the absolute prefix so both src and tgt dirs are relative
        test_src_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
        test_tgt_dir = tempfile.mkdtemp(dir=scancode_tmp).replace(scancode_root_abs, '').strip('\\/')
        shutil.copy(test_file, test_src_dir)
        test_src_file = join(test_src_dir, 'xz-compressed-cpio.rpm')
        result = list(extractor(test_src_file, test_tgt_dir))
        assert [] == result
        assert exists(join(test_tgt_dir, 'usr/sbin/abrt-dbus'))
class TestRar(BaseArchiveTestCase):
    """
    Tests for RAR extraction, covering hostile paths, corruption,
    passwords and non-ASCII file names.
    """

    def test_extract_rar_basic(self):
        test_file = self.get_test_loc('archive/rar/basic.rar')
        test_dir = self.get_temp_dir()
        archive.extract_rar(test_file, test_dir)
        result = os.path.join(test_dir, 'd', 'b', 'a.txt')
        assert os.path.exists(result)

    def test_extract_rar_with_invalid_path(self):
        test_file = self.get_test_loc('archive/rar/rar_invalidpath.rar')
        test_dir = self.get_temp_dir()
        archive.extract_rar(test_file, test_dir)
        result = os.path.join(test_dir, 'this/that')
        assert os.path.exists(result)

    def test_extract_rar_with_trailing_data(self):
        # Junk bytes after the archive proper must not prevent extraction.
        # (A stray no-op `Exception('Unknown extraction error')` statement
        # was removed here: it built and discarded an exception object.)
        test_file = self.get_test_loc('archive/rar/rar_trailing.rar')
        test_dir = self.get_temp_dir()
        archive.extract_rar(test_file, test_dir)
        result = os.path.join(test_dir, 'd', 'b', 'a.txt')
        assert os.path.exists(result)

    def test_extract_rar_broken(self):
        test_file = self.get_test_loc('archive/rar/broken.rar')
        test_dir = self.get_temp_dir()
        expected = Exception('Header CRC error')
        self.assertRaisesInstance(expected, archive.extract_rar, test_file, test_dir)

    def test_extract_rar_with_relative_path(self):
        # FIXME: this file may not have a real relative path
        test_file = self.get_test_loc('archive/rar/rar_relative.rar', copy=True)
        test_dir = self.get_temp_dir()
        archive.extract_rar(test_file, test_dir)
        # nothing may escape above the extraction directory
        result = os.path.abspath(test_file + '/../a_parent_folder.txt')
        assert not os.path.exists(result)
        result = os.path.join(test_dir, '2folder/relative_file')
        assert os.path.exists(result)
        result = os.path.join(test_dir, '2folder/3folder/relative_file')
        assert os.path.exists(result)

    def test_extract_rar_with_absolute_path(self):
        # FIXME: this file may not have a real absolute path
        assert not os.path.exists('/home/li/Desktop/zip_folder')
        test_file = self.get_test_loc('archive/rar/rar_absolute.rar', copy=True)
        test_dir = self.get_temp_dir()
        archive.extract_rar(test_file, test_dir)
        # absolute paths are re-rooted under the extraction dir
        assert not os.path.exists('/home/li/Desktop/absolute_folder')
        result = os.path.join(test_dir, 'home/li/Desktop',
                              'absolute_folder/absolute_file')
        assert os.path.exists(result)

    def test_extract_rar_with_password(self):
        test_file = self.get_test_loc('archive/rar/rar_password.rar')
        test_dir = self.get_temp_dir()
        expected = Exception('RAR encryption support unavailable.')
        self.assertRaisesInstance(expected, archive.extract_rar,
                                  test_file, test_dir)

    def test_extract_rar_with_non_ascii_path(self):
        test_file = self.get_test_loc('archive/rar/non_ascii_corrupted.rar')
        # The bug only occurs if the path was given as Unicode
        # (`unicode` builtin: this file targets Python 2)
        test_file = unicode(test_file)
        test_dir = self.get_temp_dir()
        # raise an exception but still extracts some
        expected = Exception('Prefix found')
        self.assertRaisesInstance(expected, archive.extract_rar, test_file, test_dir)
        result = os.path.join(test_dir, 'EdoProject_java/WebContent/WEB-INF/lib/cos.jar')
        assert os.path.exists(result)
class TestSevenZip(BaseArchiveTestCase):
    """
    Tests for 7z archives, exercising both the native sevenzip backend and
    the archive.extract_7z entry point (which may fall back to libarchive).
    """

    def test_extract_7z_basic(self):
        test_file = self.get_test_loc('archive/7z/z.7z')
        test_dir = self.get_temp_dir()
        result = archive.extract_7z(test_file, test_dir)
        assert [] == result
        expected = ['z/a/a.txt', 'z/b/a.txt', 'z/c/a.txt']
        check_files(test_dir, expected)

    def test_extract_7z_with_trailing_data(self):
        # junk bytes after the archive proper must not prevent extraction
        test_file = self.get_test_loc('archive/7z/7zip_trailing.7z')
        test_dir = self.get_temp_dir()
        result = archive.extract_7z(test_file, test_dir)
        assert [] == result
        expected = ['z/a/a.txt', 'z/b/a.txt', 'z/c/a.txt']
        check_files(test_dir, expected)

    def test_extract_7z_with_broken_archive_with7z(self):
        test_file = self.get_test_loc('archive/7z/corrupted7z.7z')
        test_dir = self.get_temp_dir()
        msg = 'Unknown extraction error'
        self.assertRaisesInstance(ExtractErrorFailedToExtract(msg), sevenzip.extract, test_file, test_dir)

    def test_extract_7z_with_broken_archive_does_not_fail_when_using_fallback(self):
        # the entry point with fallback still surfaces the same failure
        test_file = self.get_test_loc('archive/7z/corrupted7z.7z')
        test_dir = self.get_temp_dir()
        msg = 'Unknown extraction error'
        self.assertRaisesInstance(ExtractErrorFailedToExtract(msg), archive.extract_7z, test_file, test_dir)

    def test_extract_7z_with_non_existing_archive(self):
        test_file = 'archive/7z/I_DO_NOT_EXIST.zip'
        test_dir = self.get_temp_dir()
        msg = 'Unknown extraction error'
        self.assertExceptionContains(msg, sevenzip.extract, test_file, test_dir)

    def test_extract_7z_with_invalid_path_using_7z(self):
        test_file = self.get_test_loc('archive/7z/7zip_invalidpath.7z')
        test_dir = self.get_temp_dir()
        result = sevenzip.extract(test_file, test_dir)
        assert [] == result
        extracted = self.collect_extracted_path(test_dir)
        expected = ['/this/', '/this/that']
        assert expected == extracted

    def test_extract_7z_with_invalid_path(self):
        test_file = self.get_test_loc('archive/7z/7zip_invalidpath.7z')
        test_dir = self.get_temp_dir()
        result = archive.extract_7z(test_file, test_dir)
        assert [] == result
        extracted = self.collect_extracted_path(test_dir)
        expected = ['/this/', '/this/that']
        assert expected == extracted

    def test_extract_7z_with_relative_path(self):
        # ".." components are rewritten to a literal dotdot/ tree so that
        # nothing escapes above the extraction directory
        test_file = self.get_test_loc('archive/7z/7zip_relative.7z')
        test_dir = self.get_temp_dir()
        result = archive.extract_7z(test_file, test_dir)
        non_result = os.path.join(test_dir, '../a_parent_folder.txt')
        assert not os.path.exists(non_result)
        assert [] == result
        extracted = self.collect_extracted_path(test_dir)
        expected = [
            '/dotdot/',
            '/dotdot/2folder/',
            '/dotdot/2folder/3folder/',
            '/dotdot/2folder/3folder/relative_file',
            '/dotdot/2folder/3folder/relative_file~',
            '/dotdot/2folder/relative_file',
            '/dotdot/relative_file'
        ]
        assert expected == extracted

    def test_extract_7z_with_password_with_7z(self):
        test_file = self.get_test_loc('archive/7z/7zip_password.7z')
        test_dir = self.get_temp_dir()
        expected = Exception('Password protected archive, unable to extract')
        self.assertRaisesInstance(expected, sevenzip.extract, test_file, test_dir)

    def test_extract_7z_with_password(self):
        test_file = self.get_test_loc('archive/7z/7zip_password.7z')
        test_dir = self.get_temp_dir()
        expected = Exception('Password protected archive, unable to extract')
        self.assertRaisesInstance(expected, archive.extract_7z, test_file, test_dir)

    def test_extract_7zip_native_with_unicode_path_should_extract_without_error(self):
        test_file = self.get_test_loc('archive/7z/7zip_unicode.7z')
        test_dir = self.get_temp_dir()
        result = sevenzip.extract(test_file, test_dir)
        assert [] == result
        assert 2 == len(os.listdir(os.path.join(test_dir, 'zip')))

    def test_extract_7zip_with_fallback_with_unicode_path_should_extract_without_error(self):
        test_file = self.get_test_loc('archive/7z/7zip_unicode.7z')
        test_dir = self.get_temp_dir()
        result = archive.extract_7z(test_file, test_dir)
        assert [] == result
        assert 2 == len(os.listdir(os.path.join(test_dir, 'zip')))

    def test_extract_7zip_libarchive_with_unicode_path_extracts_with_errors(self):
        # NOTE(review): if no ArchiveError is raised this test passes silently
        # without asserting anything — consider failing explicitly; verify
        # whether the no-error path is intentionally acceptable here.
        test_file = self.get_test_loc('archive/7z/7zip_unicode.7z')
        test_dir = self.get_temp_dir()
        try:
            archive.extract_7z(test_file, test_dir)
        except libarchive2.ArchiveError as e:
            assert 'Damaged 7-Zip archive' in e.msg

    def test_extract_7z_basic_with_space_in_file_name(self):
        test_file = self.get_test_loc('archive/7z/t .7z')
        test_dir = self.get_temp_dir()
        result = archive.extract_7z(test_file, test_dir)
        assert [] == result
        expected = ['t/t.txt']
        check_files(test_dir, expected)
class TestIso(BaseArchiveTestCase):
    """Tests for ISO disk image detection and extraction."""

    def test_extract_iso_basic(self):
        iso = self.get_test_loc('archive/iso/small.iso')
        target = self.get_temp_dir()
        archive.extract_iso(iso, target)
        expected = [
            '/ChangeLog',
            '/ChangeLog (copy)',
            '/freebase.ABOUT',
            '/this/',
            '/this/that',
        ]
        assert sorted(self.collect_extracted_path(target)) == sorted(expected)

    def test_get_extractor_not_iso_text_is_not_mistaken_for_an_iso_image(self):
        # a plain text file must not match the ISO signature
        not_an_iso = self.get_test_loc('archive/iso/ChangeLog')
        assert not archive.get_extractor(not_an_iso)

    def test_extract_iso_basic_with_with_weird_filename_extension(self):
        # extraction is driven by content type, not by file extension
        iso = self.get_test_loc('archive/iso/t.iso.foo')
        target = self.get_temp_dir()
        archive.extract_iso(iso, target)
        assert self.collect_extracted_path(target) == ['/t/', '/t/t.txt']
class TestXzLzma(BaseArchiveTestCase):
    def check_lzma_extract(self, extract_fun, test_file, expected):
        """
        Run the 'extract_fun' function using the 'test_file' file as an input
        and verifies that the 'expected' file has been extracted correctly.

        `test_file` is a path relative to the test data directory and
        `expected` is a file name expected inside the extraction directory.
        """
        test_file = self.get_test_loc(test_file)
        extract_dir = self.get_temp_dir()
        expected_file = os.path.join(extract_dir, expected)
        extract_fun(test_file, extract_dir)
        # failure message interpolates the local variable names above
        assert os.path.exists(expected_file), (
            '%(expected_file)s file was not extracted '
            'correctly from archive %(test_file)s'
            % locals())
def test_extract_archive_tar_xz_1(self):
test_file = 'archive/lzma_xz/basic/texlive-core-patches-20.tar.xz'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected='texlive-core-patches-20.tar')
def test_extract_archive_tar_xz_2(self):
test_file = 'archive/lzma_xz/all/texlive-core-patches-20.tar.xz'
expected = 'texlive-core-patches-20.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_xz_3(self):
test_file = 'archive/lzma_xz/all/binutils-2.22.52.0.3-patches-1.0.tar.xz'
expected = 'binutils-2.22.52.0.3-patches-1.0.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_xz_4(self):
test_file = 'archive/lzma_xz/all/bdsup2sub-4.0.0.tar.xz'
expected = 'bdsup2sub-4.0.0.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_xz_5(self):
test_file = 'archive/lzma_xz/all/desktop-file-utils-0.19.tar.xz'
expected = 'desktop-file-utils-0.19.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_lzma_1(self):
test_file = 'archive/lzma_xz/basic/coreutils-8.5-patches-1.tar.lzma'
expected = 'coreutils-8.5-patches-1.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_lzma_2(self):
test_file = 'archive/lzma_xz/all/orionsocket-1.0.9.tar.lzma'
expected = 'orionsocket-1.0.9.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_lzma_3(self):
test_file = 'archive/lzma_xz/all/MinGW-5.1.6.exe-src.tar.lzma'
expected = 'MinGW-5.1.6.exe-src.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_lzma_4(self):
test_file = 'archive/lzma_xz/all/dnsmasq-2.57.tar.lzma'
expected = 'dnsmasq-2.57.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_lzma_1(self):
test_file = 'archive/lzma_xz/all/cromwell-2.40-r3-cvs-fixes.patch.lzma'
expected = 'cromwell-2.40-r3-cvs-fixes.patch'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
def test_extract_archive_tar_lzma_5(self):
test_file = 'archive/lzma_xz/all/coreutils-8.5-patches-1.tar.lzma'
expected = 'coreutils-8.5-patches-1.tar'
self.check_lzma_extract(extract_fun=archive.extract_lzma,
test_file=test_file,
expected=expected)
class TestDia(BaseArchiveTestCase):
    """Tests for Dia diagram files, which are gzip-compressed data."""

    def test_extract_dia_basic(self):
        dia_loc = self.get_test_loc('archive/dia/dia.dia')
        target = self.get_temp_dir()
        archive.uncompress_gzip(dia_loc, target)
        assert os.path.exists(os.path.join(target, 'dia.dia-extract'))

    def test_extract_dia_with_trailing_data(self):
        # trailing junk after the gzip stream is tolerated
        dia_loc = self.get_test_loc('archive/dia/dia_trailing.dia')
        target = self.get_temp_dir()
        archive.uncompress_gzip(dia_loc, target)
        assert os.path.exists(os.path.join(target, 'dia_trailing.dia-extract'))

    def test_extract_dia_broken_1(self):
        dia_loc = self.get_test_loc('archive/dia/dia_broken.dia')
        target = self.get_temp_dir()
        self.assertExceptionContains(
            'CRC check failed',
            archive.uncompress_gzip, dia_loc, target)

    def test_extract_dia_broken_2(self):
        dia_loc = self.get_test_loc('archive/dia/broken/PublisherUML.dia')
        target = self.get_temp_dir()
        self.assertExceptionContains(
            'invalid distance too far back',
            archive.uncompress_gzip, dia_loc, target)

    def test_extract_dia_broken_3(self):
        dia_loc = self.get_test_loc('archive/dia/broken/schedulerClassDiagram.dia')
        target = self.get_temp_dir()
        self.assertExceptionContains(
            'invalid distance too far back',
            archive.uncompress_gzip, dia_loc, target)

    def test_extract_dia_broken_4(self):
        dia_loc = self.get_test_loc('archive/dia/broken/ServletProxyGenerator.dia')
        target = self.get_temp_dir()
        self.assertExceptionContains(
            'invalid distance too far back',
            archive.uncompress_gzip, dia_loc, target)

    def test_extract_can_get_extractor_and_uncompress_dia_files(self):
        # the extractor selected by type detection must handle .dia content
        dia_loc = self.get_test_loc('archive/dia/guess/infoset-doc.dia')
        target = self.get_temp_dir()
        extractor = archive.get_extractor(dia_loc)
        extractor(dia_loc, target)
        assert os.path.exists(os.path.join(target, 'infoset-doc.dia-extract'))
class TestTarZ(BaseArchiveTestCase):
    """Tests for legacy Unix compress (.Z) files."""

    def test_extract_tarz_compress_basic(self):
        z_loc = self.get_test_loc('archive/Z/tkWWW-0.11.tar.Z')
        target = self.get_temp_dir()
        archive.extract_Z(z_loc, target)
        # only the compress layer is removed: the inner tar remains
        assert os.path.exists(os.path.join(target, 'tkWWW-0.11.tar'))

    def test_extract_z_compress_basic(self):
        z_loc = self.get_test_loc('archive/Z/tr2tex.Z')
        target = self.get_temp_dir()
        archive.extract_Z(z_loc, target)
        assert os.path.exists(os.path.join(target, 'tr2tex'))
class TestXar(BaseArchiveTestCase):
    # Tests for XAR (eXtensible ARchive) files.

    def test_extract_xar_basic(self):
        test_file = self.get_test_loc('archive/xar/xar-1.4.xar')
        test_dir = self.get_temp_dir()
        # NOTE(review): extract_Z is called here on a .xar archive; the
        # assertions below expect xar content ([TOC].xml), so extract_Z
        # presumably delegates to a backend that also handles xar -- confirm.
        archive.extract_Z(test_file, test_dir)
        # [TOC].xml is the xar table of contents emitted on extraction
        result = os.path.join(test_dir, '[TOC].xml')
        assert os.path.exists(result)
        result = os.path.join(test_dir, 'xar-1.4', 'Makefile.in')
        assert os.path.exists(result)
class TestCb7(BaseArchiveTestCase):
    """Tests for .cb7 comic book archives (7zip-based)."""

    def test_get_extractor_cb7(self):
        # .cb7 files are routed to the 7zip extractor
        loc = self.get_test_loc('archive/cb7/t .cb7')
        assert archive.extract_7z == archive.get_extractor(loc)

    def test_extract_cb7_basic_with_space_in_file_name(self):
        loc = self.get_test_loc('archive/cb7/t .cb7')
        target = self.get_temp_dir()
        archive.extract_7z(loc, target)
        assert ['/t/', '/t/t.txt'] == self.collect_extracted_path(target)

    def test_extract_cb7_basic_with_weird_filename_extension(self):
        loc = self.get_test_loc('archive/cb7/t.cb7.foo')
        target = self.get_temp_dir()
        archive.extract_7z(loc, target)
        assert ['/t/', '/t/t.txt'] == self.collect_extracted_path(target)
class TestCab(BaseArchiveTestCase):
    """Tests for Microsoft Cabinet (.cab) archives."""

    def test_get_extractor_cab(self):
        loc = self.get_test_loc('archive/cab/basic.cab')
        assert archive.extract_cab == archive.get_extractor(loc)

    def test_extract_cab_basic(self):
        loc = self.get_test_loc('archive/cab/basic.cab')
        target = self.get_temp_dir()
        archive.extract_cab(loc, target)
        assert ['/TREEHELP.TXT'] == self.collect_extracted_path(target)

    def test_extract_cab_basic_with_weird_filename_extension(self):
        # extraction works regardless of the file extension
        loc = self.get_test_loc('archive/cab/t.cab.foo')
        target = self.get_temp_dir()
        archive.extract_cab(loc, target)
        assert ['/t/', '/t/t.txt'] == self.collect_extracted_path(target)
class TestCbr(BaseArchiveTestCase):
    """Tests for .cbr comic book archives (RAR-based)."""

    def test_get_extractor_cbr(self):
        loc = self.get_test_loc('archive/cbr/t.cbr')
        # we do not handle these rare extensions (this is a RAR):
        # no extractor is returned
        assert archive.get_extractor(loc) is None

    def test_extract_cbr_basic(self):
        loc = self.get_test_loc('archive/cbr/t.cbr')
        target = self.get_temp_dir()
        libarchive2.extract(loc, target)
        assert ['/t/', '/t/t.txt'] == self.collect_extracted_path(target)

    def test_extract_cbr_basic_with_weird_filename_extension(self):
        loc = self.get_test_loc('archive/cbr/t.cbr.foo')
        target = self.get_temp_dir()
        libarchive2.extract(loc, target)
        assert ['/t/', '/t/t.txt'] == self.collect_extracted_path(target)
class TestCbt(BaseArchiveTestCase):
    """Tests for .cbt comic book archives (tar-based)."""

    def test_get_extractor_cbt(self):
        loc = self.get_test_loc('archive/cbt/t.cbt')
        assert archive.extract_tar == archive.get_extractor(loc)

    def test_extract_cbt_basic(self):
        loc = self.get_test_loc('archive/cbt/t.cbt')
        target = self.get_temp_dir()
        archive.extract_tar(loc, target)
        assert ['/t/', '/t/t.txt'] == self.collect_extracted_path(target)

    def test_extract_cbt_basic_with_weird_filename_extension(self):
        loc = self.get_test_loc('archive/cbt/t.cbt.foo')
        target = self.get_temp_dir()
        archive.extract_tar(loc, target)
        assert ['/t/', '/t/t.txt'] == self.collect_extracted_path(target)
class TestCbz(BaseArchiveTestCase):
    """Tests for .cbz comic book archives (zip-based)."""

    def test_get_extractor_cbz(self):
        loc = self.get_test_loc('archive/cbz/t.cbz')
        assert archive.extract_zip == archive.get_extractor(loc)

    def test_extract_cbz_basic(self):
        loc = self.get_test_loc('archive/cbz/t.cbz')
        target = self.get_temp_dir()
        archive.extract_zip(loc, target)
        assert ['/t/', '/t/t.txt'] == self.collect_extracted_path(target)

    def test_extract_cbz_basic_with_weird_filename_extension(self):
        loc = self.get_test_loc('archive/cbz/t.cbz.foo')
        target = self.get_temp_dir()
        archive.extract_zip(loc, target)
        assert ['/t/', '/t/t.txt'] == self.collect_extracted_path(target)
# Note: this series of tests is not easy to grasp, but unicode archives on
# multiple OSes are hard to test. So we have one test class for each of
# libarchive and sevenzip on each of the three OSes, which makes six test
# classes, each duplicated with possibly different expectations on each OS.
# Then each test class has a subclass with check_only_warnings set to True to
# test only the possible warnings separately. The code tries to avoid too much
# duplication, but this is at the cost of readability.
def is_posixpath(location):
    """
    Return True if the `location` path is likely a POSIX-like path using POSIX
    path separators (slash or "/") or has no path separator.

    Return False if the `location` path is likely a Windows-like path using
    backslash as path separators (e.g. "\\").
    """
    # a non-empty path starting with a Windows drive letter is never POSIX
    if location and ntpath.splitdrive(location)[0]:
        return False
    # a path is always POSIX unless it contains ONLY backslashes, which is a
    # rough approximation (such a path could still be POSIX)
    if '\\' in location and '/' not in location:
        return False
    return True
def to_posix(path):
    """
    Return a path using the POSIX path separator given a path that may contain
    POSIX or Windows separators, converting backslash to slash. NB: this path
    will still be valid in the Windows explorer (except as a UNC or share
    name). It will be a valid path everywhere in Python. It will not be valid
    for Windows command line operations.
    """
    # pick unicode or byte-string separators to match the input type (py2)
    if isinstance(path, unicode):
        ntpath_sep, posixpath_sep = u'\\', u'/'
    else:
        ntpath_sep, posixpath_sep = '\\', '/'
    # an already-POSIX path is returned unchanged, except on Windows where any
    # remaining backslashes are still normalized
    if is_posixpath(path) and not on_windows:
        return path
    return path.replace(ntpath_sep, posixpath_sep)
class ExtractArchiveWithIllegalFilenamesTestCase(BaseArchiveTestCase):
    # When True, check_extract() only verifies the returned warnings and skips
    # the comparison of extracted paths. Subclasses toggle this flag.
    check_only_warnings = False

    def check_extract(self, test_function, test_file, expected_suffix, expected_warnings=None, regen=False):
        """
        Run the extraction `test_function` on `test_file` checking that the
        paths listed in the `test_file.expected` file exist in the extracted
        target directory. Regen the expected file if `regen` is True.
        """
        import json

        if not isinstance(test_file, unicode):
            test_file = unicode(test_file)
        test_file = self.get_test_loc(test_file)
        test_dir = self.get_temp_dir()
        warnings = test_function(test_file, test_dir)

        # shortcut if only a check of warnings is requested
        if self.check_only_warnings and expected_warnings is not None:
            assert sorted(expected_warnings) == sorted(warnings)
            return

        # collect extracted paths relative to the extraction dir, as POSIX
        len_test_dir = len(test_dir)
        extracted = sorted(path[len_test_dir:] for path in fileutils.resource_iter(test_dir, with_dirs=False))
        extracted = [to_posix(unicode(p)) for p in extracted]

        # expectations are OS-specific: one expected file per OS
        if on_linux:
            os_suffix = 'linux'
        elif on_mac:
            os_suffix = 'mac'
        elif on_windows:
            os_suffix = 'win'
        else:
            # fail loudly here rather than with an UnboundLocalError below
            raise Exception('Unsupported OS: no test expectations available.')

        expected_file = test_file + '_' + expected_suffix + '_' + os_suffix + '.expected'
        if regen:
            with open(expected_file, 'wb') as ef:
                ef.write(json.dumps(extracted, indent=2))
        # use a context manager so the file handle is not leaked
        with open(expected_file) as ef:
            expected = json.loads(ef.read())
        expected = [p for p in expected if p.strip()]
        assert expected == extracted
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnLinux(ExtractArchiveWithIllegalFilenamesTestCase):
    # Linux/libarchive variant: compares extracted paths against the
    # *_libarch_linux.expected files.
    check_only_warnings = False

    def test_extract_7zip_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.7z')
        self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')

    def test_extract_ar_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.ar')
        # libarchive reports a header error for ar entries with weird names
        warns = ['None: \nIncorrect file header signature']
        self.check_extract(libarchive2.extract, test_file, expected_warnings=warns, expected_suffix='libarch')

    def test_extract_cpio_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.cpio')
        self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')

    def test_extract_tar_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
        self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')

    def test_extract_zip_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.zip')
        self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnLinuxWarnings(TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnLinux):
    # Same tests as the parent class, but only the warnings are checked.
    check_only_warnings = True
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnMac(ExtractArchiveWithIllegalFilenamesTestCase):
    # Mac/libarchive variant: compares extracted paths against the
    # *_libarch_mac.expected files.
    check_only_warnings = False

    def test_extract_7zip_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.7z')
        self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')

    def test_extract_ar_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.ar')
        # libarchive reports a header error for ar entries with weird names
        warns = ['None: \nIncorrect file header signature']
        self.check_extract(libarchive2.extract, test_file, expected_warnings=warns, expected_suffix='libarch')

    def test_extract_cpio_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.cpio')
        self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')

    def test_extract_tar_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
        self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')

    def test_extract_zip_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.zip')
        self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnMacWarnings(TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnMac):
    # Same tests as the parent class, but only the warnings are checked.
    check_only_warnings = True
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnWindows(ExtractArchiveWithIllegalFilenamesTestCase):
    # Windows/libarchive variant: compares extracted paths against the
    # *_libarch_win.expected files.
    check_only_warnings = False

    def test_extract_7zip_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.7z')
        self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')

    def test_extract_ar_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.ar')
        # libarchive reports a header error for ar entries with weird names
        warns = [u'None: \nIncorrect file header signature']
        self.check_extract(libarchive2.extract, test_file, expected_warnings=warns, expected_suffix='libarch')

    def test_extract_cpio_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.cpio')
        self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')

    def test_extract_tar_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
        self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')

    def test_extract_zip_with_weird_filenames_with_libarchive(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.zip')
        self.check_extract(libarchive2.extract, test_file, expected_warnings=[], expected_suffix='libarch')
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnWindowsWarnings(TestExtractArchiveWithIllegalFilenamesWithLibarchiveOnWindows):
    # Same tests as the parent class, but only the warnings are checked.
    check_only_warnings = True
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnLinux(ExtractArchiveWithIllegalFilenamesTestCase):
    # Linux/sevenzip variant: compares extracted paths against the
    # *_7zip_linux.expected files.
    check_only_warnings = False

    def test_extract_7zip_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.7z')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    def test_extract_ar_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.ar')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    def test_extract_cpio_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.cpio')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    def test_extract_iso_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.iso')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    @expectedFailure  # not a problem: we now use libarchive for these
    def test_extract_rar_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.rar')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    def test_extract_tar_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    def test_extract_zip_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.zip')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@skipIf(not on_linux, 'Run only on Linux because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnLinuxWarnings(TestExtractArchiveWithIllegalFilenamesWithSevenzipOnLinux):
    # Same tests as the parent class, but only the warnings are checked.
    check_only_warnings = True
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings(ExtractArchiveWithIllegalFilenamesTestCase):
    # Mac/sevenzip warnings-only variant. Note: unlike on other OSes, this
    # warnings class is the base and the full-path class subclasses it below.
    check_only_warnings = True

    def test_extract_7zip_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.7z')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    def test_extract_ar_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.ar')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    def test_extract_cpio_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.cpio')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    def test_extract_iso_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.iso')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    @expectedFailure
    def test_extract_rar_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.rar')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    def test_extract_tar_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    def test_extract_zip_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.zip')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@skipIf(not on_mac, 'Run only on Mac because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMac(TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings):
    # Mac/sevenzip full-path variant: reuses the parent tests but most are
    # rebound as expected failures since sevenzip mishandles weird names on
    # Mac; in practice libarchive is used for most of these formats.
    check_only_warnings = False

    # not a problem: we use libarchive for these
    test_extract_7zip_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings
        .test_extract_7zip_with_weird_filenames_with_sevenzip)

    # not a problem: we use libarchive for these
    test_extract_ar_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings
        .test_extract_ar_with_weird_filenames_with_sevenzip)

    # not a problem: we use libarchive for these
    test_extract_cpio_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings
        .test_extract_cpio_with_weird_filenames_with_sevenzip)

    # This is a problem
    test_extract_iso_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings
        .test_extract_iso_with_weird_filenames_with_sevenzip)

    # not a problem: we use libarchive for these
    test_extract_tar_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings
        .test_extract_tar_with_weird_filenames_with_sevenzip)

    # not a problem: we use libarchive for these
    test_extract_zip_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnMacWarnings
        .test_extract_zip_with_weird_filenames_with_sevenzip)
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWin(ExtractArchiveWithIllegalFilenamesTestCase):
    # Windows/sevenzip variant: compares extracted paths against the
    # *_7zip_win.expected files. Several formats are expected failures since
    # libarchive is used for them in practice.
    check_only_warnings = False

    def test_extract_7zip_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.7z')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    @expectedFailure  # not a problem: we use libarchive for these
    def test_extract_ar_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.ar')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    @expectedFailure  # not a problem: we use libarchive for these
    def test_extract_cpio_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.cpio')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    def test_extract_iso_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.iso')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    @expectedFailure  # not a problem: we use libarchive for these
    def test_extract_rar_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.rar')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    def test_extract_tar_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.tar')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')

    @expectedFailure  # not a problem: we use libarchive for these
    def test_extract_zip_with_weird_filenames_with_sevenzip(self):
        test_file = self.get_test_loc('archive/weird_names/weird_names.zip')
        self.check_extract(sevenzip.extract, test_file, expected_warnings=[], expected_suffix='7zip')
@skipIf(not on_windows, 'Run only on Windows because of specific test expectations.')
class TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWinWarning(TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWin):
    # Same tests as the parent class, but only the warnings are checked.
    check_only_warnings = True

    # The results are not correct but not a problem: we use libarchive for these
    test_extract_7zip_with_weird_filenames_with_sevenzip = expectedFailure(
        TestExtractArchiveWithIllegalFilenamesWithSevenzipOnWin
        .test_extract_7zip_with_weird_filenames_with_sevenzip)
class TestZipSlip(BaseArchiveTestCase):
    # Tests for "zip slip" path-traversal archives: entries whose paths climb
    # out of the extraction dir with many "../" segments. Extraction must
    # neutralize them (each ".." becomes a literal "dotdot" dir on POSIX).

    def test_extract_zipslip_zip_posix(self):
        test_file = self.get_test_loc('archive/zipslip/zip-slip.zip')
        test_dir = self.get_temp_dir()
        result = archive.extract_zip(test_file, test_dir)
        assert [] == result
        if on_windows:
            expected = [u'good.txt', u'tmp/evil.txt']
        else:
            # the 40 "../" segments are defanged into "dotdot/" components
            expected = [
                'dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/tmp/evil.txt',
                'good.txt'
            ]
        check_files(test_dir, expected)

    @skipIf(on_windows, 'Fails with WindowsError: [Error 206] The filename or extension is too long:')
    def test_extract_zipslip_tar_posix(self):
        test_file = self.get_test_loc('archive/zipslip/zip-slip.tar')
        test_dir = self.get_temp_dir()
        result = archive.extract_tar(test_file, test_dir)
        assert [] == result
        expected = [
            'dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/tmp/evil.txt',
            'good.txt'
        ]
        check_files(test_dir, expected)

    def test_extract_zipslip_zip_win(self):
        # Windows-flavored zip slip: targets a "Temp" dir instead of "tmp"
        test_file = self.get_test_loc('archive/zipslip/zip-slip-win.zip')
        test_dir = self.get_temp_dir()
        result = archive.extract_zip(test_file, test_dir)
        assert [] == result
        if on_windows:
            expected = [u'Temp/evil.txt', u'good.txt']
        else:
            expected = [
                'dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/Temp/evil.txt',
                'good.txt'
            ]
        check_files(test_dir, expected)

    @skipIf(on_windows, 'Fails with WindowsError: [Error 206] The filename or extension is too long:')
    def test_extract_zipslip_tar_win(self):
        test_file = self.get_test_loc('archive/zipslip/zip-slip-win.tar')
        test_dir = self.get_temp_dir()
        result = archive.extract_tar(test_file, test_dir)
        assert [] == result
        expected = [
            'dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/dotdot/Temp/evil.txt',
            'good.txt'
        ]
        check_files(test_dir, expected)
|
en
| 0.729317
|
# # Copyright (c) 2017 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. # ScanCode is a trademark of nexB Inc. # # You may not use this software except in compliance with the License. # You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # # When you publish or redistribute any data created with ScanCode or any ScanCode # derivative work, you must accompany this data with the following acknowledgment: # # Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. No content created from # ScanCode should be considered or used as legal advice. Consult an Attorney # for any legal advice. # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. For each archive type --when possible-- we are testing extraction of: - basic, plain archive, no tricks - with trailing data appended to archive - broken, either truncated or with extra junk inserted - with hardlinks and symlinks, either valid or broken when supported - with hardlinks and symlinks loops (aka. 
tarbomb) when supported - with FIFO, character, sparse and other special files when supported - with relative paths pointing outside of the archive when supported - with absolute paths when supported - with invalid paths or mixed slash paths when supported - with unicode or binary path names - with duplicate names or paths when case is ignored - password-protected when supported # failed because of libmagic bug: http://bugs.gw.com/view.php?id=467 # passing by introducing strict flag for handlers # FWIW there is a related libmagic bug: http://bugs.gw.com/view.php?id=473 # The setup is a tad complex because we want to have a relative dir # to the base dir where we run tests from, ie the scancode-toolkit/ dir # To use relative paths, we use our tmp dir at the root of the code tree # The setup is a tad complex because we want to have a relative dir # to the base dir where we run tests from, ie the scancode-toolkit/ dir # To use relative paths, we use our tmp dir at the root of the code tree This assertion accepts an instance instead of a class for refined exception testing. Run the extraction `test_function` on `test_file` checking that a map of expected paths --> size exist in the extracted target directory. Does not test the presence of all files unless `check_all` is True. 
When extracting: %(test_file)s With function: %(test_function)r Failed to find expected path: %(exp_loc)s When extracting: %(test_file)s With function: %(test_function)r Failed to assert the correct size %(exp_size)d Got instead: %(res_size)d for expected path: %(exp_loc)s This test file was created with: import tarfile tar = tarfile.open("TarTest.tar.gz", "w:gz") tar.add('a.txt', '../a_parent_folder.txt') tar.add('b.txt', '../../another_folder/b_two_root.txt') tar.add('b.txt', '../folder/subfolder/b_subfolder.txt') tar.close() # these are skipped # this is a link: a -> ../x/a # 'z/y/a', # this is a broken link: x.a -> ../x.a # 'z/y/x.a', # this is a broken link: broken -> ../x/broken # 'z/z/broken', # from http://archive.apache.org/dist/commons/logging/source/commons-logging-1.1.2-src.tar.gz # failed with ReadError('not a bzip2 file',) # Archive created with: # echo "f1content" > f1 # echo "f2content" > f2 # gzip -k f1 # gzip -k -c f2 >> twofiles.gz # even though we do not fail when there is invalid trailing data we # should still fail on invalid leading data # weirdly enough, gzip keeps the original path/name This test file was created with: import tarfile tar = tarfile.open("TarTest.tar.gz", "w:bz") tar.add('a.txt', '../a_parent_folder.txt') tar.add('b.txt', '../../another_folder/b_two_root.txt') tar.add('b.txt', '../folder/subfolder/b_subfolder.txt') tar.close() # the extraction dir is not created with suffix by z7 # a self executable springboot Jar is a zip with a shell script prefix # note: broken zip opens and extracts with 7z with exceptions sometimes # something is extracted in latest 7z # result = os.path.join(test_dir, 'a.txt') # print(test_dir) # assert os.path.exists(result) # fails because of https://github.com/libarchive/libarchive/issues/545 # test archive created on cygwin with: # $ echo "test content" > f1 # $ zip test f1 # $ echo "some junk" >> test.zip # fails because of https://github.com/libarchive/libarchive/issues/545 # The test files for 
this test and the next one were created with: # from zipfile import ZipFile # f = open('/tmp/a.txt', 'w') # f.write('some data') # f.close() # f = open('/tmp/b.txt', 'w') # f.write('some data') # f.close() # f = ZipFile(os.path.join(self.get_test_loc('archive'), 'relative_parent_folders.zip'), 'w') # f.write('/tmp/a.txt', '../a_parent_folder.txt') # f.write('/tmp/b.txt', '../../another_folder/b_two_root.txt') # f.write('/tmp/b.txt', '../folder/subfolder/b_subfolder.txt') # f.close() # f = ZipFile(os.path.join(self.get_test_loc('archive'), 'high_ancest.zip'), 'w') # f.write('/tmp/a.txt', ('../' * 12) + 'a_parent_folder.txt') # f.write('/tmp/a.txt', ('../' * 12) + ('sub/' * 6) + 'a_parent_folder_in_sub_1.txt') # f.write('/tmp/a.txt', ('../' * 6) + ('sub/' * 12) + 'a_parent_folder_in_sub_2.txt') # f.write('/tmp/a.txt', ('../' * 12) + ('sub/' * 12) + 'a_parent_folder_in_sub_3.txt') # f.close() # somehow Windows fails randomly and only on certain windows machines at Appveyor # so we retest with a skinny expectation # a directory # DST sends a monkey wrench.... so we only test the date, not the time # DST sends a monkey wrench.... so we only test the date, not the time # and we accept some varation in the date ... 
# Info-ZIP 'zip' displays: # warning: booxw-1202-bin.distribution.zip appears to use # backslashes as path separators (which is the right thing to do) AspectJTest/.classpath AspectJTest/.project AspectJTest/src/META-INF/aop.xml AspectJTest/src/p3/ExpertFlyable.java AspectJTest/src/p3/MakeFlyableAspect.java AspectJTest/src/p3/Flyable.java AspectJTest/src/p3/MakeFlyable.java AspectJTest/src/p3/Main2.java AspectJTest/src/p3/p4/Person.java AspectJTest/src/p2/MyLoggingAspect.java AspectJTest/src/p1/MyService.java AspectJTest/src/p1/Main1.java AspectJTest/bin/META-INF/aop.xml AspectJTest/bin/p3/MakeFlyableAspect.class AspectJTest/bin/p3/ExpertFlyable.class AspectJTest/bin/p3/Flyable.class AspectJTest/bin/p3/Main2.class AspectJTest/bin/p3/MakeFlyable.class AspectJTest/bin/p3/p4/Person.class AspectJTest/bin/p2/MyLoggingAspect.class AspectJTest/bin/p1/Main1.class AspectJTest/bin/p1/MyService.class This test file was created with: import tarfile tar = tarfile.open("TarTest.tar.gz", "w") tar.add('a.txt', '../a_parent_folder.txt') tar.add('b.txt', '../../another_folder/b_two_root.txt') tar.add('b.txt', '../folder/subfolder/b_subfolder.txt') tar.close() # '1-LNKTYPE', links are skipped # special files are skipped too # '2-SYMTYPE: Skipping broken link to: testtar/0-REGTYPE', # '3-CHRTYPE: Skipping special file.', # '6-FIFOTYPE: Skipping special file.' # this is from: # https://hg.python.org/cpython/raw-file/bff88c866886/Lib/test/testtar.tar # DST sends a monkey wrench.... so we only test the date, not the time # inccorrect for now: need this: ['__.SYMDEF', 'release/init.obj'] # 7zip is better, but has a security bug for now # GNU ar works fine otherwise, but there are portability issues # the symdef file is 1.txt with 7z # this behavior is not correct: 7z is better, but has security flaws for now # test file is created by cmd: find ../.. 
- |cpio -ov >relative.cpio # We should somehow add a "parent" folder to extract relative paths # When the RPM is renamed, we should still be able to find the cpio # this will return an extractor that extracts twice # The setup is a tad complex because we want to have a relative dir # to the base dir where we run tests from, ie the scancode-toolkit/ dir # To use relative paths, we use our tmp dir at the root of the code tree # this will return an extractor that extracts twice # FIXME: this file may not have a real relative path # FIXME: this file may not have a real absolute path # The bug only occurs if the path was given as Unicode # raise an exception but still extracts some Run the 'extract_fun' function using the 'test_file' file as an input and verifies that the 'expected' file has been extracted correctly. # we do not handle these rare extensions (this is a RAR) # archive.extract_rar # Note: this series of test is not easy to grasp but unicode archives on multiple OS # are hard to tests. So we have one test class for each libarchive and sevenzip on # each of the three OSses which makes siz test classes each duplicated with # eventually different expectations on each OS. Then each test class has a subclass # with check_warnings set to True to tests only possible warnings separately. # The code tries to avoid too much duplication, but this is at the cost of readability Return True if the `location` path is likely a POSIX-like path using POSIX path separators (slash or "/")or has no path separator. Return False if the `location` path is likely a Windows-like path using backslash as path separators (e.g. "\"). # windows paths with drive # a path is always POSIX unless it contains ONLY backslahes # which is a rough approximation (it could still be posix) Return a path using the posix path separator given a path that may contain posix or windows separators, converting \ to /. NB: this path will still be valid in the windows explorer (except as a UNC or share name). 
It will be a valid path everywhere in Python. It will not be valid for windows command line operations. Run the extraction `test_function` on `test_file` checking that the paths listed in the `test_file.excepted` file exist in the extracted target directory. Regen expected file if True. # shortcut if check of warnings are requested # not a problem: we now use libarchive for these # not a problem: we use libarchive for these # not a problem: we use libarchive for these # not a problem: we use libarchive for these # This is a problem # not a problem: we use libarchive for these # not a problem: we use libarchive for these # not a problem: we use libarchive for these # not a problem: we use libarchive for these # not a problem: we use libarchive for these # not a problem: we use libarchive for these # The results are not correct but not a problem: we use libarchive for these
| 1.242739
| 1
|
tests/unit/test_nucleotide_sequences.py
|
samirelanduk/valerius
| 0
|
6626197
|
<filename>tests/unit/test_nucleotide_sequences.py
from unittest import TestCase
from unittest.mock import Mock, patch, PropertyMock, MagicMock
from valerius.sequences import NucleotideSequence
class GcContentTests(TestCase):
def test_can_get_gc_content(self):
s = NucleotideSequence("ABC")
self.assertEqual(s.gc_content, 1 / 3)
def test_can_get_gc_content_no_gc(self):
s = NucleotideSequence("ABD")
self.assertEqual(s.gc_content, 0)
def test_can_get_gc_content_no_sequence(self):
s = NucleotideSequence("")
self.assertEqual(s.gc_content, 0)
|
<filename>tests/unit/test_nucleotide_sequences.py
from unittest import TestCase
from unittest.mock import Mock, patch, PropertyMock, MagicMock
from valerius.sequences import NucleotideSequence
class GcContentTests(TestCase):
def test_can_get_gc_content(self):
s = NucleotideSequence("ABC")
self.assertEqual(s.gc_content, 1 / 3)
def test_can_get_gc_content_no_gc(self):
s = NucleotideSequence("ABD")
self.assertEqual(s.gc_content, 0)
def test_can_get_gc_content_no_sequence(self):
s = NucleotideSequence("")
self.assertEqual(s.gc_content, 0)
|
none
| 1
| 2.72298
| 3
|
|
Python/Fluent_Python/chapter5/section15/mirror.py
|
sunyunxian/test_lib
| 1
|
6626198
|
class LookingGlass:
def __enter__(self):
import sys
self.original_write = sys.stdout.write
sys.stdout.write = self.reverse_write
return 'JABBERWOCKY'
def __exit__(self, exc_type, exc_value, traceback):
import sys
sys.stdout.write = self.original_write
if exc_type is ZeroDivisionError:
print('Please DO NOT divide by zero!')
return True
def reverse_write(self, text):
self.origin_write(text[::-1])
|
class LookingGlass:
def __enter__(self):
import sys
self.original_write = sys.stdout.write
sys.stdout.write = self.reverse_write
return 'JABBERWOCKY'
def __exit__(self, exc_type, exc_value, traceback):
import sys
sys.stdout.write = self.original_write
if exc_type is ZeroDivisionError:
print('Please DO NOT divide by zero!')
return True
def reverse_write(self, text):
self.origin_write(text[::-1])
|
none
| 1
| 3.12262
| 3
|
|
Amplo/GridSearch/_GridSearch.py
|
Amplo-GmbH/AutoML
| 5
|
6626199
|
from abc import abstractmethod
import multiprocessing as mp
import re
from typing import Any, Dict, List, Tuple, Optional, Union
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.metrics import SCORERS
from sklearn.metrics._scorer import _BaseScorer # noqa
__all__ = ['_GridSearch']
class _GridSearch:
def __init__(
self,
model,
params=None,
candidates=250,
timeout=None,
cv=KFold(n_splits=10),
scoring='accuracy',
verbose=0,
):
"""
Abstract base class for grid search.
Purposes:
- Enforces to inheriting classes to implement all abstract methods.
- Defines the hyperparameter search space as it's the same for all
grid search methods.
Parameters
----------
model : Amplo.AutoML.Modeller.ModelType
Model object to optimize.
params : optional
Parameters to optimize. Has no effect for `OptunaGridSearch`.
candidates : int
Limit the number of candidates to search.
timeout : int
Limit the time for optimization.
cv : sklearn.model_selection.BaseCrossValidator
Cross validation object.
scoring : str or sklearn.metrics._scorer._BaseScorer
A valid string for `sklearn.metrics.SCORERS`
verbose : int
Verbose logging.
"""
# Input tests
if hasattr(model, 'is_fitted') and model.is_fitted():
raise AssertionError('Model already fitted')
if isinstance(scoring, str):
self.scoring = SCORERS[scoring]
elif not issubclass(type(scoring), _BaseScorer):
raise ValueError('Parameter `scoring` must originate from `sklearn.metrics.make_scorer()` '
'or must be a valid string for `sklearn.metrics.SCORERS`.')
# Set class attributes
self.model = model
self.params = params
self.nTrials = candidates
self.timeout = timeout
self.cv = cv
self.scoring = SCORERS[scoring] if isinstance(scoring, str) else scoring
self.verbose = verbose
self.x, self.y = None, None
self.binary = True
self.samples = None
# Model specific settings
if type(self.model).__name__ == 'LinearRegression':
self.nTrials = 1
@property
def _hyper_parameter_values(self) \
-> Dict[str, Tuple[str, List[Union[str, float]], Optional[int]]]:
"""Get model specific hyper parameter values, indicating predefined
search areas to optimize.
Notes
-----
Each item of the output dictionary consists of:
- parameter name (str), and
- parameter specifications (tuple)
- parameter type (str)
- parameter arguments (list)
- number of distinct values (int, optional) [only for exhaustive grid search]
Parameter types include:
- 'categorical': categorical values
- 'int': discretized uniform value space
- 'logint': discretized logarithmic uniform value space
- 'uniform': uniform value space
- 'loguniform': logarithmic uniform value space
Parameter arguments are:
- [categorical]: a list of all options
- [int, logint, uniform, loguniform]: a tuple with min and max value
**Special case (conditionals):**
In some cases, one wants to grid-search certain parameters only
if another parameter condition is present. Such conditions are
specified via the dedicated key 'CONDITIONALS'.
"""
# Extract model name & type
model_name = type(self.model).__name__
model_type = re.split(r'Regressor|Classifier', model_name)[0]
# Determine whether it's classification or regression
is_regression = bool(re.match(r'.*(Regression|Regressor|SVR)', model_name))
is_classification = bool(re.match(r'.*(Classification|Classifier|SVC)', model_name))
assert is_regression or is_classification,\
'Could not determine mode (regression or classification)'
# Define min-max-function
def minimax(min_, value, max_):
return max(min_, min(value, max_))
# Find matching model and return its parameter values
if model_name == 'LinearRegression':
return {}
elif model_name == 'Lasso' or 'Ridge' in model_name:
return dict(
alpha=('uniform', [0, 10], 25),
)
elif model_name in ('SVR', 'SVC'):
return dict(
gamma=('categorical', ['scale', 'auto', 0.001, 0.01, 0.1, 0.5, 1]),
C=('uniform', [0.001, 10], 25),
)
elif model_type == 'KNeighbors':
return dict(
n_neighbors=('int', [5, minimax(5, self.samples // 10, 50)], 5),
weights=('categorical', ['uniform', 'distance']),
leaf_size=('int', [1, minimax(1, self.samples // 10, 100)], 5),
n_jobs=('categorical', [mp.cpu_count() - 1]),
)
elif model_type == 'MLP':
raise NotImplementedError('MLP is not supported')
elif model_type == 'SGD':
params = dict(
loss=('categorical', ['squared_loss', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive']),
penalty=('categorical', ['l2', 'l1', 'elasticnet']),
alpha=('uniform', [0, 10], 5),
max_iter=('int', [250, 1000], 3),
)
if is_classification:
params.update(loss=('categorical', ['hinge', 'log', 'modified_huber', 'squared_hinge']))
return params
elif model_type == 'DecisionTree':
params = dict(
criterion=('categorical', ['squared_error', 'friedman_mse', 'absolute_error', 'poisson']),
max_depth=('int', [3, minimax(3, int(np.log2(self.samples)), 25)], 4),
)
if is_classification:
params.update(criterion=('categorical', ['gini', 'entropy']))
return params
elif model_type == 'AdaBoost':
params = dict(
n_estimators=('int', [25, 250], 5),
loss=('categorical', ['linear', 'square', 'exponential']),
learning_rate=('loguniform', [0.001, 1], 10),
)
if is_classification:
params.pop('loss', None)
return params
elif model_type == 'Bagging':
params = dict(
n_estimators=('int', [10, 250], 4),
max_samples=('uniform', [0.5, 1.0], 4),
max_features=('uniform', [0.5, 1.0], 4),
n_jobs=('categorical', [mp.cpu_count() - 1]),
)
return params
elif model_type == 'CatBoost':
params = dict(
n_estimators=('int', [500, 2000], 5),
verbose=('categorical', [0]),
od_pval=('categorical', [1e-5]),
loss_function=('categorical', ['MAE', 'RMSE']),
learning_rate=('loguniform', [0.001, 0.5], 5),
l2_leaf_reg=('uniform', [0, 10], 5),
depth=('int', [3, minimax(3, int(np.log2(self.samples)), 10)], 4),
min_data_in_leaf=('int', [1, minimax(1, self.samples // 10, 1000)], 5),
grow_policy=('categorical', ['SymmetricTree', 'Depthwise', 'Lossguide']),
)
if is_classification:
params.update(loss_function=('categorical', ['Logloss' if self.binary else 'MultiClass']))
return params
elif model_type == 'GradientBoosting':
params = dict(
loss=('categorical', ['ls', 'lad', 'huber']),
learning_rate=('loguniform', [0.001, 0.5], 10),
max_depth=('int', [3, minimax(3, int(np.log2(self.samples)), 10)], 4),
n_estimators=('int', [100, 1000], 4),
min_samples_leaf=('int', [1, minimax(1, int(self.samples / 10), 1000)], 3),
max_features=('uniform', [0.5, 1], 3),
subsample=('uniform', [0.5, 1], 3)
)
if is_classification:
params.update(loss=('categorical', ['deviance', 'exponential']))
return params
elif model_type == 'HistGradientBoosting':
params = dict(
loss=('categorical', ['least_squares', 'least_absolute_deviation']),
learning_rate=('loguniform', [0.001, 0.5], 10),
max_iter=('int', [100, 250], 4),
max_leaf_nodes=('int', [30, 150], 4),
max_depth=('int', [3, minimax(3, int(np.log2(self.samples)), 10)], 4),
min_samples_leaf=('int', [1, minimax(1, int(self.samples / 10), 1000)], 4),
l2_regularization=('uniform', [0, 10], 5),
max_bins=('int', [100, 255], 4),
early_stopping=('categorical', [True])
)
if is_classification:
params.pop('loss', None)
return params
elif model_type == 'RandomForest':
params = dict(
n_estimators=('int', [50, 1000], 5),
criterion=('categorical', ['squared_error', 'absolute_error']),
max_depth=('int', [3, minimax(3, int(np.log2(self.samples)), 15)], 4),
max_features=('categorical', ['auto', 'sqrt']),
min_samples_split=('int', [2, 50], 4),
min_samples_leaf=('int', [1, minimax(1, self.samples // 10, 1000)], 5),
bootstrap=('categorical', [True, False]),
)
if is_classification:
params.update(criterion=('categorical', ['gini', 'entropy']))
return params
elif model_type == 'XGB':
params = dict(
objective=('categorical', ['reg:squarederror']),
eval_metric=('categorical', ['rmse']),
booster=('categorical', ['gbtree', 'gblinear', 'dart']),
alpha=('loguniform', [1e-8, 1], 10),
learning_rate=('loguniform', [0.001, 0.5], 5),
n_jobs=('categorical', [mp.cpu_count() - 1]),
)
params['lambda'] = ('loguniform', [1e-8, 1], 10)
if is_classification:
params.update(
objective=('categorical', ['multi:softprob']),
eval_metric=('categorical', ['logloss']),
)
params['CONDITIONALS'] = dict(
booster=[
('gbtree', dict(
max_depth=('int', [1, minimax(1, int(np.log2(self.samples)), 10)], 5),
eta=('loguniform', [1e-8, 1], 5),
gamma=('loguniform', [1e-8, 1], 5),
grow_policy=('categorical', ['depthwise', 'lossguide']),
)),
('dart', dict(
max_depth=('int', [1, minimax(1, int(np.log2(self.samples)), 10)], 5),
eta=('loguniform', [1e-8, 1], 5),
gamma=('loguniform', [1e-8, 1], 5),
grow_policy=('categorical', ['depthwise', 'lossguide']),
sample_type=('categorical', ['uniform', 'weighted']),
normalize_type=('categorical', ['tree', 'forest']),
rate_drop=('loguniform', [1e-8, 1], 5),
skip_drop=('loguniform', [1e-8, 1], 5),
)),
],
)
return params
elif model_type == 'LGBM':
if is_regression:
return dict(
num_leaves=('int', [10, 150], 5),
min_data_in_leaf=('int', [1, minimax(1, self.samples // 10, 1000)], 0),
min_sum_hessian_in_leaf=('uniform', [0.001, 0.5], 0),
colsample_bytree=('uniform', [0, 1], 5),
reg_alpha=('uniform', [0, 1], 5),
reg_lambda=('uniform', [0, 1], 5),
verbosity=('categorical', [-1]),
n_jobs=('categorical', [mp.cpu_count() - 1]),
)
else: # is_classification
return dict(
objective=('categorical', ['binary' if self.binary else 'multiclass']),
metric=('categorical',
['binary_error', 'auc', 'average_precision', 'binary_logloss']
if self.binary else ['multi_error', 'multi_logloss', 'auc_mu']),
boosting_type=('categorical', ['gbdt']),
lambda_l1=('loguniform', [1e-8, 10], 4),
lambda_l2=('loguniform', [1e-8, 10], 4),
num_leaves=('int', [10, 5000], 4),
max_depth=('int', [5, 20], 4),
min_data_in_leaf=('int', [1, minimax(1, self.samples // 10, 1000)], 0),
min_gain_to_split=('uniform', [0, 5], 0),
feature_fraction=('uniform', [0.4, 1], 0),
bagging_fraction=('uniform', [0.4, 1], 0),
bagging_freq=('int', [1, 7], 0),
verbosity=('categorical', [-1]),
n_jobs=('categorical', [mp.cpu_count() - 1]),
)
# Raise error if no match was found
raise NotImplementedError('Hyper parameter tuning not implemented for {}'.format(model_name))
def get_parameter_min_max(self) -> pd.DataFrame:
"""
Get all min and max values from model-specific set of parameters.
Omit categorical parameters as min and max values are ambiguous.
Returns
-------
param_min_max : pd.DataFrame
Min and max values.
"""
# Get all model's parameters
param_values = self._hyper_parameter_values
# Pop conditionals and integrate all into `param_values`
conditionals = param_values.pop('CONDITIONALS', {})
for check_p_name, check_p_criteria in conditionals.items():
for matching_value, additional_params in check_p_criteria:
for name, value in additional_params.items():
param_values[name] = value
# Filter for min and max in non-categorical parameters
param_min_max = {}
for p_name, value in param_values.items():
p_type = value[0]
p_args = value[1]
if p_type in ('int', 'logint', 'uniform', 'loguniform'):
# Sanity check
assert len(p_args) == 2, 'A {} should have a min and a max value'.format(p_type)
# Add item to dict
add_item = {p_name: {'min': p_args[0], 'max': p_args[1]}}
param_min_max.update(add_item)
# Combine all values to pd.DataFrame
param_min_max = pd.DataFrame(param_min_max).T
return param_min_max
@abstractmethod
def _get_hyper_params(self, *args, **kwargs) -> Dict[str, Any]:
"""
Get grid search specific distributions or samples.
This function translates `self._hyper_parameter_values` to the expected
format for the given grid search.
Parameters
----------
args : optional
Grid search specific arguments.
kwargs : optional
Grid search specific keyword arguments.
Returns
-------
Grid search parameters
"""
pass
@abstractmethod
def fit(self, x, y) -> pd.DataFrame:
"""
Run fit with model-specific set of parameters.
Parameters
----------
x : array
Data features.
y : array
Data labels
Returns
-------
results : pd.DataFrame
Results of the grid search.
"""
pass
|
from abc import abstractmethod
import multiprocessing as mp
import re
from typing import Any, Dict, List, Tuple, Optional, Union
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.metrics import SCORERS
from sklearn.metrics._scorer import _BaseScorer # noqa
__all__ = ['_GridSearch']
class _GridSearch:
def __init__(
self,
model,
params=None,
candidates=250,
timeout=None,
cv=KFold(n_splits=10),
scoring='accuracy',
verbose=0,
):
"""
Abstract base class for grid search.
Purposes:
- Enforces to inheriting classes to implement all abstract methods.
- Defines the hyperparameter search space as it's the same for all
grid search methods.
Parameters
----------
model : Amplo.AutoML.Modeller.ModelType
Model object to optimize.
params : optional
Parameters to optimize. Has no effect for `OptunaGridSearch`.
candidates : int
Limit the number of candidates to search.
timeout : int
Limit the time for optimization.
cv : sklearn.model_selection.BaseCrossValidator
Cross validation object.
scoring : str or sklearn.metrics._scorer._BaseScorer
A valid string for `sklearn.metrics.SCORERS`
verbose : int
Verbose logging.
"""
# Input tests
if hasattr(model, 'is_fitted') and model.is_fitted():
raise AssertionError('Model already fitted')
if isinstance(scoring, str):
self.scoring = SCORERS[scoring]
elif not issubclass(type(scoring), _BaseScorer):
raise ValueError('Parameter `scoring` must originate from `sklearn.metrics.make_scorer()` '
'or must be a valid string for `sklearn.metrics.SCORERS`.')
# Set class attributes
self.model = model
self.params = params
self.nTrials = candidates
self.timeout = timeout
self.cv = cv
self.scoring = SCORERS[scoring] if isinstance(scoring, str) else scoring
self.verbose = verbose
self.x, self.y = None, None
self.binary = True
self.samples = None
# Model specific settings
if type(self.model).__name__ == 'LinearRegression':
self.nTrials = 1
@property
def _hyper_parameter_values(self) \
-> Dict[str, Tuple[str, List[Union[str, float]], Optional[int]]]:
"""Get model specific hyper parameter values, indicating predefined
search areas to optimize.
Notes
-----
Each item of the output dictionary consists of:
- parameter name (str), and
- parameter specifications (tuple)
- parameter type (str)
- parameter arguments (list)
- number of distinct values (int, optional) [only for exhaustive grid search]
Parameter types include:
- 'categorical': categorical values
- 'int': discretized uniform value space
- 'logint': discretized logarithmic uniform value space
- 'uniform': uniform value space
- 'loguniform': logarithmic uniform value space
Parameter arguments are:
- [categorical]: a list of all options
- [int, logint, uniform, loguniform]: a tuple with min and max value
**Special case (conditionals):**
In some cases, one wants to grid-search certain parameters only
if another parameter condition is present. Such conditions are
specified via the dedicated key 'CONDITIONALS'.
"""
# Extract model name & type
model_name = type(self.model).__name__
model_type = re.split(r'Regressor|Classifier', model_name)[0]
# Determine whether it's classification or regression
is_regression = bool(re.match(r'.*(Regression|Regressor|SVR)', model_name))
is_classification = bool(re.match(r'.*(Classification|Classifier|SVC)', model_name))
assert is_regression or is_classification,\
'Could not determine mode (regression or classification)'
# Define min-max-function
def minimax(min_, value, max_):
return max(min_, min(value, max_))
# Find matching model and return its parameter values
if model_name == 'LinearRegression':
return {}
elif model_name == 'Lasso' or 'Ridge' in model_name:
return dict(
alpha=('uniform', [0, 10], 25),
)
elif model_name in ('SVR', 'SVC'):
return dict(
gamma=('categorical', ['scale', 'auto', 0.001, 0.01, 0.1, 0.5, 1]),
C=('uniform', [0.001, 10], 25),
)
elif model_type == 'KNeighbors':
return dict(
n_neighbors=('int', [5, minimax(5, self.samples // 10, 50)], 5),
weights=('categorical', ['uniform', 'distance']),
leaf_size=('int', [1, minimax(1, self.samples // 10, 100)], 5),
n_jobs=('categorical', [mp.cpu_count() - 1]),
)
elif model_type == 'MLP':
raise NotImplementedError('MLP is not supported')
elif model_type == 'SGD':
params = dict(
loss=('categorical', ['squared_loss', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive']),
penalty=('categorical', ['l2', 'l1', 'elasticnet']),
alpha=('uniform', [0, 10], 5),
max_iter=('int', [250, 1000], 3),
)
if is_classification:
params.update(loss=('categorical', ['hinge', 'log', 'modified_huber', 'squared_hinge']))
return params
elif model_type == 'DecisionTree':
params = dict(
criterion=('categorical', ['squared_error', 'friedman_mse', 'absolute_error', 'poisson']),
max_depth=('int', [3, minimax(3, int(np.log2(self.samples)), 25)], 4),
)
if is_classification:
params.update(criterion=('categorical', ['gini', 'entropy']))
return params
elif model_type == 'AdaBoost':
params = dict(
n_estimators=('int', [25, 250], 5),
loss=('categorical', ['linear', 'square', 'exponential']),
learning_rate=('loguniform', [0.001, 1], 10),
)
if is_classification:
params.pop('loss', None)
return params
elif model_type == 'Bagging':
params = dict(
n_estimators=('int', [10, 250], 4),
max_samples=('uniform', [0.5, 1.0], 4),
max_features=('uniform', [0.5, 1.0], 4),
n_jobs=('categorical', [mp.cpu_count() - 1]),
)
return params
elif model_type == 'CatBoost':
params = dict(
n_estimators=('int', [500, 2000], 5),
verbose=('categorical', [0]),
od_pval=('categorical', [1e-5]),
loss_function=('categorical', ['MAE', 'RMSE']),
learning_rate=('loguniform', [0.001, 0.5], 5),
l2_leaf_reg=('uniform', [0, 10], 5),
depth=('int', [3, minimax(3, int(np.log2(self.samples)), 10)], 4),
min_data_in_leaf=('int', [1, minimax(1, self.samples // 10, 1000)], 5),
grow_policy=('categorical', ['SymmetricTree', 'Depthwise', 'Lossguide']),
)
if is_classification:
params.update(loss_function=('categorical', ['Logloss' if self.binary else 'MultiClass']))
return params
elif model_type == 'GradientBoosting':
params = dict(
loss=('categorical', ['ls', 'lad', 'huber']),
learning_rate=('loguniform', [0.001, 0.5], 10),
max_depth=('int', [3, minimax(3, int(np.log2(self.samples)), 10)], 4),
n_estimators=('int', [100, 1000], 4),
min_samples_leaf=('int', [1, minimax(1, int(self.samples / 10), 1000)], 3),
max_features=('uniform', [0.5, 1], 3),
subsample=('uniform', [0.5, 1], 3)
)
if is_classification:
params.update(loss=('categorical', ['deviance', 'exponential']))
return params
elif model_type == 'HistGradientBoosting':
params = dict(
loss=('categorical', ['least_squares', 'least_absolute_deviation']),
learning_rate=('loguniform', [0.001, 0.5], 10),
max_iter=('int', [100, 250], 4),
max_leaf_nodes=('int', [30, 150], 4),
max_depth=('int', [3, minimax(3, int(np.log2(self.samples)), 10)], 4),
min_samples_leaf=('int', [1, minimax(1, int(self.samples / 10), 1000)], 4),
l2_regularization=('uniform', [0, 10], 5),
max_bins=('int', [100, 255], 4),
early_stopping=('categorical', [True])
)
if is_classification:
params.pop('loss', None)
return params
elif model_type == 'RandomForest':
params = dict(
n_estimators=('int', [50, 1000], 5),
criterion=('categorical', ['squared_error', 'absolute_error']),
max_depth=('int', [3, minimax(3, int(np.log2(self.samples)), 15)], 4),
max_features=('categorical', ['auto', 'sqrt']),
min_samples_split=('int', [2, 50], 4),
min_samples_leaf=('int', [1, minimax(1, self.samples // 10, 1000)], 5),
bootstrap=('categorical', [True, False]),
)
if is_classification:
params.update(criterion=('categorical', ['gini', 'entropy']))
return params
elif model_type == 'XGB':
params = dict(
objective=('categorical', ['reg:squarederror']),
eval_metric=('categorical', ['rmse']),
booster=('categorical', ['gbtree', 'gblinear', 'dart']),
alpha=('loguniform', [1e-8, 1], 10),
learning_rate=('loguniform', [0.001, 0.5], 5),
n_jobs=('categorical', [mp.cpu_count() - 1]),
)
params['lambda'] = ('loguniform', [1e-8, 1], 10)
if is_classification:
params.update(
objective=('categorical', ['multi:softprob']),
eval_metric=('categorical', ['logloss']),
)
params['CONDITIONALS'] = dict(
booster=[
('gbtree', dict(
max_depth=('int', [1, minimax(1, int(np.log2(self.samples)), 10)], 5),
eta=('loguniform', [1e-8, 1], 5),
gamma=('loguniform', [1e-8, 1], 5),
grow_policy=('categorical', ['depthwise', 'lossguide']),
)),
('dart', dict(
max_depth=('int', [1, minimax(1, int(np.log2(self.samples)), 10)], 5),
eta=('loguniform', [1e-8, 1], 5),
gamma=('loguniform', [1e-8, 1], 5),
grow_policy=('categorical', ['depthwise', 'lossguide']),
sample_type=('categorical', ['uniform', 'weighted']),
normalize_type=('categorical', ['tree', 'forest']),
rate_drop=('loguniform', [1e-8, 1], 5),
skip_drop=('loguniform', [1e-8, 1], 5),
)),
],
)
return params
elif model_type == 'LGBM':
if is_regression:
return dict(
num_leaves=('int', [10, 150], 5),
min_data_in_leaf=('int', [1, minimax(1, self.samples // 10, 1000)], 0),
min_sum_hessian_in_leaf=('uniform', [0.001, 0.5], 0),
colsample_bytree=('uniform', [0, 1], 5),
reg_alpha=('uniform', [0, 1], 5),
reg_lambda=('uniform', [0, 1], 5),
verbosity=('categorical', [-1]),
n_jobs=('categorical', [mp.cpu_count() - 1]),
)
else: # is_classification
return dict(
objective=('categorical', ['binary' if self.binary else 'multiclass']),
metric=('categorical',
['binary_error', 'auc', 'average_precision', 'binary_logloss']
if self.binary else ['multi_error', 'multi_logloss', 'auc_mu']),
boosting_type=('categorical', ['gbdt']),
lambda_l1=('loguniform', [1e-8, 10], 4),
lambda_l2=('loguniform', [1e-8, 10], 4),
num_leaves=('int', [10, 5000], 4),
max_depth=('int', [5, 20], 4),
min_data_in_leaf=('int', [1, minimax(1, self.samples // 10, 1000)], 0),
min_gain_to_split=('uniform', [0, 5], 0),
feature_fraction=('uniform', [0.4, 1], 0),
bagging_fraction=('uniform', [0.4, 1], 0),
bagging_freq=('int', [1, 7], 0),
verbosity=('categorical', [-1]),
n_jobs=('categorical', [mp.cpu_count() - 1]),
)
# Raise error if no match was found
raise NotImplementedError('Hyper parameter tuning not implemented for {}'.format(model_name))
def get_parameter_min_max(self) -> pd.DataFrame:
"""
Get all min and max values from model-specific set of parameters.
Omit categorical parameters as min and max values are ambiguous.
Returns
-------
param_min_max : pd.DataFrame
Min and max values.
"""
# Get all model's parameters
param_values = self._hyper_parameter_values
# Pop conditionals and integrate all into `param_values`
conditionals = param_values.pop('CONDITIONALS', {})
for check_p_name, check_p_criteria in conditionals.items():
for matching_value, additional_params in check_p_criteria:
for name, value in additional_params.items():
param_values[name] = value
# Filter for min and max in non-categorical parameters
param_min_max = {}
for p_name, value in param_values.items():
p_type = value[0]
p_args = value[1]
if p_type in ('int', 'logint', 'uniform', 'loguniform'):
# Sanity check
assert len(p_args) == 2, 'A {} should have a min and a max value'.format(p_type)
# Add item to dict
add_item = {p_name: {'min': p_args[0], 'max': p_args[1]}}
param_min_max.update(add_item)
# Combine all values to pd.DataFrame
param_min_max = pd.DataFrame(param_min_max).T
return param_min_max
@abstractmethod
def _get_hyper_params(self, *args, **kwargs) -> Dict[str, Any]:
"""
Get grid search specific distributions or samples.
This function translates `self._hyper_parameter_values` to the expected
format for the given grid search.
Parameters
----------
args : optional
Grid search specific arguments.
kwargs : optional
Grid search specific keyword arguments.
Returns
-------
Grid search parameters
"""
pass
@abstractmethod
def fit(self, x, y) -> pd.DataFrame:
"""
Run fit with model-specific set of parameters.
Parameters
----------
x : array
Data features.
y : array
Data labels
Returns
-------
results : pd.DataFrame
Results of the grid search.
"""
pass
|
en
| 0.422549
|
# noqa Abstract base class for grid search. Purposes: - Enforces to inheriting classes to implement all abstract methods. - Defines the hyperparameter search space as it's the same for all grid search methods. Parameters ---------- model : Amplo.AutoML.Modeller.ModelType Model object to optimize. params : optional Parameters to optimize. Has no effect for `OptunaGridSearch`. candidates : int Limit the number of candidates to search. timeout : int Limit the time for optimization. cv : sklearn.model_selection.BaseCrossValidator Cross validation object. scoring : str or sklearn.metrics._scorer._BaseScorer A valid string for `sklearn.metrics.SCORERS` verbose : int Verbose logging. # Input tests # Set class attributes # Model specific settings Get model specific hyper parameter values, indicating predefined search areas to optimize. Notes ----- Each item of the output dictionary consists of: - parameter name (str), and - parameter specifications (tuple) - parameter type (str) - parameter arguments (list) - number of distinct values (int, optional) [only for exhaustive grid search] Parameter types include: - 'categorical': categorical values - 'int': discretized uniform value space - 'logint': discretized logarithmic uniform value space - 'uniform': uniform value space - 'loguniform': logarithmic uniform value space Parameter arguments are: - [categorical]: a list of all options - [int, logint, uniform, loguniform]: a tuple with min and max value **Special case (conditionals):** In some cases, one wants to grid-search certain parameters only if another parameter condition is present. Such conditions are specified via the dedicated key 'CONDITIONALS'. # Extract model name & type # Determine whether it's classification or regression # Define min-max-function # Find matching model and return its parameter values # is_classification # Raise error if no match was found Get all min and max values from model-specific set of parameters. 
Omit categorical parameters as min and max values are ambiguous. Returns ------- param_min_max : pd.DataFrame Min and max values. # Get all model's parameters # Pop conditionals and integrate all into `param_values` # Filter for min and max in non-categorical parameters # Sanity check # Add item to dict # Combine all values to pd.DataFrame Get grid search specific distributions or samples. This function translates `self._hyper_parameter_values` to the expected format for the given grid search. Parameters ---------- args : optional Grid search specific arguments. kwargs : optional Grid search specific keyword arguments. Returns ------- Grid search parameters Run fit with model-specific set of parameters. Parameters ---------- x : array Data features. y : array Data labels Returns ------- results : pd.DataFrame Results of the grid search.
| 2.618208
| 3
|
modules/Dataset.py
|
sfwyly/Loader
| 1
|
6626200
|
"""
@Author: sfwyly
@Date: 2021/3/28
@Description: process Dataset
"""
import numpy as np
import pathlib
import json
import platform
"""
Dataset: provide more methods for operate Dataset
"""
class Dataset(object):
    """
    Path-based image dataset indexer.

    Scans ``root_path`` (expected layout: ``root/<category>/<file>``) and
    exposes two parallel lists: file paths (``trains``) and integer labels
    (``labels``). No image I/O is performed here — only paths are handled.

    kwargs:
        balanced (bool): whether to cap the number of samples per category.
        balanced_num (int): per-category cap (required when ``balanced``).
        root_path (str): data root path (one sub-folder per category).
    """
    def __init__(self, is_train=True, **kwargs):
        super(Dataset, self).__init__()
        self.trains = []                 # file paths (str)
        self.labels = []                 # integer labels, parallel to trains
        self.isTrain = is_train
        self.category_to_label = {}      # category name -> label id
        self.labelNums = 0               # number of distinct labels seen
        self.all_file_path = []          # flat list of <category>/<file> paths
        self.balanced = kwargs['balanced']
        if self.balanced:
            self.balanced_num = kwargs['balanced_num']
        self.root = kwargs['root_path']
        print("balanced ", self.balanced)
        # Path separator used to split the category name out of file paths.
        if platform.system() == "Windows":
            self.separator = "\\"
        else:
            self.separator = "/"
    def getAllFile(self):
        """Collect every two-level path (root/<category>/<file>)."""
        self.all_file_path.clear()
        file_root = pathlib.Path(self.root)
        self.all_file_path = list(file_root.glob("*/*"))
    def processLabelStr(self, category_str) -> int:
        """
        Map a not-yet-seen category name to a label id; override to customize.

        :param category_str: category folder name (already stripped)
        :return: int label to assign to a new category
        """
        return len(self.category_to_label)
    def processLabelCategories(self, category_str):
        """Return the label for ``category_str``, registering it if new."""
        # Normalize once so lookup and storage use the same key. The
        # original checked the raw string but stored the stripped one,
        # which could register the same category twice and inflate
        # labelNums.
        key = category_str.strip()
        label = self.processLabelStr(key)
        if key not in self.category_to_label:
            self.category_to_label[key] = label
            self.labelNums += 1
        else:
            label = self.category_to_label[key]
        return label
    def __getitem__(self, item):
        """
        :param item: index
        :return: (path, label) at index ``item`` — paths only, no image I/O
        """
        return self.trains[item], self.labels[item]
    def __len__(self):
        return len(self.trains)
    def shuffle(self):
        """Shuffle trains/labels in unison, keeping both as lists."""
        if not self.trains:
            return  # zip(*[]) would raise ValueError on an empty dataset
        pairs = list(zip(self.trains, self.labels))
        np.random.shuffle(pairs)
        trains, labels = zip(*pairs)
        # Convert back to lists: the original left tuples behind, which
        # made the .clear() calls in loader() fail on a second call.
        self.trains, self.labels = list(trains), list(labels)
    def loader(self, shuffle=True):
        """
        (Re)build the (paths, labels) lists from ``all_file_path``.

        :param shuffle: shuffle the pairs after loading
        :return: (trains, labels)
        """
        self.trains.clear()
        self.labels.clear()
        if len(self.all_file_path) <= 0:
            if self.balanced:
                self.balanceSample(self.balanced_num)
            else:
                self.getAllFile()
        for file_path in self.all_file_path:
            path_str = str(file_path)
            r = path_str.rfind(self.separator)
            l = path_str[:r].rfind(self.separator)
            # The category name is the parent-directory component of the path.
            self.labels.append(self.processLabelCategories(path_str[l + 1:r]))
            self.trains.append(path_str)
        if shuffle:
            self.shuffle()
        return self.trains, self.labels
    def balanceSample(self, num=200):
        """
        Collect at most ``num`` files per category into ``all_file_path``.

        :param num: per-category sample cap
        :return: None (fills ``self.all_file_path`` in place)
        """
        self.all_file_path.clear()
        file_root = pathlib.Path(self.root).glob("*")
        for cate in file_root:
            img_list = list(cate.glob("*"))
            if len(img_list) <= 0:
                continue
            if len(img_list) >= num:
                # Sample without replacement so the same file is never
                # picked twice (np.random.choice samples WITH replacement
                # by default, which produced duplicate entries).
                self.all_file_path.extend(np.random.choice(img_list, num, replace=False))
            else:
                self.all_file_path.extend(img_list)
    def setData(self, trains, labels):
        """Install externally prepared path/label lists (shallow copies)."""
        self.trains = trains[:]
        self.labels = labels[:]
    def setAllFilePaths(self, all_file_paths):
        """Install an externally prepared flat file-path list (shallow copy)."""
        self.all_file_path = all_file_paths[:]
    @staticmethod
    def splitTrainAndTest(root_path, split_ratio=0.8, save_path="/usr/", isWrite=False):
        """
        Split each category's files into train/val path lists.

        :param root_path: data root (one folder per category)
        :param split_ratio: fraction of each category assigned to train
        :param save_path: JSON file path used when ``isWrite`` is True
        :param isWrite: persist the split to ``save_path`` as JSON
        :return: (train_file_paths, val_file_paths)
        """
        train_file_paths = []
        val_file_paths = []
        root = pathlib.Path(root_path)
        # Walk each category folder independently so the ratio holds per class.
        for cate in root.glob("*"):
            file_list = list(map(str, cate.glob("*")))
            # Shuffle within the category (no video-sequence ordering assumed).
            np.random.shuffle(file_list)
            cut = int(split_ratio * len(file_list))
            train_file_paths.extend(file_list[:cut])
            val_file_paths.extend(file_list[cut:])
        if isWrite:
            dic = {"train_file_paths": train_file_paths, "val_file_paths": val_file_paths}
            with open(save_path, "w") as file:
                json.dump(dic, file)
        return train_file_paths, val_file_paths
|
"""
@Author: sfwyly
@Date: 2021/3/28
@Description: process Dataset
"""
import numpy as np
import pathlib
import json
import platform
"""
Dataset: provide more methods for operate Dataset
"""
class Dataset(object):
    """
    Path-based image dataset indexer.

    Scans ``root_path`` (expected layout: ``root/<category>/<file>``) and
    exposes two parallel lists: file paths (``trains``) and integer labels
    (``labels``). No image I/O is performed here — only paths are handled.

    kwargs:
        balanced (bool): whether to cap the number of samples per category.
        balanced_num (int): per-category cap (required when ``balanced``).
        root_path (str): data root path (one sub-folder per category).
    """
    def __init__(self, is_train=True, **kwargs):
        super(Dataset, self).__init__()
        self.trains = []                 # file paths (str)
        self.labels = []                 # integer labels, parallel to trains
        self.isTrain = is_train
        self.category_to_label = {}      # category name -> label id
        self.labelNums = 0               # number of distinct labels seen
        self.all_file_path = []          # flat list of <category>/<file> paths
        self.balanced = kwargs['balanced']
        if self.balanced:
            self.balanced_num = kwargs['balanced_num']
        self.root = kwargs['root_path']
        print("balanced ", self.balanced)
        # Path separator used to split the category name out of file paths.
        if platform.system() == "Windows":
            self.separator = "\\"
        else:
            self.separator = "/"
    def getAllFile(self):
        """Collect every two-level path (root/<category>/<file>)."""
        self.all_file_path.clear()
        file_root = pathlib.Path(self.root)
        self.all_file_path = list(file_root.glob("*/*"))
    def processLabelStr(self, category_str) -> int:
        """
        Map a not-yet-seen category name to a label id; override to customize.

        :param category_str: category folder name (already stripped)
        :return: int label to assign to a new category
        """
        return len(self.category_to_label)
    def processLabelCategories(self, category_str):
        """Return the label for ``category_str``, registering it if new."""
        # Normalize once so lookup and storage use the same key. The
        # original checked the raw string but stored the stripped one,
        # which could register the same category twice and inflate
        # labelNums.
        key = category_str.strip()
        label = self.processLabelStr(key)
        if key not in self.category_to_label:
            self.category_to_label[key] = label
            self.labelNums += 1
        else:
            label = self.category_to_label[key]
        return label
    def __getitem__(self, item):
        """
        :param item: index
        :return: (path, label) at index ``item`` — paths only, no image I/O
        """
        return self.trains[item], self.labels[item]
    def __len__(self):
        return len(self.trains)
    def shuffle(self):
        """Shuffle trains/labels in unison, keeping both as lists."""
        if not self.trains:
            return  # zip(*[]) would raise ValueError on an empty dataset
        pairs = list(zip(self.trains, self.labels))
        np.random.shuffle(pairs)
        trains, labels = zip(*pairs)
        # Convert back to lists: the original left tuples behind, which
        # made the .clear() calls in loader() fail on a second call.
        self.trains, self.labels = list(trains), list(labels)
    def loader(self, shuffle=True):
        """
        (Re)build the (paths, labels) lists from ``all_file_path``.

        :param shuffle: shuffle the pairs after loading
        :return: (trains, labels)
        """
        self.trains.clear()
        self.labels.clear()
        if len(self.all_file_path) <= 0:
            if self.balanced:
                self.balanceSample(self.balanced_num)
            else:
                self.getAllFile()
        for file_path in self.all_file_path:
            path_str = str(file_path)
            r = path_str.rfind(self.separator)
            l = path_str[:r].rfind(self.separator)
            # The category name is the parent-directory component of the path.
            self.labels.append(self.processLabelCategories(path_str[l + 1:r]))
            self.trains.append(path_str)
        if shuffle:
            self.shuffle()
        return self.trains, self.labels
    def balanceSample(self, num=200):
        """
        Collect at most ``num`` files per category into ``all_file_path``.

        :param num: per-category sample cap
        :return: None (fills ``self.all_file_path`` in place)
        """
        self.all_file_path.clear()
        file_root = pathlib.Path(self.root).glob("*")
        for cate in file_root:
            img_list = list(cate.glob("*"))
            if len(img_list) <= 0:
                continue
            if len(img_list) >= num:
                # Sample without replacement so the same file is never
                # picked twice (np.random.choice samples WITH replacement
                # by default, which produced duplicate entries).
                self.all_file_path.extend(np.random.choice(img_list, num, replace=False))
            else:
                self.all_file_path.extend(img_list)
    def setData(self, trains, labels):
        """Install externally prepared path/label lists (shallow copies)."""
        self.trains = trains[:]
        self.labels = labels[:]
    def setAllFilePaths(self, all_file_paths):
        """Install an externally prepared flat file-path list (shallow copy)."""
        self.all_file_path = all_file_paths[:]
    @staticmethod
    def splitTrainAndTest(root_path, split_ratio=0.8, save_path="/usr/", isWrite=False):
        """
        Split each category's files into train/val path lists.

        :param root_path: data root (one folder per category)
        :param split_ratio: fraction of each category assigned to train
        :param save_path: JSON file path used when ``isWrite`` is True
        :param isWrite: persist the split to ``save_path`` as JSON
        :return: (train_file_paths, val_file_paths)
        """
        train_file_paths = []
        val_file_paths = []
        root = pathlib.Path(root_path)
        # Walk each category folder independently so the ratio holds per class.
        for cate in root.glob("*"):
            file_list = list(map(str, cate.glob("*")))
            # Shuffle within the category (no video-sequence ordering assumed).
            np.random.shuffle(file_list)
            cut = int(split_ratio * len(file_list))
            train_file_paths.extend(file_list[:cut])
            val_file_paths.extend(file_list[cut:])
        if isWrite:
            dic = {"train_file_paths": train_file_paths, "val_file_paths": val_file_paths}
            with open(save_path, "w") as file:
                json.dump(dic, file)
        return train_file_paths, val_file_paths
|
en
| 0.565342
|
@Author: sfwyly
@Date: 2021/3/28
@Description: process Dataset Dataset: provide more methods for operate Dataset is_split: decide to whether split dataset
image_size: image size
root_path: data root path (including train or test) # category2label # labels numbers # storage two-level category #+list(file_root.glob("*/*.jpg")) catagory_str : catagory path string
enable to override :param category_str:
:return: int # TODO # here adding custom method to process catagory_to_label map :param item:
:return: only return path (train,label) # label_content str(file_path)[l+1:r] :param num: sample num
:return: # balance sample split Train and Val
:return: # recurrent each a cate # no video sequence # write file
| 2.510698
| 3
|
tests/integration/test_dynamodb.py
|
ninhkd/localstack
| 0
|
6626201
|
<filename>tests/integration/test_dynamodb.py
# -*- coding: utf-8 -*-
import unittest
import json
from localstack.services.dynamodbstreams.dynamodbstreams_api import get_kinesis_stream_name
from localstack.utils import testutil
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_models import KinesisStream
from localstack.utils.aws.aws_stack import get_environment
from localstack.utils.common import json_safe, short_uid
# Partition key shared by every test table in this module.
PARTITION_KEY = 'id'
# Distinct table names so tests can create/delete tables independently.
TEST_DDB_TABLE_NAME = 'test-ddb-table-1'
TEST_DDB_TABLE_NAME_2 = 'test-ddb-table-2'
TEST_DDB_TABLE_NAME_3 = 'test-ddb-table-3'
TEST_DDB_TABLE_NAME_4 = 'test-ddb-table-4'
# Tags applied at table creation and checked by the tagging tests.
TEST_DDB_TAGS = [
    {
        'Key': 'Name',
        'Value': 'test-table'
    },
    {
        'Key': 'TestKey',
        'Value': 'true'
    }
]
class DynamoDBIntegrationTest(unittest.TestCase):
    """Integration tests for the locally emulated DynamoDB service."""

    @classmethod
    def setUpClass(cls):
        # One shared boto3 resource client for all tests in this class.
        cls.dynamodb = aws_stack.connect_to_resource('dynamodb')

    def test_non_ascii_chars(self):
        """Items containing non-ASCII characters round-trip unchanged."""
        aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
        table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)

        # write some items containing non-ASCII characters
        items = {
            'id1': {PARTITION_KEY: 'id1', 'data': 'foobar123 ✓'},
            'id2': {PARTITION_KEY: 'id2', 'data': 'foobar123 £'},
            'id3': {PARTITION_KEY: 'id3', 'data': 'foobar123 ¢'}
        }
        for item in items.values():
            table.put_item(Item=item)

        for item_id in items.keys():
            item = table.get_item(Key={PARTITION_KEY: item_id})['Item']
            # need to fix up the JSON and convert str to unicode for Python 2
            item1 = json_safe(item)
            item2 = json_safe(items[item_id])
            self.assertEqual(item1, item2)

        # clean up
        delete_table(TEST_DDB_TABLE_NAME)

    def test_large_data_download(self):
        """Scanning many large items works with chunked transfer encoding."""
        aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME_2, partition_key=PARTITION_KEY)
        table = self.dynamodb.Table(TEST_DDB_TABLE_NAME_2)

        # Create a large amount of items
        num_items = 20
        for i in range(0, num_items):
            item = {PARTITION_KEY: 'id%s' % i, 'data1': 'foobar123 ' * 1000}
            table.put_item(Item=item)

        # Retrieve the items. The data will be transmitted to the client with chunked transfer encoding
        result = table.scan(TableName=TEST_DDB_TABLE_NAME_2)
        self.assertEqual(len(result['Items']), num_items)

        # clean up
        delete_table(TEST_DDB_TABLE_NAME_2)

    def test_time_to_live(self):
        """TTL can be described, enabled, disabled, and re-enabled."""
        aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME_3, partition_key=PARTITION_KEY)
        table = self.dynamodb.Table(TEST_DDB_TABLE_NAME_3)

        # Insert some items to the table
        items = {
            'id1': {PARTITION_KEY: 'id1', 'data': 'IT IS'},
            'id2': {PARTITION_KEY: 'id2', 'data': 'TIME'},
            'id3': {PARTITION_KEY: 'id3', 'data': 'TO LIVE!'}
        }
        for item in items.values():
            table.put_item(Item=item)

        # Describe TTL when still unset.
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'], 'DISABLED')

        # Enable TTL for given table
        response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(json.loads(response._content)['TimeToLiveSpecification']['Enabled'])

        # Describe TTL status after being enabled.
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'], 'ENABLED')

        # Disable TTL for given table
        response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, False)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(json.loads(response._content)['TimeToLiveSpecification']['Enabled'])

        # Describe TTL status after being disabled.
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'], 'DISABLED')

        # Enable TTL for given table again
        response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(json.loads(response._content)['TimeToLiveSpecification']['Enabled'])

        # Describe TTL status after being enabled again.
        response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'], 'ENABLED')

        # clean up
        delete_table(TEST_DDB_TABLE_NAME_3)

    def test_list_tags_of_resource(self):
        """Tags can be listed, added, and removed on a table resource."""
        table_name = 'ddb-table-%s' % short_uid()
        dynamodb = aws_stack.connect_to_service('dynamodb')

        rs = dynamodb.create_table(
            TableName=table_name,
            KeySchema=[{
                'AttributeName': 'id', 'KeyType': 'HASH'
            }],
            AttributeDefinitions=[{
                'AttributeName': 'id', 'AttributeType': 'S'
            }],
            ProvisionedThroughput={
                'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5
            },
            Tags=TEST_DDB_TAGS
        )
        table_arn = rs['TableDescription']['TableArn']

        # tags applied at creation are returned as-is
        rs = dynamodb.list_tags_of_resource(
            ResourceArn=table_arn
        )
        self.assertEqual(rs['Tags'], TEST_DDB_TAGS)

        dynamodb.tag_resource(
            ResourceArn=table_arn,
            Tags=[
                {
                    'Key': 'NewKey',
                    'Value': 'TestValue'
                }
            ]
        )

        rs = dynamodb.list_tags_of_resource(
            ResourceArn=table_arn
        )
        self.assertEqual(len(rs['Tags']), len(TEST_DDB_TAGS) + 1)

        tags = {tag['Key']: tag['Value'] for tag in rs['Tags']}
        self.assertIn('NewKey', tags.keys())
        self.assertEqual(tags['NewKey'], 'TestValue')

        dynamodb.untag_resource(
            ResourceArn=table_arn,
            TagKeys=[
                'Name', 'NewKey'
            ]
        )

        rs = dynamodb.list_tags_of_resource(
            ResourceArn=table_arn
        )
        tags = {tag['Key']: tag['Value'] for tag in rs['Tags']}
        self.assertNotIn('Name', tags.keys())
        self.assertNotIn('NewKey', tags.keys())

        delete_table(table_name)

    def test_stream_spec_and_region_replacement(self):
        """ARNs use the local region and stream shard IDs match AWS format."""
        aws_stack.create_dynamodb_table(
            TEST_DDB_TABLE_NAME_4,
            partition_key=PARTITION_KEY,
            stream_view_type='NEW_AND_OLD_IMAGES'
        )
        table = self.dynamodb.Table(TEST_DDB_TABLE_NAME_4)

        # assert ARN formats
        expected_arn_prefix = 'arn:aws:dynamodb:' + aws_stack.get_local_region()
        self.assertTrue(table.table_arn.startswith(expected_arn_prefix))
        self.assertTrue(table.latest_stream_arn.startswith(expected_arn_prefix))

        # assert shard ID formats
        ddbstreams = aws_stack.connect_to_service('dynamodbstreams')
        result = ddbstreams.describe_stream(StreamArn=table.latest_stream_arn)['StreamDescription']
        self.assertIn('Shards', result)
        for shard in result['Shards']:
            self.assertRegex(shard['ShardId'], r'^shardId\-[0-9]{20}\-[a-zA-Z0-9]{1,36}$')

        # clean up
        delete_table(TEST_DDB_TABLE_NAME_4)

    def test_multiple_update_expressions(self):
        """A single UpdateExpression can SET multiple attributes."""
        dynamodb = aws_stack.connect_to_service('dynamodb')
        aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
        table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)

        item_id = short_uid()
        table.put_item(Item={PARTITION_KEY: item_id, 'data': 'foobar123 ✓'})
        response = dynamodb.update_item(TableName=TEST_DDB_TABLE_NAME,
            Key={PARTITION_KEY: {'S': item_id}},
            UpdateExpression='SET attr1 = :v1, attr2 = :v2',
            ExpressionAttributeValues={
                ':v1': {'S': 'value1'},
                ':v2': {'S': 'value2'}
            })
        self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)

        item = table.get_item(Key={PARTITION_KEY: item_id})['Item']
        self.assertEqual(item['attr1'], 'value1')
        self.assertEqual(item['attr2'], 'value2')

    def test_return_values_in_put_item(self):
        """ReturnValues='ALL_OLD' yields old attributes only on overwrite."""
        aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
        table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)

        # items which are being used to put in the table
        item1 = {PARTITION_KEY: 'id1', 'data': 'foobar'}
        item2 = {PARTITION_KEY: 'id2', 'data': 'foobar'}

        response = table.put_item(Item=item1, ReturnValues='ALL_OLD')
        # there is no data present in the table already so even if return values
        # is set to 'ALL_OLD' as there is no data it will not return any data.
        self.assertFalse(response.get('Attributes'))

        # now the same data is present so when we pass return values as 'ALL_OLD'
        # it should give us attributes
        response = table.put_item(Item=item1, ReturnValues='ALL_OLD')
        self.assertTrue(response.get('Attributes'))
        self.assertEqual(response.get('Attributes').get('id'), item1.get('id'))
        self.assertEqual(response.get('Attributes').get('data'), item1.get('data'))

        response = table.put_item(Item=item2)
        # we do not have any same item as item2 already so when we add this by default
        # return values is set to None so no Attribute values should be returned
        self.assertFalse(response.get('Attributes'))

        response = table.put_item(Item=item2)
        # in this case we already have item2 in the table so on this request
        # it should not return any data as return values is set to None so no
        # Attribute values should be returned
        self.assertFalse(response.get('Attributes'))

    def test_empty_and_binary_values(self):
        """Empty-string and binary attribute values are accepted."""
        aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
        table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)

        # items which are being used to put in the table
        item1 = {PARTITION_KEY: 'id1', 'data': ''}
        item2 = {PARTITION_KEY: 'id2', 'data': b'foobar'}

        response = table.put_item(Item=item1)
        self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)

        response = table.put_item(Item=item2)
        self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)

    def test_dynamodb_stream_shard_iterator(self):
        """A shard iterator can be obtained for a table's latest stream."""
        def wait_for_stream_created(table_name):
            # Block until the backing Kinesis stream exists.
            stream_name = get_kinesis_stream_name(table_name)
            stream = KinesisStream(id=stream_name, num_shards=1)
            kinesis = aws_stack.connect_to_service('kinesis', env=get_environment(None))
            stream.connect(kinesis)
            stream.wait_for()

        dynamodb = aws_stack.connect_to_service('dynamodb')
        ddbstreams = aws_stack.connect_to_service('dynamodbstreams')

        table_name = 'table_with_stream'
        table = dynamodb.create_table(
            TableName=table_name,
            KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
            AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
            StreamSpecification={
                'StreamEnabled': True,
                'StreamViewType': 'NEW_IMAGE',
            },
            ProvisionedThroughput={
                'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5
            },
        )

        wait_for_stream_created(table_name)

        stream_arn = table['TableDescription']['LatestStreamArn']
        result = ddbstreams.describe_stream(StreamArn=stream_arn)

        response = ddbstreams.get_shard_iterator(StreamArn=stream_arn,
            ShardId=result['StreamDescription']['Shards'][0]['ShardId'],
            ShardIteratorType='LATEST'
        )
        self.assertIn('ShardIterator', response)

    def test_global_tables(self):
        """Global tables can be created, described, updated; errors surface."""
        aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
        dynamodb = aws_stack.connect_to_service('dynamodb')

        # create global table
        regions = [{'RegionName': 'us-east-1'}, {'RegionName': 'us-west-1'}, {'RegionName': 'eu-central-1'}]
        response = dynamodb.create_global_table(GlobalTableName=TEST_DDB_TABLE_NAME,
            ReplicationGroup=regions)['GlobalTableDescription']
        self.assertIn('ReplicationGroup', response)
        self.assertEqual(len(regions), len(response['ReplicationGroup']))

        # describe global table
        response = dynamodb.describe_global_table(GlobalTableName=TEST_DDB_TABLE_NAME)['GlobalTableDescription']
        self.assertIn('ReplicationGroup', response)
        self.assertEqual(len(regions), len(response['ReplicationGroup']))

        # update global table
        updates = [
            {'Create': {'RegionName': 'us-east-2'}},
            {'Create': {'RegionName': 'us-west-2'}},
            {'Delete': {'RegionName': 'us-west-1'}}
        ]
        response = dynamodb.update_global_table(GlobalTableName=TEST_DDB_TABLE_NAME,
            ReplicaUpdates=updates)['GlobalTableDescription']
        self.assertIn('ReplicationGroup', response)
        self.assertEqual(len(regions) + 1, len(response['ReplicationGroup']))

        # assert exceptions for invalid requests
        with self.assertRaises(Exception) as ctx:
            dynamodb.create_global_table(GlobalTableName=TEST_DDB_TABLE_NAME, ReplicationGroup=regions)
        self.assertIn('GlobalTableAlreadyExistsException', str(ctx.exception))
        with self.assertRaises(Exception) as ctx:
            dynamodb.describe_global_table(GlobalTableName='invalid-table-name')
        self.assertIn('GlobalTableNotFoundException', str(ctx.exception))

    def test_create_duplicate_table(self):
        """Creating a table that already exists raises ResourceInUse."""
        table_name = 'duplicateTable'
        dynamodb = aws_stack.connect_to_service('dynamodb')

        dynamodb.create_table(
            TableName=table_name,
            KeySchema=[{
                'AttributeName': 'id', 'KeyType': 'HASH'
            }],
            AttributeDefinitions=[{
                'AttributeName': 'id', 'AttributeType': 'S'
            }],
            ProvisionedThroughput={
                'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5
            },
            Tags=TEST_DDB_TAGS
        )

        with self.assertRaises(Exception) as ctx:
            dynamodb.create_table(
                TableName=table_name,
                KeySchema=[{
                    'AttributeName': 'id', 'KeyType': 'HASH'
                }],
                AttributeDefinitions=[{
                    'AttributeName': 'id', 'AttributeType': 'S'
                }],
                ProvisionedThroughput={
                    'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5
                },
                Tags=TEST_DDB_TAGS
            )
        # CreateTable on an existing table raises ResourceInUseException;
        # the original asserted ResourceNotFoundException, which is the
        # error for operations on a *missing* table.
        self.assertIn('ResourceInUseException', str(ctx.exception))
def delete_table(name):
    """Remove the DynamoDB table called *name* (test clean-up helper)."""
    aws_stack.connect_to_service('dynamodb').delete_table(TableName=name)
|
<filename>tests/integration/test_dynamodb.py
# -*- coding: utf-8 -*-
import unittest
import json
from localstack.services.dynamodbstreams.dynamodbstreams_api import get_kinesis_stream_name
from localstack.utils import testutil
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_models import KinesisStream
from localstack.utils.aws.aws_stack import get_environment
from localstack.utils.common import json_safe, short_uid
# Partition key shared by every test table in this module.
PARTITION_KEY = 'id'
# Distinct table names, one per test that needs its own table.
TEST_DDB_TABLE_NAME = 'test-ddb-table-1'
TEST_DDB_TABLE_NAME_2 = 'test-ddb-table-2'
TEST_DDB_TABLE_NAME_3 = 'test-ddb-table-3'
TEST_DDB_TABLE_NAME_4 = 'test-ddb-table-4'
# Tags applied at table creation and checked by the tagging tests.
TEST_DDB_TAGS = [
    {
        'Key': 'Name',
        'Value': 'test-table'
    },
    {
        'Key': 'TestKey',
        'Value': 'true'
    }
]
class DynamoDBIntegrationTest (unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dynamodb = aws_stack.connect_to_resource('dynamodb')
def test_non_ascii_chars(self):
aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)
# write some items containing non-ASCII characters
items = {
'id1': {PARTITION_KEY: 'id1', 'data': 'foobar123 ✓'},
'id2': {PARTITION_KEY: 'id2', 'data': 'foobar123 £'},
'id3': {PARTITION_KEY: 'id3', 'data': 'foobar123 ¢'}
}
for k, item in items.items():
table.put_item(Item=item)
for item_id in items.keys():
item = table.get_item(Key={PARTITION_KEY: item_id})['Item']
# need to fix up the JSON and convert str to unicode for Python 2
item1 = json_safe(item)
item2 = json_safe(items[item_id])
self.assertEqual(item1, item2)
# clean up
delete_table(TEST_DDB_TABLE_NAME)
def test_large_data_download(self):
aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME_2, partition_key=PARTITION_KEY)
table = self.dynamodb.Table(TEST_DDB_TABLE_NAME_2)
# Create a large amount of items
num_items = 20
for i in range(0, num_items):
item = {PARTITION_KEY: 'id%s' % i, 'data1': 'foobar123 ' * 1000}
table.put_item(Item=item)
# Retrieve the items. The data will be transmitted to the client with chunked transfer encoding
result = table.scan(TableName=TEST_DDB_TABLE_NAME_2)
self.assertEqual(len(result['Items']), num_items)
# clean up
delete_table(TEST_DDB_TABLE_NAME_2)
def test_time_to_live(self):
aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME_3, partition_key=PARTITION_KEY)
table = self.dynamodb.Table(TEST_DDB_TABLE_NAME_3)
# Insert some items to the table
items = {
'id1': {PARTITION_KEY: 'id1', 'data': 'IT IS'},
'id2': {PARTITION_KEY: 'id2', 'data': 'TIME'},
'id3': {PARTITION_KEY: 'id3', 'data': 'TO LIVE!'}
}
for k, item in items.items():
table.put_item(Item=item)
# Describe TTL when still unset.
response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'], 'DISABLED')
# Enable TTL for given table
response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, True)
self.assertEqual(response.status_code, 200)
self.assertTrue(json.loads(response._content)['TimeToLiveSpecification']['Enabled'])
# Describe TTL status after being enabled.
response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'], 'ENABLED')
# Disable TTL for given table
response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, False)
self.assertEqual(response.status_code, 200)
self.assertFalse(json.loads(response._content)['TimeToLiveSpecification']['Enabled'])
# Describe TTL status after being disabled.
response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'], 'DISABLED')
# Enable TTL for given table again
response = testutil.send_update_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3, True)
self.assertEqual(response.status_code, 200)
self.assertTrue(json.loads(response._content)['TimeToLiveSpecification']['Enabled'])
# Describe TTL status after being enabled again.
response = testutil.send_describe_dynamodb_ttl_request(TEST_DDB_TABLE_NAME_3)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response._content)['TimeToLiveDescription']['TimeToLiveStatus'], 'ENABLED')
# clean up
delete_table(TEST_DDB_TABLE_NAME_3)
def test_list_tags_of_resource(self):
table_name = 'ddb-table-%s' % short_uid()
dynamodb = aws_stack.connect_to_service('dynamodb')
rs = dynamodb.create_table(
TableName=table_name,
KeySchema=[{
'AttributeName': 'id', 'KeyType': 'HASH'
}],
AttributeDefinitions=[{
'AttributeName': 'id', 'AttributeType': 'S'
}],
ProvisionedThroughput={
'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5
},
Tags=TEST_DDB_TAGS
)
table_arn = rs['TableDescription']['TableArn']
rs = dynamodb.list_tags_of_resource(
ResourceArn=table_arn
)
self.assertEqual(rs['Tags'], TEST_DDB_TAGS)
dynamodb.tag_resource(
ResourceArn=table_arn,
Tags=[
{
'Key': 'NewKey',
'Value': 'TestValue'
}
]
)
rs = dynamodb.list_tags_of_resource(
ResourceArn=table_arn
)
self.assertEqual(len(rs['Tags']), len(TEST_DDB_TAGS) + 1)
tags = {tag['Key']: tag['Value'] for tag in rs['Tags']}
self.assertIn('NewKey', tags.keys())
self.assertEqual(tags['NewKey'], 'TestValue')
dynamodb.untag_resource(
ResourceArn=table_arn,
TagKeys=[
'Name', 'NewKey'
]
)
rs = dynamodb.list_tags_of_resource(
ResourceArn=table_arn
)
tags = {tag['Key']: tag['Value'] for tag in rs['Tags']}
self.assertNotIn('Name', tags.keys())
self.assertNotIn('NewKey', tags.keys())
delete_table(table_name)
def test_stream_spec_and_region_replacement(self):
aws_stack.create_dynamodb_table(
TEST_DDB_TABLE_NAME_4,
partition_key=PARTITION_KEY,
stream_view_type='NEW_AND_OLD_IMAGES'
)
table = self.dynamodb.Table(TEST_DDB_TABLE_NAME_4)
# assert ARN formats
expected_arn_prefix = 'arn:aws:dynamodb:' + aws_stack.get_local_region()
self.assertTrue(table.table_arn.startswith(expected_arn_prefix))
self.assertTrue(table.latest_stream_arn.startswith(expected_arn_prefix))
# assert shard ID formats
ddbstreams = aws_stack.connect_to_service('dynamodbstreams')
result = ddbstreams.describe_stream(StreamArn=table.latest_stream_arn)['StreamDescription']
self.assertIn('Shards', result)
for shard in result['Shards']:
self.assertRegex(shard['ShardId'], r'^shardId\-[0-9]{20}\-[a-zA-Z0-9]{1,36}$')
# clean up
delete_table(TEST_DDB_TABLE_NAME_4)
def test_multiple_update_expressions(self):
dynamodb = aws_stack.connect_to_service('dynamodb')
aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)
item_id = short_uid()
table.put_item(Item={PARTITION_KEY: item_id, 'data': 'foobar123 ✓'})
response = dynamodb.update_item(TableName=TEST_DDB_TABLE_NAME,
Key={PARTITION_KEY: {'S': item_id}},
UpdateExpression='SET attr1 = :v1, attr2 = :v2',
ExpressionAttributeValues={
':v1': {'S': 'value1'},
':v2': {'S': 'value2'}
})
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
item = table.get_item(Key={PARTITION_KEY: item_id})['Item']
self.assertEqual(item['attr1'], 'value1')
self.assertEqual(item['attr2'], 'value2')
def test_return_values_in_put_item(self):
aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)
# items which are being used to put in the table
item1 = {PARTITION_KEY: 'id1', 'data': 'foobar'}
item2 = {PARTITION_KEY: 'id2', 'data': 'foobar'}
response = table.put_item(Item=item1, ReturnValues='ALL_OLD')
# there is no data present in the table already so even if return values
# is set to 'ALL_OLD' as there is no data it will not return any data.
self.assertFalse(response.get('Attributes'))
# now the same data is present so when we pass return values as 'ALL_OLD'
# it should give us attributes
response = table.put_item(Item=item1, ReturnValues='ALL_OLD')
self.assertTrue(response.get('Attributes'))
self.assertEqual(response.get('Attributes').get('id'), item1.get('id'))
self.assertEqual(response.get('Attributes').get('data'), item1.get('data'))
response = table.put_item(Item=item2)
# we do not have any same item as item2 already so when we add this by default
# return values is set to None so no Attribute values should be returned
self.assertFalse(response.get('Attributes'))
response = table.put_item(Item=item2)
# in this case we already have item2 in the table so on this request
# it should not return any data as return values is set to None so no
# Attribute values should be returned
self.assertFalse(response.get('Attributes'))
def test_empty_and_binary_values(self):
aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
table = self.dynamodb.Table(TEST_DDB_TABLE_NAME)
# items which are being used to put in the table
item1 = {PARTITION_KEY: 'id1', 'data': ''}
item2 = {PARTITION_KEY: 'id2', 'data': b'foobar'}
response = table.put_item(Item=item1)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = table.put_item(Item=item2)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
def test_dynamodb_stream_shard_iterator(self):
def wait_for_stream_created(table_name):
stream_name = get_kinesis_stream_name(table_name)
stream = KinesisStream(id=stream_name, num_shards=1)
kinesis = aws_stack.connect_to_service('kinesis', env=get_environment(None))
stream.connect(kinesis)
stream.wait_for()
dynamodb = aws_stack.connect_to_service('dynamodb')
ddbstreams = aws_stack.connect_to_service('dynamodbstreams')
table_name = 'table_with_stream'
table = dynamodb.create_table(
TableName=table_name,
KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
StreamSpecification={
'StreamEnabled': True,
'StreamViewType': 'NEW_IMAGE',
},
ProvisionedThroughput={
'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5
},
)
wait_for_stream_created(table_name)
stream_arn = table['TableDescription']['LatestStreamArn']
result = ddbstreams.describe_stream(StreamArn=stream_arn)
response = ddbstreams.get_shard_iterator(StreamArn=stream_arn,
ShardId=result['StreamDescription']['Shards'][0]['ShardId'],
ShardIteratorType='LATEST'
)
self.assertIn('ShardIterator', response)
    def test_global_tables(self):
        """Exercise create/describe/update_global_table plus their error cases."""
        aws_stack.create_dynamodb_table(TEST_DDB_TABLE_NAME, partition_key=PARTITION_KEY)
        dynamodb = aws_stack.connect_to_service('dynamodb')
        # create global table
        regions = [{'RegionName': 'us-east-1'}, {'RegionName': 'us-west-1'}, {'RegionName': 'eu-central-1'}]
        response = dynamodb.create_global_table(GlobalTableName=TEST_DDB_TABLE_NAME,
                                                ReplicationGroup=regions)['GlobalTableDescription']
        self.assertIn('ReplicationGroup', response)
        self.assertEqual(len(regions), len(response['ReplicationGroup']))
        # describe global table
        response = dynamodb.describe_global_table(GlobalTableName=TEST_DDB_TABLE_NAME)['GlobalTableDescription']
        self.assertIn('ReplicationGroup', response)
        self.assertEqual(len(regions), len(response['ReplicationGroup']))
        # update global table: two creates and one delete => net one extra replica
        updates = [
            {'Create': {'RegionName': 'us-east-2'}},
            {'Create': {'RegionName': 'us-west-2'}},
            {'Delete': {'RegionName': 'us-west-1'}}
        ]
        response = dynamodb.update_global_table(GlobalTableName=TEST_DDB_TABLE_NAME,
                                                ReplicaUpdates=updates)['GlobalTableDescription']
        self.assertIn('ReplicationGroup', response)
        self.assertEqual(len(regions) + 1, len(response['ReplicationGroup']))
        # assert exceptions for invalid requests
        with self.assertRaises(Exception) as ctx:
            dynamodb.create_global_table(GlobalTableName=TEST_DDB_TABLE_NAME, ReplicationGroup=regions)
        self.assertIn('GlobalTableAlreadyExistsException', str(ctx.exception))
        with self.assertRaises(Exception) as ctx:
            dynamodb.describe_global_table(GlobalTableName='invalid-table-name')
        self.assertIn('GlobalTableNotFoundException', str(ctx.exception))
    def test_create_duplicate_table(self):
        """Creating the same table twice must fail on the second create_table."""
        table_name = 'duplicateTable'
        dynamodb = aws_stack.connect_to_service('dynamodb')
        dynamodb.create_table(
            TableName=table_name,
            KeySchema=[{
                'AttributeName': 'id', 'KeyType': 'HASH'
            }],
            AttributeDefinitions=[{
                'AttributeName': 'id', 'AttributeType': 'S'
            }],
            ProvisionedThroughput={
                'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5
            },
            Tags=TEST_DDB_TAGS
        )
        with self.assertRaises(Exception) as ctx:
            dynamodb.create_table(
                TableName=table_name,
                KeySchema=[{
                    'AttributeName': 'id', 'KeyType': 'HASH'
                }],
                AttributeDefinitions=[{
                    'AttributeName': 'id', 'AttributeType': 'S'
                }],
                ProvisionedThroughput={
                    'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5
                },
                Tags=TEST_DDB_TAGS
            )
        # NOTE(review): real AWS raises ResourceInUseException for a duplicate
        # table; this asserts ResourceNotFoundException -- presumably matching
        # the emulator's behavior at the time. Confirm before changing.
        self.assertIn('ResourceNotFoundException', str(ctx.exception))
def delete_table(name):
    """Module-level test helper: delete the DynamoDB table with the given name."""
    dynamodb_client = aws_stack.connect_to_service('dynamodb')
    dynamodb_client.delete_table(TableName=name)
|
en
| 0.79249
|
# -*- coding: utf-8 -*- # write some items containing non-ASCII characters # need to fix up the JSON and convert str to unicode for Python 2 # clean up # Create a large amount of items # Retrieve the items. The data will be transmitted to the client with chunked transfer encoding # clean up # Insert some items to the table # Describe TTL when still unset. # Enable TTL for given table # Describe TTL status after being enabled. # Disable TTL for given table # Describe TTL status after being disabled. # Enable TTL for given table again # Describe TTL status after being enabled again. # clean up # assert ARN formats # assert shard ID formats # clean up # items which are being used to put in the table # there is no data present in the table already so even if return values # is set to 'ALL_OLD' as there is no data it will not return any data. # now the same data is present so when we pass return values as 'ALL_OLD' # it should give us attributes # we do not have any same item as item2 already so when we add this by default # return values is set to None so no Attribute values should be returned # in this case we already have item2 in the table so on this request # it should not return any data as return values is set to None so no # Attribute values should be returned # items which are being used to put in the table # create global table # describe global table # update global table # assert exceptions for invalid requests
| 2.075442
| 2
|
tests/test_chunk.py
|
isConic/mr_streams
| 2
|
6626202
|
<filename>tests/test_chunk.py
import mr_streams as ms
import unittest
# :::: auxilary functions ::::
def repeat_n_times(x, n = 1):
    """Return a list holding *x* repeated *n* times (one copy by default)."""
    result = [x for _ in range(n)]
    return result
def double(x):
    """Return a two-element list containing *x* twice."""
    single = [x]
    return single + single
class TestChunk(unittest.TestCase):
    """Smoke tests for stream.chunk(): each pipeline should drain without raising."""
    def test_chunk(self):
        # chunk size smaller than the stream length
        _ = ms.stream([1,2,3,4,5]).chunk(2).drain()
    def test_less_than_n(self):
        # chunk size larger than the number of elements
        _ = ms.stream([1,2,3,4,5]).chunk(6).drain()
    def test_empty(self):
        # chunking an empty stream
        _ = ms.stream([]).chunk(6).drain()
|
<filename>tests/test_chunk.py
import mr_streams as ms
import unittest
# :::: auxilary functions ::::
def repeat_n_times(x, n = 1):
    """Produce *n* copies of *x* in a list; *n* defaults to 1."""
    out = []
    for _ in range(n):
        out.append(x)
    return out
def double(x):
    """Return ``[x, x]``."""
    return [x for _ in range(2)]
class TestChunk(unittest.TestCase):
    """Smoke tests for stream.chunk(): each pipeline should drain without raising."""
    def test_chunk(self):
        # chunk size smaller than the stream length
        _ = ms.stream([1,2,3,4,5]).chunk(2).drain()
    def test_less_than_n(self):
        # chunk size larger than the number of elements
        _ = ms.stream([1,2,3,4,5]).chunk(6).drain()
    def test_empty(self):
        # chunking an empty stream
        _ = ms.stream([]).chunk(6).drain()
|
en
| 0.481162
|
# :::: auxilary functions ::::
| 2.92977
| 3
|
src/python/T0/WMBS/Oracle/Tier0Feeder/GetDeploymentID.py
|
hufnagel/T0
| 0
|
6626203
|
"""
_GetDeploymentID_
Oracle implementation of GetDeploymentID
Retrieves T0 deployment ID
"""
from WMCore.Database.DBFormatter import DBFormatter
class GetDeploymentID(DBFormatter):
    """Oracle DAO that returns the T0 deployment ID (0 when the table is empty)."""
    def execute(self, conn = None, transaction = False):
        """Fetch the deployment ID from t0_deployment_id.

        :param conn: optional existing database connection
        :param transaction: whether to run within an existing transaction
        :returns: the stored deployment ID, or 0 if no row exists
        """
        sql = """SELECT id
                 from t0_deployment_id"""
        results = self.dbi.processData(sql, {}, conn = conn,
                                       transaction = transaction)[0].fetchall()
        # avoid shadowing the builtin `id`; default to 0 when no row is present
        deployment_id = results[0][0] if results else 0
        return deployment_id
|
"""
_GetDeploymentID_
Oracle implementation of GetDeploymentID
Retrieves T0 deployment ID
"""
from WMCore.Database.DBFormatter import DBFormatter
class GetDeploymentID(DBFormatter):
    """Oracle DAO that returns the T0 deployment ID (0 when the table is empty)."""
    def execute(self, conn = None, transaction = False):
        """Fetch the deployment ID from t0_deployment_id.

        :param conn: optional existing database connection
        :param transaction: whether to run within an existing transaction
        :returns: the stored deployment ID, or 0 if no row exists
        """
        sql = """SELECT id
                 from t0_deployment_id"""
        results = self.dbi.processData(sql, {}, conn = conn,
                                       transaction = transaction)[0].fetchall()
        # avoid shadowing the builtin `id`; default to 0 when no row is present
        deployment_id = results[0][0] if results else 0
        return deployment_id
|
en
| 0.589639
|
_GetDeploymentID_ Oracle implementation of GetDeploymentID Retrieves T0 deployment ID SELECT id from t0_deployment_id
| 2.434263
| 2
|
tests/cpp/test_cpp_client.py
|
EricGustin/SmartRedis
| 0
|
6626204
|
<reponame>EricGustin/SmartRedis
import pytest
from os import path as osp
from glob import glob
from shutil import which
from subprocess import Popen, PIPE, TimeoutExpired
import time
RANKS = 1  # NOTE(review): unused in this file as shown -- presumably MPI ranks; confirm
TEST_PATH = osp.dirname(osp.abspath(__file__))  # directory containing this test module
def get_test_names():
    """Collect built client_test binaries and wrap each as a pytest param.

    Add tests manually if necessary.
    """
    pattern = osp.join(TEST_PATH, "build/client_test*")
    params = []
    for binary in glob(pattern):
        params.append(pytest.param(binary, id=osp.basename(binary)))
    return params
@pytest.mark.parametrize("test", get_test_names())
def test_cpp_client(test, use_cluster):
    """Run one compiled C++ client test binary to completion.

    ``test`` is the path of a client_test executable supplied by the
    parametrize above; ``use_cluster`` is a fixture value, printed here
    only for log context.
    """
    cmd = []
    cmd.append(test)
    print(f"Running test: {osp.basename(test)}")
    print(f"Test command {' '.join(cmd)}")
    print(f"Using cluster: {use_cluster}")
    execute_cmd(cmd)
    # brief pause between binaries -- presumably to let the backing service
    # settle; TODO confirm why this is needed
    time.sleep(1)
def execute_cmd(cmd_list):
    """Run ``cmd_list`` from the build directory and assert a zero exit code.

    Stdout/stderr are echoed; the process is killed and the test fails on
    timeout (120 s), undecodable output, or any other error.
    """
    # spawning the subprocess and connecting to its output
    run_path = osp.join(TEST_PATH, "build/")
    proc = Popen(
        cmd_list, stderr=PIPE, stdout=PIPE, stdin=PIPE, cwd=run_path)
    try:
        out, err = proc.communicate(timeout=120)
        if out:
            print("OUTPUT:", out.decode("utf-8"))
        if err:
            print("ERROR:", err.decode("utf-8"))
        assert(proc.returncode == 0)
    except UnicodeDecodeError:
        # NOTE(review): communicate() was already called above, so the pipes
        # are drained; this second call yields empty output and errs.decode
        # below may itself raise -- confirm intent.
        output, errs = proc.communicate()
        print("ERROR:", errs.decode("utf-8"))
        assert(False)
    except TimeoutExpired:
        proc.kill()
        output, errs = proc.communicate()
        print("TIMEOUT: test timed out after test timeout limit of 120 seconds")
        print("OUTPUT:", output.decode("utf-8"))
        print("ERROR:", errs.decode("utf-8"))
        assert(False)
    except Exception:
        # catch-all: kill the process, dump whatever it produced, and fail
        proc.kill()
        output, errs = proc.communicate()
        print("OUTPUT:", output.decode("utf-8"))
        print("ERROR:", errs.decode("utf-8"))
        assert(False)
|
import pytest
from os import path as osp
from glob import glob
from shutil import which
from subprocess import Popen, PIPE, TimeoutExpired
import time
RANKS = 1  # NOTE(review): unused in this file as shown -- presumably MPI ranks; confirm
TEST_PATH = osp.dirname(osp.abspath(__file__))  # directory containing this test module
def get_test_names():
    """Glob for built client_test executables and turn each into a pytest param.

    Add tests manually if necessary.
    """
    found = glob(osp.join(TEST_PATH, "build/client_test*"))
    return [pytest.param(path, id=osp.basename(path)) for path in found]
@pytest.mark.parametrize("test", get_test_names())
def test_cpp_client(test, use_cluster):
    """Run one compiled C++ client test binary to completion.

    ``test`` is the path of a client_test executable supplied by the
    parametrize above; ``use_cluster`` is a fixture value, printed here
    only for log context.
    """
    cmd = []
    cmd.append(test)
    print(f"Running test: {osp.basename(test)}")
    print(f"Test command {' '.join(cmd)}")
    print(f"Using cluster: {use_cluster}")
    execute_cmd(cmd)
    # brief pause between binaries -- presumably to let the backing service
    # settle; TODO confirm why this is needed
    time.sleep(1)
def execute_cmd(cmd_list):
    """Run ``cmd_list`` from the build directory and assert a zero exit code.

    Stdout/stderr are echoed; the process is killed and the test fails on
    timeout (120 s), undecodable output, or any other error.
    """
    # spawning the subprocess and connecting to its output
    run_path = osp.join(TEST_PATH, "build/")
    proc = Popen(
        cmd_list, stderr=PIPE, stdout=PIPE, stdin=PIPE, cwd=run_path)
    try:
        out, err = proc.communicate(timeout=120)
        if out:
            print("OUTPUT:", out.decode("utf-8"))
        if err:
            print("ERROR:", err.decode("utf-8"))
        assert(proc.returncode == 0)
    except UnicodeDecodeError:
        # NOTE(review): communicate() was already called above, so the pipes
        # are drained; this second call yields empty output and errs.decode
        # below may itself raise -- confirm intent.
        output, errs = proc.communicate()
        print("ERROR:", errs.decode("utf-8"))
        assert(False)
    except TimeoutExpired:
        proc.kill()
        output, errs = proc.communicate()
        print("TIMEOUT: test timed out after test timeout limit of 120 seconds")
        print("OUTPUT:", output.decode("utf-8"))
        print("ERROR:", errs.decode("utf-8"))
        assert(False)
    except Exception:
        # catch-all: kill the process, dump whatever it produced, and fail
        proc.kill()
        output, errs = proc.communicate()
        print("OUTPUT:", output.decode("utf-8"))
        print("ERROR:", errs.decode("utf-8"))
        assert(False)
|
en
| 0.821245
|
Obtain test names by globbing for client_test Add tests manually if necessary Execute a command # spawning the subprocess and connecting to its output
| 2.195747
| 2
|
tools/data_source/genbank.py
|
tdans1/Use-Galaxy
| 4
|
6626205
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import textwrap
from Bio import GenBank
assert sys.version_info[:2] >= (2, 4)
def make_fasta(rec):
    """Build a (header, body) FASTA pair from a parsed GenBank record."""
    ann = rec.annotations
    fields = (ann.get('gi', ''), rec.id, ann.get('organism', ''), ann.get('date', ''))
    head = '>gi:%s, id:%s, org:%s, date:%s\n' % fields
    # wrap the raw sequence at 80 columns for the FASTA body
    lines = textwrap.wrap(rec.seq.data, width=80)
    body = '\n'.join(lines)
    return head, body
if __name__ == '__main__':
    # CLI: <mode> <search text or GI numbers> <output fasta file>
    mode = sys.argv[1]
    text = sys.argv[2]
    output_file = sys.argv[3]
    print('Searching for %s <br>' % text)
    # check if inputs are all numbers
    try:
        gi_list = text.split()
        [int(_) for _ in gi_list]
    except ValueError:
        # not plain GI numbers -> treat the text as a GenBank search query
        gi_list = GenBank.search_for(text, max_ids=10)
    # NOTE(review): GenBank.search_for / NCBIDictionary were removed from
    # modern Biopython (Bio.Entrez replaces them) -- this script targets an
    # old Biopython release; confirm before upgrading.
    fp = open(output_file, 'wt')
    record_parser = GenBank.FeatureParser()
    ncbi_dict = GenBank.NCBIDictionary(mode, 'genbank', parser=record_parser)
    # fetch each record and append it to the output file in FASTA form
    for gid in gi_list:
        res = ncbi_dict[gid]
        head, body = make_fasta(res)
        fp.write(head + body + '\n')
        print(head)
    fp.close()
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import textwrap
from Bio import GenBank
assert sys.version_info[:2] >= (2, 4)
def make_fasta(rec):
    """Render one GenBank record as a FASTA (header, body) tuple."""
    meta = rec.annotations
    head = '>gi:%s, id:%s, org:%s, date:%s\n' % (
        meta.get('gi', ''), rec.id, meta.get('organism', ''), meta.get('date', ''))
    # 80-column wrapped sequence lines
    body = '\n'.join(textwrap.wrap(rec.seq.data, width=80))
    return head, body
if __name__ == '__main__':
    # CLI: <mode> <search text or GI numbers> <output fasta file>
    mode = sys.argv[1]
    text = sys.argv[2]
    output_file = sys.argv[3]
    print('Searching for %s <br>' % text)
    # check if inputs are all numbers
    try:
        gi_list = text.split()
        [int(_) for _ in gi_list]
    except ValueError:
        # not plain GI numbers -> treat the text as a GenBank search query
        gi_list = GenBank.search_for(text, max_ids=10)
    # NOTE(review): GenBank.search_for / NCBIDictionary were removed from
    # modern Biopython (Bio.Entrez replaces them) -- this script targets an
    # old Biopython release; confirm before upgrading.
    fp = open(output_file, 'wt')
    record_parser = GenBank.FeatureParser()
    ncbi_dict = GenBank.NCBIDictionary(mode, 'genbank', parser=record_parser)
    # fetch each record and append it to the output file in FASTA form
    for gid in gi_list:
        res = ncbi_dict[gid]
        head, body = make_fasta(res)
        fp.write(head + body + '\n')
        print(head)
    fp.close()
|
en
| 0.455255
|
#!/usr/bin/env python Creates fasta format from a record # check if inputs are all numbers
| 2.751599
| 3
|
yearn/prices/curve.py
|
pmdaly/yearn-exporter
| 0
|
6626206
|
"""
Curve Registry adapter. Supports regular pools, factory pools and crypto pools.
See also https://curve.readthedocs.io/registry-address-provider.html
Main Registry (id 0)
v1 = 0x7D86446dDb609eD0F5f8684AcF30380a356b2B4c
v2 = 0x90E00ACe148ca3b23Ac1bC8C240C2a7Dd9c2d7f5
Exchanges (id 2)
v1 = 0xD1602F68CC7C4c7B59D686243EA35a9C73B0c6a2
v2 = 0x2393c368C70B42f055a4932a3fbeC2AC9C548011
Metapool Factory (id 3)
v1 = 0x0959158b6040D32d04c301A72CBFD6b39E21c9AE
v2 = 0xB9fC157394Af804a3578134A6585C0dc9cc990d4
"""
import logging
import threading
import time
from collections import defaultdict
from enum import IntEnum
from typing import Dict, List, Optional
from brownie import ZERO_ADDRESS, Contract, chain
from brownie.convert import to_address
from brownie.convert.datatypes import EthAddress
from cachetools.func import lru_cache, ttl_cache
from yearn.decorators import sentry_catch_all, wait_or_exit_after
from yearn.events import create_filter, decode_logs
from yearn.exceptions import UnsupportedNetwork
from yearn.multicall2 import fetch_multicall
from yearn.networks import Network
from yearn.prices import magic
from yearn.typing import Address, AddressOrContract, Block
from yearn.utils import Singleton, contract
logger = logging.getLogger(__name__)
# Curve address provider (same address reused for each supported chain below).
ADDRESS_PROVIDER = '0x0000000022D53366457F9d5E68Ec105046FC4383'
# Base tokens preferred when pricing an LP token via its virtual price
# (see CurveRegistry.get_price).
BASIC_TOKENS = {
    "0x6B175474E89094C44Da98b954EedeAC495271d0F", # dai
    "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", # weth
    "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE", # eth
    "0x2260FAC5E5542a773Aa44fBCfeDf7C193bc2C599", # wbtc
    "0xD71eCFF9342A5Ced620049e616c5035F1dB98620", # seur
    "0x514910771AF9Ca656af840dff83E8264EcF986CA", # link
    "0xD533a949740bb3306d119CC777fa900bA034cd52", # crv
    "0x95dFDC8161832e4fF7816aC4B6367CE201538253", # ibkrw
    "<KEY>", # ibaud
    "<KEY>", # ibgbp
    "0x1CC481cE2BD2EC7Bf67d1Be64d4878b16078F309", # ibchf
    "0x5555f75e3d5278082200Fb451D1b6bA946D8e13b", # ibjpy
    "0xC581b735A1688071A1746c968e0798D642EDE491", # eurt
    "0x99D8a9C45b2ecA8864373A26D1459e3Dff1e17F3", # mim
    "0x853d955aCEf822Db058eb8505911ED77F175b99e", # frax
    "0x956F47F50A910163D8BF957Cf5846D573E7f87CA", # fei
    "0xBC6DA0FE9aD5f3b0d58160288917AA56653660E9", # alusd
}
# Per-network Curve deployment addresses; chains missing here make
# CurveRegistry.__init__ raise UnsupportedNetwork.
curve_contracts = {
    Network.Mainnet: {
        'address_provider': ADDRESS_PROVIDER,
        'crv': '0xD533a949740bb3306d119CC777fa900bA034cd52',
        'voting_escrow': '0x5f3b5DfEb7B28CDbD7FAba78963EE202a494e2A2',
        'gauge_controller': '0x2F50D538606Fa9EDD2B11E2446BEb18C9D5846bB',
    },
    Network.Gnosis: {
        # Curve has not properly initialized the provider. contract(self.address_provider.get_address(5)) returns 0x0.
        # CurveRegistry class has extra handling to fetch registry in this case.
        'address_provider': ADDRESS_PROVIDER,
    },
    Network.Fantom: {
        'address_provider': ADDRESS_PROVIDER,
    },
    Network.Arbitrum: {
        'address_provider': ADDRESS_PROVIDER,
    },
}
class Ids(IntEnum):
    """Identifier slots exposed by the Curve address provider (see module docstring)."""
    Main_Registry = 0
    PoolInfo_Getters = 1
    Exchanges = 2
    Metapool_Factory = 3
    Fee_Distributor = 4
    CryptoSwap_Registry = 5
    CryptoPool_Factory = 6
class CurveRegistry(metaclass=Singleton):
    """Singleton index of Curve pools on the current chain.

    Watches the on-chain address provider for registries and factories and
    maintains lp_token -> pool mappings; a daemon thread (watch_events)
    refreshes the maps, and __init__ blocks (via wait_or_exit_after) until
    the first scan completes.
    """
    @wait_or_exit_after
    def __init__(self) -> None:
        if chain.id not in curve_contracts:
            raise UnsupportedNetwork("curve is not supported on this network")
        addrs = curve_contracts[chain.id]
        # CRV / escrow / gauge-controller contracts only exist in the Mainnet config
        if chain.id == Network.Mainnet:
            self.crv = contract(addrs['crv'])
            self.voting_escrow = contract(addrs['voting_escrow'])
            self.gauge_controller = contract(addrs['gauge_controller'])
        self.identifiers = defaultdict(list) # id -> versions
        self.registries = defaultdict(set) # registry -> pools
        self.factories = defaultdict(set) # factory -> pools
        self.token_to_pool = dict() # lp_token -> pool
        self.address_provider = contract(addrs['address_provider'])
        self._done = threading.Event()
        self._thread = threading.Thread(target=self.watch_events, daemon=True)
        self._has_exception = False
        self._thread.start()
    @sentry_catch_all
    def watch_events(self) -> None:
        """Daemon loop: poll address-provider and registry events, rebuild the
        pool maps, then sleep 600 s; sets self._done after every pass."""
        address_provider_filter = create_filter(str(self.address_provider))
        registries = []
        registries_filter = None
        registry_logs = []
        address_logs = address_provider_filter.get_all_entries()
        while True:
            # fetch all registries and factories from address provider
            for event in decode_logs(address_logs):
                if event.name == 'NewAddressIdentifier':
                    self.identifiers[Ids(event['id'])].append(event['addr'])
                if event.name == 'AddressModified':
                    self.identifiers[Ids(event['id'])].append(event['new_address'])
            # NOTE: Gnosis chain's address provider fails to provide registry via events.
            if not self.identifiers[Ids.Main_Registry]:
                self.identifiers[Ids.Main_Registry] = self.address_provider.get_registry()
            # if registries were updated, recreate the filter
            _registries = [
                self.identifiers[i][-1]
                for i in [Ids.Main_Registry, Ids.CryptoSwap_Registry]
                if self.identifiers[i]
            ]
            if _registries != registries:
                registries = _registries
                registries_filter = create_filter(registries)
                registry_logs = registries_filter.get_all_entries()
            # fetch pools from the latest registries
            for event in decode_logs(registry_logs):
                if event.name == 'PoolAdded':
                    self.registries[event.address].add(event['pool'])
                    lp_token = contract(event.address).get_lp_token(event['pool'])
                    self.token_to_pool[lp_token] = event['pool']
                elif event.name == 'PoolRemoved':
                    self.registries[event.address].discard(event['pool'])
            # load metapool and curve v5 factories
            self.load_factories()
            if not self._done.is_set():
                self._done.set()
            logger.info(f'loaded {len(self.token_to_pool)} pools from {len(self.registries)} registries and {len(self.factories)} factories')
            time.sleep(600)
            # read new logs at end of loop
            address_logs = address_provider_filter.get_new_entries()
            if registries_filter:
                registry_logs = registries_filter.get_new_entries()
    def read_pools(self, registry: Address) -> List[EthAddress]:
        """Return every pool address listed by ``registry``, via one multicall."""
        registry = contract(registry)
        return fetch_multicall(
            *[[registry, 'pool_list', i] for i in range(registry.pool_count())]
        )
    def load_factories(self) -> None:
        """Enumerate metapool and crypto-pool factories and index their pools."""
        # factory events are quite useless, so we use a different method
        for factory in self.identifiers[Ids.Metapool_Factory]:
            pool_list = self.read_pools(factory)
            for pool in pool_list:
                # for metpool factories pool is the same as lp token
                self.token_to_pool[pool] = pool
                self.factories[factory].add(pool)
        # if there are factories that haven't yet been added to the on-chain address provider,
        # please refer to commit 3f70c4246615017d87602e03272b3ed18d594d3c to see how to add them manually
        for factory in self.identifiers[Ids.CryptoPool_Factory]:
            pool_list = self.read_pools(factory)
            for pool in pool_list:
                if pool in self.factories[factory]:
                    continue
                # for curve v5 pools, pool and lp token are separate
                lp_token = contract(factory).get_token(pool)
                self.token_to_pool[lp_token] = pool
                self.factories[factory].add(pool)
    def get_factory(self, pool: Address) -> EthAddress:
        """
        Get metapool factory that has spawned a pool. Returns None when the
        pool is not tracked by any known factory.
        """
        try:
            return next(
                factory
                for factory, factory_pools in self.factories.items()
                if str(pool) in factory_pools
            )
        except StopIteration:
            return None
    def get_registry(self, pool: Address) -> EthAddress:
        """
        Get registry containing a pool. Returns None when no known registry
        lists the pool.
        """
        try:
            return next(
                registry
                for registry, pools in self.registries.items()
                if str(pool) in pools
            )
        except StopIteration:
            return None
    def __contains__(self, token: AddressOrContract) -> bool:
        # `token in curve` <=> token is a known Curve LP token
        return self.get_pool(token) is not None
    @lru_cache(maxsize=None)
    def get_pool(self, token: AddressOrContract) -> Address:
        """
        Get Curve pool (swap) address by LP token address. Supports factory pools.
        Returns None (implicitly) for unknown tokens.
        """
        token = to_address(token)
        if token in self.token_to_pool:
            return self.token_to_pool[token]
    @lru_cache(maxsize=None)
    def get_gauge(self, pool: AddressOrContract) -> EthAddress:
        """
        Get liquidity gauge address by pool. Tries the factory first, then the
        registry; returns None (implicitly) when neither yields a gauge.
        """
        pool = to_address(pool)
        factory = self.get_factory(pool)
        registry = self.get_registry(pool)
        if factory and hasattr(contract(factory), 'get_gauge'):
            gauge = contract(factory).get_gauge(pool)
            if gauge != ZERO_ADDRESS:
                return gauge
        if registry:
            gauges, _ = contract(registry).get_gauges(pool)
            if gauges[0] != ZERO_ADDRESS:
                return gauges[0]
    @lru_cache(maxsize=None)
    def get_coins(self, pool: AddressOrContract) -> List[EthAddress]:
        """
        Get coins of pool.
        """
        pool = to_address(pool)
        factory = self.get_factory(pool)
        registry = self.get_registry(pool)
        # NOTE(review): if the pool is in neither a factory nor a registry,
        # `coins` is unbound on the next line -- confirm this path cannot occur.
        if factory:
            coins = contract(factory).get_coins(pool)
        elif registry:
            coins = contract(registry).get_coins(pool)
        # pool not in registry
        if set(coins) == {ZERO_ADDRESS}:
            coins = fetch_multicall(*[[contract(pool), 'coins', i] for i in range(8)])
        return [coin for coin in coins if coin not in {None, ZERO_ADDRESS}]
    @lru_cache(maxsize=None)
    def get_underlying_coins(self, pool: Address) -> List[EthAddress]:
        """Return the pool's underlying coins, falling back to get_coins when
        no underlying information is available."""
        pool = to_address(pool)
        factory = self.get_factory(pool)
        registry = self.get_registry(pool)
        if factory:
            factory = contract(factory)
            # new factory reverts for non-meta pools
            if not hasattr(factory, 'is_meta') or factory.is_meta(pool):
                if hasattr(factory, 'get_underlying_coins'):
                    coins = factory.get_underlying_coins(pool)
                elif hasattr(factory, 'get_coins'):
                    coins = factory.get_coins(pool)
                else:
                    coins = {ZERO_ADDRESS}
            else:
                coins = factory.get_coins(pool)
        elif registry:
            registry = contract(registry)
            if hasattr(registry, 'get_underlying_coins'):
                coins = registry.get_underlying_coins(pool)
            elif hasattr(registry, 'get_coins'):
                coins = registry.get_coins(pool)
        # pool not in registry, not checking for underlying_coins here
        # NOTE(review): `coins` may be unbound when neither factory nor
        # registry matched (or the registry had neither getter) -- confirm.
        if set(coins) == {ZERO_ADDRESS}:
            return self.get_coins(pool)
        return [coin for coin in coins if coin != ZERO_ADDRESS]
    @lru_cache(maxsize=None)
    def get_decimals(self, pool: AddressOrContract) -> List[int]:
        """Return decimals for each coin of the pool (zero entries dropped)."""
        pool = to_address(pool)
        factory = self.get_factory(pool)
        registry = self.get_registry(pool)
        source = contract(factory or registry)
        decimals = source.get_decimals(pool)
        # pool not in registry
        if not any(decimals):
            coins = self.get_coins(pool)
            decimals = fetch_multicall(
                *[[contract(token), 'decimals'] for token in coins]
            )
        return [dec for dec in decimals if dec != 0]
    def get_balances(self, pool: AddressOrContract, block: Optional[Block] = None) -> Dict[EthAddress,float]:
        """
        Get {token: balance} of liquidity in the pool, scaled by each coin's
        decimals.
        """
        pool = to_address(pool)
        factory = self.get_factory(pool)
        registry = self.get_registry(pool)
        coins = self.get_coins(pool)
        decimals = self.get_decimals(pool)
        try:
            source = contract(factory or registry)
            balances = source.get_balances(pool, block_identifier=block)
        # fallback for historical queries
        except ValueError as e:
            if str(e) not in [
                'execution reverted',
                'No data was returned - the call likely reverted'
            ]: raise
            balances = fetch_multicall(
                *[[contract(pool), 'balances', i] for i, _ in enumerate(coins)],
                block=block
            )
            if not any(balances):
                raise ValueError(f'could not fetch balances {pool} at {block}')
        return {
            coin: balance / 10 ** dec
            for coin, balance, dec in zip(coins, balances, decimals)
        }
    def get_virtual_price(self, pool: Address, block: Optional[Block] = None) -> Optional[float]:
        """Return the pool's virtual price (down-scaled by 1e18), or None when
        the on-chain call reverts."""
        pool = contract(pool)
        try:
            return pool.get_virtual_price(block_identifier=block) / 1e18
        except ValueError as e:
            if str(e) == "execution reverted":
                return None
            raise
    def get_tvl(self, pool: AddressOrContract, block: Optional[Block] = None) -> float:
        """
        Get total value in Curve pool, summing each coin's balance times its
        magic price.
        """
        pool = to_address(pool)
        balances = self.get_balances(pool, block=block)
        return sum(
            amount * magic.get_price(coin, block=block)
            for coin, amount in balances.items()
        )
    @ttl_cache(maxsize=None, ttl=600)
    def get_price(self, token: AddressOrContract, block: Optional[Block] = None) -> Optional[float]:
        """Price one Curve LP token: tvl/supply for crypto pools, otherwise
        virtual price times the price of a preferred base coin."""
        token = to_address(token)
        pool = self.get_pool(token)
        # crypto pools can have different tokens, use slow method
        if hasattr(contract(pool), 'price_oracle'):
            try:
                tvl = self.get_tvl(pool, block=block)
            except ValueError:
                return None
            supply = contract(token).totalSupply(block_identifier=block) / 1e18
            if supply == 0:
                return 0
            return tvl / supply
        # approximate by using the most common base token we find
        coins = self.get_underlying_coins(pool)
        try:
            coin = (set(coins) & BASIC_TOKENS).pop()
        except KeyError:
            coin = coins[0]
        virtual_price = self.get_virtual_price(pool, block)
        if virtual_price:
            return virtual_price * magic.get_price(coin, block)
    def calculate_boost(self, gauge: Contract, addr, block: Optional[Block] = None) -> Dict[str,float]:
        """Compute a user's CRV gauge boost statistics (balances, boost and the
        maximum boost attainable) from gauge and voting-escrow state."""
        results = fetch_multicall(
            [gauge, "balanceOf", addr],
            [gauge, "totalSupply"],
            [gauge, "working_balances", addr],
            [gauge, "working_supply"],
            [self.voting_escrow, "balanceOf", addr],
            [self.voting_escrow, "totalSupply"],
            block=block,
        )
        # all quantities are 1e18-scaled on-chain
        results = [x / 1e18 for x in results]
        (
            gauge_balance,
            gauge_total,
            working_balance,
            working_supply,
            vecrv_balance,
            vecrv_total,
        ) = results
        try:
            boost = working_balance / gauge_balance * 2.5
        except ZeroDivisionError:
            boost = 1
        min_vecrv = vecrv_total * gauge_balance / gauge_total
        lim = gauge_balance * 0.4 + gauge_total * min_vecrv / vecrv_total * 0.6
        lim = min(gauge_balance, lim)
        _working_supply = working_supply + lim - working_balance
        noboost_lim = gauge_balance * 0.4
        noboost_supply = working_supply + noboost_lim - working_balance
        try:
            max_boost_possible = (lim / _working_supply) / (
                noboost_lim / noboost_supply
            )
        except ZeroDivisionError:
            max_boost_possible = 1
        return {
            "gauge balance": gauge_balance,
            "gauge total": gauge_total,
            "vecrv balance": vecrv_balance,
            "vecrv total": vecrv_total,
            "working balance": working_balance,
            "working total": working_supply,
            "boost": boost,
            "max boost": max_boost_possible,
            "min vecrv": min_vecrv,
        }
    def calculate_apy(self, gauge: Contract, lp_token: AddressOrContract, block: Optional[Block] = None) -> Dict[str,float]:
        """Compute CRV reward rate and APY figures for a gauge and its LP token."""
        crv_price = magic.get_price(self.crv)
        pool = contract(self.get_pool(lp_token))
        results = fetch_multicall(
            [gauge, "working_supply"],
            [self.gauge_controller, "gauge_relative_weight", gauge],
            [gauge, "inflation_rate"],
            [pool, "get_virtual_price"],
            block=block,
        )
        results = [x / 1e18 for x in results]
        working_supply, relative_weight, inflation_rate, virtual_price = results
        token_price = magic.get_price(lp_token, block=block)
        try:
            # annualized CRV emissions attributable to this gauge, per dollar staked
            rate = (
                inflation_rate * relative_weight * 86400 * 365 / working_supply * 0.4
            ) / token_price
        except ZeroDivisionError:
            rate = 0
        return {
            "crv price": crv_price,
            "relative weight": relative_weight,
            "inflation rate": inflation_rate,
            "virtual price": virtual_price,
            "crv reward rate": rate,
            "crv apy": rate * crv_price,
            "token price": token_price,
        }
# Module-level singleton; stays None on chains Curve does not support.
curve = None
try:
    curve = CurveRegistry()
except UnsupportedNetwork:
    pass
|
"""
Curve Registry adapter. Supports regular pools, factory pools and crypto pools.
See also https://curve.readthedocs.io/registry-address-provider.html
Main Registry (id 0)
v1 = 0x7D86446dDb609eD0F5f8684AcF30380a356b2B4c
v2 = 0x90E00ACe148ca3b23Ac1bC8C240C2a7Dd9c2d7f5
Exchanges (id 2)
v1 = 0xD1602F68CC7C4c7B59D686243EA35a9C73B0c6a2
v2 = 0x2393c368C70B42f055a4932a3fbeC2AC9C548011
Metapool Factory (id 3)
v1 = 0x0959158b6040D32d04c301A72CBFD6b39E21c9AE
v2 = 0xB9fC157394Af804a3578134A6585C0dc9cc990d4
"""
import logging
import threading
import time
from collections import defaultdict
from enum import IntEnum
from typing import Dict, List, Optional
from brownie import ZERO_ADDRESS, Contract, chain
from brownie.convert import to_address
from brownie.convert.datatypes import EthAddress
from cachetools.func import lru_cache, ttl_cache
from yearn.decorators import sentry_catch_all, wait_or_exit_after
from yearn.events import create_filter, decode_logs
from yearn.exceptions import UnsupportedNetwork
from yearn.multicall2 import fetch_multicall
from yearn.networks import Network
from yearn.prices import magic
from yearn.typing import Address, AddressOrContract, Block
from yearn.utils import Singleton, contract
logger = logging.getLogger(__name__)
# Curve address provider (same address reused for each supported chain below).
ADDRESS_PROVIDER = '0x0000000022D53366457F9d5E68Ec105046FC4383'
# Base tokens preferred when pricing an LP token via its virtual price
# (see CurveRegistry.get_price).
BASIC_TOKENS = {
    "0x6B175474E89094C44Da98b954EedeAC495271d0F", # dai
    "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", # weth
    "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE", # eth
    "0x2260FAC5E5542a773Aa44fBCfeDf7C193bc2C599", # wbtc
    "0xD71eCFF9342A5Ced620049e616c5035F1dB98620", # seur
    "0x514910771AF9Ca656af840dff83E8264EcF986CA", # link
    "0xD533a949740bb3306d119CC777fa900bA034cd52", # crv
    "0x95dFDC8161832e4fF7816aC4B6367CE201538253", # ibkrw
    "<KEY>", # ibaud
    "<KEY>", # ibgbp
    "0x1CC481cE2BD2EC7Bf67d1Be64d4878b16078F309", # ibchf
    "0x5555f75e3d5278082200Fb451D1b6bA946D8e13b", # ibjpy
    "0xC581b735A1688071A1746c968e0798D642EDE491", # eurt
    "0x99D8a9C45b2ecA8864373A26D1459e3Dff1e17F3", # mim
    "0x853d955aCEf822Db058eb8505911ED77F175b99e", # frax
    "0x956F47F50A910163D8BF957Cf5846D573E7f87CA", # fei
    "0xBC6DA0FE9aD5f3b0d58160288917AA56653660E9", # alusd
}
# Per-network Curve deployment addresses; chains missing here make
# CurveRegistry.__init__ raise UnsupportedNetwork.
curve_contracts = {
    Network.Mainnet: {
        'address_provider': ADDRESS_PROVIDER,
        'crv': '0xD533a949740bb3306d119CC777fa900bA034cd52',
        'voting_escrow': '0x5f3b5DfEb7B28CDbD7FAba78963EE202a494e2A2',
        'gauge_controller': '0x2F50D538606Fa9EDD2B11E2446BEb18C9D5846bB',
    },
    Network.Gnosis: {
        # Curve has not properly initialized the provider. contract(self.address_provider.get_address(5)) returns 0x0.
        # CurveRegistry class has extra handling to fetch registry in this case.
        'address_provider': ADDRESS_PROVIDER,
    },
    Network.Fantom: {
        'address_provider': ADDRESS_PROVIDER,
    },
    Network.Arbitrum: {
        'address_provider': ADDRESS_PROVIDER,
    },
}
class Ids(IntEnum):
    """Identifier slots exposed by the Curve address provider (see module docstring)."""
    Main_Registry = 0
    PoolInfo_Getters = 1
    Exchanges = 2
    Metapool_Factory = 3
    Fee_Distributor = 4
    CryptoSwap_Registry = 5
    CryptoPool_Factory = 6
class CurveRegistry(metaclass=Singleton):
@wait_or_exit_after
def __init__(self) -> None:
if chain.id not in curve_contracts:
raise UnsupportedNetwork("curve is not supported on this network")
addrs = curve_contracts[chain.id]
if chain.id == Network.Mainnet:
self.crv = contract(addrs['crv'])
self.voting_escrow = contract(addrs['voting_escrow'])
self.gauge_controller = contract(addrs['gauge_controller'])
self.identifiers = defaultdict(list) # id -> versions
self.registries = defaultdict(set) # registry -> pools
self.factories = defaultdict(set) # factory -> pools
self.token_to_pool = dict() # lp_token -> pool
self.address_provider = contract(addrs['address_provider'])
self._done = threading.Event()
self._thread = threading.Thread(target=self.watch_events, daemon=True)
self._has_exception = False
self._thread.start()
@sentry_catch_all
def watch_events(self) -> None:
address_provider_filter = create_filter(str(self.address_provider))
registries = []
registries_filter = None
registry_logs = []
address_logs = address_provider_filter.get_all_entries()
while True:
# fetch all registries and factories from address provider
for event in decode_logs(address_logs):
if event.name == 'NewAddressIdentifier':
self.identifiers[Ids(event['id'])].append(event['addr'])
if event.name == 'AddressModified':
self.identifiers[Ids(event['id'])].append(event['new_address'])
# NOTE: Gnosis chain's address provider fails to provide registry via events.
if not self.identifiers[Ids.Main_Registry]:
self.identifiers[Ids.Main_Registry] = self.address_provider.get_registry()
# if registries were updated, recreate the filter
_registries = [
self.identifiers[i][-1]
for i in [Ids.Main_Registry, Ids.CryptoSwap_Registry]
if self.identifiers[i]
]
if _registries != registries:
registries = _registries
registries_filter = create_filter(registries)
registry_logs = registries_filter.get_all_entries()
# fetch pools from the latest registries
for event in decode_logs(registry_logs):
if event.name == 'PoolAdded':
self.registries[event.address].add(event['pool'])
lp_token = contract(event.address).get_lp_token(event['pool'])
self.token_to_pool[lp_token] = event['pool']
elif event.name == 'PoolRemoved':
self.registries[event.address].discard(event['pool'])
# load metapool and curve v5 factories
self.load_factories()
if not self._done.is_set():
self._done.set()
logger.info(f'loaded {len(self.token_to_pool)} pools from {len(self.registries)} registries and {len(self.factories)} factories')
time.sleep(600)
# read new logs at end of loop
address_logs = address_provider_filter.get_new_entries()
if registries_filter:
registry_logs = registries_filter.get_new_entries()
def read_pools(self, registry: Address) -> List[EthAddress]:
registry = contract(registry)
return fetch_multicall(
*[[registry, 'pool_list', i] for i in range(registry.pool_count())]
)
def load_factories(self) -> None:
# factory events are quite useless, so we use a different method
for factory in self.identifiers[Ids.Metapool_Factory]:
pool_list = self.read_pools(factory)
for pool in pool_list:
# for metpool factories pool is the same as lp token
self.token_to_pool[pool] = pool
self.factories[factory].add(pool)
# if there are factories that haven't yet been added to the on-chain address provider,
# please refer to commit 3f70c4246615017d87602e03272b3ed18d594d3c to see how to add them manually
for factory in self.identifiers[Ids.CryptoPool_Factory]:
pool_list = self.read_pools(factory)
for pool in pool_list:
if pool in self.factories[factory]:
continue
# for curve v5 pools, pool and lp token are separate
lp_token = contract(factory).get_token(pool)
self.token_to_pool[lp_token] = pool
self.factories[factory].add(pool)
def get_factory(self, pool: Address) -> EthAddress:
"""
Get metapool factory that has spawned a pool.
"""
try:
return next(
factory
for factory, factory_pools in self.factories.items()
if str(pool) in factory_pools
)
except StopIteration:
return None
def get_registry(self, pool: Address) -> EthAddress:
"""
Get registry containing a pool.
"""
try:
return next(
registry
for registry, pools in self.registries.items()
if str(pool) in pools
)
except StopIteration:
return None
def __contains__(self, token: AddressOrContract) -> bool:
return self.get_pool(token) is not None
    @lru_cache(maxsize=None)  # NOTE(review): caching on a method keys on `self`; fine here since the registry is a module-level singleton
    def get_pool(self, token: AddressOrContract) -> Address:
        """
        Get Curve pool (swap) address by LP token address. Supports factory pools.

        Implicitly returns None for tokens not tracked by any registry/factory.
        """
        token = to_address(token)
        # token_to_pool is kept current by watch_events / load_factories
        if token in self.token_to_pool:
            return self.token_to_pool[token]
    @lru_cache(maxsize=None)
    def get_gauge(self, pool: AddressOrContract) -> EthAddress:
        """
        Get liquidity gauge address by pool.

        Prefers the spawning factory's gauge lookup, then falls back to the
        registry's gauge list; implicitly returns None if neither has one.
        """
        pool = to_address(pool)
        factory = self.get_factory(pool)
        registry = self.get_registry(pool)
        # not every factory version exposes get_gauge, hence the hasattr check
        if factory and hasattr(contract(factory), 'get_gauge'):
            gauge = contract(factory).get_gauge(pool)
            if gauge != ZERO_ADDRESS:
                return gauge
        if registry:
            # only the first gauge slot of the registry's gauge list is used
            gauges, _ = contract(registry).get_gauges(pool)
            if gauges[0] != ZERO_ADDRESS:
                return gauges[0]
@lru_cache(maxsize=None)
def get_coins(self, pool: AddressOrContract) -> List[EthAddress]:
"""
Get coins of pool.
"""
pool = to_address(pool)
factory = self.get_factory(pool)
registry = self.get_registry(pool)
if factory:
coins = contract(factory).get_coins(pool)
elif registry:
coins = contract(registry).get_coins(pool)
# pool not in registry
if set(coins) == {ZERO_ADDRESS}:
coins = fetch_multicall(*[[contract(pool), 'coins', i] for i in range(8)])
return [coin for coin in coins if coin not in {None, ZERO_ADDRESS}]
@lru_cache(maxsize=None)
def get_underlying_coins(self, pool: Address) -> List[EthAddress]:
pool = to_address(pool)
factory = self.get_factory(pool)
registry = self.get_registry(pool)
if factory:
factory = contract(factory)
# new factory reverts for non-meta pools
if not hasattr(factory, 'is_meta') or factory.is_meta(pool):
if hasattr(factory, 'get_underlying_coins'):
coins = factory.get_underlying_coins(pool)
elif hasattr(factory, 'get_coins'):
coins = factory.get_coins(pool)
else:
coins = {ZERO_ADDRESS}
else:
coins = factory.get_coins(pool)
elif registry:
registry = contract(registry)
if hasattr(registry, 'get_underlying_coins'):
coins = registry.get_underlying_coins(pool)
elif hasattr(registry, 'get_coins'):
coins = registry.get_coins(pool)
# pool not in registry, not checking for underlying_coins here
if set(coins) == {ZERO_ADDRESS}:
return self.get_coins(pool)
return [coin for coin in coins if coin != ZERO_ADDRESS]
@lru_cache(maxsize=None)
def get_decimals(self, pool: AddressOrContract) -> List[int]:
pool = to_address(pool)
factory = self.get_factory(pool)
registry = self.get_registry(pool)
source = contract(factory or registry)
decimals = source.get_decimals(pool)
# pool not in registry
if not any(decimals):
coins = self.get_coins(pool)
decimals = fetch_multicall(
*[[contract(token), 'decimals'] for token in coins]
)
return [dec for dec in decimals if dec != 0]
def get_balances(self, pool: AddressOrContract, block: Optional[Block] = None) -> Dict[EthAddress,float]:
"""
Get {token: balance} of liquidity in the pool.
"""
pool = to_address(pool)
factory = self.get_factory(pool)
registry = self.get_registry(pool)
coins = self.get_coins(pool)
decimals = self.get_decimals(pool)
try:
source = contract(factory or registry)
balances = source.get_balances(pool, block_identifier=block)
# fallback for historical queries
except ValueError as e:
if str(e) not in [
'execution reverted',
'No data was returned - the call likely reverted'
]: raise
balances = fetch_multicall(
*[[contract(pool), 'balances', i] for i, _ in enumerate(coins)],
block=block
)
if not any(balances):
raise ValueError(f'could not fetch balances {pool} at {block}')
return {
coin: balance / 10 ** dec
for coin, balance, dec in zip(coins, balances, decimals)
}
def get_virtual_price(self, pool: Address, block: Optional[Block] = None) -> Optional[float]:
pool = contract(pool)
try:
return pool.get_virtual_price(block_identifier=block) / 1e18
except ValueError as e:
if str(e) == "execution reverted":
return None
raise
def get_tvl(self, pool: AddressOrContract, block: Optional[Block] = None) -> float:
"""
Get total value in Curve pool.
"""
pool = to_address(pool)
balances = self.get_balances(pool, block=block)
return sum(
amount * magic.get_price(coin, block=block)
for coin, amount in balances.items()
)
    @ttl_cache(maxsize=None, ttl=600)
    def get_price(self, token: AddressOrContract, block: Optional[Block] = None) -> Optional[float]:
        """Price one Curve LP token.

        Crypto pools (those exposing ``price_oracle``) are priced as
        tvl / totalSupply; other pools as virtual_price * price(base coin).
        Returns None when the price cannot be determined.
        """
        token = to_address(token)
        pool = self.get_pool(token)
        # NOTE(review): pool may be None for unknown tokens, in which case
        # contract(pool) below would fail — confirm callers pass known LP tokens.
        # crypto pools can have different tokens, use slow method
        if hasattr(contract(pool), 'price_oracle'):
            try:
                tvl = self.get_tvl(pool, block=block)
            except ValueError:
                return None
            # lp supply assumed to be 18-decimal — TODO confirm
            supply = contract(token).totalSupply(block_identifier=block) / 1e18
            if supply == 0:
                return 0
            return tvl / supply
        # approximate by using the most common base token we find
        coins = self.get_underlying_coins(pool)
        try:
            coin = (set(coins) & BASIC_TOKENS).pop()
        except KeyError:
            # no well-known base token in this pool; use the first coin
            coin = coins[0]
        virtual_price = self.get_virtual_price(pool, block)
        # implicitly returns None when virtual_price is unavailable or zero
        if virtual_price:
            return virtual_price * magic.get_price(coin, block)
    def calculate_boost(self, gauge: Contract, addr, block: Optional[Block] = None) -> Dict[str,float]:
        """Compute CRV boost stats for ``addr`` on a liquidity gauge.

        Implements Curve's boost formula: the working balance is capped at 40%
        of the gauge deposit plus a veCRV-share bonus, for at most a 2.5x boost.
        All on-chain values are 18-decimal fixed point and scaled down here.
        """
        # single multicall for all gauge + voting-escrow balances
        results = fetch_multicall(
            [gauge, "balanceOf", addr],
            [gauge, "totalSupply"],
            [gauge, "working_balances", addr],
            [gauge, "working_supply"],
            [self.voting_escrow, "balanceOf", addr],
            [self.voting_escrow, "totalSupply"],
            block=block,
        )
        results = [x / 1e18 for x in results]  # scale 18-decimal fixed point
        (
            gauge_balance,
            gauge_total,
            working_balance,
            working_supply,
            vecrv_balance,
            vecrv_total,
        ) = results
        try:
            boost = working_balance / gauge_balance * 2.5
        except ZeroDivisionError:
            boost = 1  # no gauge deposit: neutral boost
        # veCRV matching the user's proportional share of the gauge deposits
        min_vecrv = vecrv_total * gauge_balance / gauge_total
        lim = gauge_balance * 0.4 + gauge_total * min_vecrv / vecrv_total * 0.6
        lim = min(gauge_balance, lim)
        # hypothetical working supply if this user reached the boosted limit
        _working_supply = working_supply + lim - working_balance
        # baseline (no veCRV) limit and supply for the same deposit
        noboost_lim = gauge_balance * 0.4
        noboost_supply = working_supply + noboost_lim - working_balance
        try:
            max_boost_possible = (lim / _working_supply) / (
                noboost_lim / noboost_supply
            )
        except ZeroDivisionError:
            max_boost_possible = 1
        return {
            "gauge balance": gauge_balance,
            "gauge total": gauge_total,
            "vecrv balance": vecrv_balance,
            "vecrv total": vecrv_total,
            "working balance": working_balance,
            "working total": working_supply,
            "boost": boost,
            "max boost": max_boost_possible,
            "min vecrv": min_vecrv,
        }
    def calculate_apy(self, gauge: Contract, lp_token: AddressOrContract, block: Optional[Block] = None) -> Dict[str,float]:
        """Compute base CRV reward rate and APY stats for a gauge.

        NOTE(review): crv_price is fetched at the latest block even when
        ``block`` is supplied — confirm this is intended for historical calls.
        """
        crv_price = magic.get_price(self.crv)
        pool = contract(self.get_pool(lp_token))
        results = fetch_multicall(
            [gauge, "working_supply"],
            [self.gauge_controller, "gauge_relative_weight", gauge],
            [gauge, "inflation_rate"],
            [pool, "get_virtual_price"],
            block=block,
        )
        results = [x / 1e18 for x in results]  # scale 18-decimal fixed point
        working_supply, relative_weight, inflation_rate, virtual_price = results
        token_price = magic.get_price(lp_token, block=block)
        try:
            # CRV per year (rate * weight * 86400 s/day * 365 days), per unit of
            # working supply, at the 0.4 unboosted share, per $ of LP token
            rate = (
                inflation_rate * relative_weight * 86400 * 365 / working_supply * 0.4
            ) / token_price
        except ZeroDivisionError:
            rate = 0  # gauge has no working supply
        return {
            "crv price": crv_price,
            "relative weight": relative_weight,
            "inflation rate": inflation_rate,
            "virtual price": virtual_price,
            "crv reward rate": rate,
            "crv apy": rate * crv_price,
            "token price": token_price,
        }
# Module-level singleton used by importers; stays None on chains where
# CurveRegistry() raises UnsupportedNetwork.
curve = None
try:
    curve = CurveRegistry()
except UnsupportedNetwork:
    pass
|
en
| 0.816256
|
Curve Registry adapter. Supports regular pools, factory pools and crypto pools. See also https://curve.readthedocs.io/registry-address-provider.html Main Registry (id 0) v1 = 0x7D86446dDb609eD0F5f8684AcF30380a356b2B4c v2 = 0x90E00ACe148ca3b23Ac1bC8C240C2a7Dd9c2d7f5 Exchanges (id 2) v1 = 0xD1602F68CC7C4c7B59D686243EA35a9C73B0c6a2 v2 = 0x2393c368C70B42f055a4932a3fbeC2AC9C548011 Metapool Factory (id 3) v1 = 0x0959158b6040D32d04c301A72CBFD6b39E21c9AE v2 = 0xB9fC157394Af804a3578134A6585C0dc9cc990d4 # dai # weth # eth # wbtc # seur # link # crv # ibkrw # ibaud # ibgbp # ibchf # ibjpy # eurt # mim # frax # fei # alusd # Curve has not properly initialized the provider. contract(self.address_provider.get_address(5)) returns 0x0. # CurveRegistry class has extra handling to fetch registry in this case. # id -> versions # registry -> pools # factory -> pools # lp_token -> pool # fetch all registries and factories from address provider # NOTE: Gnosis chain's address provider fails to provide registry via events. # if registries were updated, recreate the filter # fetch pools from the latest registries # load metapool and curve v5 factories # read new logs at end of loop # factory events are quite useless, so we use a different method # for metpool factories pool is the same as lp token # if there are factories that haven't yet been added to the on-chain address provider, # please refer to commit 3f70c4246615017d87602e03272b3ed18d594d3c to see how to add them manually # for curve v5 pools, pool and lp token are separate Get metapool factory that has spawned a pool. Get registry containing a pool. Get Curve pool (swap) address by LP token address. Supports factory pools. Get liquidity gauge address by pool. Get coins of pool. # pool not in registry # new factory reverts for non-meta pools # pool not in registry, not checking for underlying_coins here # pool not in registry Get {token: balance} of liquidity in the pool. 
# fallback for historical queries Get total value in Curve pool. # crypto pools can have different tokens, use slow method # approximate by using the most common base token we find
| 2.018266
| 2
|
cscs-checks/compile/haswell_fma_check.py
|
stevenvdb/reframe
| 0
|
6626207
|
# Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.required_version('>=2.14')
@rfm.simple_test
class HaswellFmaCheck(rfm.CompileOnlyRegressionTest):
    """Compile-only check that -O3 for Haswell emits AVX2 FMA ('vfmadd')."""

    def __init__(self):
        self.descr = 'check for avx2 instructions'
        self.valid_systems = ['dom:login', 'daint:login', 'kesch:login']
        kesch_environs = [
            'PrgEnv-cray', 'PrgEnv-gnu', 'PrgEnv-cray-nompi',
            'PrgEnv-gnu-nompi'
        ]
        default_environs = [
            'PrgEnv-cray', 'PrgEnv-gnu',
            'PrgEnv-intel', 'PrgEnv-pgi'
        ]
        self.valid_prog_environs = (
            kesch_environs if self.current_system.name == 'kesch'
            else default_environs
        )
        self.modules = ['craype-haswell']
        self.sourcesdir = 'src/haswell_fma'
        self.build_system = 'Make'
        # -S stops after codegen so the sanity checks can grep the assembly
        self.build_system.cflags = ['-O3', '-S']
        self.build_system.cxxflags = ['-O3', '-S']
        self.build_system.fflags = ['-O3', '-S']
        fma_checks = [
            sn.assert_found(r'vfmadd', asm)
            for asm in ('vectorize_fma_c.s',
                        'vectorize_fma_cplusplus.s',
                        'vectorize_fma_ftn.s')
        ]
        self.sanity_patterns = sn.all(
            fma_checks + [sn.assert_not_found('warning|WARNING', self.stderr)]
        )
        self.maintainers = ['AJ', 'CB']
        self.tags = {'production', 'craype'}

    @rfm.run_before('compile')
    def setflags(self):
        """Adjust compiler flags per system/environment just before compiling."""
        on_kesch = self.current_system.name == 'kesch'
        if on_kesch and self.current_environ.name.startswith('PrgEnv-cray'):
            # Ignore CPATH warning
            self.build_system.cflags += ['-h nomessage=1254']
            self.build_system.cxxflags += ['-h nomessage=1254']
        elif on_kesch:
            self.build_system.cflags += ['-march=native']
            self.build_system.cxxflags += ['-march=native']
            self.build_system.fflags += ['-march=native']
        elif self.current_environ.name == 'PrgEnv-cray':
            self.build_system.cflags = ['-Ofast', '-S']
            self.build_system.cxxflags = ['-Ofast', '-S']
|
# Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.required_version('>=2.14')
@rfm.simple_test
class HaswellFmaCheck(rfm.CompileOnlyRegressionTest):
def __init__(self):
self.descr = 'check for avx2 instructions'
self.valid_systems = ['dom:login', 'daint:login', 'kesch:login']
if self.current_system.name == 'kesch':
self.valid_prog_environs = [
'PrgEnv-cray', 'PrgEnv-gnu', 'PrgEnv-cray-nompi',
'PrgEnv-gnu-nompi'
]
else:
self.valid_prog_environs = [
'PrgEnv-cray', 'PrgEnv-gnu',
'PrgEnv-intel', 'PrgEnv-pgi'
]
self.modules = ['craype-haswell']
self.sourcesdir = 'src/haswell_fma'
self.build_system = 'Make'
self.build_system.cflags = ['-O3', '-S']
self.build_system.cxxflags = ['-O3', '-S']
self.build_system.fflags = ['-O3', '-S']
self.sanity_patterns = sn.all([
sn.assert_found(r'vfmadd', 'vectorize_fma_c.s'),
sn.assert_found(r'vfmadd', 'vectorize_fma_cplusplus.s'),
sn.assert_found(r'vfmadd', 'vectorize_fma_ftn.s'),
sn.assert_not_found('warning|WARNING', self.stderr)
])
self.maintainers = ['AJ', 'CB']
self.tags = {'production', 'craype'}
@rfm.run_before('compile')
def setflags(self):
if self.current_system.name == 'kesch':
if self.current_environ.name.startswith('PrgEnv-cray'):
# Ignore CPATH warning
self.build_system.cflags += ['-h nomessage=1254']
self.build_system.cxxflags += ['-h nomessage=1254']
else:
self.build_system.cflags += ['-march=native']
self.build_system.cxxflags += ['-march=native']
self.build_system.fflags += ['-march=native']
else:
if self.current_environ.name == 'PrgEnv-cray':
self.build_system.cflags = ['-Ofast', '-S']
self.build_system.cxxflags = ['-Ofast', '-S']
|
en
| 0.60807
|
# Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich) # ReFrame Project Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: BSD-3-Clause # Ignore CPATH warning
| 1.8206
| 2
|
bin/run.py
|
alexdy2007/SussedAddTimeTableToGoogleCalander
| 0
|
6626208
|
from getClasses import getClasses
from addToCalender import addTocalendar
def run():
    """Fetch upcoming timetable classes and add them to the calendar.

    Errors in either step are reported on stdout; a failure to retrieve
    classes aborts the run (there is nothing to add).
    """
    print("Starting")
    getNumberOfWeekInAdvance = 2
    try:
        timeTableClasses = getClasses(getNumberOfWeekInAdvance)
    except Exception as e:
        # BUG FIX: format("msg {}", x) is the built-in format() with an invalid
        # format spec (raises ValueError); use str.format for substitution.
        print("An error has occurred in retrieving classes: {}".format(e))
        # BUG FIX: bail out — timeTableClasses is undefined past this point
        return
    try:
        addTocalendar(timeTableClasses)
    except Exception as e:
        print("An error has occurred in adding to calendar: {}".format(e))
    print("Finished")
# Script entry point.
if __name__ == "__main__":
    run()
|
from getClasses import getClasses
from addToCalender import addTocalendar
def run():
print("Starting")
getNumberOfWeekInAdvance = 2
try:
timeTableClasses = getClasses(getNumberOfWeekInAdvance)
except Exception as e:
print(format("A error has occured in retriving classes : {}", str(e)))
try:
addTocalendar(timeTableClasses)
except Exception as e:
print(format("A error has occured in adding to calendar: {}", str(e)))
print("Finished")
if __name__ == "__main__":
run()
|
none
| 1
| 3.13325
| 3
|
|
generate_all.py
|
fyviezhao/dressing-in-order
| 172
|
6626209
|
"""General-purpose training script for image-to-image translation.
This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and
different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').
It first creates model, dataset, and visualizer given the option.
It then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models.
The script supports continue/resume training. Use '--continue_train' to resume your previous training.
Example:
Train a CycleGAN model:
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Train a pix2pix model:
python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/train_options.py for more training options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import time
from options.test_options import TestOptions
from datasets import create_dataset
from models import create_model
import os, torch, shutil
from tqdm import tqdm
if __name__ == '__main__':
opt = TestOptions().parse() # get training options
if opt.square:
opt.crop_size = (opt.crop_size, opt.crop_size)
else:
opt.crop_size = (opt.crop_size, max(1,int(opt.crop_size*1.0/256*176)))
print("crop_size:", opt.crop_size)
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
dataset_size = len(dataset) # get the number of images in the dataset.
print('The number of training images = %d' % dataset_size)
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
total_iters = 0 # the total number of training iterations
model.eval()
generate_out_dir = os.path.join(opt.eval_output_dir + "_%s"%opt.epoch)
print("generate images at %s" % generate_out_dir)
os.mkdir(generate_out_dir)
model.isTrain = False
# generate
count = 0
for i, data in tqdm(enumerate(dataset), "generating for test split"): # inner loop within one epoch
with torch.no_grad():
model.set_input(data) # unpack data from dataset and apply preprocessing
model.forward()
count = model.save_batch(generate_out_dir, count)
|
"""General-purpose training script for image-to-image translation.
This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and
different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').
It first creates model, dataset, and visualizer given the option.
It then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models.
The script supports continue/resume training. Use '--continue_train' to resume your previous training.
Example:
Train a CycleGAN model:
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Train a pix2pix model:
python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/train_options.py for more training options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import time
from options.test_options import TestOptions
from datasets import create_dataset
from models import create_model
import os, torch, shutil
from tqdm import tqdm
if __name__ == '__main__':
opt = TestOptions().parse() # get training options
if opt.square:
opt.crop_size = (opt.crop_size, opt.crop_size)
else:
opt.crop_size = (opt.crop_size, max(1,int(opt.crop_size*1.0/256*176)))
print("crop_size:", opt.crop_size)
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
dataset_size = len(dataset) # get the number of images in the dataset.
print('The number of training images = %d' % dataset_size)
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
total_iters = 0 # the total number of training iterations
model.eval()
generate_out_dir = os.path.join(opt.eval_output_dir + "_%s"%opt.epoch)
print("generate images at %s" % generate_out_dir)
os.mkdir(generate_out_dir)
model.isTrain = False
# generate
count = 0
for i, data in tqdm(enumerate(dataset), "generating for test split"): # inner loop within one epoch
with torch.no_grad():
model.set_input(data) # unpack data from dataset and apply preprocessing
model.forward()
count = model.save_batch(generate_out_dir, count)
|
en
| 0.619603
|
General-purpose training script for image-to-image translation. This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization). You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model'). It first creates model, dataset, and visualizer given the option. It then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models. The script supports continue/resume training. Use '--continue_train' to resume your previous training. Example: Train a CycleGAN model: python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan Train a pix2pix model: python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA See options/base_options.py and options/train_options.py for more training options. See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md # get training options # create a dataset given opt.dataset_mode and other options # get the number of images in the dataset. # create a model given opt.model and other options # regular setup: load and print networks; create schedulers # the total number of training iterations # generate # inner loop within one epoch # unpack data from dataset and apply preprocessing
| 2.820544
| 3
|
joystick/pubsub_stick.py
|
quintest/Cloud-IoT-Core-Kit-Examples
| 66
|
6626210
|
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import json
import time
import ssl
import jwt
import paho.mqtt.client as mqtt
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
from subprocess import call
# Software SPI configuration:
CLK = 12
MISO = 23
MOSI = 24
CS = 25
mcp = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS, miso=MISO, mosi=MOSI)
# Servo pulse step range; servoSteps spans the usable positions
servoMin = 50
servoMax = 250
servoSteps = servoMax - servoMin
stickSensitivity = 5 # the lower the number the more sensitive we are to stick changes that transmit a message
stickToServoPositionRatio = 1024/float(servoSteps) # assume 10bit ADC
#Servo settings
# pwmGPIO = "18"
# pwmClock = "192"
# pwmRange = "2000"
# Update and publish readings at a rate of SENSOR_POLL per second.
# SENSOR_POLL=0 publishes as fast as the debounced stick reads allow.
SENSOR_POLL=0
SENSOR_DEBOUNCE=0.1
def create_jwt(project_id, private_key_file, algorithm):
    """Create a JWT (https://jwt.io) to establish an MQTT connection.

    Args:
        project_id: GCP project id; used as the token audience ('aud').
        private_key_file: path to the private key used to sign the token.
        algorithm: signing algorithm, 'RS256' or 'ES256' (must match the key).

    Returns:
        The encoded JWT, valid for 60 minutes from creation.
    """
    token = {
        'iat': datetime.datetime.utcnow(),
        'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
        'aud': project_id
    }
    with open(private_key_file, 'r') as f:
        private_key = f.read()
    # NOTE: Python 2 print statement; this script targets Python 2
    print 'Creating JWT using {} from private key file {}'.format(
        algorithm, private_key_file)
    return jwt.encode(token, private_key, algorithm=algorithm)
def error_str(rc):
    """Convert a Paho error to a human readable string."""
    # same output as '{}: {}'.format(...), rendered with %-formatting
    return '%s: %s' % (rc, mqtt.error_string(rc))
class Device(object):
    """Represents the state of a single device."""
    def __init__(self):
        #self.leftright = 512
        #self.updown = 512
        # last published servo step; 150 is mid-range of servoMin..servoMax
        self.servoStep = 150
        self.connected = False
        # self.acked = True
    def update_sensor_data(self):
        # Blocks until the joystick settles on a NEW quantized servo step.
        leftRightServoStep = self.servoStep
        #self.leftright = mcp.read_adc(0)
        #self.updown = mcp.read_adc(1)
        # while self.acked == False:
        #     pass
        #     print "."
        #self.acked = False
        #print 'leftRightServoStep', leftRightServoStep
        #poll until the stick moves
        while leftRightServoStep == self.servoStep:
            # read ADC channel 0 twice, SENSOR_DEBOUNCE apart; accept only stable values
            leftRightServoStepPreDeb = mcp.read_adc(0)/stickToServoPositionRatio
            time.sleep(SENSOR_DEBOUNCE)
            leftRightServoStepPostDeb = mcp.read_adc(0)/stickToServoPositionRatio
            if leftRightServoStepPreDeb == leftRightServoStepPostDeb:
                # quantize to multiples of stickSensitivity, then shift into servo range
                leftRightServoStep = int(leftRightServoStepPreDeb/stickSensitivity)*stickSensitivity
                leftRightServoStep = leftRightServoStep + servoMin
                #print 'leftRightServoStep', leftRightServoStep
        self.servoStep = leftRightServoStep
    def wait_for_connection(self, timeout):
        """Wait for the device to become connected."""
        total_time = 0
        while not self.connected and total_time < timeout:
            time.sleep(1)
            total_time += 1
        if not self.connected:
            raise RuntimeError('Could not connect to MQTT bridge.')
    def on_connect(self, unused_client, unused_userdata, unused_flags, rc):
        """Callback for when a device connects."""
        print 'Connection Result:', error_str(rc)
        self.connected = True
    def on_disconnect(self, unused_client, unused_userdata, rc):
        """Callback for when a device disconnects."""
        print 'Disconnected:', error_str(rc)
        self.connected = False
    def on_publish(self, unused_client, unused_userdata, unused_mid):
        """Callback when the device receives a PUBACK from the MQTT bridge."""
        # self.acked = True
        print 'Published message acked.'
def parse_command_line_args():
    """Parse command line arguments.

    Returns:
        argparse.Namespace with Cloud IoT / MQTT bridge connection settings.
    """
    parser = argparse.ArgumentParser(
        description='Example Google Cloud IoT MQTT device connection code.')
    parser.add_argument(
        '--project_id', required=True, help='GCP cloud project name')
    parser.add_argument(
        '--registry_id', required=True, help='Cloud IoT registry id')
    parser.add_argument('--device_id', required=True, help='Cloud IoT device id')
    parser.add_argument(
        '--private_key_file', required=True, help='Path to private key file.')
    parser.add_argument(
        '--algorithm',
        choices=('RS256', 'ES256'),
        required=True,
        help='Which encryption algorithm to use to generate the JWT.')
    parser.add_argument(
        '--cloud_region', default='us-central1', help='GCP cloud region')
    parser.add_argument(
        '--ca_certs',
        default='roots.pem',
        help='CA root certificate. Get from https://pki.google.com/roots.pem')
    parser.add_argument(
        '--num_messages',
        type=int,
        default=100,
        help='Number of messages to publish.')
    parser.add_argument(
        '--mqtt_bridge_hostname',
        default='mqtt.googleapis.com',
        help='MQTT bridge hostname.')
    # BUG FIX: without type=int, a port supplied on the command line stays a
    # string (the int default masked this); connect() needs an integer port.
    parser.add_argument(
        '--mqtt_bridge_port', type=int, default=8883, help='MQTT bridge port.')
    return parser.parse_args()
def main():
    """Connect to Cloud IoT Core over MQTT and publish joystick positions."""
    args = parse_command_line_args()
    # print "stickToServoPositionRatio", stickToServoPositionRatio
    #setup PWM for servo
    # err = call(["gpio", "-g", "mode", pwmGPIO, "pwm"])
    # err |= call(["gpio", "pwm-ms"])
    # err |= call(["gpio", "pwmc", pwmClock])
    # err |= call(["gpio", "pwmr", pwmRange])
    # if err != 0:
    #     print "gpio setup error:", err
    #     quit()
    # Create our MQTT client and connect to Cloud IoT.
    # The client id must be the full Cloud IoT device path.
    client = mqtt.Client(
        client_id='projects/{}/locations/{}/registries/{}/devices/{}'.format(
            args.project_id, args.cloud_region, args.registry_id, args.device_id))
    # The username is a placeholder; the JWT in `password` carries the credentials.
    client.username_pw_set(
        username='unused',
        password=create_jwt(args.project_id, args.private_key_file,
                            args.algorithm))
    client.tls_set(ca_certs=args.ca_certs, tls_version=ssl.PROTOCOL_TLSv1_2)
    device = Device()
    client.on_connect = device.on_connect
    client.on_publish = device.on_publish
    client.on_disconnect = device.on_disconnect
    client.connect(args.mqtt_bridge_hostname, args.mqtt_bridge_port)
    client.loop_start()
    # This is the topic that the device will publish telemetry events to.
    mqtt_telemetry_topic = '/devices/{}/events'.format(args.device_id)
    # This is the topic that the device will receive configuration updates on.
    mqtt_config_topic = '/devices/{}/config'.format(args.device_id)
    # Wait up to 5 seconds for the device to connect.
    device.wait_for_connection(5)
    # Subscribe to the config topic.
    client.subscribe(mqtt_config_topic, qos=1)
    # Update and publish stick position readings at a rate of one per SENSOR_POLL but poll the sensor for "stickSensitivity" changes.
    for _ in range(args.num_messages):
        # Block until the joystick settles on a new position.
        device.update_sensor_data()
        # Report the joystick's position to the server, by serializing it as a JSON
        # string.
        payload = json.dumps({'servoStep': device.servoStep})
        print 'Publishing payload', payload
        client.publish(mqtt_telemetry_topic, payload, qos=1)
        time.sleep(SENSOR_POLL)
    client.disconnect()
    client.loop_stop()
    print 'Finished loop successfully. Goodbye!'
# Script entry point.
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import json
import time
import ssl
import jwt
import paho.mqtt.client as mqtt
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
from subprocess import call
# Software SPI configuration:
CLK = 12
MISO = 23
MOSI = 24
CS = 25
mcp = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS, miso=MISO, mosi=MOSI)
servoMin = 50
servoMax = 250
servoSteps = servoMax - servoMin
stickSensitivity = 5 # the lower the number the more sensitive we are to stick changes that transmit a message
stickToServoPositionRatio = 1024/float(servoSteps) # assume 10bit ADC
#Servo settings
# pwmGPIO = "18"
# pwmClock = "192"
# pwmRange = "2000"
# Update and publish readings at a rate of SENSOR_POLL per second.
SENSOR_POLL=0
SENSOR_DEBOUNCE=0.1
def create_jwt(project_id, private_key_file, algorithm):
"""Create a JWT (https://jwt.io) to establish an MQTT connection."""
token = {
'iat': datetime.datetime.utcnow(),
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
'aud': project_id
}
with open(private_key_file, 'r') as f:
private_key = f.read()
print 'Creating JWT using {} from private key file {}'.format(
algorithm, private_key_file)
return jwt.encode(token, private_key, algorithm=algorithm)
def error_str(rc):
"""Convert a Paho error to a human readable string."""
return '{}: {}'.format(rc, mqtt.error_string(rc))
class Device(object):
"""Represents the state of a single device."""
def __init__(self):
#self.leftright = 512
#self.updown = 512
self.servoStep = 150
self.connected = False
# self.acked = True
def update_sensor_data(self):
leftRightServoStep = self.servoStep
#self.leftright = mcp.read_adc(0)
#self.updown = mcp.read_adc(1)
# while self.acked == False:
# pass
# print "."
#self.acked = False
#print 'leftRightServoStep', leftRightServoStep
#poll until the stick moves
while leftRightServoStep == self.servoStep:
leftRightServoStepPreDeb = mcp.read_adc(0)/stickToServoPositionRatio
time.sleep(SENSOR_DEBOUNCE)
leftRightServoStepPostDeb = mcp.read_adc(0)/stickToServoPositionRatio
if leftRightServoStepPreDeb == leftRightServoStepPostDeb:
leftRightServoStep = int(leftRightServoStepPreDeb/stickSensitivity)*stickSensitivity
leftRightServoStep = leftRightServoStep + servoMin
#print 'leftRightServoStep', leftRightServoStep
self.servoStep = leftRightServoStep
def wait_for_connection(self, timeout):
"""Wait for the device to become connected."""
total_time = 0
while not self.connected and total_time < timeout:
time.sleep(1)
total_time += 1
if not self.connected:
raise RuntimeError('Could not connect to MQTT bridge.')
def on_connect(self, unused_client, unused_userdata, unused_flags, rc):
"""Callback for when a device connects."""
print 'Connection Result:', error_str(rc)
self.connected = True
def on_disconnect(self, unused_client, unused_userdata, rc):
"""Callback for when a device disconnects."""
print 'Disconnected:', error_str(rc)
self.connected = False
def on_publish(self, unused_client, unused_userdata, unused_mid):
"""Callback when the device receives a PUBACK from the MQTT bridge."""
# self.acked = True
print 'Published message acked.'
def parse_command_line_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description='Example Google Cloud IoT MQTT device connection code.')
parser.add_argument(
'--project_id', required=True, help='GCP cloud project name')
parser.add_argument(
'--registry_id', required=True, help='Cloud IoT registry id')
parser.add_argument('--device_id', required=True, help='Cloud IoT device id')
parser.add_argument(
'--private_key_file', required=True, help='Path to private key file.')
parser.add_argument(
'--algorithm',
choices=('RS256', 'ES256'),
required=True,
help='Which encryption algorithm to use to generate the JWT.')
parser.add_argument(
'--cloud_region', default='us-central1', help='GCP cloud region')
parser.add_argument(
'--ca_certs',
default='roots.pem',
help='CA root certificate. Get from https://pki.google.com/roots.pem')
parser.add_argument(
'--num_messages',
type=int,
default=100,
help='Number of messages to publish.')
parser.add_argument(
'--mqtt_bridge_hostname',
default='mqtt.googleapis.com',
help='MQTT bridge hostname.')
parser.add_argument(
'--mqtt_bridge_port', default=8883, help='MQTT bridge port.')
return parser.parse_args()
def main():
args = parse_command_line_args()
# print "stickToServoPositionRatio", stickToServoPositionRatio
#setup PWM for servo
# err = call(["gpio", "-g", "mode", pwmGPIO, "pwm"])
# err |= call(["gpio", "pwm-ms"])
# err |= call(["gpio", "pwmc", pwmClock])
# err |= call(["gpio", "pwmr", pwmRange])
# if err != 0:
# print "gpio setup error:", err
# quit()
# Create our MQTT client and connect to Cloud IoT.
client = mqtt.Client(
client_id='projects/{}/locations/{}/registries/{}/devices/{}'.format(
args.project_id, args.cloud_region, args.registry_id, args.device_id))
client.username_pw_set(
username='unused',
password=create_jwt(args.project_id, args.private_key_file,
args.algorithm))
client.tls_set(ca_certs=args.ca_certs, tls_version=ssl.PROTOCOL_TLSv1_2)
device = Device()
client.on_connect = device.on_connect
client.on_publish = device.on_publish
client.on_disconnect = device.on_disconnect
client.connect(args.mqtt_bridge_hostname, args.mqtt_bridge_port)
client.loop_start()
# This is the topic that the device will publish telemetry events to.
mqtt_telemetry_topic = '/devices/{}/events'.format(args.device_id)
# This is the topic that the device will receive configuration updates on.
mqtt_config_topic = '/devices/{}/config'.format(args.device_id)
# Wait up to 5 seconds for the device to connect.
device.wait_for_connection(5)
# Subscribe to the config topic.
client.subscribe(mqtt_config_topic, qos=1)
# Update and publish stick position readings at a rate of one per SENSOR_POLL but poll the sensor for "stickSensitivity" changes.
for _ in range(args.num_messages):
# In an actual device, this would read the device's sensors.
device.update_sensor_data()
# Report the joystick's position to the server, by serializing it as a JSON
# string.
payload = json.dumps({'servoStep': device.servoStep})
print 'Publishing payload', payload
client.publish(mqtt_telemetry_topic, payload, qos=1)
time.sleep(SENSOR_POLL)
client.disconnect()
client.loop_stop()
print 'Finished loop successfully. Goodbye!'
if __name__ == '__main__':
main()
|
en
| 0.779804
|
#!/usr/bin/python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Import SPI library (for hardware SPI) and MCP3008 library. # Software SPI configuration: # the lower the number the more sensitive we are to stick changes that transmit a message # assume 10bit ADC #Servo settings # pwmGPIO = "18" # pwmClock = "192" # pwmRange = "2000" # Update and publish readings at a rate of SENSOR_POLL per second. Create a JWT (https://jwt.io) to establish an MQTT connection. Convert a Paho error to a human readable string. Represents the state of a single device. #self.leftright = 512 #self.updown = 512 # self.acked = True #self.leftright = mcp.read_adc(0) #self.updown = mcp.read_adc(1) # while self.acked == False: # pass # print "." #self.acked = False #print 'leftRightServoStep', leftRightServoStep #poll until the stick moves #print 'leftRightServoStep', leftRightServoStep Wait for the device to become connected. Callback for when a device connects. Callback for when a device disconnects. Callback when the device receives a PUBACK from the MQTT bridge. # self.acked = True Parse command line arguments. # print "stickToServoPositionRatio", stickToServoPositionRatio #setup PWM for servo # err = call(["gpio", "-g", "mode", pwmGPIO, "pwm"]) # err |= call(["gpio", "pwm-ms"]) # err |= call(["gpio", "pwmc", pwmClock]) # err |= call(["gpio", "pwmr", pwmRange]) # if err != 0: # print "gpio setup error:", err # quit() # Create our MQTT client and connect to Cloud IoT. 
# This is the topic that the device will publish telemetry events to. # This is the topic that the device will receive configuration updates on. # Wait up to 5 seconds for the device to connect. # Subscribe to the config topic. # Update and publish stick position readings at a rate of one per SENSOR_POLL but poll the sensor for "stickSensitivity" changes. # In an actual device, this would read the device's sensors. # Report the joystick's position to the server, by serializing it as a JSON # string.
| 2.472507
| 2
|
missing_video_finder/utils.py
|
KylianCadet/missing-video-finder
| 3
|
6626211
|
<gh_stars>1-10
from missing_video_finder.exception import *
import os
import sys
def filter_deleted_videos(videos):
return [video for video in videos if video['status']['privacyStatus'] != 'public']
def get_id_from_videos(videos):
return [video['contentDetails']['videoId'] for video in videos]
def exec_api(fn, *args):
try:
data = fn(*args)
return data
except NotAuthenticated:
return {'error': 'not auth'}
except APIError:
return {'error': 'api error'}
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath("."), relative_path)
|
from missing_video_finder.exception import *
import os
import sys
def filter_deleted_videos(videos):
return [video for video in videos if video['status']['privacyStatus'] != 'public']
def get_id_from_videos(videos):
return [video['contentDetails']['videoId'] for video in videos]
def exec_api(fn, *args):
try:
data = fn(*args)
return data
except NotAuthenticated:
return {'error': 'not auth'}
except APIError:
return {'error': 'api error'}
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath("."), relative_path)
|
none
| 1
| 2.411191
| 2
|
|
setup.py
|
KarimHShawky/tespy
| 1
|
6626212
|
<reponame>KarimHShawky/tespy
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
) as fh:
return fh.read()
setup(
name='TESPy',
version='0.6.0',
license='MIT',
description='Thermal Engineering Systems in Python (TESPy)',
long_description='%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub(
'', read('README.rst')
)
),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/oemof/tespy',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
data_files=[('src/tespy/data', [
'src/tespy/data/char_lines.json', 'src/tespy/data/char_maps.json'])],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
],
project_urls={
'Documentation': 'https://tespy.readthedocs.io/',
'Changelog': 'https://tespy.readthedocs.io/en/main/whats_new.html',
'Issue Tracker': 'https://github.com/oemof/tespy/issues',
},
python_requires='>=3.7, <3.9',
install_requires=[
'CoolProp>=6.4,<7',
'matplotlib>=3.2.1,<4',
'numpy>=1.13.3,<2',
'pandas>=1.3.0,<2',
'tabulate>=0.8.2,<0.9'
],
extras_require={
'dev': ['pytest', 'sphinx', 'sphinx_rtd_theme',
'sphinxcontrib.bibtex', 'tox', ],
'dummy': ['tespy']}
)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
) as fh:
return fh.read()
setup(
name='TESPy',
version='0.6.0',
license='MIT',
description='Thermal Engineering Systems in Python (TESPy)',
long_description='%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub(
'', read('README.rst')
)
),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/oemof/tespy',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
data_files=[('src/tespy/data', [
'src/tespy/data/char_lines.json', 'src/tespy/data/char_maps.json'])],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
],
project_urls={
'Documentation': 'https://tespy.readthedocs.io/',
'Changelog': 'https://tespy.readthedocs.io/en/main/whats_new.html',
'Issue Tracker': 'https://github.com/oemof/tespy/issues',
},
python_requires='>=3.7, <3.9',
install_requires=[
'CoolProp>=6.4,<7',
'matplotlib>=3.2.1,<4',
'numpy>=1.13.3,<2',
'pandas>=1.3.0,<2',
'tabulate>=0.8.2,<0.9'
],
extras_require={
'dev': ['pytest', 'sphinx', 'sphinx_rtd_theme',
'sphinxcontrib.bibtex', 'tox', ],
'dummy': ['tespy']}
)
|
en
| 0.367975
|
#!/usr/bin/env python # -*- encoding: utf-8 -*-
| 1.830077
| 2
|
atmosForcing.py
|
jvmcgovern/model2roms_MI
| 0
|
6626213
|
<gh_stars>0
import sys
from datetime import datetime
import extrapolate as ex
import numpy as np
from netCDF4 import Dataset, num2date
import IOatmos
import grd
try:
import ESMF
# import esmf
except ImportError:
print("Could not find module ESMF. Required")
sys.exit()
_author_ = '<NAME>'
_email_ = '<EMAIL>'
_created_ = datetime(2014, 12, 16)
_modified_ = datetime(2014, 12, 16)
_version_ = "0.2.0"
_status_ = "Development"
def help():
"""
This function creates atmospheric forcing files for ROMS
def createAtmosFileUV(grdROMS, outfilename, output_format)
To check the file for CF compliancy: http://titania.badc.rl.ac.uk/cgi-bin/cf-checker.pl?cfversion=1.0
"""
def laplaceFilter(field, threshold, toxi, toeta):
undef = 2.0e+35
tx = 0.9 * undef
critx = 0.01
cor = 1.6
mxs = 10
field = np.where(abs(field) > threshold, undef, field)
field = ex.extrapolate.fill(int(1), int(toxi),
int(1), int(toeta),
float(tx), float(critx), float(cor), float(mxs),
np.asarray(field, order='F'),
int(toxi),
int(toeta))
return field
def getERA5Filename(confM2R):
return confM2R.atmospheric_forcing_path + ''
def createAtmosFileUV(confM2R):
if confM2R.show_progress is True:
import progressbar
progress = progressbar.ProgressBar(widgets=[progressbar.Percentage(), progressbar.Bar()],
maxval=len(years)).start()
# Create the objects for source and destination grids
getERA5_1DAYfilename
grdMODEL = grd.grdClass(nor, mytype, mytype, useESMF, 'atmos')
# Create the outputfile
outfilename = abbreviation + '_windUV_' + str(mytype) + '_' + str(startdate.year) + '_to_' + str(
enddate.year) + '.nc'
IOatmos.createNetCDFFileUV(grdROMS, outfilename, myformat, mytype)
# Setup ESMF for interpolation (calculates weights)
print(" -> regridSrc2Dst at RHO points")
grdMODEL.fieldSrc = ESMF.Field(grdMODEL.esmfgrid, "fieldSrc", staggerloc=ESMF.StaggerLoc.CENTER)
grdMODEL.fieldDst_rho = ESMF.Field(grdROMS.esmfgrid, "fieldDst", staggerloc=ESMF.StaggerLoc.CENTER)
grdMODEL.regridSrc2Dst_rho = ESMF.Regrid(grdMODEL.fieldSrc, grdMODEL.fieldDst_rho,
regrid_method=ESMF.RegridMethod.BILINEAR)
# grdMODEL.fieldSrc = esmf.Field(grdMODEL.esmfgrid, "fieldSrc", staggerloc=esmf.StaggerLoc.CENTER)
# grdMODEL.fieldDst_rho = esmf.Field(grdROMS.esmfgrid, "fieldDst", staggerloc=esmf.StaggerLoc.CENTER)
# grdMODEL.regridSrc2Dst_rho = esmf.Regrid(grdMODEL.fieldSrc, grdMODEL.fieldDst_rho,
# regrid_method=esmf.RegridMethod.BILINEAR)
# Loop over each year and do the interpolations and write to file
year = 2050
month = 1
day = 1
if mytype == "NORESM":
filename = getNORESMfilename(year, month, day, "TAUX", atmospath)
cdf = Dataset(filename, "r")
U10 = cdf.variables["U10"][:]
TAUX = -(cdf.variables["TAUX"][:])
TAUY = -(cdf.variables["TAUY"][:])
magstr = np.sqrt(TAUX * TAUX + TAUY * TAUY)
magstr = np.where(magstr < 1.e-8, 1.e-8, magstr)
windE = (TAUX / magstr) * U10
windN = (TAUY / magstr) * U10
time_in = cdf.variables["time"][:]
time_calendar = cdf.variables['time'].calendar
time_units = cdf.variables['time'].units
scru = np.zeros((len(time_in), np.shape(grdROMS.lat_rho)[0], np.shape(grdROMS.lat_rho)[1]))
scrv = np.zeros((len(time_in), np.shape(grdROMS.lat_rho)[0], np.shape(grdROMS.lat_rho)[1]))
# Loop over each time-step in current file
for t in range(len(time_in)):
currentdate = num2date(time_in[t], units=time_units, calendar=time_calendar)
print("Interpolating date: ", currentdate)
# Eastward wind
grdMODEL.fieldSrc[:, :] = np.flipud(np.rot90(np.squeeze(windE[t, :, :])))
fieldE = grdMODEL.regridSrc2Dst_rho(grdMODEL.fieldSrc, grdMODEL.fieldDst_rho)
# Since ESMF uses coordinates (x,y) we need to rotate and flip to get back to (y,x) order.
fieldE = np.fliplr(np.rot90(fieldE.data, 3))
fieldE = laplaceFilter(fieldE, 1000, grdROMS.xi_rho, grdROMS.eta_rho)
fieldE = fieldE * grdROMS.mask_rho
# Northward wind
grdMODEL.fieldSrc[:, :] = np.flipud(np.rot90(np.squeeze(windN[t, :, :])))
fieldN = grdMODEL.regridSrc2Dst_rho(grdMODEL.fieldSrc, grdMODEL.fieldDst_rho)
fieldN = np.fliplr(np.rot90(fieldN.data, 3))
fieldN = laplaceFilter(fieldN, 1000, grdROMS.xi_rho, grdROMS.eta_rho)
fieldN = fieldN * grdROMS.mask_rho
# Magnitude
grdMODEL.fieldSrc[:, :] = np.flipud(np.rot90(np.squeeze(magstr[t, :, :])))
magnitude = grdMODEL.regridSrc2Dst_rho(grdMODEL.fieldSrc, grdMODEL.fieldDst_rho)
magnitude = np.fliplr(np.rot90(magnitude.data, 3))
magnitude = laplaceFilter(magnitude, 1000, grdROMS.xi_rho, grdROMS.eta_rho)
magnitude = magnitude * grdROMS.mask_rho
import plotAtmos
print("Interpolated range: ", np.min(magnitude), np.max(magnitude))
print("Original range: ", np.min(magstr), np.max(magstr))
grdROMS.time += 1
print(np.shape(windE), np.shape(grdMODEL.lon), np.shape(grdMODEL.lat))
plotAtmos.contourMap(grdROMS, grdROMS.lon_rho, grdROMS.lat_rho, fieldE, fieldN, magnitude,
'wind', 'REGSCEN', currentdate)
plotAtmos.contourMap(grdMODEL,
grdMODEL.lon,
grdMODEL.lat,
np.squeeze(windE[t, :, :]),
np.squeeze(windN[t, :, :]),
np.squeeze(magstr[t, :, :]),
'wind', 'NORESM', currentdate)
# Rotate to ROMS grid structure
scru[t, :, :] = (fieldE * np.cos(grdROMS.angle)) + (fieldN * np.sin(grdROMS.angle))
scrv[t, :, :] = (fieldN * np.cos(grdROMS.angle)) - (fieldE * np.sin(grdROMS.angle))
|
import sys
from datetime import datetime
import extrapolate as ex
import numpy as np
from netCDF4 import Dataset, num2date
import IOatmos
import grd
try:
import ESMF
# import esmf
except ImportError:
print("Could not find module ESMF. Required")
sys.exit()
_author_ = '<NAME>'
_email_ = '<EMAIL>'
_created_ = datetime(2014, 12, 16)
_modified_ = datetime(2014, 12, 16)
_version_ = "0.2.0"
_status_ = "Development"
def help():
"""
This function creates atmospheric forcing files for ROMS
def createAtmosFileUV(grdROMS, outfilename, output_format)
To check the file for CF compliancy: http://titania.badc.rl.ac.uk/cgi-bin/cf-checker.pl?cfversion=1.0
"""
def laplaceFilter(field, threshold, toxi, toeta):
undef = 2.0e+35
tx = 0.9 * undef
critx = 0.01
cor = 1.6
mxs = 10
field = np.where(abs(field) > threshold, undef, field)
field = ex.extrapolate.fill(int(1), int(toxi),
int(1), int(toeta),
float(tx), float(critx), float(cor), float(mxs),
np.asarray(field, order='F'),
int(toxi),
int(toeta))
return field
def getERA5Filename(confM2R):
return confM2R.atmospheric_forcing_path + ''
def createAtmosFileUV(confM2R):
if confM2R.show_progress is True:
import progressbar
progress = progressbar.ProgressBar(widgets=[progressbar.Percentage(), progressbar.Bar()],
maxval=len(years)).start()
# Create the objects for source and destination grids
getERA5_1DAYfilename
grdMODEL = grd.grdClass(nor, mytype, mytype, useESMF, 'atmos')
# Create the outputfile
outfilename = abbreviation + '_windUV_' + str(mytype) + '_' + str(startdate.year) + '_to_' + str(
enddate.year) + '.nc'
IOatmos.createNetCDFFileUV(grdROMS, outfilename, myformat, mytype)
# Setup ESMF for interpolation (calculates weights)
print(" -> regridSrc2Dst at RHO points")
grdMODEL.fieldSrc = ESMF.Field(grdMODEL.esmfgrid, "fieldSrc", staggerloc=ESMF.StaggerLoc.CENTER)
grdMODEL.fieldDst_rho = ESMF.Field(grdROMS.esmfgrid, "fieldDst", staggerloc=ESMF.StaggerLoc.CENTER)
grdMODEL.regridSrc2Dst_rho = ESMF.Regrid(grdMODEL.fieldSrc, grdMODEL.fieldDst_rho,
regrid_method=ESMF.RegridMethod.BILINEAR)
# grdMODEL.fieldSrc = esmf.Field(grdMODEL.esmfgrid, "fieldSrc", staggerloc=esmf.StaggerLoc.CENTER)
# grdMODEL.fieldDst_rho = esmf.Field(grdROMS.esmfgrid, "fieldDst", staggerloc=esmf.StaggerLoc.CENTER)
# grdMODEL.regridSrc2Dst_rho = esmf.Regrid(grdMODEL.fieldSrc, grdMODEL.fieldDst_rho,
# regrid_method=esmf.RegridMethod.BILINEAR)
# Loop over each year and do the interpolations and write to file
year = 2050
month = 1
day = 1
if mytype == "NORESM":
filename = getNORESMfilename(year, month, day, "TAUX", atmospath)
cdf = Dataset(filename, "r")
U10 = cdf.variables["U10"][:]
TAUX = -(cdf.variables["TAUX"][:])
TAUY = -(cdf.variables["TAUY"][:])
magstr = np.sqrt(TAUX * TAUX + TAUY * TAUY)
magstr = np.where(magstr < 1.e-8, 1.e-8, magstr)
windE = (TAUX / magstr) * U10
windN = (TAUY / magstr) * U10
time_in = cdf.variables["time"][:]
time_calendar = cdf.variables['time'].calendar
time_units = cdf.variables['time'].units
scru = np.zeros((len(time_in), np.shape(grdROMS.lat_rho)[0], np.shape(grdROMS.lat_rho)[1]))
scrv = np.zeros((len(time_in), np.shape(grdROMS.lat_rho)[0], np.shape(grdROMS.lat_rho)[1]))
# Loop over each time-step in current file
for t in range(len(time_in)):
currentdate = num2date(time_in[t], units=time_units, calendar=time_calendar)
print("Interpolating date: ", currentdate)
# Eastward wind
grdMODEL.fieldSrc[:, :] = np.flipud(np.rot90(np.squeeze(windE[t, :, :])))
fieldE = grdMODEL.regridSrc2Dst_rho(grdMODEL.fieldSrc, grdMODEL.fieldDst_rho)
# Since ESMF uses coordinates (x,y) we need to rotate and flip to get back to (y,x) order.
fieldE = np.fliplr(np.rot90(fieldE.data, 3))
fieldE = laplaceFilter(fieldE, 1000, grdROMS.xi_rho, grdROMS.eta_rho)
fieldE = fieldE * grdROMS.mask_rho
# Northward wind
grdMODEL.fieldSrc[:, :] = np.flipud(np.rot90(np.squeeze(windN[t, :, :])))
fieldN = grdMODEL.regridSrc2Dst_rho(grdMODEL.fieldSrc, grdMODEL.fieldDst_rho)
fieldN = np.fliplr(np.rot90(fieldN.data, 3))
fieldN = laplaceFilter(fieldN, 1000, grdROMS.xi_rho, grdROMS.eta_rho)
fieldN = fieldN * grdROMS.mask_rho
# Magnitude
grdMODEL.fieldSrc[:, :] = np.flipud(np.rot90(np.squeeze(magstr[t, :, :])))
magnitude = grdMODEL.regridSrc2Dst_rho(grdMODEL.fieldSrc, grdMODEL.fieldDst_rho)
magnitude = np.fliplr(np.rot90(magnitude.data, 3))
magnitude = laplaceFilter(magnitude, 1000, grdROMS.xi_rho, grdROMS.eta_rho)
magnitude = magnitude * grdROMS.mask_rho
import plotAtmos
print("Interpolated range: ", np.min(magnitude), np.max(magnitude))
print("Original range: ", np.min(magstr), np.max(magstr))
grdROMS.time += 1
print(np.shape(windE), np.shape(grdMODEL.lon), np.shape(grdMODEL.lat))
plotAtmos.contourMap(grdROMS, grdROMS.lon_rho, grdROMS.lat_rho, fieldE, fieldN, magnitude,
'wind', 'REGSCEN', currentdate)
plotAtmos.contourMap(grdMODEL,
grdMODEL.lon,
grdMODEL.lat,
np.squeeze(windE[t, :, :]),
np.squeeze(windN[t, :, :]),
np.squeeze(magstr[t, :, :]),
'wind', 'NORESM', currentdate)
# Rotate to ROMS grid structure
scru[t, :, :] = (fieldE * np.cos(grdROMS.angle)) + (fieldN * np.sin(grdROMS.angle))
scrv[t, :, :] = (fieldN * np.cos(grdROMS.angle)) - (fieldE * np.sin(grdROMS.angle))
|
en
| 0.584144
|
# import esmf This function creates atmospheric forcing files for ROMS def createAtmosFileUV(grdROMS, outfilename, output_format) To check the file for CF compliancy: http://titania.badc.rl.ac.uk/cgi-bin/cf-checker.pl?cfversion=1.0 # Create the objects for source and destination grids # Create the outputfile # Setup ESMF for interpolation (calculates weights) # grdMODEL.fieldSrc = esmf.Field(grdMODEL.esmfgrid, "fieldSrc", staggerloc=esmf.StaggerLoc.CENTER) # grdMODEL.fieldDst_rho = esmf.Field(grdROMS.esmfgrid, "fieldDst", staggerloc=esmf.StaggerLoc.CENTER) # grdMODEL.regridSrc2Dst_rho = esmf.Regrid(grdMODEL.fieldSrc, grdMODEL.fieldDst_rho, # regrid_method=esmf.RegridMethod.BILINEAR) # Loop over each year and do the interpolations and write to file # Loop over each time-step in current file # Eastward wind # Since ESMF uses coordinates (x,y) we need to rotate and flip to get back to (y,x) order. # Northward wind # Magnitude # Rotate to ROMS grid structure
| 2.300786
| 2
|
src/hub/dataload/sources/ensembl_metazoa/dump.py
|
mlebeur/mygene.info
| 78
|
6626214
|
import os
from ftplib import FTP
from config import DATA_ARCHIVE_ROOT, logger as logging
from biothings.utils.dataload import tab2list
from biothings.utils.common import is_int
from hub.dataload.sources.ensembl.dump import GenericBioMart, XML_QUERY_TEMPLATE
class EnsemblMetazoaBioMart(GenericBioMart):
SRC_NAME = "ensembl_metazoa"
SRC_ROOT_FOLDER = os.path.join(DATA_ARCHIVE_ROOT, SRC_NAME)
# used to get latest release number & list of available species
ENSEMBL_FTP_HOST = "ftp.ensemblgenomes.org"
MART_URL = "http://metazoa.ensembl.org/biomart/martservice"
RELEASE_FOLDER = '/pub/metazoa'
RELEASE_PREFIX = '/pub/metazoa/release-'
def get_species_file(self):
return '/pub/metazoa/release-%s/mysql/metazoa_mart_%s/dataset_names.txt.gz' % (self.release, self.release)
def get_virtual_schema(self):
return 'metazoa_mart'
|
import os
from ftplib import FTP
from config import DATA_ARCHIVE_ROOT, logger as logging
from biothings.utils.dataload import tab2list
from biothings.utils.common import is_int
from hub.dataload.sources.ensembl.dump import GenericBioMart, XML_QUERY_TEMPLATE
class EnsemblMetazoaBioMart(GenericBioMart):
SRC_NAME = "ensembl_metazoa"
SRC_ROOT_FOLDER = os.path.join(DATA_ARCHIVE_ROOT, SRC_NAME)
# used to get latest release number & list of available species
ENSEMBL_FTP_HOST = "ftp.ensemblgenomes.org"
MART_URL = "http://metazoa.ensembl.org/biomart/martservice"
RELEASE_FOLDER = '/pub/metazoa'
RELEASE_PREFIX = '/pub/metazoa/release-'
def get_species_file(self):
return '/pub/metazoa/release-%s/mysql/metazoa_mart_%s/dataset_names.txt.gz' % (self.release, self.release)
def get_virtual_schema(self):
return 'metazoa_mart'
|
en
| 0.667785
|
# used to get latest release number & list of available species
| 2.028759
| 2
|
Dependencies/gyp-master/test/win/compiler-flags/compile-as-managed.gyp
|
knight666/exlibris
| 0
|
6626215
|
# Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'test-compile-as-managed',
'type': 'executable',
'msvs_settings': {
'VCCLCompilerTool': {
'CompileAsManaged': 'true',
'ExceptionHandling': '0' # /clr is incompatible with /EHs
}
},
'sources': ['compile-as-managed.cc'],
},
{
'target_name': 'test-compile-as-unmanaged',
'type': 'executable',
'msvs_settings': {
'VCCLCompilerTool': {
'CompileAsManaged': 'false',
}
},
'sources': ['compile-as-managed.cc'],
},
]
}
|
# Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'test-compile-as-managed',
'type': 'executable',
'msvs_settings': {
'VCCLCompilerTool': {
'CompileAsManaged': 'true',
'ExceptionHandling': '0' # /clr is incompatible with /EHs
}
},
'sources': ['compile-as-managed.cc'],
},
{
'target_name': 'test-compile-as-unmanaged',
'type': 'executable',
'msvs_settings': {
'VCCLCompilerTool': {
'CompileAsManaged': 'false',
}
},
'sources': ['compile-as-managed.cc'],
},
]
}
|
en
| 0.922811
|
# Copyright (c) 2015 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # /clr is incompatible with /EHs
| 1.015707
| 1
|
lighttpd/tests/common.py
|
tanner-bruce/integrations-core
| 0
|
6626216
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from datadog_checks.dev import get_docker_hostname
HERE = os.path.dirname(os.path.abspath(__file__))
HOST = get_docker_hostname()
STATUS_URL = 'http://{}:9449/server-status'.format(HOST)
FLAVOR = os.getenv('FLAVOR', 'auth')
COMPOSE_FILE = os.path.join(HERE, 'docker', FLAVOR, 'docker-compose.yaml')
INSTANCES = {
'auth': {
'lighttpd_status_url': STATUS_URL,
'tags': ['instance:first'],
'user': 'username',
'password': 'password',
'auth_type': 'digest',
},
'noauth': {'lighttpd_status_url': STATUS_URL, 'tags': ['instance:first']},
}
INSTANCE = INSTANCES[FLAVOR]
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from datadog_checks.dev import get_docker_hostname
HERE = os.path.dirname(os.path.abspath(__file__))
HOST = get_docker_hostname()
STATUS_URL = 'http://{}:9449/server-status'.format(HOST)
FLAVOR = os.getenv('FLAVOR', 'auth')
COMPOSE_FILE = os.path.join(HERE, 'docker', FLAVOR, 'docker-compose.yaml')
INSTANCES = {
'auth': {
'lighttpd_status_url': STATUS_URL,
'tags': ['instance:first'],
'user': 'username',
'password': 'password',
'auth_type': 'digest',
},
'noauth': {'lighttpd_status_url': STATUS_URL, 'tags': ['instance:first']},
}
INSTANCE = INSTANCES[FLAVOR]
|
en
| 0.764575
|
# (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE)
| 1.885984
| 2
|
PyHyphen-2.0.5/setup.py
|
GraceJonn123/github-resources
| 0
|
6626217
|
<filename>PyHyphen-2.0.5/setup.py
# setup.py for the PyHyphen hyphenation package
# (c) Dr. Leo (fhaxbox66 <at> gmail >dot< com)
import sys, os, shutil, imp, py_compile, codecs, locale, platform
from string import Template
from distutils.core import setup, Extension
from warnings import warn
# URL of the default repository. It goes into config.py.
# Change this if you want to download dictionaries from somewhere else by default.
# Note that you can also specify the repository individualy
# when calling hyphen.dictools.install.
default_repo = 'http://cgit.freedesktop.org/libreoffice/dictionaries/plain/'
# Copy version-specific files
# to be copied from 2.x/
files_from_2x = {
'__init__.py' : './hyphen/',
'config.py' : './hyphen/',
'dictools.py' : './hyphen/'}
# from either 2.x/ or 3.x/
files_from_any = {
'hnjmodule.c' : 'src/',
'textwrap2.py' : './'}
#copy version-specific files
ver = sys.version[0]
py3k = (ver == '3')
if not os.path.exists('hyphen'):
os.mkdir('hyphen')
for file_name, dest in files_from_2x.items():
shutil.copy('2.x/' + file_name, dest + file_name)
for file_name, dest in files_from_any.items():
shutil.copy(ver + '.x/' + file_name, dest + file_name)
# refactor 2to3
if py3k:
import lib2to3.main
lib2to3.main.main('lib2to3.fixes', args = '--no-diffs -wn -f unicode -f urllib \
hyphen'.split())
longdescr = open('README.txt', 'r').read()
arg_dict = dict(
name = "PyHyphen",
version = "2.0.5",
author = "<NAME>",
author_email = "<EMAIL>",
url = "http://pyhyphen.googlecode.com",
description = "The hyphenation library of LibreOffice and FireFox wrapped for Python",
long_description = longdescr,
classifiers = [
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: C',
'Topic :: Text Processing',
'Topic :: Text Processing :: Linguistic'
],
packages = ['hyphen'],
ext_modules = [
Extension('hyphen.hnj', ['src/hnjmodule.c',
'src/hyphen.c',
'src/hnjalloc.c' ],
include_dirs = ['include'])],
py_modules = ['textwrap2'],
provides = ['hyphen', 'textwrap2']
)
# Check for a binary shipping with this distribution and use it instead of compiling
# the C sources, unless --force_build_ext is given.
if len(set(('install', 'bdist_wininst', 'bdist')) - set(sys.argv)) < 3:
if '--force_build_ext' in sys.argv:
sys.argv.remove('--force_build_ext')
else:
# construct string describing platform
if platform.system() == 'Windows':
if sys.maxsize > 2**32: platform_descr = 'amd64'
else: platform_descr = 'win32'
else: platform_descr = platform.system()
bin_file = ''.join(('bin/hnj', '.', platform_descr, '-', sys.version[:3], '.pyd'))
if os.path.exists(bin_file):
shutil.copy(bin_file, './hyphen/hnj.pyd')
arg_dict['package_data'] = {'hyphen' : ['hnj.pyd']}
arg_dict.pop('ext_modules')
print("""Found a suitable binary version of the C extension module.
This binary will be installed rather than building it from source.
However, if you prefer compiling, reenter 'python setup.py <command> --force_build_ext'.""")
setup(**arg_dict)
# clean up
shutil.rmtree('hyphen') # it would disturb the following import of hyphen
os.remove('textwrap2.py')
os.remove('src/hnjmodule.c')
# Configure the path for dictionaries in config.py
if 'install' in sys.argv:
print("Adjusting /.../hyphen/config.py... ")
# We catch ImportErrors to handle situations where the
# hyphen package has been
# installed in a directory that is not listed in
# sys.path. This occurs, e.g.,
# when creating a Debian package.
try:
pkg_path = imp.find_module('hyphen')[1]
if ver == '2':
pkg_path = pkg_path.decode(sys.getfilesystemencoding())
mod_path = os.path.join(pkg_path, 'config.py')
sep = os.path.sep
if sep != '/':
pkg_path = pkg_path.replace(sep, '/')
mod_path = mod_path.replace(sep, '/')
content = codecs.open(mod_path, 'r', 'utf8').read()
new_content = Template(content).substitute(path = pkg_path,
repo = default_repo)
# Write the new config.py
codecs.open(mod_path, 'w', 'utf8').write(new_content)
py_compile.compile(mod_path)
print("Done.")
# Delete any existing dict registry file
reg_file = pkg_path + '/hyphen_dict_info.pickle'
if os.path.exists(reg_file):
os.remove(reg_file)
# Install dictionaries
if '--no_dictionaries' not in sys.argv:
from hyphen.dictools import install
print('Installing dictionaries... en_US ...')
install('en_US')
# Install dict for local language if needed
try:
locale.setlocale(locale.LC_ALL, '')
local_lang = locale.getlocale()[0]
# Install local dict only if locale has been read (= is not None)
# and local_lang is not en_US.
if local_lang and local_lang != 'en_US':
print(local_lang + ' ')
install(local_lang)
print('Done.')
except Exception:
warn('Could not install dictionary for local language.')
except ImportError:
warn("""Could not import hyphen package.
You may wish to adjust config.py
manually or run setup.py with different options.
No dictionary has been installed.""")
|
<filename>PyHyphen-2.0.5/setup.py
# setup.py for the PyHyphen hyphenation package
# (c) Dr. Leo (fhaxbox66 <at> gmail >dot< com)
import sys, os, shutil, imp, py_compile, codecs, locale, platform
from string import Template
from distutils.core import setup, Extension
from warnings import warn
# URL of the default repository. It goes into config.py.
# Change this if you want to download dictionaries from somewhere else by default.
# Note that you can also specify the repository individualy
# when calling hyphen.dictools.install.
default_repo = 'http://cgit.freedesktop.org/libreoffice/dictionaries/plain/'
# Copy version-specific files
# to be copied from 2.x/
files_from_2x = {
'__init__.py' : './hyphen/',
'config.py' : './hyphen/',
'dictools.py' : './hyphen/'}
# from either 2.x/ or 3.x/
files_from_any = {
'hnjmodule.c' : 'src/',
'textwrap2.py' : './'}
#copy version-specific files
ver = sys.version[0]
py3k = (ver == '3')
if not os.path.exists('hyphen'):
os.mkdir('hyphen')
for file_name, dest in files_from_2x.items():
shutil.copy('2.x/' + file_name, dest + file_name)
for file_name, dest in files_from_any.items():
shutil.copy(ver + '.x/' + file_name, dest + file_name)
# refactor 2to3
if py3k:
import lib2to3.main
lib2to3.main.main('lib2to3.fixes', args = '--no-diffs -wn -f unicode -f urllib \
hyphen'.split())
longdescr = open('README.txt', 'r').read()
arg_dict = dict(
name = "PyHyphen",
version = "2.0.5",
author = "<NAME>",
author_email = "<EMAIL>",
url = "http://pyhyphen.googlecode.com",
description = "The hyphenation library of LibreOffice and FireFox wrapped for Python",
long_description = longdescr,
classifiers = [
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: C',
'Topic :: Text Processing',
'Topic :: Text Processing :: Linguistic'
],
packages = ['hyphen'],
ext_modules = [
Extension('hyphen.hnj', ['src/hnjmodule.c',
'src/hyphen.c',
'src/hnjalloc.c' ],
include_dirs = ['include'])],
py_modules = ['textwrap2'],
provides = ['hyphen', 'textwrap2']
)
# Check for a binary shipping with this distribution and use it instead of compiling
# the C sources, unless --force_build_ext is given.
if len(set(('install', 'bdist_wininst', 'bdist')) - set(sys.argv)) < 3:
if '--force_build_ext' in sys.argv:
sys.argv.remove('--force_build_ext')
else:
# construct string describing platform
if platform.system() == 'Windows':
if sys.maxsize > 2**32: platform_descr = 'amd64'
else: platform_descr = 'win32'
else: platform_descr = platform.system()
bin_file = ''.join(('bin/hnj', '.', platform_descr, '-', sys.version[:3], '.pyd'))
if os.path.exists(bin_file):
shutil.copy(bin_file, './hyphen/hnj.pyd')
arg_dict['package_data'] = {'hyphen' : ['hnj.pyd']}
arg_dict.pop('ext_modules')
print("""Found a suitable binary version of the C extension module.
This binary will be installed rather than building it from source.
However, if you prefer compiling, reenter 'python setup.py <command> --force_build_ext'.""")
setup(**arg_dict)
# clean up
shutil.rmtree('hyphen') # it would disturb the following import of hyphen
os.remove('textwrap2.py')
os.remove('src/hnjmodule.c')
# Configure the path for dictionaries in config.py
if 'install' in sys.argv:
print("Adjusting /.../hyphen/config.py... ")
# We catch ImportErrors to handle situations where the
# hyphen package has been
# installed in a directory that is not listed in
# sys.path. This occurs, e.g.,
# when creating a Debian package.
try:
pkg_path = imp.find_module('hyphen')[1]
if ver == '2':
pkg_path = pkg_path.decode(sys.getfilesystemencoding())
mod_path = os.path.join(pkg_path, 'config.py')
sep = os.path.sep
if sep != '/':
pkg_path = pkg_path.replace(sep, '/')
mod_path = mod_path.replace(sep, '/')
content = codecs.open(mod_path, 'r', 'utf8').read()
new_content = Template(content).substitute(path = pkg_path,
repo = default_repo)
# Write the new config.py
codecs.open(mod_path, 'w', 'utf8').write(new_content)
py_compile.compile(mod_path)
print("Done.")
# Delete any existing dict registry file
reg_file = pkg_path + '/hyphen_dict_info.pickle'
if os.path.exists(reg_file):
os.remove(reg_file)
# Install dictionaries
if '--no_dictionaries' not in sys.argv:
from hyphen.dictools import install
print('Installing dictionaries... en_US ...')
install('en_US')
# Install dict for local language if needed
try:
locale.setlocale(locale.LC_ALL, '')
local_lang = locale.getlocale()[0]
# Install local dict only if locale has been read (= is not None)
# and local_lang is not en_US.
if local_lang and local_lang != 'en_US':
print(local_lang + ' ')
install(local_lang)
print('Done.')
except Exception:
warn('Could not install dictionary for local language.')
except ImportError:
warn("""Could not import hyphen package.
You may wish to adjust config.py
manually or run setup.py with different options.
No dictionary has been installed.""")
|
en
| 0.780495
|
# setup.py for the PyHyphen hyphenation package # (c) Dr. Leo (fhaxbox66 <at> gmail >dot< com) # URL of the default repository. It goes into config.py. # Change this if you want to download dictionaries from somewhere else by default. # Note that you can also specify the repository individualy # when calling hyphen.dictools.install. # Copy version-specific files # to be copied from 2.x/ # from either 2.x/ or 3.x/ #copy version-specific files # refactor 2to3 # Check for a binary shipping with this distribution and use it instead of compiling # the C sources, unless --force_build_ext is given. # construct string describing platform Found a suitable binary version of the C extension module. This binary will be installed rather than building it from source. However, if you prefer compiling, reenter 'python setup.py <command> --force_build_ext'. # clean up # it would disturb the following import of hyphen # Configure the path for dictionaries in config.py # We catch ImportErrors to handle situations where the # hyphen package has been # installed in a directory that is not listed in # sys.path. This occurs, e.g., # when creating a Debian package. # Write the new config.py # Delete any existing dict registry file # Install dictionaries # Install dict for local language if needed # Install local dict only if locale has been read (= is not None) # and local_lang is not en_US. Could not import hyphen package. You may wish to adjust config.py manually or run setup.py with different options. No dictionary has been installed.
| 2.160836
| 2
|
simplepay/management/commands/exporttxn.py
|
sunlightlabs/django-simplepay
| 1
|
6626218
|
from django.core.management.base import BaseCommand, CommandError
from simplepay.models import Transaction
import csv
import datetime
import sys
FIELDS = ('reference_id','amazon_id','name','email','amount','date_created','date_processed','status')
def _str(o):
if o is None:
return u''
elif isinstance(o, basestring):
return o
else:
return unicode(o)
def _parse_date(s):
return datetime.datetime(int(s[:4]), int(s[4:6]), int(s[6:]))
def _endofday(dt):
return dt.replace(hour=23, minute=59, second=59)
class Command(BaseCommand):
args = '(<from_yyyymmdd> (<to_yyyymmdd>))'
help = 'Export transactions to CSV'
def handle(self, *args, **options):
writer = csv.DictWriter(sys.stdout, FIELDS)
writer.writerow(dict((f, f) for f in FIELDS))
if not args:
txns = Transaction.objects.all()
else:
start = _parse_date(args[0])
end = _endofday(_parse_date(args[1]) if len(args) > 1 else datetime.datetime.utcnow())
txns = Transaction.objects.filter(date_created__range=(start, end))
for txn in txns:
writer.writerow(dict((f, _str(getattr(txn, f, ''))) for f in FIELDS))
|
from django.core.management.base import BaseCommand, CommandError
from simplepay.models import Transaction
import csv
import datetime
import sys
FIELDS = ('reference_id','amazon_id','name','email','amount','date_created','date_processed','status')
def _str(o):
if o is None:
return u''
elif isinstance(o, basestring):
return o
else:
return unicode(o)
def _parse_date(s):
return datetime.datetime(int(s[:4]), int(s[4:6]), int(s[6:]))
def _endofday(dt):
return dt.replace(hour=23, minute=59, second=59)
class Command(BaseCommand):
args = '(<from_yyyymmdd> (<to_yyyymmdd>))'
help = 'Export transactions to CSV'
def handle(self, *args, **options):
writer = csv.DictWriter(sys.stdout, FIELDS)
writer.writerow(dict((f, f) for f in FIELDS))
if not args:
txns = Transaction.objects.all()
else:
start = _parse_date(args[0])
end = _endofday(_parse_date(args[1]) if len(args) > 1 else datetime.datetime.utcnow())
txns = Transaction.objects.filter(date_created__range=(start, end))
for txn in txns:
writer.writerow(dict((f, _str(getattr(txn, f, ''))) for f in FIELDS))
|
none
| 1
| 2.150586
| 2
|
|
test/test_contact_info_home_page.py
|
imakarenko2017/python_training
| 0
|
6626219
|
from random import randrange
import re
def test_contact_info_home_page(app):
index = randrange(len(app.contact.get_contacts_list()))
contact_from_home_page = app.contact.get_contacts_list()[index]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)
#compare phones
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
#compare emails
contact_from_edit_page.all_emails = "\n".join(filter(lambda x: x != "",[contact_from_edit_page.email,contact_from_edit_page.email2,contact_from_edit_page.email3]))
assert contact_from_edit_page.all_emails == contact_from_home_page.all_emails_from_home_page
#compare address
assert contact_from_home_page.address ==contact_from_edit_page.address
#compare FIO
assert contact_from_edit_page == contact_from_home_page
def clear(s):
return re.sub("[-() ]","",s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.homephone,contact.cellphone,contact.workphone,contact.homephone2]))))
|
from random import randrange
import re
def test_contact_info_home_page(app):
index = randrange(len(app.contact.get_contacts_list()))
contact_from_home_page = app.contact.get_contacts_list()[index]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)
#compare phones
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
#compare emails
contact_from_edit_page.all_emails = "\n".join(filter(lambda x: x != "",[contact_from_edit_page.email,contact_from_edit_page.email2,contact_from_edit_page.email3]))
assert contact_from_edit_page.all_emails == contact_from_home_page.all_emails_from_home_page
#compare address
assert contact_from_home_page.address ==contact_from_edit_page.address
#compare FIO
assert contact_from_edit_page == contact_from_home_page
def clear(s):
return re.sub("[-() ]","",s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.homephone,contact.cellphone,contact.workphone,contact.homephone2]))))
|
en
| 0.350931
|
#compare phones #compare emails #compare address #compare FIO
| 2.640799
| 3
|
keystone/test/client/test_d5_compat_calls.py
|
admiyo/keystone
| 0
|
6626220
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2 as unittest
from keystone.test.functional import common
class D5_AuthenticationTest(common.FunctionalTestCase):
""" Tests the functionality of the D5 compat module """
use_server = True
def setUp(self, *args, **kwargs):
super(D5_AuthenticationTest, self).setUp(*args, **kwargs)
password = common.<PASSWORD>()
self.tenant = self.create_tenant().json['tenant']
self.user = self.create_user(user_password=password,
tenant_id=self.tenant['id']).json['user']
self.user['password'] = password
self.services = {}
self.endpoint_templates = {}
self.services = self.fixture_create_service()
self.endpoint_templates = self.create_endpoint_template(
name=self.services['name'],
type=self.services['type']).\
json['OS-KSCATALOG:endpointTemplate']
self.create_endpoint_for_tenant(self.tenant['id'],
self.endpoint_templates['id'])
def test_validate_unscoped_token(self):
"""Admin should be able to validate a user's token"""
# Authenticate as user to get a token
self.service_token = self.post_token(as_json={
'passwordCredentials': {
'username': self.user['name'],
'password': <PASSWORD>.user['password']}}).\
json['auth']['token']['id']
# In the real world, the service user would then pass his/her token
# to some service that depends on keystone, which would then need to
# use keystone to validate the provided token.
# Admin independently validates the user token
r = self.get_token(self.service_token)
self.assertEqual(r.json['auth']['token']['id'], self.service_token)
self.assertTrue(r.json['auth']['token']['expires'])
self.assertEqual(r.json['auth']['user']['username'],
self.user['name'])
self.assertEqual(r.json['auth']['user']['roleRefs'], [])
def test_validate_scoped_token(self):
"""Admin should be able to validate a user's scoped token"""
# Authenticate as user to get a token
self.service_token = self.post_token(as_json={
'passwordCredentials': {
'tenantId': self.tenant['id'],
'username': self.user['name'],
'password': <PASSWORD>['password']}}).\
json['auth']['token']['id']
# In the real world, the service user would then pass his/her token
# to some service that depends on keystone, which would then need to
# use keystone to validate the provided token.
# Admin independently validates the user token
r = self.get_token(self.service_token)
self.assertEqual(r.json['auth']['token']['id'], self.service_token)
self.assertEqual(r.json['auth']['token']['tenantId'],
self.tenant['id'])
self.assertTrue(r.json['auth']['token']['expires'])
self.assertEqual(r.json['auth']['user']['username'],
self.user['name'])
self.assertEqual(r.json['auth']['user']['roleRefs'], [])
def test_authenticate_for_a_tenant(self):
r = self.authenticate_D5(self.user['name'], self.user['password'],
self.tenant['id'], assert_status=200)
self.assertIsNotNone(r.json['auth']['token'])
service_catalog = r.json['auth']['serviceCatalog']
self.assertIsNotNone(service_catalog)
self.check_urls_for_regular_user(service_catalog)
def test_authenticate_for_a_tenant_xml(self):
data = ('<?xml version="1.0" encoding="UTF-8"?> '
'<passwordCredentials xmlns="%s" tenantId="%s"'
' username="%s" password="%s" '
'/>') % (
self.xmlns, self.tenant['id'],
self.user['name'], self.user['password'])
r = self.post_token(as_xml=data, assert_status=200)
self.assertEquals(r.xml.tag, '{%s}auth' % self.xmlns)
service_catalog = r.xml.find('{%s}serviceCatalog' % self.xmlns)
self.check_urls_for_regular_user_xml(service_catalog)
def test_authenticate_for_a_tenant_on_admin_api(self):
r = self.authenticate_D5(self.user['name'], self.user['password'],
self.tenant['id'], assert_status=200, request_type='admin')
self.assertIsNotNone(r.json['auth']['token'])
self.assertIsNotNone(r.json['auth']['serviceCatalog'])
service_catalog = r.json['auth']['serviceCatalog']
self.check_urls_for_regular_user(service_catalog)
def test_authenticate_for_a_tenant_xml_on_admin_api(self):
data = ('<?xml version="1.0" encoding="UTF-8"?> '
'<passwordCredentials xmlns="%s" tenantId="%s"'
' username="%s" password="%s" '
'/>') % (
self.xmlns, self.tenant['id'],
self.user['name'], self.user['password'])
r = self.post_token(as_xml=data, assert_status=200,
request_type='admin')
self.assertEquals(r.xml.tag, '{%s}auth' % self.xmlns)
service_catalog = r.xml.find('{%s}serviceCatalog' % self.xmlns)
self.check_urls_for_regular_user_xml(service_catalog)
def test_authenticate_user_disabled(self):
self.disable_user(self.user['id'])
self.authenticate_D5(self.user['name'], self.user['password'],
self.tenant['id'], assert_status=403)
def test_authenticate_user_wrong(self):
data = {"passwordCredentials": {
"username-field-completely-wrong": self.user['name'],
"password": <PASSWORD>['password'],
"tenantId": self.tenant['id']}}
self.post_token(as_json=data, assert_status=400)
def test_authenticate_user_wrong_xml(self):
data = ('<?xml version="1.0" encoding="UTF-8"?> '
'<passwordCredentials '
'xmlns="http://docs.openstack.org/identity/api/v2.0" '
'usernamefieldcompletelywrong="%s" '
'password="%s" '
'tenantId="%s"/>') % (
self.user['name'], self.user['password'], self.tenant['id'])
self.post_token(as_xml=data, assert_status=400)
def check_urls_for_regular_user(self, service_catalog):
self.assertIsNotNone(service_catalog)
for k in service_catalog.keys():
endpoints = service_catalog[k]
for endpoint in endpoints:
for key in endpoint:
#Checks whether adminURL is not present.
self.assertNotEquals(key, 'adminURL')
if __name__ == '__main__':
unittest.main()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2 as unittest
from keystone.test.functional import common
class D5_AuthenticationTest(common.FunctionalTestCase):
""" Tests the functionality of the D5 compat module """
use_server = True
def setUp(self, *args, **kwargs):
super(D5_AuthenticationTest, self).setUp(*args, **kwargs)
password = common.<PASSWORD>()
self.tenant = self.create_tenant().json['tenant']
self.user = self.create_user(user_password=password,
tenant_id=self.tenant['id']).json['user']
self.user['password'] = password
self.services = {}
self.endpoint_templates = {}
self.services = self.fixture_create_service()
self.endpoint_templates = self.create_endpoint_template(
name=self.services['name'],
type=self.services['type']).\
json['OS-KSCATALOG:endpointTemplate']
self.create_endpoint_for_tenant(self.tenant['id'],
self.endpoint_templates['id'])
def test_validate_unscoped_token(self):
"""Admin should be able to validate a user's token"""
# Authenticate as user to get a token
self.service_token = self.post_token(as_json={
'passwordCredentials': {
'username': self.user['name'],
'password': <PASSWORD>.user['password']}}).\
json['auth']['token']['id']
# In the real world, the service user would then pass his/her token
# to some service that depends on keystone, which would then need to
# use keystone to validate the provided token.
# Admin independently validates the user token
r = self.get_token(self.service_token)
self.assertEqual(r.json['auth']['token']['id'], self.service_token)
self.assertTrue(r.json['auth']['token']['expires'])
self.assertEqual(r.json['auth']['user']['username'],
self.user['name'])
self.assertEqual(r.json['auth']['user']['roleRefs'], [])
def test_validate_scoped_token(self):
"""Admin should be able to validate a user's scoped token"""
# Authenticate as user to get a token
self.service_token = self.post_token(as_json={
'passwordCredentials': {
'tenantId': self.tenant['id'],
'username': self.user['name'],
'password': <PASSWORD>['password']}}).\
json['auth']['token']['id']
# In the real world, the service user would then pass his/her token
# to some service that depends on keystone, which would then need to
# use keystone to validate the provided token.
# Admin independently validates the user token
r = self.get_token(self.service_token)
self.assertEqual(r.json['auth']['token']['id'], self.service_token)
self.assertEqual(r.json['auth']['token']['tenantId'],
self.tenant['id'])
self.assertTrue(r.json['auth']['token']['expires'])
self.assertEqual(r.json['auth']['user']['username'],
self.user['name'])
self.assertEqual(r.json['auth']['user']['roleRefs'], [])
def test_authenticate_for_a_tenant(self):
r = self.authenticate_D5(self.user['name'], self.user['password'],
self.tenant['id'], assert_status=200)
self.assertIsNotNone(r.json['auth']['token'])
service_catalog = r.json['auth']['serviceCatalog']
self.assertIsNotNone(service_catalog)
self.check_urls_for_regular_user(service_catalog)
def test_authenticate_for_a_tenant_xml(self):
data = ('<?xml version="1.0" encoding="UTF-8"?> '
'<passwordCredentials xmlns="%s" tenantId="%s"'
' username="%s" password="%s" '
'/>') % (
self.xmlns, self.tenant['id'],
self.user['name'], self.user['password'])
r = self.post_token(as_xml=data, assert_status=200)
self.assertEquals(r.xml.tag, '{%s}auth' % self.xmlns)
service_catalog = r.xml.find('{%s}serviceCatalog' % self.xmlns)
self.check_urls_for_regular_user_xml(service_catalog)
def test_authenticate_for_a_tenant_on_admin_api(self):
r = self.authenticate_D5(self.user['name'], self.user['password'],
self.tenant['id'], assert_status=200, request_type='admin')
self.assertIsNotNone(r.json['auth']['token'])
self.assertIsNotNone(r.json['auth']['serviceCatalog'])
service_catalog = r.json['auth']['serviceCatalog']
self.check_urls_for_regular_user(service_catalog)
def test_authenticate_for_a_tenant_xml_on_admin_api(self):
data = ('<?xml version="1.0" encoding="UTF-8"?> '
'<passwordCredentials xmlns="%s" tenantId="%s"'
' username="%s" password="%s" '
'/>') % (
self.xmlns, self.tenant['id'],
self.user['name'], self.user['password'])
r = self.post_token(as_xml=data, assert_status=200,
request_type='admin')
self.assertEquals(r.xml.tag, '{%s}auth' % self.xmlns)
service_catalog = r.xml.find('{%s}serviceCatalog' % self.xmlns)
self.check_urls_for_regular_user_xml(service_catalog)
def test_authenticate_user_disabled(self):
self.disable_user(self.user['id'])
self.authenticate_D5(self.user['name'], self.user['password'],
self.tenant['id'], assert_status=403)
def test_authenticate_user_wrong(self):
data = {"passwordCredentials": {
"username-field-completely-wrong": self.user['name'],
"password": <PASSWORD>['password'],
"tenantId": self.tenant['id']}}
self.post_token(as_json=data, assert_status=400)
def test_authenticate_user_wrong_xml(self):
data = ('<?xml version="1.0" encoding="UTF-8"?> '
'<passwordCredentials '
'xmlns="http://docs.openstack.org/identity/api/v2.0" '
'usernamefieldcompletelywrong="%s" '
'password="%s" '
'tenantId="%s"/>') % (
self.user['name'], self.user['password'], self.tenant['id'])
self.post_token(as_xml=data, assert_status=400)
def check_urls_for_regular_user(self, service_catalog):
self.assertIsNotNone(service_catalog)
for k in service_catalog.keys():
endpoints = service_catalog[k]
for endpoint in endpoints:
for key in endpoint:
#Checks whether adminURL is not present.
self.assertNotEquals(key, 'adminURL')
if __name__ == '__main__':
unittest.main()
|
en
| 0.870789
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010-2011 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. Tests the functionality of the D5 compat module Admin should be able to validate a user's token # Authenticate as user to get a token # In the real world, the service user would then pass his/her token # to some service that depends on keystone, which would then need to # use keystone to validate the provided token. # Admin independently validates the user token Admin should be able to validate a user's scoped token # Authenticate as user to get a token # In the real world, the service user would then pass his/her token # to some service that depends on keystone, which would then need to # use keystone to validate the provided token. # Admin independently validates the user token #Checks whether adminURL is not present.
| 1.913538
| 2
|
sympy/calculus/__init__.py
|
yupbank/sympy
| 1
|
6626221
|
<reponame>yupbank/sympy
"""Calculus-related methods."""
from .euler import euler_equations
from .singularities import (singularities, is_increasing,
is_strictly_increasing, is_decreasing,
is_strictly_decreasing, is_monotonic)
from .finite_diff import finite_diff_weights, apply_finite_diff, differentiate_finite
from .util import (periodicity, not_empty_in, is_convex,
stationary_points, minimum, maximum)
from .accumulationbounds import AccumBounds
__all__ = [
'euler_equations',
'singularities', 'is_increasing',
'is_strictly_increasing', 'is_decreasing',
'is_strictly_decreasing', 'is_monotonic',
'finite_diff_weights', 'apply_finite_diff', 'differentiate_finite',
'periodicity', 'not_empty_in', 'is_convex', 'stationary_points',
'minimum', 'maximum',
'AccumBounds'
]
|
"""Calculus-related methods."""
from .euler import euler_equations
from .singularities import (singularities, is_increasing,
is_strictly_increasing, is_decreasing,
is_strictly_decreasing, is_monotonic)
from .finite_diff import finite_diff_weights, apply_finite_diff, differentiate_finite
from .util import (periodicity, not_empty_in, is_convex,
stationary_points, minimum, maximum)
from .accumulationbounds import AccumBounds
__all__ = [
'euler_equations',
'singularities', 'is_increasing',
'is_strictly_increasing', 'is_decreasing',
'is_strictly_decreasing', 'is_monotonic',
'finite_diff_weights', 'apply_finite_diff', 'differentiate_finite',
'periodicity', 'not_empty_in', 'is_convex', 'stationary_points',
'minimum', 'maximum',
'AccumBounds'
]
|
en
| 0.963753
|
Calculus-related methods.
| 2.343934
| 2
|
src/models/dataset.py
|
martin-fabbri/hf-sentiment-analysis
| 0
|
6626222
|
import torch
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizerBase
from pandas import DataFrame
class BertDataset(Dataset):
def __init__(self, encodings, labels):
self.encodings = encodings
self.labels = labels
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item["labels"] = torch.tensor(self.labels[idx])
return item
def create_dataset(df: DataFrame, tokenizer: PreTrainedTokenizerBase):
texts = df.reviews.values.tolist()
labels = df.sentiment.values.tolist()
encondings = tokenizer(texts, truncation=True, padding=True)
ds = BertDataset(encondings, labels)
return ds
|
import torch
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizerBase
from pandas import DataFrame
class BertDataset(Dataset):
def __init__(self, encodings, labels):
self.encodings = encodings
self.labels = labels
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item["labels"] = torch.tensor(self.labels[idx])
return item
def create_dataset(df: DataFrame, tokenizer: PreTrainedTokenizerBase):
texts = df.reviews.values.tolist()
labels = df.sentiment.values.tolist()
encondings = tokenizer(texts, truncation=True, padding=True)
ds = BertDataset(encondings, labels)
return ds
|
none
| 1
| 2.878124
| 3
|
|
acestream/ACEStream/Core/NATFirewall/NatCheck.py
|
GrandPaRPi/p2ptv-pi
| 0
|
6626223
|
<gh_stars>0
#Embedded file name: ACEStream\Core\NATFirewall\NatCheck.pyo
import socket
import sys
DEBUG = False
def Test1(udpsock, serveraddr):
retVal = {'resp': False,
'ex_ip': None,
'ex_port': None}
BUFSIZ = 1024
reply = ''
request = 'ping1'
udpsock.sendto(request, serveraddr)
try:
reply, rcvaddr = udpsock.recvfrom(BUFSIZ)
if DEBUG:
print >> sys.stderr, 'NATCheck:Test1', 'Got reply: serveraddr', serveraddr, 'rcvaddr', rcvaddr
except socket.timeout:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Connection attempt to %s timed out' % (serveraddr,)
return retVal
except ValueError as strerror:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Could not receive data: %s' % strerror
return retVal
except socket.error as (errno, strerror):
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Could not receive data: %s' % strerror
return retVal
ex_ip, ex_port = reply.split(':')
retVal['resp'] = True
retVal['ex_ip'] = ex_ip
retVal['ex_port'] = ex_port
return retVal
def Test2(udpsock, serveraddr):
retVal = {'resp': False}
BUFSIZ = 1024
request = 'ping2'
udpsock.sendto(request, serveraddr)
try:
reply, rcvaddr = udpsock.recvfrom(BUFSIZ)
if DEBUG:
print >> sys.stderr, 'NATCheck:Test2', 'Got reply: serveraddr', serveraddr, 'rcvaddr', rcvaddr
except socket.timeout:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Connection attempt to %s timed out' % (serveraddr,)
return retVal
except ValueError as strerror:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Could not receive data: %s' % strerror
return retVal
except socket.error as (errno, strerror):
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Could not receive data: %s' % strerror
return retVal
retVal['resp'] = True
return retVal
def Test3(udpsock, serveraddr):
retVal = {'resp': False,
'ex_ip': None,
'ex_port': None}
BUFSIZ = 1024
reply = ''
request = 'ping3'
udpsock.sendto(request, serveraddr)
try:
reply, rcvaddr = udpsock.recvfrom(BUFSIZ)
if DEBUG:
print >> sys.stderr, 'NATCheck:Test3', 'Got reply: serveraddr', serveraddr, 'rcvaddr', rcvaddr
except socket.timeout:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Connection attempt to %s timed out' % (serveraddr,)
return retVal
except ValueError as strerror:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Could not receive data: %s' % strerror
return retVal
except socket.error as (errno, strerror):
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Could not receive data: %s' % strerror
return retVal
ex_ip, ex_port = reply.split(':')
retVal['resp'] = True
retVal['ex_ip'] = ex_ip
retVal['ex_port'] = ex_port
return retVal
def GetNATType(in_port, serveraddr1, serveraddr2):
serveraddr1 = ('stun1.tribler.org', 6701)
serveraddr2 = ('stun2.tribler.org', 6702)
nat_type, ex_ip, ex_port, in_ip = ([-1, 'Unknown'],
'0.0.0.0',
'0',
'0.0.0.0')
udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udpsock.settimeout(5)
try:
udpsock.bind(('', in_port))
except socket.error as err:
print >> sys.stderr, "Couldn't bind a udp socket on port %d : %s" % (in_port, err)
return (nat_type,
ex_ip,
ex_port,
in_ip)
try:
connectaddr = ('torrentstream.net', 80)
s = socket.socket()
s.connect(connectaddr)
in_ip = s.getsockname()[0]
del s
if DEBUG:
print >> sys.stderr, 'NATCheck: getting the internal ip address by connecting to tribler.org:80', in_ip
except socket.error as err:
print >> sys.stderr, "Couldn't connect to %s:%i" % (connectaddr[0], connectaddr[1])
return (nat_type,
ex_ip,
ex_port,
in_ip)
ret = Test1(udpsock, serveraddr1)
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Test I reported: ' + str(ret)
if ret['resp'] == False:
nat_type[1] = 'Blocked'
else:
ex_ip = ret['ex_ip']
ex_port = ret['ex_port']
if ret['ex_ip'] == in_ip:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'No NAT'
ret = Test2(udpsock, serveraddr1)
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Test II reported: ' + str(ret)
if ret['resp'] == True:
nat_type[0] = 0
nat_type[1] = 'Open Internet'
else:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'There is a Firewall'
ret = Test3(udpsock, serveraddr1)
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Test III reported: ' + str(ret)
if ret['resp'] == True:
nat_type[0] = 2
nat_type[1] = 'Restricted Cone Firewall'
else:
nat_type[0] = 3
nat_type[1] = 'Port Restricted Cone Firewall'
else:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'There is a NAT'
ret = Test2(udpsock, serveraddr1)
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Test II reported: ' + str(ret)
if ret['resp'] == True:
nat_type[0] = 1
nat_type[1] = 'Full Cone NAT'
else:
ret = Test1(udpsock, serveraddr2)
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Test I reported: ' + str(ret)
if ex_ip == ret['ex_ip'] and ex_port == ret['ex_port']:
ret = Test3(udpsock, serveraddr1)
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Test III reported: ' + str(ret)
if ret['resp'] == True:
nat_type[0] = 2
nat_type[1] = 'Restricted Cone NAT'
else:
nat_type[0] = 3
nat_type[1] = 'Port Restricted Cone NAT'
else:
nat_type[0] = -1
nat_type[1] = 'Symmetric NAT'
udpsock.close()
return (nat_type,
ex_ip,
ex_port,
in_ip)
|
#Embedded file name: ACEStream\Core\NATFirewall\NatCheck.pyo
import socket
import sys
DEBUG = False
def Test1(udpsock, serveraddr):
retVal = {'resp': False,
'ex_ip': None,
'ex_port': None}
BUFSIZ = 1024
reply = ''
request = 'ping1'
udpsock.sendto(request, serveraddr)
try:
reply, rcvaddr = udpsock.recvfrom(BUFSIZ)
if DEBUG:
print >> sys.stderr, 'NATCheck:Test1', 'Got reply: serveraddr', serveraddr, 'rcvaddr', rcvaddr
except socket.timeout:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Connection attempt to %s timed out' % (serveraddr,)
return retVal
except ValueError as strerror:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Could not receive data: %s' % strerror
return retVal
except socket.error as (errno, strerror):
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Could not receive data: %s' % strerror
return retVal
ex_ip, ex_port = reply.split(':')
retVal['resp'] = True
retVal['ex_ip'] = ex_ip
retVal['ex_port'] = ex_port
return retVal
def Test2(udpsock, serveraddr):
retVal = {'resp': False}
BUFSIZ = 1024
request = 'ping2'
udpsock.sendto(request, serveraddr)
try:
reply, rcvaddr = udpsock.recvfrom(BUFSIZ)
if DEBUG:
print >> sys.stderr, 'NATCheck:Test2', 'Got reply: serveraddr', serveraddr, 'rcvaddr', rcvaddr
except socket.timeout:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Connection attempt to %s timed out' % (serveraddr,)
return retVal
except ValueError as strerror:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Could not receive data: %s' % strerror
return retVal
except socket.error as (errno, strerror):
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Could not receive data: %s' % strerror
return retVal
retVal['resp'] = True
return retVal
def Test3(udpsock, serveraddr):
retVal = {'resp': False,
'ex_ip': None,
'ex_port': None}
BUFSIZ = 1024
reply = ''
request = 'ping3'
udpsock.sendto(request, serveraddr)
try:
reply, rcvaddr = udpsock.recvfrom(BUFSIZ)
if DEBUG:
print >> sys.stderr, 'NATCheck:Test3', 'Got reply: serveraddr', serveraddr, 'rcvaddr', rcvaddr
except socket.timeout:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Connection attempt to %s timed out' % (serveraddr,)
return retVal
except ValueError as strerror:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Could not receive data: %s' % strerror
return retVal
except socket.error as (errno, strerror):
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Could not receive data: %s' % strerror
return retVal
ex_ip, ex_port = reply.split(':')
retVal['resp'] = True
retVal['ex_ip'] = ex_ip
retVal['ex_port'] = ex_port
return retVal
def GetNATType(in_port, serveraddr1, serveraddr2):
serveraddr1 = ('stun1.tribler.org', 6701)
serveraddr2 = ('stun2.tribler.org', 6702)
nat_type, ex_ip, ex_port, in_ip = ([-1, 'Unknown'],
'0.0.0.0',
'0',
'0.0.0.0')
udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udpsock.settimeout(5)
try:
udpsock.bind(('', in_port))
except socket.error as err:
print >> sys.stderr, "Couldn't bind a udp socket on port %d : %s" % (in_port, err)
return (nat_type,
ex_ip,
ex_port,
in_ip)
try:
connectaddr = ('torrentstream.net', 80)
s = socket.socket()
s.connect(connectaddr)
in_ip = s.getsockname()[0]
del s
if DEBUG:
print >> sys.stderr, 'NATCheck: getting the internal ip address by connecting to tribler.org:80', in_ip
except socket.error as err:
print >> sys.stderr, "Couldn't connect to %s:%i" % (connectaddr[0], connectaddr[1])
return (nat_type,
ex_ip,
ex_port,
in_ip)
ret = Test1(udpsock, serveraddr1)
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Test I reported: ' + str(ret)
if ret['resp'] == False:
nat_type[1] = 'Blocked'
else:
ex_ip = ret['ex_ip']
ex_port = ret['ex_port']
if ret['ex_ip'] == in_ip:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'No NAT'
ret = Test2(udpsock, serveraddr1)
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Test II reported: ' + str(ret)
if ret['resp'] == True:
nat_type[0] = 0
nat_type[1] = 'Open Internet'
else:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'There is a Firewall'
ret = Test3(udpsock, serveraddr1)
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Test III reported: ' + str(ret)
if ret['resp'] == True:
nat_type[0] = 2
nat_type[1] = 'Restricted Cone Firewall'
else:
nat_type[0] = 3
nat_type[1] = 'Port Restricted Cone Firewall'
else:
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'There is a NAT'
ret = Test2(udpsock, serveraddr1)
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Test II reported: ' + str(ret)
if ret['resp'] == True:
nat_type[0] = 1
nat_type[1] = 'Full Cone NAT'
else:
ret = Test1(udpsock, serveraddr2)
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Test I reported: ' + str(ret)
if ex_ip == ret['ex_ip'] and ex_port == ret['ex_port']:
ret = Test3(udpsock, serveraddr1)
if DEBUG:
print >> sys.stderr, 'NATCheck:', 'Test III reported: ' + str(ret)
if ret['resp'] == True:
nat_type[0] = 2
nat_type[1] = 'Restricted Cone NAT'
else:
nat_type[0] = 3
nat_type[1] = 'Port Restricted Cone NAT'
else:
nat_type[0] = -1
nat_type[1] = 'Symmetric NAT'
udpsock.close()
return (nat_type,
ex_ip,
ex_port,
in_ip)
|
en
| 0.457667
|
#Embedded file name: ACEStream\Core\NATFirewall\NatCheck.pyo
| 2.671549
| 3
|
sdk/lusid_asyncio/models/valuation_schedule.py
|
finbourne/lusid-sdk-python-asyncio-preview
| 0
|
6626224
|
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3923
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid_asyncio.configuration import Configuration
class ValuationSchedule(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'effective_from': 'str',
'effective_at': 'str',
'tenor': 'str',
'roll_convention': 'str',
'holiday_calendars': 'list[str]',
'valuation_date_times': 'list[str]'
}
attribute_map = {
'effective_from': 'effectiveFrom',
'effective_at': 'effectiveAt',
'tenor': 'tenor',
'roll_convention': 'rollConvention',
'holiday_calendars': 'holidayCalendars',
'valuation_date_times': 'valuationDateTimes'
}
required_map = {
'effective_from': 'optional',
'effective_at': 'required',
'tenor': 'optional',
'roll_convention': 'optional',
'holiday_calendars': 'optional',
'valuation_date_times': 'optional'
}
def __init__(self, effective_from=None, effective_at=None, tenor=None, roll_convention=None, holiday_calendars=None, valuation_date_times=None, local_vars_configuration=None): # noqa: E501
"""ValuationSchedule - a model defined in OpenAPI"
:param effective_from: If present, the EffectiveFrom and EffectiveAt dates are interpreted as a range of dates for which to perform a valuation. In this case, valuation is calculated for the portfolio(s) for each business day in the given range.
:type effective_from: str
:param effective_at: The market data time, i.e. the time to run the valuation request effective of. (required)
:type effective_at: str
:param tenor: Tenor, e.g \"1D\", \"1M\" to be used in generating the date schedule when effectiveFrom and effectiveAt are both given and are not the same.
:type tenor: str
:param roll_convention: When Tenor is given and is not equal to \"1D\", there may be cases where \"date + tenor\" land on non-business days around month end. In that case, the RollConvention, e.g. modified following \"MF\" would be applied to determine the next GBD.
:type roll_convention: str
:param holiday_calendars: The holiday calendar(s) that should be used in determining the date schedule. Holiday calendar(s) are supplied by their names, for example, \"CoppClarke\". Note that when the calendars are not available (e.g. when the user has insufficient permissions), a recipe setting will be used to determine whether the whole batch should then fail or whether the calendar not being available should simply be ignored.
:type holiday_calendars: list[str]
:param valuation_date_times: If given, this is the exact set of dates on which to perform a valuation. This will replace/override all other specified values if given.
:type valuation_date_times: list[str]
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._effective_from = None
self._effective_at = None
self._tenor = None
self._roll_convention = None
self._holiday_calendars = None
self._valuation_date_times = None
self.discriminator = None
self.effective_from = effective_from
self.effective_at = effective_at
self.tenor = tenor
self.roll_convention = roll_convention
self.holiday_calendars = holiday_calendars
self.valuation_date_times = valuation_date_times
@property
def effective_from(self):
"""Gets the effective_from of this ValuationSchedule. # noqa: E501
If present, the EffectiveFrom and EffectiveAt dates are interpreted as a range of dates for which to perform a valuation. In this case, valuation is calculated for the portfolio(s) for each business day in the given range. # noqa: E501
:return: The effective_from of this ValuationSchedule. # noqa: E501
:rtype: str
"""
return self._effective_from
@effective_from.setter
def effective_from(self, effective_from):
"""Sets the effective_from of this ValuationSchedule.
If present, the EffectiveFrom and EffectiveAt dates are interpreted as a range of dates for which to perform a valuation. In this case, valuation is calculated for the portfolio(s) for each business day in the given range. # noqa: E501
:param effective_from: The effective_from of this ValuationSchedule. # noqa: E501
:type effective_from: str
"""
self._effective_from = effective_from
@property
def effective_at(self):
"""Gets the effective_at of this ValuationSchedule. # noqa: E501
The market data time, i.e. the time to run the valuation request effective of. # noqa: E501
:return: The effective_at of this ValuationSchedule. # noqa: E501
:rtype: str
"""
return self._effective_at
@effective_at.setter
def effective_at(self, effective_at):
"""Sets the effective_at of this ValuationSchedule.
The market data time, i.e. the time to run the valuation request effective of. # noqa: E501
:param effective_at: The effective_at of this ValuationSchedule. # noqa: E501
:type effective_at: str
"""
if self.local_vars_configuration.client_side_validation and effective_at is None: # noqa: E501
raise ValueError("Invalid value for `effective_at`, must not be `None`") # noqa: E501
self._effective_at = effective_at
@property
def tenor(self):
"""Gets the tenor of this ValuationSchedule. # noqa: E501
Tenor, e.g \"1D\", \"1M\" to be used in generating the date schedule when effectiveFrom and effectiveAt are both given and are not the same. # noqa: E501
:return: The tenor of this ValuationSchedule. # noqa: E501
:rtype: str
"""
return self._tenor
@tenor.setter
def tenor(self, tenor):
"""Sets the tenor of this ValuationSchedule.
Tenor, e.g \"1D\", \"1M\" to be used in generating the date schedule when effectiveFrom and effectiveAt are both given and are not the same. # noqa: E501
:param tenor: The tenor of this ValuationSchedule. # noqa: E501
:type tenor: str
"""
if (self.local_vars_configuration.client_side_validation and
tenor is not None and len(tenor) > 16):
raise ValueError("Invalid value for `tenor`, length must be less than or equal to `16`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
tenor is not None and len(tenor) < 0):
raise ValueError("Invalid value for `tenor`, length must be greater than or equal to `0`") # noqa: E501
self._tenor = tenor
@property
def roll_convention(self):
"""Gets the roll_convention of this ValuationSchedule. # noqa: E501
When Tenor is given and is not equal to \"1D\", there may be cases where \"date + tenor\" land on non-business days around month end. In that case, the RollConvention, e.g. modified following \"MF\" would be applied to determine the next GBD. # noqa: E501
:return: The roll_convention of this ValuationSchedule. # noqa: E501
:rtype: str
"""
return self._roll_convention
@roll_convention.setter
def roll_convention(self, roll_convention):
"""Sets the roll_convention of this ValuationSchedule.
When Tenor is given and is not equal to \"1D\", there may be cases where \"date + tenor\" land on non-business days around month end. In that case, the RollConvention, e.g. modified following \"MF\" would be applied to determine the next GBD. # noqa: E501
:param roll_convention: The roll_convention of this ValuationSchedule. # noqa: E501
:type roll_convention: str
"""
if (self.local_vars_configuration.client_side_validation and
roll_convention is not None and len(roll_convention) > 16):
raise ValueError("Invalid value for `roll_convention`, length must be less than or equal to `16`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
roll_convention is not None and len(roll_convention) < 0):
raise ValueError("Invalid value for `roll_convention`, length must be greater than or equal to `0`") # noqa: E501
self._roll_convention = roll_convention
@property
def holiday_calendars(self):
"""Gets the holiday_calendars of this ValuationSchedule. # noqa: E501
The holiday calendar(s) that should be used in determining the date schedule. Holiday calendar(s) are supplied by their names, for example, \"CoppClarke\". Note that when the calendars are not available (e.g. when the user has insufficient permissions), a recipe setting will be used to determine whether the whole batch should then fail or whether the calendar not being available should simply be ignored. # noqa: E501
:return: The holiday_calendars of this ValuationSchedule. # noqa: E501
:rtype: list[str]
"""
return self._holiday_calendars
@holiday_calendars.setter
def holiday_calendars(self, holiday_calendars):
"""Sets the holiday_calendars of this ValuationSchedule.
The holiday calendar(s) that should be used in determining the date schedule. Holiday calendar(s) are supplied by their names, for example, \"CoppClarke\". Note that when the calendars are not available (e.g. when the user has insufficient permissions), a recipe setting will be used to determine whether the whole batch should then fail or whether the calendar not being available should simply be ignored. # noqa: E501
:param holiday_calendars: The holiday_calendars of this ValuationSchedule. # noqa: E501
:type holiday_calendars: list[str]
"""
self._holiday_calendars = holiday_calendars
@property
def valuation_date_times(self):
"""Gets the valuation_date_times of this ValuationSchedule. # noqa: E501
If given, this is the exact set of dates on which to perform a valuation. This will replace/override all other specified values if given. # noqa: E501
:return: The valuation_date_times of this ValuationSchedule. # noqa: E501
:rtype: list[str]
"""
return self._valuation_date_times
@valuation_date_times.setter
def valuation_date_times(self, valuation_date_times):
"""Sets the valuation_date_times of this ValuationSchedule.
If given, this is the exact set of dates on which to perform a valuation. This will replace/override all other specified values if given. # noqa: E501
:param valuation_date_times: The valuation_date_times of this ValuationSchedule. # noqa: E501
:type valuation_date_times: list[str]
"""
self._valuation_date_times = valuation_date_times
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ValuationSchedule):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ValuationSchedule):
return True
return self.to_dict() != other.to_dict()
|
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3923
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid_asyncio.configuration import Configuration
class ValuationSchedule(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'effective_from': 'str',
'effective_at': 'str',
'tenor': 'str',
'roll_convention': 'str',
'holiday_calendars': 'list[str]',
'valuation_date_times': 'list[str]'
}
attribute_map = {
'effective_from': 'effectiveFrom',
'effective_at': 'effectiveAt',
'tenor': 'tenor',
'roll_convention': 'rollConvention',
'holiday_calendars': 'holidayCalendars',
'valuation_date_times': 'valuationDateTimes'
}
required_map = {
'effective_from': 'optional',
'effective_at': 'required',
'tenor': 'optional',
'roll_convention': 'optional',
'holiday_calendars': 'optional',
'valuation_date_times': 'optional'
}
def __init__(self, effective_from=None, effective_at=None, tenor=None, roll_convention=None, holiday_calendars=None, valuation_date_times=None, local_vars_configuration=None): # noqa: E501
"""ValuationSchedule - a model defined in OpenAPI"
:param effective_from: If present, the EffectiveFrom and EffectiveAt dates are interpreted as a range of dates for which to perform a valuation. In this case, valuation is calculated for the portfolio(s) for each business day in the given range.
:type effective_from: str
:param effective_at: The market data time, i.e. the time to run the valuation request effective of. (required)
:type effective_at: str
:param tenor: Tenor, e.g \"1D\", \"1M\" to be used in generating the date schedule when effectiveFrom and effectiveAt are both given and are not the same.
:type tenor: str
:param roll_convention: When Tenor is given and is not equal to \"1D\", there may be cases where \"date + tenor\" land on non-business days around month end. In that case, the RollConvention, e.g. modified following \"MF\" would be applied to determine the next GBD.
:type roll_convention: str
:param holiday_calendars: The holiday calendar(s) that should be used in determining the date schedule. Holiday calendar(s) are supplied by their names, for example, \"CoppClarke\". Note that when the calendars are not available (e.g. when the user has insufficient permissions), a recipe setting will be used to determine whether the whole batch should then fail or whether the calendar not being available should simply be ignored.
:type holiday_calendars: list[str]
:param valuation_date_times: If given, this is the exact set of dates on which to perform a valuation. This will replace/override all other specified values if given.
:type valuation_date_times: list[str]
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._effective_from = None
self._effective_at = None
self._tenor = None
self._roll_convention = None
self._holiday_calendars = None
self._valuation_date_times = None
self.discriminator = None
self.effective_from = effective_from
self.effective_at = effective_at
self.tenor = tenor
self.roll_convention = roll_convention
self.holiday_calendars = holiday_calendars
self.valuation_date_times = valuation_date_times
@property
def effective_from(self):
"""Gets the effective_from of this ValuationSchedule. # noqa: E501
If present, the EffectiveFrom and EffectiveAt dates are interpreted as a range of dates for which to perform a valuation. In this case, valuation is calculated for the portfolio(s) for each business day in the given range. # noqa: E501
:return: The effective_from of this ValuationSchedule. # noqa: E501
:rtype: str
"""
return self._effective_from
@effective_from.setter
def effective_from(self, effective_from):
"""Sets the effective_from of this ValuationSchedule.
If present, the EffectiveFrom and EffectiveAt dates are interpreted as a range of dates for which to perform a valuation. In this case, valuation is calculated for the portfolio(s) for each business day in the given range. # noqa: E501
:param effective_from: The effective_from of this ValuationSchedule. # noqa: E501
:type effective_from: str
"""
self._effective_from = effective_from
@property
def effective_at(self):
"""Gets the effective_at of this ValuationSchedule. # noqa: E501
The market data time, i.e. the time to run the valuation request effective of. # noqa: E501
:return: The effective_at of this ValuationSchedule. # noqa: E501
:rtype: str
"""
return self._effective_at
@effective_at.setter
def effective_at(self, effective_at):
"""Sets the effective_at of this ValuationSchedule.
The market data time, i.e. the time to run the valuation request effective of. # noqa: E501
:param effective_at: The effective_at of this ValuationSchedule. # noqa: E501
:type effective_at: str
"""
if self.local_vars_configuration.client_side_validation and effective_at is None: # noqa: E501
raise ValueError("Invalid value for `effective_at`, must not be `None`") # noqa: E501
self._effective_at = effective_at
@property
def tenor(self):
"""Gets the tenor of this ValuationSchedule. # noqa: E501
Tenor, e.g \"1D\", \"1M\" to be used in generating the date schedule when effectiveFrom and effectiveAt are both given and are not the same. # noqa: E501
:return: The tenor of this ValuationSchedule. # noqa: E501
:rtype: str
"""
return self._tenor
@tenor.setter
def tenor(self, tenor):
"""Sets the tenor of this ValuationSchedule.
Tenor, e.g \"1D\", \"1M\" to be used in generating the date schedule when effectiveFrom and effectiveAt are both given and are not the same. # noqa: E501
:param tenor: The tenor of this ValuationSchedule. # noqa: E501
:type tenor: str
"""
if (self.local_vars_configuration.client_side_validation and
tenor is not None and len(tenor) > 16):
raise ValueError("Invalid value for `tenor`, length must be less than or equal to `16`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
tenor is not None and len(tenor) < 0):
raise ValueError("Invalid value for `tenor`, length must be greater than or equal to `0`") # noqa: E501
self._tenor = tenor
@property
def roll_convention(self):
"""Gets the roll_convention of this ValuationSchedule. # noqa: E501
When Tenor is given and is not equal to \"1D\", there may be cases where \"date + tenor\" land on non-business days around month end. In that case, the RollConvention, e.g. modified following \"MF\" would be applied to determine the next GBD. # noqa: E501
:return: The roll_convention of this ValuationSchedule. # noqa: E501
:rtype: str
"""
return self._roll_convention
@roll_convention.setter
def roll_convention(self, roll_convention):
"""Sets the roll_convention of this ValuationSchedule.
When Tenor is given and is not equal to \"1D\", there may be cases where \"date + tenor\" land on non-business days around month end. In that case, the RollConvention, e.g. modified following \"MF\" would be applied to determine the next GBD. # noqa: E501
:param roll_convention: The roll_convention of this ValuationSchedule. # noqa: E501
:type roll_convention: str
"""
if (self.local_vars_configuration.client_side_validation and
roll_convention is not None and len(roll_convention) > 16):
raise ValueError("Invalid value for `roll_convention`, length must be less than or equal to `16`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
roll_convention is not None and len(roll_convention) < 0):
raise ValueError("Invalid value for `roll_convention`, length must be greater than or equal to `0`") # noqa: E501
self._roll_convention = roll_convention
@property
def holiday_calendars(self):
"""Gets the holiday_calendars of this ValuationSchedule. # noqa: E501
The holiday calendar(s) that should be used in determining the date schedule. Holiday calendar(s) are supplied by their names, for example, \"CoppClarke\". Note that when the calendars are not available (e.g. when the user has insufficient permissions), a recipe setting will be used to determine whether the whole batch should then fail or whether the calendar not being available should simply be ignored. # noqa: E501
:return: The holiday_calendars of this ValuationSchedule. # noqa: E501
:rtype: list[str]
"""
return self._holiday_calendars
@holiday_calendars.setter
def holiday_calendars(self, holiday_calendars):
"""Sets the holiday_calendars of this ValuationSchedule.
The holiday calendar(s) that should be used in determining the date schedule. Holiday calendar(s) are supplied by their names, for example, \"CoppClarke\". Note that when the calendars are not available (e.g. when the user has insufficient permissions), a recipe setting will be used to determine whether the whole batch should then fail or whether the calendar not being available should simply be ignored. # noqa: E501
:param holiday_calendars: The holiday_calendars of this ValuationSchedule. # noqa: E501
:type holiday_calendars: list[str]
"""
self._holiday_calendars = holiday_calendars
@property
def valuation_date_times(self):
"""Gets the valuation_date_times of this ValuationSchedule. # noqa: E501
If given, this is the exact set of dates on which to perform a valuation. This will replace/override all other specified values if given. # noqa: E501
:return: The valuation_date_times of this ValuationSchedule. # noqa: E501
:rtype: list[str]
"""
return self._valuation_date_times
@valuation_date_times.setter
def valuation_date_times(self, valuation_date_times):
"""Sets the valuation_date_times of this ValuationSchedule.
If given, this is the exact set of dates on which to perform a valuation. This will replace/override all other specified values if given. # noqa: E501
:param valuation_date_times: The valuation_date_times of this ValuationSchedule. # noqa: E501
:type valuation_date_times: list[str]
"""
self._valuation_date_times = valuation_date_times
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ValuationSchedule):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
    """Inverse of __eq__: unequal unless both are ValuationSchedule with matching dicts."""
    return not (isinstance(other, ValuationSchedule) and
                self.to_dict() == other.to_dict())
|
en
| 0.751514
|
# coding: utf-8 LUSID API FINBOURNE Technology # noqa: E501 The version of the OpenAPI document: 0.11.3923 Contact: <EMAIL> Generated by: https://openapi-generator.tech # noqa: F401 NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. required_map (dict): The key is attribute name and the value is whether it is 'required' or 'optional'. # noqa: E501 ValuationSchedule - a model defined in OpenAPI" :param effective_from: If present, the EffectiveFrom and EffectiveAt dates are interpreted as a range of dates for which to perform a valuation. In this case, valuation is calculated for the portfolio(s) for each business day in the given range. :type effective_from: str :param effective_at: The market data time, i.e. the time to run the valuation request effective of. (required) :type effective_at: str :param tenor: Tenor, e.g \"1D\", \"1M\" to be used in generating the date schedule when effectiveFrom and effectiveAt are both given and are not the same. :type tenor: str :param roll_convention: When Tenor is given and is not equal to \"1D\", there may be cases where \"date + tenor\" land on non-business days around month end. In that case, the RollConvention, e.g. modified following \"MF\" would be applied to determine the next GBD. :type roll_convention: str :param holiday_calendars: The holiday calendar(s) that should be used in determining the date schedule. Holiday calendar(s) are supplied by their names, for example, \"CoppClarke\". Note that when the calendars are not available (e.g. when the user has insufficient permissions), a recipe setting will be used to determine whether the whole batch should then fail or whether the calendar not being available should simply be ignored. 
:type holiday_calendars: list[str] :param valuation_date_times: If given, this is the exact set of dates on which to perform a valuation. This will replace/override all other specified values if given. :type valuation_date_times: list[str] # noqa: E501 Gets the effective_from of this ValuationSchedule. # noqa: E501 If present, the EffectiveFrom and EffectiveAt dates are interpreted as a range of dates for which to perform a valuation. In this case, valuation is calculated for the portfolio(s) for each business day in the given range. # noqa: E501 :return: The effective_from of this ValuationSchedule. # noqa: E501 :rtype: str Sets the effective_from of this ValuationSchedule. If present, the EffectiveFrom and EffectiveAt dates are interpreted as a range of dates for which to perform a valuation. In this case, valuation is calculated for the portfolio(s) for each business day in the given range. # noqa: E501 :param effective_from: The effective_from of this ValuationSchedule. # noqa: E501 :type effective_from: str Gets the effective_at of this ValuationSchedule. # noqa: E501 The market data time, i.e. the time to run the valuation request effective of. # noqa: E501 :return: The effective_at of this ValuationSchedule. # noqa: E501 :rtype: str Sets the effective_at of this ValuationSchedule. The market data time, i.e. the time to run the valuation request effective of. # noqa: E501 :param effective_at: The effective_at of this ValuationSchedule. # noqa: E501 :type effective_at: str # noqa: E501 # noqa: E501 Gets the tenor of this ValuationSchedule. # noqa: E501 Tenor, e.g \"1D\", \"1M\" to be used in generating the date schedule when effectiveFrom and effectiveAt are both given and are not the same. # noqa: E501 :return: The tenor of this ValuationSchedule. # noqa: E501 :rtype: str Sets the tenor of this ValuationSchedule. Tenor, e.g \"1D\", \"1M\" to be used in generating the date schedule when effectiveFrom and effectiveAt are both given and are not the same. 
# noqa: E501 :param tenor: The tenor of this ValuationSchedule. # noqa: E501 :type tenor: str # noqa: E501 # noqa: E501 Gets the roll_convention of this ValuationSchedule. # noqa: E501 When Tenor is given and is not equal to \"1D\", there may be cases where \"date + tenor\" land on non-business days around month end. In that case, the RollConvention, e.g. modified following \"MF\" would be applied to determine the next GBD. # noqa: E501 :return: The roll_convention of this ValuationSchedule. # noqa: E501 :rtype: str Sets the roll_convention of this ValuationSchedule. When Tenor is given and is not equal to \"1D\", there may be cases where \"date + tenor\" land on non-business days around month end. In that case, the RollConvention, e.g. modified following \"MF\" would be applied to determine the next GBD. # noqa: E501 :param roll_convention: The roll_convention of this ValuationSchedule. # noqa: E501 :type roll_convention: str # noqa: E501 # noqa: E501 Gets the holiday_calendars of this ValuationSchedule. # noqa: E501 The holiday calendar(s) that should be used in determining the date schedule. Holiday calendar(s) are supplied by their names, for example, \"CoppClarke\". Note that when the calendars are not available (e.g. when the user has insufficient permissions), a recipe setting will be used to determine whether the whole batch should then fail or whether the calendar not being available should simply be ignored. # noqa: E501 :return: The holiday_calendars of this ValuationSchedule. # noqa: E501 :rtype: list[str] Sets the holiday_calendars of this ValuationSchedule. The holiday calendar(s) that should be used in determining the date schedule. Holiday calendar(s) are supplied by their names, for example, \"CoppClarke\". Note that when the calendars are not available (e.g. 
when the user has insufficient permissions), a recipe setting will be used to determine whether the whole batch should then fail or whether the calendar not being available should simply be ignored. # noqa: E501 :param holiday_calendars: The holiday_calendars of this ValuationSchedule. # noqa: E501 :type holiday_calendars: list[str] Gets the valuation_date_times of this ValuationSchedule. # noqa: E501 If given, this is the exact set of dates on which to perform a valuation. This will replace/override all other specified values if given. # noqa: E501 :return: The valuation_date_times of this ValuationSchedule. # noqa: E501 :rtype: list[str] Sets the valuation_date_times of this ValuationSchedule. If given, this is the exact set of dates on which to perform a valuation. This will replace/override all other specified values if given. # noqa: E501 :param valuation_date_times: The valuation_date_times of this ValuationSchedule. # noqa: E501 :type valuation_date_times: list[str] Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal
| 1.780438
| 2
|
scripts/maint/practracker/practracker_tests.py
|
golegen/tor
| 1
|
6626225
|
<gh_stars>1-10
#!/usr/bin/python
"""Some simple tests for practracker metrics"""
import unittest
import StringIO
import metrics
# Sample C source fed to metrics.get_function_lines(): two functions with
# plain `static void` headers (one single-line, one wrapped across lines)
# and one defined via the MOCK_IMPL macro, so all three header styles the
# parser must recognize are exercised.  All three are deliberately named
# "fun" and have two-statement bodies.
function_file = """static void
fun(directory_request_t *req, const char *resource)
{
time_t if_modified_since = 0;
uint8_t or_diff_from[DIGEST256_LEN];
}
static void
fun(directory_request_t *req,
const char *resource)
{
time_t if_modified_since = 0;
uint8_t or_diff_from[DIGEST256_LEN];
}
MOCK_IMPL(void,
fun,(
uint8_t dir_purpose,
uint8_t router_purpose,
const char *resource,
int pds_flags,
download_want_authority_t want_authority))
{
const routerstatus_t *rs = NULL;
const or_options_t *options = get_options();
}
"""
class TestFunctionLength(unittest.TestCase):
    def test_function_length(self):
        # Parse the sample C source above and check names and line counts.
        funcs = StringIO.StringIO(function_file)
        # All functions should have length 2
        # NOTE(review): each function body above has 2 statements, yet the
        # assertion below expects 4 -- presumably get_function_lines() also
        # counts brace/continuation lines; confirm against metrics.py and
        # then reconcile the comment above with the asserted value.
        for name, lines in metrics.get_function_lines(funcs):
            self.assertEqual(name, "fun")
        funcs.seek(0)
        # Re-scan from the top to verify the reported line counts.
        for name, lines in metrics.get_function_lines(funcs):
            self.assertEqual(lines, 4)
class TestIncludeCount(unittest.TestCase):
    def test_include_count(self):
        """get_include_count() must match #include directives regardless of
        leading whitespace or spacing between '#' and 'include'."""
        sample = StringIO.StringIO("""
# include <abc.h>
# include "def.h"
#include "ghi.h"
\t#\t include "jkl.h"
""")
        self.assertEqual(metrics.get_include_count(sample), 4)
# Allow running this module directly: python practracker_tests.py
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/python
"""Some simple tests for practracker metrics"""
import unittest
import StringIO
import metrics
# C source fixture for metrics.get_function_lines(): three functions, all
# named "fun", covering single-line, wrapped, and MOCK_IMPL-macro headers.
function_file = """static void
fun(directory_request_t *req, const char *resource)
{
time_t if_modified_since = 0;
uint8_t or_diff_from[DIGEST256_LEN];
}
static void
fun(directory_request_t *req,
const char *resource)
{
time_t if_modified_since = 0;
uint8_t or_diff_from[DIGEST256_LEN];
}
MOCK_IMPL(void,
fun,(
uint8_t dir_purpose,
uint8_t router_purpose,
const char *resource,
int pds_flags,
download_want_authority_t want_authority))
{
const routerstatus_t *rs = NULL;
const or_options_t *options = get_options();
}
"""
class TestFunctionLength(unittest.TestCase):
    def test_function_length(self):
        # Check parsed function names, then rescan for reported lengths.
        funcs = StringIO.StringIO(function_file)
        # All functions should have length 2
        # NOTE(review): the assertion below expects 4, not 2 -- presumably
        # braces/continuations are counted; verify against metrics.py.
        for name, lines in metrics.get_function_lines(funcs):
            self.assertEqual(name, "fun")
        funcs.seek(0)
        for name, lines in metrics.get_function_lines(funcs):
            self.assertEqual(lines, 4)
class TestIncludeCount(unittest.TestCase):
    def test_include_count(self):
        # All four directive spellings (leading spaces, space after '#',
        # tabs) must be counted as #include lines.
        f = StringIO.StringIO("""
# include <abc.h>
# include "def.h"
#include "ghi.h"
\t#\t include "jkl.h"
""")
        self.assertEqual(metrics.get_include_count(f),4)
# Allow running this module directly.
if __name__ == '__main__':
    unittest.main()
|
en
| 0.540426
|
#!/usr/bin/python Some simple tests for practracker metrics static void fun(directory_request_t *req, const char *resource) { time_t if_modified_since = 0; uint8_t or_diff_from[DIGEST256_LEN]; } static void fun(directory_request_t *req, const char *resource) { time_t if_modified_since = 0; uint8_t or_diff_from[DIGEST256_LEN]; } MOCK_IMPL(void, fun,( uint8_t dir_purpose, uint8_t router_purpose, const char *resource, int pds_flags, download_want_authority_t want_authority)) { const routerstatus_t *rs = NULL; const or_options_t *options = get_options(); } # All functions should have length 2 # include <abc.h> # include "def.h" #include "ghi.h" \t#\t include "jkl.h"
| 2.627759
| 3
|
test/unit/common/test_manager.py
|
OyTao/swift-learning
| 0
|
6626226
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from test.unit import temptree
import os
import sys
import resource
import signal
import errno
from collections import defaultdict
from time import sleep, time
from six.moves import reload_module
from swift.common import manager
from swift.common.exceptions import InvalidPidFileException
import eventlet
# Use the real threading module, not eventlet's green-patched version.
threading = eventlet.patcher.original('threading')

# Arbitrary non-zero signal number; tests only check that it is recorded.
DUMMY_SIG = 1
class MockOs(object):
    """Stand-in for the os module that records signals instead of sending them.

    Unknown attributes fall through to the real ``os`` module, so code
    under test can keep using e.g. ``os.path`` helpers unchanged.
    """

    # Sending this signal number raises EPERM, mimicking kill() against a
    # process owned by another user.
    RAISE_EPERM_SIG = 99

    def __init__(self, pids):
        self.running_pids = pids            # pids that kill() treats as alive
        self.pid_sigs = defaultdict(list)   # pid -> list of signals delivered
        self.closed_fds = []
        self.child_pid = 9999  # fork defaults to test parent process path
        self.execlp_called = False

    def kill(self, pid, sig):
        """Record sig for pid, raising EPERM/ESRCH like the real os.kill."""
        if sig == self.RAISE_EPERM_SIG:
            raise OSError(errno.EPERM, 'Operation not permitted')
        if pid not in self.running_pids:
            raise OSError(3, 'No such process')
        self.pid_sigs[pid].append(sig)

    def __getattr__(self, name):
        # __getattr__ is only invoked after normal attribute lookup has
        # already failed, so delegate straight to the real os module.
        # (The previous object.__getattr__(self, name) probe was dead code:
        # object defines no __getattr__, so that call always raised
        # AttributeError and fell through to this branch anyway.)
        return getattr(os, name)
def pop_stream(f):
    """Drain and return everything written to file-like object *f*,
    leaving it empty and rewound to the start."""
    f.flush()
    f.seek(0)
    contents = f.read()
    f.seek(0)
    f.truncate()
    return contents
class TestManagerModule(unittest.TestCase):
    """Tests for module-level helpers in swift.common.manager.

    Several tests monkey-patch manager.os / manager.resource / manager.time
    and restore the originals in finally blocks.
    """

    def test_servers(self):
        # ALL_SERVERS must be exactly the union of MAIN and REST servers.
        main_plus_rest = set(manager.MAIN_SERVERS + manager.REST_SERVERS)
        self.assertEqual(set(manager.ALL_SERVERS), main_plus_rest)
        # make sure there's no server listed in both
        self.assertEqual(len(main_plus_rest), len(manager.MAIN_SERVERS) +
                         len(manager.REST_SERVERS))

    def test_setup_env(self):
        class MockResource(object):
            # Records setrlimit() calls, or raises self.error when set.
            def __init__(self, error=None):
                self.error = error
                self.called_with_args = []

            def setrlimit(self, resource, limits):
                if self.error:
                    raise self.error
                self.called_with_args.append((resource, limits))

            def __getattr__(self, name):
                # I only over-ride portions of the resource module
                try:
                    return object.__getattr__(self, name)
                except AttributeError:
                    return getattr(resource, name)

        _orig_resource = manager.resource
        _orig_environ = os.environ
        try:
            manager.resource = MockResource()
            manager.os.environ = {}
            manager.setup_env()
            # setup_env() is expected to raise all three rlimits to their
            # configured maxima and point PYTHON_EGG_CACHE at /tmp.
            expected = [
                (resource.RLIMIT_NOFILE, (manager.MAX_DESCRIPTORS,
                                          manager.MAX_DESCRIPTORS)),
                (resource.RLIMIT_DATA, (manager.MAX_MEMORY,
                                        manager.MAX_MEMORY)),
                (resource.RLIMIT_NPROC, (manager.MAX_PROCS,
                                         manager.MAX_PROCS)),
            ]
            self.assertEqual(manager.resource.called_with_args, expected)
            self.assertTrue(
                manager.os.environ['PYTHON_EGG_CACHE'].startswith('/tmp'))
            # test error condition: ValueError from setrlimit is swallowed,
            # the egg cache is still configured
            manager.resource = MockResource(error=ValueError())
            manager.os.environ = {}
            manager.setup_env()
            self.assertEqual(manager.resource.called_with_args, [])
            self.assertTrue(
                manager.os.environ['PYTHON_EGG_CACHE'].startswith('/tmp'))
            # ...but an OSError propagates and the env is left untouched
            manager.resource = MockResource(error=OSError())
            manager.os.environ = {}
            self.assertRaises(OSError, manager.setup_env)
            self.assertEqual(manager.os.environ.get('PYTHON_EGG_CACHE'), None)
        finally:
            manager.resource = _orig_resource
            os.environ = _orig_environ

    def test_command_wrapper(self):
        @manager.command
        def myfunc(arg1):
            """test doc
            """
            return arg1

        # The decorator must preserve the docstring, coerce the return
        # value to an int exit code, and mark the command public.
        self.assertEqual(myfunc.__doc__.strip(), 'test doc')
        self.assertEqual(myfunc(1), 1)
        self.assertEqual(myfunc(0), 0)
        self.assertEqual(myfunc(True), 1)
        self.assertEqual(myfunc(False), 0)
        self.assertTrue(hasattr(myfunc, 'publicly_accessible'))
        self.assertTrue(myfunc.publicly_accessible)

    def test_watch_server_pids(self):
        class MockOs(object):
            WNOHANG = os.WNOHANG

            def __init__(self, pid_map=None):
                if pid_map is None:
                    pid_map = {}
                # Each pid maps to a generator of successive waitpid results;
                # exhausting it simulates the child being reaped (ECHILD).
                self.pid_map = {}
                for pid, v in pid_map.items():
                    self.pid_map[pid] = (x for x in v)

            def waitpid(self, pid, options):
                try:
                    rv = next(self.pid_map[pid])
                except StopIteration:
                    raise OSError(errno.ECHILD, os.strerror(errno.ECHILD))
                except KeyError:
                    raise OSError(errno.ESRCH, os.strerror(errno.ESRCH))
                if isinstance(rv, Exception):
                    raise rv
                else:
                    return rv

        class MockTime(object):
            # Advances by the next tick on every call (default 1s/call) so
            # watch_server_pids' interval loop terminates quickly.
            def __init__(self, ticks=None):
                self.tock = time()
                if not ticks:
                    ticks = []
                self.ticks = (t for t in ticks)

            def time(self):
                try:
                    self.tock += next(self.ticks)
                except StopIteration:
                    self.tock += 1
                return self.tock

            def sleep(*args):
                return

        class MockServer(object):
            # Reports its pids as running for `zombie` polls, then none.
            def __init__(self, pids, run_dir=manager.RUN_DIR, zombie=0):
                self.heartbeat = (pids for _ in range(zombie))

            def get_running_pids(self):
                try:
                    rv = next(self.heartbeat)
                    return rv
                except StopIteration:
                    return {}

        _orig_os = manager.os
        _orig_time = manager.time
        _orig_server = manager.Server
        try:
            manager.time = MockTime()
            manager.os = MockOs()
            # this server always says it's dead when you ask for running pids
            server = MockServer([1])
            # list of pids keyed on servers to watch
            server_pids = {
                server: [1],
            }
            # basic test, server dies
            gen = manager.watch_server_pids(server_pids)
            expected = [(server, 1)]
            self.assertEqual([x for x in gen], expected)
            # start long running server and short interval
            server = MockServer([1], zombie=15)
            server_pids = {
                server: [1],
            }
            gen = manager.watch_server_pids(server_pids)
            self.assertEqual([x for x in gen], [])
            # wait a little longer
            gen = manager.watch_server_pids(server_pids, interval=15)
            self.assertEqual([x for x in gen], [(server, 1)])
            # zombie process
            server = MockServer([1], zombie=200)
            server_pids = {
                server: [1],
            }
            # test weird os error
            manager.os = MockOs({1: [OSError()]})
            gen = manager.watch_server_pids(server_pids)
            self.assertRaises(OSError, lambda: [x for x in gen])
            # test multi-server
            server1 = MockServer([1, 10], zombie=200)
            server2 = MockServer([2, 20], zombie=8)
            server_pids = {
                server1: [1, 10],
                server2: [2, 20],
            }
            pid_map = {
                1: [None for _ in range(10)],
                2: [None for _ in range(8)],
                20: [None for _ in range(4)],
            }
            manager.os = MockOs(pid_map)
            gen = manager.watch_server_pids(server_pids,
                                            interval=manager.KILL_WAIT)
            # Only server2's pids die within the interval.
            expected = [
                (server2, 2),
                (server2, 20),
            ]
            self.assertEqual([x for x in gen], expected)
        finally:
            manager.os = _orig_os
            manager.time = _orig_time
            manager.Server = _orig_server

    def test_safe_kill(self):
        manager.os = MockOs([1, 2, 3, 4])
        # Fake /proc entries: pid 1 matches the expected proc name, pid 2
        # does not, pid 3 has no cmdline at all, pid 4 mismatches but gets
        # a real signal (SIGHUP) rather than the existence probe SIG_DFL.
        proc_files = (
            ('1/cmdline', 'same-procname'),
            ('2/cmdline', 'another-procname'),
            ('4/cmdline', 'another-procname'),
        )
        files, contents = zip(*proc_files)
        with temptree(files, contents) as t:
            manager.PROC_DIR = t
            manager.safe_kill(1, signal.SIG_DFL, 'same-procname')
            self.assertRaises(InvalidPidFileException, manager.safe_kill,
                              2, signal.SIG_DFL, 'same-procname')
            manager.safe_kill(3, signal.SIG_DFL, 'same-procname')
            manager.safe_kill(4, signal.SIGHUP, 'same-procname')

    def test_exc(self):
        self.assertTrue(issubclass(manager.UnknownCommandError, Exception))
class TestServer(unittest.TestCase):
def tearDown(self):
    # Re-import a fresh manager module to undo the monkey-patching of
    # SWIFT_DIR, RUN_DIR, manager.os etc. done by individual tests.
    reload_module(manager)
def join_swift_dir(self, path):
    """Return *path* anchored under the (possibly patched) manager.SWIFT_DIR."""
    return os.path.join(manager.SWIFT_DIR, path)
def join_run_dir(self, path):
    """Return *path* anchored under the (possibly patched) manager.RUN_DIR."""
    return os.path.join(manager.RUN_DIR, path)
def test_create_server(self):
    # A short alias ('proxy') expands to the canonical name and command.
    server = manager.Server('proxy')
    self.assertEqual(server.server, 'proxy-server')
    self.assertEqual(server.type, 'proxy')
    self.assertEqual(server.cmd, 'swift-proxy-server')
    # A fully-qualified name is kept; type is the part before the dash.
    server = manager.Server('object-replicator')
    self.assertEqual(server.server, 'object-replicator')
    self.assertEqual(server.type, 'object')
    self.assertEqual(server.cmd, 'swift-object-replicator')
def test_server_to_string(self):
    # str() yields the canonical server name, normalized to lower case.
    server = manager.Server('Proxy')
    self.assertEqual(str(server), 'proxy-server')
    server = manager.Server('object-replicator')
    self.assertEqual(str(server), 'object-replicator')
def test_server_repr(self):
    # repr() should embed both the class name and the server name.
    server = manager.Server('proxy')
    self.assertTrue(server.__class__.__name__ in repr(server))
    self.assertTrue(str(server) in repr(server))
def test_server_equality(self):
    # Differently-spelled names for the same server compare equal.
    server1 = manager.Server('Proxy')
    server2 = manager.Server('proxy-server')
    self.assertEqual(server1, server2)
    # it is NOT a string
    self.assertNotEqual(server1, 'proxy-server')
def test_get_pid_file_name(self):
    # A conf path under SWIFT_DIR maps to the matching pid path under RUN_DIR.
    server = manager.Server('proxy')
    conf_file = self.join_swift_dir('proxy-server.conf')
    pid_file = self.join_run_dir('proxy-server.pid')
    self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
    # Numbered configs keep their number in the pid file name.
    server = manager.Server('object-replicator')
    conf_file = self.join_swift_dir('object-server/1.conf')
    pid_file = self.join_run_dir('object-replicator/1.pid')
    self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
    # Nested per-server conf directories are mirrored in the pid path.
    server = manager.Server('container-auditor')
    conf_file = self.join_swift_dir(
        'container-server/1/container-auditor.conf')
    pid_file = self.join_run_dir(
        'container-auditor/1/container-auditor.pid')
    self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
def test_get_custom_pid_file_name(self):
    # A custom run_dir replaces the default RUN_DIR in every pid file path.
    random_run_dir = "/random/dir"
    get_random_run_dir = lambda x: os.path.join(random_run_dir, x)
    server = manager.Server('proxy', run_dir=random_run_dir)
    conf_file = self.join_swift_dir('proxy-server.conf')
    pid_file = get_random_run_dir('proxy-server.pid')
    self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
    server = manager.Server('object-replicator', run_dir=random_run_dir)
    conf_file = self.join_swift_dir('object-server/1.conf')
    pid_file = get_random_run_dir('object-replicator/1.pid')
    self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
    server = manager.Server('container-auditor', run_dir=random_run_dir)
    conf_file = self.join_swift_dir(
        'container-server/1/container-auditor.conf')
    pid_file = get_random_run_dir(
        'container-auditor/1/container-auditor.pid')
    self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
def test_get_conf_file_name(self):
    # The inverse mapping: a pid path under RUN_DIR back to its conf path.
    server = manager.Server('proxy')
    conf_file = self.join_swift_dir('proxy-server.conf')
    pid_file = self.join_run_dir('proxy-server.pid')
    self.assertEqual(conf_file, server.get_conf_file_name(pid_file))
    server = manager.Server('object-replicator')
    conf_file = self.join_swift_dir('object-server/1.conf')
    pid_file = self.join_run_dir('object-replicator/1.pid')
    self.assertEqual(conf_file, server.get_conf_file_name(pid_file))
    server = manager.Server('container-auditor')
    conf_file = self.join_swift_dir(
        'container-server/1/container-auditor.conf')
    pid_file = self.join_run_dir(
        'container-auditor/1/container-auditor.pid')
    self.assertEqual(conf_file, server.get_conf_file_name(pid_file))
    # Standalone servers use a flat <name>.conf / <name>.pid pair.
    server_name = manager.STANDALONE_SERVERS[0]
    server = manager.Server(server_name)
    conf_file = self.join_swift_dir(server_name + '.conf')
    pid_file = self.join_run_dir(server_name + '.pid')
    self.assertEqual(conf_file, server.get_conf_file_name(pid_file))
def test_conf_files(self):
    """Exercise Server.conf_files(): discovery, filtering, and warnings."""
    # test get single conf file
    conf_files = (
        'proxy-server.conf',
        'proxy-server.ini',
        'auth-server.conf',
    )
    with temptree(conf_files) as t:
        manager.SWIFT_DIR = t
        server = manager.Server('proxy')
        conf_files = server.conf_files()
        self.assertEqual(len(conf_files), 1)
        conf_file = conf_files[0]
        proxy_conf = self.join_swift_dir('proxy-server.conf')
        self.assertEqual(conf_file, proxy_conf)
    # test multi server conf files & grouping of server-type config
    conf_files = (
        'object-server1.conf',
        'object-server/2.conf',
        'object-server/object3.conf',
        'object-server/conf/server4.conf',
        'object-server.txt',   # non-.conf files must be ignored
        'proxy-server.conf',
    )
    with temptree(conf_files) as t:
        manager.SWIFT_DIR = t
        server = manager.Server('object-replicator')
        conf_files = server.conf_files()
        self.assertEqual(len(conf_files), 4)
        c1 = self.join_swift_dir('object-server1.conf')
        c2 = self.join_swift_dir('object-server/2.conf')
        c3 = self.join_swift_dir('object-server/object3.conf')
        c4 = self.join_swift_dir('object-server/conf/server4.conf')
        for c in [c1, c2, c3, c4]:
            self.assertTrue(c in conf_files)
        # test configs returned sorted
        sorted_confs = sorted([c1, c2, c3, c4])
        self.assertEqual(conf_files, sorted_confs)
    # test get single numbered conf
    conf_files = (
        'account-server/1.conf',
        'account-server/2.conf',
        'account-server/3.conf',
        'account-server/4.conf',
    )
    with temptree(conf_files) as t:
        manager.SWIFT_DIR = t
        server = manager.Server('account')
        conf_files = server.conf_files(number=2)
        self.assertEqual(len(conf_files), 1)
        conf_file = conf_files[0]
        self.assertEqual(conf_file,
                         self.join_swift_dir('account-server/2.conf'))
        # test missing config number
        conf_files = server.conf_files(number=5)
        self.assertFalse(conf_files)
    # test getting specific conf via the 'name.number' syntax
    conf_files = (
        'account-server/1.conf',
        'account-server/2.conf',
        'account-server/3.conf',
        'account-server/4.conf',
    )
    with temptree(conf_files) as t:
        manager.SWIFT_DIR = t
        server = manager.Server('account.2')
        conf_files = server.conf_files()
        self.assertEqual(len(conf_files), 1)
        conf_file = conf_files[0]
        self.assertEqual(conf_file,
                         self.join_swift_dir('account-server/2.conf'))
    # test verbose & quiet (warnings are written to stdout, captured here
    # by temporarily redirecting sys.stdout into a scratch file)
    conf_files = (
        'auth-server.ini',
        'container-server/1.conf',
    )
    with temptree(conf_files) as t:
        manager.SWIFT_DIR = t
        old_stdout = sys.stdout
        try:
            with open(os.path.join(t, 'output'), 'w+') as f:
                sys.stdout = f
                server = manager.Server('auth')
                # check warn "unable to locate"
                conf_files = server.conf_files()
                self.assertFalse(conf_files)
                self.assertTrue('unable to locate config for auth'
                                in pop_stream(f).lower())
                # check quiet will silence warning
                conf_files = server.conf_files(verbose=True, quiet=True)
                self.assertEqual(pop_stream(f), '')
                # check found config no warning
                server = manager.Server('container-auditor')
                conf_files = server.conf_files()
                self.assertEqual(pop_stream(f), '')
                # check missing config number warn "unable to locate"
                conf_files = server.conf_files(number=2)
                self.assertTrue(
                    'unable to locate config number 2 for ' +
                    'container-auditor' in pop_stream(f).lower())
                # check verbose lists configs
                conf_files = server.conf_files(number=2, verbose=True)
                c1 = self.join_swift_dir('container-server/1.conf')
                self.assertTrue(c1 in pop_stream(f))
        finally:
            sys.stdout = old_stdout
    # test standalone conf file
    server_name = manager.STANDALONE_SERVERS[0]
    conf_files = (server_name + '.conf',)
    with temptree(conf_files) as t:
        manager.SWIFT_DIR = t
        server = manager.Server(server_name)
        conf_files = server.conf_files()
        self.assertEqual(len(conf_files), 1)
        conf_file = conf_files[0]
        conf = self.join_swift_dir(server_name + '.conf')
        self.assertEqual(conf_file, conf)
def test_proxy_conf_dir(self):
    # A proxy-server.conf.d directory is returned as a single conf entry.
    conf_files = (
        'proxy-server.conf.d/00.conf',
        'proxy-server.conf.d/01.conf',
    )
    with temptree(conf_files) as t:
        manager.SWIFT_DIR = t
        server = manager.Server('proxy')
        conf_dirs = server.conf_files()
        self.assertEqual(len(conf_dirs), 1)
        conf_dir = conf_dirs[0]
        proxy_conf_dir = self.join_swift_dir('proxy-server.conf.d')
        self.assertEqual(proxy_conf_dir, conf_dir)
def test_named_conf_dir(self):
    conf_files = (
        'object-server/base.conf-template',
        'object-server/object-server.conf.d/00_base.conf',
        'object-server/object-server.conf.d/10_server.conf',
        'object-server/object-replication.conf.d/00_base.conf',
        'object-server/object-replication.conf.d/10_server.conf',
    )
    with temptree(conf_files) as t:
        manager.SWIFT_DIR = t
        # A named server ('object.replication') selects only its conf.d.
        server = manager.Server('object.replication')
        conf_dirs = server.conf_files()
        self.assertEqual(len(conf_dirs), 1)
        conf_dir = conf_dirs[0]
        replication_server_conf_dir = self.join_swift_dir(
            'object-server/object-replication.conf.d')
        self.assertEqual(replication_server_conf_dir, conf_dir)
        # and again with no named filter
        server = manager.Server('object')
        conf_dirs = server.conf_files()
        self.assertEqual(len(conf_dirs), 2)
        for named_conf in ('server', 'replication'):
            conf_dir = self.join_swift_dir(
                'object-server/object-%s.conf.d' % named_conf)
            self.assertTrue(conf_dir in conf_dirs)
def test_conf_dir(self):
    # Numbered *.conf.d directories are discovered like numbered *.conf
    # files; the stray .conf-base file must be ignored.
    conf_files = (
        'object-server/object-server.conf-base',
        'object-server/1.conf.d/base.conf',
        'object-server/1.conf.d/1.conf',
        'object-server/2.conf.d/base.conf',
        'object-server/2.conf.d/2.conf',
        'object-server/3.conf.d/base.conf',
        'object-server/3.conf.d/3.conf',
        'object-server/4.conf.d/base.conf',
        'object-server/4.conf.d/4.conf',
    )
    with temptree(conf_files) as t:
        manager.SWIFT_DIR = t
        server = manager.Server('object-replicator')
        conf_dirs = server.conf_files()
        self.assertEqual(len(conf_dirs), 4)
        c1 = self.join_swift_dir('object-server/1.conf.d')
        c2 = self.join_swift_dir('object-server/2.conf.d')
        c3 = self.join_swift_dir('object-server/3.conf.d')
        c4 = self.join_swift_dir('object-server/4.conf.d')
        for c in [c1, c2, c3, c4]:
            self.assertTrue(c in conf_dirs)
        # test configs returned sorted
        sorted_confs = sorted([c1, c2, c3, c4])
        self.assertEqual(conf_dirs, sorted_confs)
def test_named_conf_dir_pid_files(self):
    # pid_files() honors the same 'name' filtering as conf_files().
    conf_files = (
        'object-server/object-server.pid.d',
        'object-server/object-replication.pid.d',
    )
    with temptree(conf_files) as t:
        manager.RUN_DIR = t
        server = manager.Server('object.replication', run_dir=t)
        pid_files = server.pid_files()
        self.assertEqual(len(pid_files), 1)
        pid_file = pid_files[0]
        replication_server_pid = self.join_run_dir(
            'object-server/object-replication.pid.d')
        self.assertEqual(replication_server_pid, pid_file)
        # and again with no named filter
        server = manager.Server('object', run_dir=t)
        pid_files = server.pid_files()
        self.assertEqual(len(pid_files), 2)
        for named_pid in ('server', 'replication'):
            pid_file = self.join_run_dir(
                'object-server/object-%s.pid.d' % named_pid)
            self.assertTrue(pid_file in pid_files)
def test_iter_pid_files(self):
    """
    Server.iter_pid_files is kinda boring, test the
    Server.pid_files stuff here as well
    """
    pid_files = (
        ('proxy-server.pid', 1),
        ('auth-server.pid', 'blah'),        # deliberately non-numeric
        ('object-replicator/1.pid', 11),
        ('object-replicator/2.pid', 12),
    )
    files, contents = zip(*pid_files)
    with temptree(files, contents) as t:
        manager.RUN_DIR = t
        server = manager.Server('proxy', run_dir=t)
        # test get one file
        iterator = server.iter_pid_files()
        pid_file, pid = next(iterator)
        self.assertEqual(pid_file, self.join_run_dir('proxy-server.pid'))
        self.assertEqual(pid, 1)
        # ... and only one file
        self.assertRaises(StopIteration, next, iterator)
        # test invalid value in pid file
        server = manager.Server('auth', run_dir=t)
        pid_file, pid = next(server.iter_pid_files())
        self.assertIsNone(pid)
        # test object-server doesn't steal pids from object-replicator
        server = manager.Server('object', run_dir=t)
        self.assertRaises(StopIteration, next, server.iter_pid_files())
        # test multi-pid iter
        server = manager.Server('object-replicator', run_dir=t)
        real_map = {
            11: self.join_run_dir('object-replicator/1.pid'),
            12: self.join_run_dir('object-replicator/2.pid'),
        }
        pid_map = {}
        for pid_file, pid in server.iter_pid_files():
            pid_map[pid] = pid_file
        self.assertEqual(pid_map, real_map)
    # test get pid_files by number
    conf_files = (
        'object-server/1.conf',
        'object-server/2.conf',
        'object-server/3.conf',
        'object-server/4.conf',
    )
    pid_files = (
        ('object-server/1.pid', 1),
        ('object-server/2.pid', 2),
        ('object-server/5.pid', 5),     # pid with no matching conf
    )
    with temptree(conf_files) as swift_dir:
        manager.SWIFT_DIR = swift_dir
        files, pids = zip(*pid_files)
        with temptree(files, pids) as t:
            manager.RUN_DIR = t
            server = manager.Server('object', run_dir=t)
            # test get all pid files
            real_map = {
                1: self.join_run_dir('object-server/1.pid'),
                2: self.join_run_dir('object-server/2.pid'),
                5: self.join_run_dir('object-server/5.pid'),
            }
            pid_map = {}
            for pid_file, pid in server.iter_pid_files():
                pid_map[pid] = pid_file
            self.assertEqual(pid_map, real_map)
            # test get pid with matching conf
            pids = list(server.iter_pid_files(number=2))
            self.assertEqual(len(pids), 1)
            pid_file, pid = pids[0]
            self.assertEqual(pid, 2)
            pid_two = self.join_run_dir('object-server/2.pid')
            self.assertEqual(pid_file, pid_two)
            # try to iter on a pid number with a matching conf but no pid
            pids = list(server.iter_pid_files(number=3))
            self.assertFalse(pids)
            # test get pids w/o matching conf
            pids = list(server.iter_pid_files(number=5))
            self.assertFalse(pids)
    # test get pid_files by conf name
    conf_files = (
        'object-server/1.conf',
        'object-server/2.conf',
        'object-server/3.conf',
        'object-server/4.conf',
    )
    pid_files = (
        ('object-server/1.pid', 1),
        ('object-server/2.pid', 2),
        ('object-server/5.pid', 5),
    )
    with temptree(conf_files) as swift_dir:
        manager.SWIFT_DIR = swift_dir
        files, pids = zip(*pid_files)
        with temptree(files, pids) as t:
            manager.RUN_DIR = t
            server = manager.Server('object.2', run_dir=t)
            # test get pid with matching conf
            pids = list(server.iter_pid_files())
            self.assertEqual(len(pids), 1)
            pid_file, pid = pids[0]
            self.assertEqual(pid, 2)
            pid_two = self.join_run_dir('object-server/2.pid')
            self.assertEqual(pid_file, pid_two)
    def test_signal_pids(self):
        """Server.signal_pids() signals only this server's running pids,
        prints details for real signals (silent for SIG_DFL), and removes
        stale or invalid pid files, warning when verbose=True.
        """
        # pid value 0, a non-running pid, and a non-integer pid exercise the
        # stale/invalid pid-file cleanup paths below
        temp_files = (
            ('var/run/zero-server.pid', 0),
            ('var/run/proxy-server.pid', 1),
            ('var/run/auth-server.pid', 2),
            ('var/run/one-server.pid', 3),
            ('var/run/object-server.pid', 4),
            ('var/run/invalid-server.pid', 'Forty-Two'),
            ('proc/3/cmdline', 'swift-another-server')
        )
        with temptree(*zip(*temp_files)) as t:
            manager.RUN_DIR = os.path.join(t, 'var/run')
            manager.PROC_DIR = os.path.join(t, 'proc')
            # mock os so both the first and second pids are running
            manager.os = MockOs([1, 2])
            server = manager.Server('proxy', run_dir=manager.RUN_DIR)
            pids = server.signal_pids(DUMMY_SIG)
            self.assertEqual(len(pids), 1)
            self.assertTrue(1 in pids)
            self.assertEqual(manager.os.pid_sigs[1], [DUMMY_SIG])
            # make sure other server's process was not signaled
            self.assertFalse(2 in pids)
            self.assertFalse(2 in manager.os.pid_sigs)
            # capture stdio so the informational prints can be asserted on
            old_stdout = sys.stdout
            try:
                with open(os.path.join(t, 'output'), 'w+') as f:
                    sys.stdout = f
                    # test print details
                    pids = server.signal_pids(DUMMY_SIG)
                    output = pop_stream(f)
                    self.assertTrue('pid: %s' % 1 in output)
                    self.assertTrue('signal: %s' % DUMMY_SIG in output)
                    # test no details printed for signal.SIG_DFL
                    pids = server.signal_pids(signal.SIG_DFL)
                    self.assertEqual(pop_stream(f), '')
                    # reset mock os so only the second server is running
                    manager.os = MockOs([2])
                    # test pid not running
                    pids = server.signal_pids(signal.SIG_DFL)
                    self.assertNotIn(1, pids)
                    self.assertNotIn(1, manager.os.pid_sigs)
                    # the stale pid file must have been removed
                    self.assertFalse(os.path.exists(
                        self.join_run_dir('proxy-server.pid')))
                    # reset mock os with no running pids
                    manager.os = MockOs([])
                    server = manager.Server('auth', run_dir=manager.RUN_DIR)
                    # test verbose warns on removing stale pid file
                    pids = server.signal_pids(signal.SIG_DFL, verbose=True)
                    output = pop_stream(f)
                    self.assertTrue('stale pid' in output.lower())
                    auth_pid = self.join_run_dir('auth-server.pid')
                    self.assertTrue(auth_pid in output)
                    # reset mock os so only the third pid is running, but its
                    # /proc cmdline belongs to a different server
                    manager.os = MockOs([3])
                    server = manager.Server('one', run_dir=manager.RUN_DIR)
                    # test verbose warns on removing invalid pid file
                    pids = server.signal_pids(signal.SIG_DFL, verbose=True)
                    output = pop_stream(f)
                    old_stdout.write('output %s' % output)
                    self.assertTrue('removing pid file' in output.lower())
                    one_pid = self.join_run_dir('one-server.pid')
                    self.assertTrue(one_pid in output)
                    server = manager.Server('zero', run_dir=manager.RUN_DIR)
                    self.assertTrue(os.path.exists(
                        self.join_run_dir('zero-server.pid')))  # sanity
                    # test verbose warns on removing pid file with pid 0
                    pids = server.signal_pids(signal.SIG_DFL, verbose=True)
                    output = pop_stream(f)
                    old_stdout.write('output %s' % output)
                    self.assertTrue('with invalid pid' in output.lower())
                    self.assertFalse(os.path.exists(
                        self.join_run_dir('zero-server.pid')))
                    server = manager.Server('invalid-server',
                                            run_dir=manager.RUN_DIR)
                    self.assertTrue(os.path.exists(
                        self.join_run_dir('invalid-server.pid')))  # sanity
                    # test verbose warns on removing pid file with a
                    # non-integer pid value
                    pids = server.signal_pids(signal.SIG_DFL, verbose=True)
                    output = pop_stream(f)
                    old_stdout.write('output %s' % output)
                    self.assertTrue('with invalid pid' in output.lower())
                    self.assertFalse(os.path.exists(
                        self.join_run_dir('invalid-server.pid')))
                    # reset mock os with no running pids
                    manager.os = MockOs([])
                    # test warning with insufficient permissions (EPERM)
                    server = manager.Server('object', run_dir=manager.RUN_DIR)
                    pids = server.signal_pids(manager.os.RAISE_EPERM_SIG)
                    output = pop_stream(f)
                    self.assertTrue('no permission to signal pid 4' in
                                    output.lower(), output)
            finally:
                sys.stdout = old_stdout
def test_get_running_pids(self):
# test only gets running pids
temp_files = (
('var/run/test-server1.pid', 1),
('var/run/test-server2.pid', 2),
('var/run/test-server3.pid', 3),
('proc/1/cmdline', 'swift-test-server'),
('proc/3/cmdline', 'swift-another-server')
)
with temptree(*zip(*temp_files)) as t:
manager.RUN_DIR = os.path.join(t, 'var/run')
manager.PROC_DIR = os.path.join(t, 'proc')
server = manager.Server(
'test-server', run_dir=manager.RUN_DIR)
# mock os, only pid '1' is running
manager.os = MockOs([1, 3])
running_pids = server.get_running_pids()
self.assertEqual(len(running_pids), 1)
self.assertTrue(1 in running_pids)
self.assertNotIn(2, running_pids)
self.assertNotIn(3, running_pids)
# test persistent running pid files
self.assertTrue(os.path.exists(
os.path.join(manager.RUN_DIR, 'test-server1.pid')))
# test clean up stale pids
pid_two = self.join_swift_dir('test-server2.pid')
self.assertFalse(os.path.exists(pid_two))
pid_three = self.join_swift_dir('test-server3.pid')
self.assertFalse(os.path.exists(pid_three))
# reset mock os, no pids running
manager.os = MockOs([])
running_pids = server.get_running_pids()
self.assertFalse(running_pids)
# and now all pid files are cleaned out
pid_one = self.join_run_dir('test-server1.pid')
self.assertFalse(os.path.exists(pid_one))
all_pids = os.listdir(manager.RUN_DIR)
self.assertEqual(len(all_pids), 0)
# test only get pids for right server
pid_files = (
('thing-doer.pid', 1),
('thing-sayer.pid', 2),
('other-doer.pid', 3),
('other-sayer.pid', 4),
)
files, pids = zip(*pid_files)
with temptree(files, pids) as t:
manager.RUN_DIR = t
# all pids are running
manager.os = MockOs(pids)
server = manager.Server('thing-doer', run_dir=t)
running_pids = server.get_running_pids()
# only thing-doer.pid, 1
self.assertEqual(len(running_pids), 1)
self.assertTrue(1 in running_pids)
# no other pids returned
for n in (2, 3, 4):
self.assertNotIn(n, running_pids)
# assert stale pids for other servers ignored
manager.os = MockOs([1]) # only thing-doer is running
running_pids = server.get_running_pids()
for f in ('thing-sayer.pid', 'other-doer.pid', 'other-sayer.pid'):
# other server pid files persist
self.assertTrue(os.path.exists, os.path.join(t, f))
# verify that servers are in fact not running
for server_name in ('thing-sayer', 'other-doer', 'other-sayer'):
server = manager.Server(server_name, run_dir=t)
running_pids = server.get_running_pids()
self.assertFalse(running_pids)
# and now all OTHER pid files are cleaned out
all_pids = os.listdir(t)
self.assertEqual(len(all_pids), 1)
self.assertTrue(os.path.exists(os.path.join(t, 'thing-doer.pid')))
    def test_kill_running_pids(self):
        """Server.kill_running_pids() SIGTERMs this server's running pids;
        graceful=True sends SIGHUP instead, but only for servers listed in
        GRACEFUL_SHUTDOWN_SERVERS.
        """
        pid_files = (
            ('object-server.pid', 1),
            ('object-replicator1.pid', 11),
            ('object-replicator2.pid', 12),
        )
        files, running_pids = zip(*pid_files)
        with temptree(files, running_pids) as t:
            manager.RUN_DIR = t
            server = manager.Server('object', run_dir=t)
            # test no servers running -> nothing signaled
            manager.os = MockOs([])
            pids = server.kill_running_pids()
            self.assertFalse(pids, pids)
        # fresh tree: the first pass removed the stale pid files
        files, running_pids = zip(*pid_files)
        with temptree(files, running_pids) as t:
            manager.RUN_DIR = t
            server.run_dir = t
            # start up pid
            manager.os = MockOs([1])
            server = manager.Server('object', run_dir=t)
            # test kill one pid
            pids = server.kill_running_pids()
            self.assertEqual(len(pids), 1)
            self.assertTrue(1 in pids)
            self.assertEqual(manager.os.pid_sigs[1], [signal.SIGTERM])
            # reset os mock
            manager.os = MockOs([1])
            # test graceful shutdown sends SIGHUP for supported servers
            self.assertTrue('object-server' in
                            manager.GRACEFUL_SHUTDOWN_SERVERS)
            pids = server.kill_running_pids(graceful=True)
            self.assertEqual(len(pids), 1)
            self.assertTrue(1 in pids)
            self.assertEqual(manager.os.pid_sigs[1], [signal.SIGHUP])
            # start up other servers
            manager.os = MockOs([11, 12])
            # test multi server kill & ignore graceful on unsupported server
            self.assertFalse('object-replicator' in
                             manager.GRACEFUL_SHUTDOWN_SERVERS)
            server = manager.Server('object-replicator', run_dir=t)
            pids = server.kill_running_pids(graceful=True)
            self.assertEqual(len(pids), 2)
            for pid in (11, 12):
                self.assertTrue(pid in pids)
                # graceful is ignored: plain SIGTERM, not SIGHUP
                self.assertEqual(manager.os.pid_sigs[pid],
                                 [signal.SIGTERM])
            # and the other server's pid is of course not signaled
            self.assertNotIn(1, manager.os.pid_sigs)
    def test_status(self):
        """Server.status() prints one 'running' line per live instance and
        returns 0; any expected instance that is down yields status 1.
        A pids= mapping bypasses get_running_pids() entirely.
        """
        conf_files = (
            'test-server/1.conf',
            'test-server/2.conf',
            'test-server/3.conf',
            'test-server/4.conf',
        )
        pid_files = (
            ('test-server/1.pid', 1),
            ('test-server/2.pid', 2),
            ('test-server/3.pid', 3),
            ('test-server/4.pid', 4),
        )
        with temptree(conf_files) as swift_dir:
            manager.SWIFT_DIR = swift_dir
            files, pids = zip(*pid_files)
            with temptree(files, pids) as t:
                manager.RUN_DIR = t
                # setup running servers
                server = manager.Server('test', run_dir=t)
                # capture stdio so the printed status lines can be asserted
                old_stdout = sys.stdout
                try:
                    with open(os.path.join(t, 'output'), 'w+') as f:
                        sys.stdout = f
                        # test status for all running
                        manager.os = MockOs(pids)
                        proc_files = (
                            ('1/cmdline', 'swift-test-server'),
                            ('2/cmdline', 'swift-test-server'),
                            ('3/cmdline', 'swift-test-server'),
                            ('4/cmdline', 'swift-test-server'),
                        )
                        files, contents = zip(*proc_files)
                        with temptree(files, contents) as t:
                            manager.PROC_DIR = t
                            self.assertEqual(server.status(), 0)
                            output = pop_stream(f).strip().splitlines()
                            self.assertEqual(len(output), 4)
                            for line in output:
                                self.assertTrue('test-server running' in line)
                        # test get single server by number
                        with temptree([], []) as t:
                            manager.PROC_DIR = t
                            self.assertEqual(server.status(number=4), 0)
                            output = pop_stream(f).strip().splitlines()
                            self.assertEqual(len(output), 1)
                            line = output[0]
                            self.assertTrue('test-server running' in line)
                            conf_four = self.join_swift_dir(conf_files[3])
                            self.assertTrue('4 - %s' % conf_four in line)
                        # test some servers not running
                        manager.os = MockOs([1, 2, 3])
                        proc_files = (
                            ('1/cmdline', 'swift-test-server'),
                            ('2/cmdline', 'swift-test-server'),
                            ('3/cmdline', 'swift-test-server'),
                        )
                        files, contents = zip(*proc_files)
                        with temptree(files, contents) as t:
                            manager.PROC_DIR = t
                            self.assertEqual(server.status(), 0)
                            output = pop_stream(f).strip().splitlines()
                            self.assertEqual(len(output), 3)
                            for line in output:
                                self.assertTrue('test-server running' in line)
                        # test single server not running
                        manager.os = MockOs([1, 2])
                        proc_files = (
                            ('1/cmdline', 'swift-test-server'),
                            ('2/cmdline', 'swift-test-server'),
                        )
                        files, contents = zip(*proc_files)
                        with temptree(files, contents) as t:
                            manager.PROC_DIR = t
                            self.assertEqual(server.status(number=3), 1)
                            output = pop_stream(f).strip().splitlines()
                            self.assertEqual(len(output), 1)
                            line = output[0]
                            self.assertTrue('not running' in line)
                            conf_three = self.join_swift_dir(conf_files[2])
                            self.assertTrue(conf_three in line)
                        # test no running pids
                        manager.os = MockOs([])
                        with temptree([], []) as t:
                            manager.PROC_DIR = t
                            self.assertEqual(server.status(), 1)
                            output = pop_stream(f).lower()
                            self.assertTrue('no test-server running' in output)
                        # test use provided pids
                        pids = {
                            1: '1.pid',
                            2: '2.pid',
                        }
                        # shouldn't call get_running_pids when pids= is given
                        called = []

                        def mock(*args, **kwargs):
                            called.append(True)
                        server.get_running_pids = mock
                        status = server.status(pids=pids)
                        self.assertEqual(status, 0)
                        self.assertFalse(called)
                        output = pop_stream(f).strip().splitlines()
                        self.assertEqual(len(output), 2)
                        for line in output:
                            self.assertTrue('test-server running' in line)
                finally:
                    sys.stdout = old_stdout
def test_spawn(self):
# mocks
class MockProcess(object):
NOTHING = 'default besides None'
STDOUT = 'stdout'
PIPE = 'pipe'
def __init__(self, pids=None):
if pids is None:
pids = []
self.pids = (p for p in pids)
def Popen(self, args, **kwargs):
return MockProc(next(self.pids), args, **kwargs)
class MockProc(object):
def __init__(self, pid, args, stdout=MockProcess.NOTHING,
stderr=MockProcess.NOTHING):
self.pid = pid
self.args = args
self.stdout = stdout
if stderr == MockProcess.STDOUT:
self.stderr = self.stdout
else:
self.stderr = stderr
# setup running servers
server = manager.Server('test')
with temptree(['test-server.conf']) as swift_dir:
manager.SWIFT_DIR = swift_dir
with temptree([]) as t:
manager.RUN_DIR = t
server.run_dir = t
old_subprocess = manager.subprocess
try:
# test single server process calls spawn once
manager.subprocess = MockProcess([1])
conf_file = self.join_swift_dir('test-server.conf')
# spawn server no kwargs
server.spawn(conf_file)
# test pid file
pid_file = self.join_run_dir('test-server.pid')
self.assertTrue(os.path.exists(pid_file))
pid_on_disk = int(open(pid_file).read().strip())
self.assertEqual(pid_on_disk, 1)
# assert procs args
self.assertTrue(server.procs)
self.assertEqual(len(server.procs), 1)
proc = server.procs[0]
expected_args = [
'swift-test-server',
conf_file,
]
self.assertEqual(proc.args, expected_args)
# assert stdout is piped
self.assertEqual(proc.stdout, MockProcess.PIPE)
self.assertEqual(proc.stderr, proc.stdout)
# test multi server process calls spawn multiple times
manager.subprocess = MockProcess([11, 12, 13, 14])
conf1 = self.join_swift_dir('test-server/1.conf')
conf2 = self.join_swift_dir('test-server/2.conf')
conf3 = self.join_swift_dir('test-server/3.conf')
conf4 = self.join_swift_dir('test-server/4.conf')
server = manager.Server('test', run_dir=t)
# test server run once
server.spawn(conf1, once=True)
self.assertTrue(server.procs)
self.assertEqual(len(server.procs), 1)
proc = server.procs[0]
expected_args = ['swift-test-server', conf1, 'once']
# assert stdout is piped
self.assertEqual(proc.stdout, MockProcess.PIPE)
self.assertEqual(proc.stderr, proc.stdout)
# test server not daemon
server.spawn(conf2, daemon=False)
self.assertTrue(server.procs)
self.assertEqual(len(server.procs), 2)
proc = server.procs[1]
expected_args = ['swift-test-server', conf2, 'verbose']
self.assertEqual(proc.args, expected_args)
# assert stdout is not changed
self.assertEqual(proc.stdout, None)
self.assertEqual(proc.stderr, None)
# test server wait
server.spawn(conf3, wait=False)
self.assertTrue(server.procs)
self.assertEqual(len(server.procs), 3)
proc = server.procs[2]
# assert stdout is /dev/null
with open('/dev/null', 'wb+') as fp:
self.assertTrue(isinstance(proc.stdout, type(fp)))
self.assertEqual(proc.stdout.name, os.devnull)
self.assertIn('b', proc.stdout.mode)
self.assertTrue(any(x in proc.stdout.mode for x in 'aw+'),
'mode must be writable, not %r' %
proc.stdout.mode)
self.assertEqual(proc.stderr, proc.stdout)
# test not daemon over-rides wait
server.spawn(conf4, wait=False, daemon=False, once=True)
self.assertTrue(server.procs)
self.assertEqual(len(server.procs), 4)
proc = server.procs[3]
expected_args = ['swift-test-server', conf4, 'once',
'verbose']
self.assertEqual(proc.args, expected_args)
# daemon behavior should trump wait, once shouldn't matter
self.assertEqual(proc.stdout, None)
self.assertEqual(proc.stderr, None)
# assert pids
for i, proc in enumerate(server.procs):
pid_file = self.join_run_dir('test-server/%d.pid' %
(i + 1))
pid_on_disk = int(open(pid_file).read().strip())
self.assertEqual(pid_on_disk, proc.pid)
finally:
manager.subprocess = old_subprocess
def test_wait(self):
server = manager.Server('test')
self.assertEqual(server.wait(), 0)
class MockProcess(threading.Thread):
def __init__(self, delay=0.1, fail_to_start=False):
threading.Thread.__init__(self)
# setup pipe
rfd, wfd = os.pipe()
# subprocess connection to read stdout
self.stdout = os.fdopen(rfd)
# real process connection to write stdout
self._stdout = os.fdopen(wfd, 'w')
self.delay = delay
self.finished = False
self.returncode = None
if fail_to_start:
self._returncode = 1
self.run = self.fail
else:
self._returncode = 0
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
if self.isAlive():
self.join()
def close_stdout(self):
self._stdout.flush()
with open(os.devnull, 'wb') as nullfile:
try:
os.dup2(nullfile.fileno(), self._stdout.fileno())
except OSError:
pass
def fail(self):
print('mock process started', file=self._stdout)
sleep(self.delay) # perform setup processing
print('mock process failed to start', file=self._stdout)
self.close_stdout()
def poll(self):
self.returncode = self._returncode
return self.returncode or None
def run(self):
print('mock process started', file=self._stdout)
sleep(self.delay) # perform setup processing
print('setup complete!', file=self._stdout)
self.close_stdout()
sleep(self.delay) # do some more processing
print('mock process finished', file=self._stdout)
self.finished = True
class MockTime(object):
def time(self):
return time()
def sleep(self, *args, **kwargs):
pass
with temptree([]) as t:
old_stdout = sys.stdout
old_wait = manager.WARNING_WAIT
old_time = manager.time
try:
manager.WARNING_WAIT = 0.01
manager.time = MockTime()
with open(os.path.join(t, 'output'), 'w+') as f:
# actually capture the read stdout (for prints)
sys.stdout = f
# test closing pipe in subprocess unblocks read
with MockProcess() as proc:
server.procs = [proc]
status = server.wait()
self.assertEqual(status, 0)
# wait should return before process exits
self.assertTrue(proc.isAlive())
self.assertFalse(proc.finished)
self.assertTrue(proc.finished) # make sure it did finish
# test output kwarg prints subprocess output
with MockProcess() as proc:
server.procs = [proc]
status = server.wait(output=True)
output = pop_stream(f)
self.assertTrue('mock process started' in output)
self.assertTrue('setup complete' in output)
# make sure we don't get prints after stdout was closed
self.assertNotIn('mock process finished', output)
# test process which fails to start
with MockProcess(fail_to_start=True) as proc:
server.procs = [proc]
status = server.wait()
self.assertEqual(status, 1)
self.assertTrue('failed' in pop_stream(f))
# test multiple procs
procs = [MockProcess(delay=.5) for i in range(3)]
for proc in procs:
proc.start()
server.procs = procs
status = server.wait()
self.assertEqual(status, 0)
for proc in procs:
self.assertTrue(proc.isAlive())
for proc in procs:
proc.join()
finally:
sys.stdout = old_stdout
manager.WARNING_WAIT = old_wait
manager.time = old_time
def test_interact(self):
class MockProcess(object):
def __init__(self, fail=False):
self.returncode = None
if fail:
self._returncode = 1
else:
self._returncode = 0
def communicate(self):
self.returncode = self._returncode
return '', ''
server = manager.Server('test')
server.procs = [MockProcess()]
self.assertEqual(server.interact(), 0)
server.procs = [MockProcess(fail=True)]
self.assertEqual(server.interact(), 1)
procs = []
for fail in (False, True, True):
procs.append(MockProcess(fail=fail))
server.procs = procs
self.assertTrue(server.interact() > 0)
    def test_launch(self):
        """Server.launch() refuses to start without conf files or when the
        server is already running, spawns one process per conf file
        (honoring once=/number=), and reports a missing executable.
        """
        # stubs
        conf_files = (
            'proxy-server.conf',
            'auth-server.conf',
            'object-server/1.conf',
            'object-server/2.conf',
            'object-server/3.conf',
            'object-server/4.conf',
        )
        pid_files = (
            ('proxy-server.pid', 1),
            ('proxy-server/2.pid', 2),
        )

        # mocks
        class MockSpawn(object):
            """Replaces Server.spawn; records conf files and kwargs, and
            hands out scripted pids (raising any Exception instances)."""

            def __init__(self, pids=None):
                self.conf_files = []
                self.kwargs = []
                if not pids:
                    # default: always "spawn" pid 1
                    def one_forever():
                        while True:
                            yield 1
                    self.pids = one_forever()
                else:
                    self.pids = (x for x in pids)

            def __call__(self, conf_file, **kwargs):
                self.conf_files.append(conf_file)
                self.kwargs.append(kwargs)
                rv = next(self.pids)
                if isinstance(rv, Exception):
                    raise rv
                else:
                    return rv

        with temptree(conf_files) as swift_dir:
            manager.SWIFT_DIR = swift_dir
            files, pids = zip(*pid_files)
            with temptree(files, pids) as t:
                manager.RUN_DIR = t
                old_stdout = sys.stdout
                try:
                    with open(os.path.join(t, 'output'), 'w+') as f:
                        sys.stdout = f
                        # can't start server w/o a conf
                        server = manager.Server('test', run_dir=t)
                        self.assertFalse(server.launch())
                        # start mock os running all pids
                        manager.os = MockOs(pids)
                        proc_files = (
                            ('1/cmdline', 'swift-proxy-server'),
                            ('2/cmdline', 'swift-proxy-server'),
                        )
                        files, contents = zip(*proc_files)
                        with temptree(files, contents) as proc_dir:
                            manager.PROC_DIR = proc_dir
                            server = manager.Server('proxy', run_dir=t)
                            # can't start server if it's already running
                            self.assertFalse(server.launch())
                            output = pop_stream(f)
                            self.assertTrue('running' in output)
                            conf_file = self.join_swift_dir(
                                'proxy-server.conf')
                            self.assertTrue(conf_file in output)
                            pid_file = self.join_run_dir('proxy-server/2.pid')
                            self.assertTrue(pid_file in output)
                            self.assertTrue('already started' in output)
                        # no running pids
                        manager.os = MockOs([])
                        with temptree([], []) as proc_dir:
                            manager.PROC_DIR = proc_dir
                            # test ignore once for non-start-once server
                            mock_spawn = MockSpawn([1])
                            server.spawn = mock_spawn
                            conf_file = self.join_swift_dir(
                                'proxy-server.conf')
                            expected = {
                                1: conf_file,
                            }
                            self.assertEqual(server.launch(once=True),
                                             expected)
                            self.assertEqual(mock_spawn.conf_files,
                                             [conf_file])
                            # once is forced off for servers that don't
                            # support run-once
                            expected = {
                                'once': False,
                            }
                            self.assertEqual(mock_spawn.kwargs, [expected])
                            output = pop_stream(f)
                            self.assertTrue('Starting' in output)
                            self.assertNotIn('once', output)
                        # test multi-server kwarg once
                        server = manager.Server('object-replicator')
                        with temptree([], []) as proc_dir:
                            manager.PROC_DIR = proc_dir
                            mock_spawn = MockSpawn([1, 2, 3, 4])
                            server.spawn = mock_spawn
                            conf1 = self.join_swift_dir('object-server/1.conf')
                            conf2 = self.join_swift_dir('object-server/2.conf')
                            conf3 = self.join_swift_dir('object-server/3.conf')
                            conf4 = self.join_swift_dir('object-server/4.conf')
                            expected = {
                                1: conf1,
                                2: conf2,
                                3: conf3,
                                4: conf4,
                            }
                            self.assertEqual(server.launch(once=True),
                                             expected)
                            self.assertEqual(mock_spawn.conf_files, [
                                conf1, conf2, conf3, conf4])
                            expected = {
                                'once': True,
                            }
                            self.assertEqual(len(mock_spawn.kwargs), 4)
                            for kwargs in mock_spawn.kwargs:
                                self.assertEqual(kwargs, expected)
                            # test number kwarg launches only that instance
                            mock_spawn = MockSpawn([4])
                            manager.PROC_DIR = proc_dir
                            server.spawn = mock_spawn
                            expected = {
                                4: conf4,
                            }
                            self.assertEqual(server.launch(number=4),
                                             expected)
                            self.assertEqual(mock_spawn.conf_files, [conf4])
                            expected = {
                                'number': 4
                            }
                            self.assertEqual(mock_spawn.kwargs, [expected])
                        # test cmd does not exist (spawn raises ENOENT)
                        server = manager.Server('auth')
                        with temptree([], []) as proc_dir:
                            manager.PROC_DIR = proc_dir
                            mock_spawn = MockSpawn([OSError(errno.ENOENT,
                                                            'blah')])
                            server.spawn = mock_spawn
                            self.assertEqual(server.launch(), {})
                            self.assertTrue(
                                'swift-auth-server does not exist' in
                                pop_stream(f))
                finally:
                    sys.stdout = old_stdout
    def test_stop(self):
        """Server.stop() SIGTERMs every running instance of the server and
        returns the signaled pids; number= restricts to one instance.
        """
        # Server('account-reaper') looks up conf files under the
        # 'account-server' type directory
        conf_files = (
            'account-server/1.conf',
            'account-server/2.conf',
            'account-server/3.conf',
            'account-server/4.conf',
        )
        pid_files = (
            ('account-reaper/1.pid', 1),
            ('account-reaper/2.pid', 2),
            ('account-reaper/3.pid', 3),
            ('account-reaper/4.pid', 4),
        )
        with temptree(conf_files) as swift_dir:
            manager.SWIFT_DIR = swift_dir
            files, pids = zip(*pid_files)
            with temptree(files, pids) as t:
                manager.RUN_DIR = t
                # start all pids in mock os
                manager.os = MockOs(pids)
                server = manager.Server('account-reaper', run_dir=t)
                # test kill all running pids
                pids = server.stop()
                self.assertEqual(len(pids), 4)
                for pid in (1, 2, 3, 4):
                    self.assertTrue(pid in pids)
                    self.assertEqual(manager.os.pid_sigs[pid],
                                     [signal.SIGTERM])
                # NOTE(review): these paths point into SWIFT_DIR at an
                # 'account-reaper' directory that is never created (the conf
                # tree above is 'account-server'), so the os.path.exists
                # assertions below are vacuously true -- they look like they
                # were meant to check stale pid files under RUN_DIR; confirm
                conf1 = self.join_swift_dir('account-reaper/1.conf')
                conf2 = self.join_swift_dir('account-reaper/2.conf')
                conf3 = self.join_swift_dir('account-reaper/3.conf')
                conf4 = self.join_swift_dir('account-reaper/4.conf')
                # reset mock os with only 2 running pids
                manager.os = MockOs([3, 4])
                pids = server.stop()
                self.assertEqual(len(pids), 2)
                for pid in (3, 4):
                    self.assertTrue(pid in pids)
                    self.assertEqual(manager.os.pid_sigs[pid],
                                     [signal.SIGTERM])
                self.assertFalse(os.path.exists(conf1))
                self.assertFalse(os.path.exists(conf2))
                # test number kwarg
                manager.os = MockOs([3, 4])
                pids = server.stop(number=3)
                self.assertEqual(len(pids), 1)
                expected = {
                    3: conf3,
                }
                # NOTE(review): assertTrue(pids, expected) only checks that
                # pids is truthy and uses `expected` as the failure message;
                # probably assertEqual was intended -- confirm stop()'s
                # return mapping before tightening
                self.assertTrue(pids, expected)
                self.assertEqual(manager.os.pid_sigs[3], [signal.SIGTERM])
                self.assertFalse(os.path.exists(conf4))
                self.assertFalse(os.path.exists(conf3))
class TestManager(unittest.TestCase):
def test_create(self):
m = manager.Manager(['test'])
self.assertEqual(len(m.servers), 1)
server = m.servers.pop()
self.assertTrue(isinstance(server, manager.Server))
self.assertEqual(server.server, 'test-server')
# test multi-server and simple dedupe
servers = ['object-replicator', 'object-auditor', 'object-replicator']
m = manager.Manager(servers)
self.assertEqual(len(m.servers), 2)
for server in m.servers:
self.assertTrue(server.server in servers)
# test all
m = manager.Manager(['all'])
self.assertEqual(len(m.servers), len(manager.ALL_SERVERS))
for server in m.servers:
self.assertTrue(server.server in manager.ALL_SERVERS)
# test main
m = manager.Manager(['main'])
self.assertEqual(len(m.servers), len(manager.MAIN_SERVERS))
for server in m.servers:
self.assertTrue(server.server in manager.MAIN_SERVERS)
# test rest
m = manager.Manager(['rest'])
self.assertEqual(len(m.servers), len(manager.REST_SERVERS))
for server in m.servers:
self.assertTrue(server.server in manager.REST_SERVERS)
# test main + rest == all
m = manager.Manager(['main', 'rest'])
self.assertEqual(len(m.servers), len(manager.ALL_SERVERS))
for server in m.servers:
self.assertTrue(server.server in manager.ALL_SERVERS)
# test dedupe
m = manager.Manager(['main', 'rest', 'proxy', 'object',
'container', 'account'])
self.assertEqual(len(m.servers), len(manager.ALL_SERVERS))
for server in m.servers:
self.assertTrue(server.server in manager.ALL_SERVERS)
# test glob
m = manager.Manager(['object-*'])
object_servers = [s for s in manager.ALL_SERVERS if
s.startswith('object')]
self.assertEqual(len(m.servers), len(object_servers))
for s in m.servers:
self.assertTrue(str(s) in object_servers)
m = manager.Manager(['*-replicator'])
replicators = [s for s in manager.ALL_SERVERS if
s.endswith('replicator')]
for s in m.servers:
self.assertTrue(str(s) in replicators)
def test_iter(self):
m = manager.Manager(['all'])
self.assertEqual(len(list(m)), len(manager.ALL_SERVERS))
for server in m:
self.assertTrue(server.server in manager.ALL_SERVERS)
def test_default_strict(self):
# test default strict
m = manager.Manager(['proxy'])
self.assertEqual(m._default_strict, True)
# aliases
m = manager.Manager(['main'])
self.assertEqual(m._default_strict, False)
m = manager.Manager(['proxy*'])
self.assertEqual(m._default_strict, False)
def test_status(self):
class MockServer(object):
def __init__(self, server, run_dir=manager.RUN_DIR):
self.server = server
self.called_kwargs = []
def status(self, **kwargs):
self.called_kwargs.append(kwargs)
if 'error' in self.server:
return 1
else:
return 0
old_server_class = manager.Server
try:
manager.Server = MockServer
m = manager.Manager(['test'])
status = m.status()
self.assertEqual(status, 0)
m = manager.Manager(['error'])
status = m.status()
self.assertEqual(status, 1)
# test multi-server
m = manager.Manager(['test', 'error'])
kwargs = {'key': 'value'}
status = m.status(**kwargs)
self.assertEqual(status, 1)
for server in m.servers:
self.assertEqual(server.called_kwargs, [kwargs])
finally:
manager.Server = old_server_class
    def test_start(self):
        """Manager.start() sets up the environment, launches every server
        and aggregates wait()/interact() results into the exit status,
        honoring strict/non-strict flags and the lenient default that
        aliases get.
        """
        def mock_setup_env():
            # NOTE(review): getattr with a default returns a fresh list when
            # the attribute is unset, so this append is never observable --
            # the stub effectively just replaces setup_env with a no-op
            getattr(mock_setup_env, 'called', []).append(True)

        class MockServer(object):
            """Scriptable Server: substrings of the name drive the
            launch/wait/interact results; every call is recorded."""

            def __init__(self, server, run_dir=manager.RUN_DIR):
                self.server = server
                self.called = defaultdict(list)

            def launch(self, **kwargs):
                self.called['launch'].append(kwargs)
                if 'noconfig' in self.server:
                    return {}
                elif 'somerunning' in self.server:
                    return {}
                else:
                    return {1: self.server[0]}

            def wait(self, **kwargs):
                self.called['wait'].append(kwargs)
                return int('error' in self.server)

            def stop(self, **kwargs):
                self.called['stop'].append(kwargs)

            def interact(self, **kwargs):
                self.called['interact'].append(kwargs)
                if 'raise' in self.server:
                    raise KeyboardInterrupt
                elif 'error' in self.server:
                    return 1
                else:
                    return 0

        old_setup_env = manager.setup_env
        old_swift_server = manager.Server
        try:
            manager.setup_env = mock_setup_env
            manager.Server = MockServer
            # test no errors on launch
            m = manager.Manager(['proxy'])
            status = m.start()
            self.assertEqual(status, 0)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{}])
            # test error on launch
            m = manager.Manager(['proxy', 'error'])
            status = m.start()
            self.assertEqual(status, 1)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{}])
                self.assertEqual(server.called['wait'], [{}])
            # test interact (daemon=False uses interact, not wait)
            m = manager.Manager(['proxy', 'error'])
            kwargs = {'daemon': False}
            status = m.start(**kwargs)
            self.assertEqual(status, 1)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [kwargs])
                self.assertEqual(server.called['interact'], [kwargs])
            # KeyboardInterrupt from interact must not propagate
            # NOTE(review): the resulting status is not asserted here;
            # confirm whether a check was intended
            m = manager.Manager(['raise'])
            kwargs = {'daemon': False}
            status = m.start(**kwargs)
            # test no config
            m = manager.Manager(['proxy', 'noconfig'])
            status = m.start()
            self.assertEqual(status, 1)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{}])
                self.assertEqual(server.called['wait'], [{}])
            # test no config with --non-strict
            m = manager.Manager(['proxy', 'noconfig'])
            status = m.start(strict=False)
            self.assertEqual(status, 0)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{'strict': False}])
                self.assertEqual(server.called['wait'], [{'strict': False}])
            # test no config --strict
            m = manager.Manager(['proxy', 'noconfig'])
            status = m.start(strict=True)
            self.assertEqual(status, 1)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{'strict': True}])
                self.assertEqual(server.called['wait'], [{'strict': True}])
            # test no config with alias (aliases default to non-strict)
            m = manager.Manager(['main', 'noconfig'])
            status = m.start()
            self.assertEqual(status, 0)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{}])
                self.assertEqual(server.called['wait'], [{}])
            # test no config with alias and --non-strict
            m = manager.Manager(['main', 'noconfig'])
            status = m.start(strict=False)
            self.assertEqual(status, 0)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{'strict': False}])
                self.assertEqual(server.called['wait'], [{'strict': False}])
            # test no config with alias and --strict
            m = manager.Manager(['main', 'noconfig'])
            status = m.start(strict=True)
            self.assertEqual(status, 1)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{'strict': True}])
                self.assertEqual(server.called['wait'], [{'strict': True}])
            # test already all running
            m = manager.Manager(['proxy', 'somerunning'])
            status = m.start()
            self.assertEqual(status, 1)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{}])
                self.assertEqual(server.called['wait'], [{}])
            # test already all running --non-strict
            m = manager.Manager(['proxy', 'somerunning'])
            status = m.start(strict=False)
            self.assertEqual(status, 0)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{'strict': False}])
                self.assertEqual(server.called['wait'], [{'strict': False}])
            # test already all running --strict
            m = manager.Manager(['proxy', 'somerunning'])
            status = m.start(strict=True)
            self.assertEqual(status, 1)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{'strict': True}])
                self.assertEqual(server.called['wait'], [{'strict': True}])
            # test already all running with alias
            m = manager.Manager(['main', 'somerunning'])
            status = m.start()
            self.assertEqual(status, 0)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{}])
                self.assertEqual(server.called['wait'], [{}])
            # test already all running with alias and --non-strict
            m = manager.Manager(['main', 'somerunning'])
            status = m.start(strict=False)
            self.assertEqual(status, 0)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{'strict': False}])
                self.assertEqual(server.called['wait'], [{'strict': False}])
            # test already all running with alias and --strict
            m = manager.Manager(['main', 'somerunning'])
            status = m.start(strict=True)
            self.assertEqual(status, 1)
            for server in m.servers:
                self.assertEqual(server.called['launch'], [{'strict': True}])
                self.assertEqual(server.called['wait'], [{'strict': True}])
        finally:
            manager.setup_env = old_setup_env
            manager.Server = old_swift_server
    def test_no_wait(self):
        """Manager.no_wait() launches every server with wait=False, never
        calls wait(), and always returns 0 -- even when a server would
        report an error.
        """
        class MockServer(object):
            """Records launch()/wait() kwargs; wait() fails when 'error' is
            in the server name (which no_wait must never trigger)."""

            def __init__(self, server, run_dir=manager.RUN_DIR):
                self.server = server
                self.called = defaultdict(list)

            def launch(self, **kwargs):
                self.called['launch'].append(kwargs)
                # must return non-empty dict if launch succeeded
                return {1: self.server[0]}

            def wait(self, **kwargs):
                self.called['wait'].append(kwargs)
                return int('error' in self.server)

        orig_swift_server = manager.Server
        try:
            manager.Server = MockServer
            # test success
            init = manager.Manager(['proxy'])
            status = init.no_wait()
            self.assertEqual(status, 0)
            for server in init.servers:
                self.assertEqual(len(server.called['launch']), 1)
                called_kwargs = server.called['launch'][0]
                self.assertFalse(called_kwargs['wait'])
                self.assertFalse(server.called['wait'])
            # test no error code in the status even for an error server
            init = manager.Manager(['error'])
            status = init.no_wait()
            self.assertEqual(status, 0)
            for server in init.servers:
                self.assertEqual(len(server.called['launch']), 1)
                called_kwargs = server.called['launch'][0]
                self.assertTrue('wait' in called_kwargs)
                self.assertFalse(called_kwargs['wait'])
                self.assertFalse(server.called['wait'])
            # test the once option passes through while wait stays False
            init = manager.Manager(['updater', 'replicator-error'])
            status = init.no_wait(once=True)
            self.assertEqual(status, 0)
            for server in init.servers:
                self.assertEqual(len(server.called['launch']), 1)
                called_kwargs = server.called['launch'][0]
                self.assertTrue('wait' in called_kwargs)
                self.assertFalse(called_kwargs['wait'])
                self.assertTrue('once' in called_kwargs)
                self.assertTrue(called_kwargs['once'])
                self.assertFalse(server.called['wait'])
        finally:
            manager.Server = orig_swift_server
    def test_no_daemon(self):
        """Manager.no_daemon must launch then interact() with each server,
        returning non-zero if any server's interactive run fails."""
        class MockServer(object):
            # Stand-in for manager.Server recording launch()/interact() calls.
            def __init__(self, server, run_dir=manager.RUN_DIR):
                self.server = server
                self.called = defaultdict(list)
            def launch(self, **kwargs):
                self.called['launch'].append(kwargs)
                # must return non-empty dict if launch succeeded
                return {1: self.server[0]}
            def interact(self, **kwargs):
                self.called['interact'].append(kwargs)
                # a server name containing 'error' simulates failure
                return int('error' in self.server)
        orig_swift_server = manager.Server
        try:
            manager.Server = MockServer
            # test success
            init = manager.Manager(['proxy'])
            stats = init.no_daemon()
            self.assertEqual(stats, 0)
            # test error
            init = manager.Manager(['proxy', 'object-error'])
            stats = init.no_daemon()
            self.assertEqual(stats, 1)
            # test once
            # NOTE(review): comment says "once" but no once=True is passed
            # to no_daemon() here — verify whether that was the intent.
            init = manager.Manager(['proxy', 'object-error'])
            stats = init.no_daemon()
            for server in init.servers:
                self.assertEqual(len(server.called['launch']), 1)
                # no_daemon must interact(), never wait()
                self.assertEqual(len(server.called['wait']), 0)
                self.assertEqual(len(server.called['interact']), 1)
        finally:
            manager.Server = orig_swift_server
    def test_once(self):
        """Manager.once must launch servers with once=True, wait for them,
        and propagate a failing exit status."""
        class MockServer(object):
            # Stand-in for manager.Server recording wait()/launch() calls.
            def __init__(self, server, run_dir=manager.RUN_DIR):
                self.server = server
                self.called = defaultdict(list)
            def wait(self, **kwargs):
                self.called['wait'].append(kwargs)
                # a server name containing 'error' simulates a failing run
                if 'error' in self.server:
                    return 1
                else:
                    return 0
            def launch(self, **kwargs):
                self.called['launch'].append(kwargs)
                return {1: 'account-reaper'}
        orig_swift_server = manager.Server
        try:
            manager.Server = MockServer
            # test no errors
            init = manager.Manager(['account-reaper'])
            status = init.once()
            self.assertEqual(status, 0)
            # test error code on error
            init = manager.Manager(['error-reaper'])
            status = init.once()
            self.assertEqual(status, 1)
            for server in init.servers:
                self.assertEqual(len(server.called['launch']), 1)
                called_kwargs = server.called['launch'][0]
                # once() must pass only once=True to launch()
                self.assertEqual(called_kwargs, {'once': True})
                self.assertEqual(len(server.called['wait']), 1)
                self.assertEqual(len(server.called['interact']), 0)
        finally:
            manager.Server = orig_swift_server
    def test_stop(self):
        """Manager.stop / Manager.kill exit status:

        * 0 when the server(s) stop, or when kill() finds nothing running
        * 1 when stop() finds nothing running, or when a pid won't die

        kill_group must never be reached on these paths (stop() without
        kill_after_timeout never escalates to SIGKILL).
        """
        class MockServerFactory(object):
            # Callable standing in for the manager.Server class; returns a
            # MockServer whose stop()/status() reflect the configured pids.
            class MockServer(object):
                def __init__(self, pids, run_dir=manager.RUN_DIR):
                    self.pids = pids
                def stop(self, **kwargs):
                    return self.pids
                def status(self, **kwargs):
                    return not self.pids
            def __init__(self, server_pids, run_dir=manager.RUN_DIR):
                self.server_pids = server_pids
            def __call__(self, server, run_dir=manager.RUN_DIR):
                return MockServerFactory.MockServer(self.server_pids[server])
        def mock_watch_server_pids(server_pids, **kwargs):
            # yield only real pids; a None pid models a process that never
            # exits within the watch interval
            for server, pids in server_pids.items():
                for pid in pids:
                    if pid is None:
                        continue
                    yield server, pid
        def mock_kill_group(pid, sig):
            self.fail('kill_group should not be called')
        _orig_server = manager.Server
        _orig_watch_server_pids = manager.watch_server_pids
        _orig_kill_group = manager.kill_group
        try:
            manager.watch_server_pids = mock_watch_server_pids
            manager.kill_group = mock_kill_group
            # test stop one server
            server_pids = {
                'test': {1: "dummy.pid"}
            }
            manager.Server = MockServerFactory(server_pids)
            m = manager.Manager(['test'])
            status = m.stop()
            self.assertEqual(status, 0)
            # test not running
            server_pids = {
                'test': {}
            }
            manager.Server = MockServerFactory(server_pids)
            m = manager.Manager(['test'])
            status = m.stop()
            self.assertEqual(status, 1)
            # test kill not running
            server_pids = {
                'test': {}
            }
            manager.Server = MockServerFactory(server_pids)
            m = manager.Manager(['test'])
            status = m.kill()
            self.assertEqual(status, 0)
            # test won't die
            server_pids = {
                'test': {None: None}
            }
            manager.Server = MockServerFactory(server_pids)
            m = manager.Manager(['test'])
            status = m.stop()
            self.assertEqual(status, 1)
        finally:
            manager.Server = _orig_server
            manager.watch_server_pids = _orig_watch_server_pids
            manager.kill_group = _orig_kill_group
    def test_stop_kill_after_timeout(self):
        """With kill_after_timeout=True, a server that won't die must be
        escalated to kill_group(pid, SIGKILL); an OSError from kill_group is
        fatal unless it is ESRCH (process already gone)."""
        class MockServerFactory(object):
            # Callable standing in for the manager.Server class.
            class MockServer(object):
                def __init__(self, pids, run_dir=manager.RUN_DIR):
                    self.pids = pids
                def stop(self, **kwargs):
                    return self.pids
                def status(self, **kwargs):
                    return not self.pids
            def __init__(self, server_pids, run_dir=manager.RUN_DIR):
                self.server_pids = server_pids
            def __call__(self, server, run_dir=manager.RUN_DIR):
                return MockServerFactory.MockServer(self.server_pids[server])
        def mock_watch_server_pids(server_pids, **kwargs):
            # a None pid models a process that never exits in time
            for server, pids in server_pids.items():
                for pid in pids:
                    if pid is None:
                        continue
                    yield server, pid
        mock_kill_group_called = []
        def mock_kill_group(*args):
            mock_kill_group_called.append(args)
        def mock_kill_group_oserr(*args):
            raise OSError()
        def mock_kill_group_oserr_ESRCH(*args):
            raise OSError(errno.ESRCH, 'No such process')
        _orig_server = manager.Server
        _orig_watch_server_pids = manager.watch_server_pids
        _orig_kill_group = manager.kill_group
        try:
            manager.watch_server_pids = mock_watch_server_pids
            manager.kill_group = mock_kill_group
            # test stop one server
            server_pids = {
                'test': {None: None}
            }
            manager.Server = MockServerFactory(server_pids)
            m = manager.Manager(['test'])
            status = m.stop(kill_after_timeout=True)
            self.assertEqual(status, 1)
            # escalation must be SIGKILL (signal number 9)
            self.assertEqual(mock_kill_group_called, [(None, 9)])
            manager.kill_group = mock_kill_group_oserr
            # test stop one server - OSError
            server_pids = {
                'test': {None: None}
            }
            manager.Server = MockServerFactory(server_pids)
            m = manager.Manager(['test'])
            with self.assertRaises(OSError):
                status = m.stop(kill_after_timeout=True)
            manager.kill_group = mock_kill_group_oserr_ESRCH
            # test stop one server - OSError: No such process
            server_pids = {
                'test': {None: None}
            }
            manager.Server = MockServerFactory(server_pids)
            m = manager.Manager(['test'])
            status = m.stop(kill_after_timeout=True)
            self.assertEqual(status, 1)
        finally:
            manager.Server = _orig_server
            manager.watch_server_pids = _orig_watch_server_pids
            manager.kill_group = _orig_kill_group
def test_shutdown(self):
m = manager.Manager(['test'])
m.stop_was_called = False
def mock_stop(*args, **kwargs):
m.stop_was_called = True
expected = {'graceful': True}
self.assertEqual(kwargs, expected)
return 0
m.stop = mock_stop
status = m.shutdown()
self.assertEqual(status, 0)
self.assertEqual(m.stop_was_called, True)
def test_restart(self):
m = manager.Manager(['test'])
m.stop_was_called = False
def mock_stop(*args, **kwargs):
m.stop_was_called = True
return 0
m.start_was_called = False
def mock_start(*args, **kwargs):
m.start_was_called = True
return 0
m.stop = mock_stop
m.start = mock_start
status = m.restart()
self.assertEqual(status, 0)
self.assertEqual(m.stop_was_called, True)
self.assertEqual(m.start_was_called, True)
    def test_reload(self):
        """reload() must stop and start each server gracefully — even when
        graceful=False is passed, for servers that support graceful
        shutdown."""
        class MockManager(object):
            # Stand-in for manager.Manager that records per-command kwargs
            # at class level so all per-server instances share the log.
            called = defaultdict(list)
            def __init__(self, servers):
                pass
            @classmethod
            def reset_called(cls):
                cls.called = defaultdict(list)
            def stop(self, **kwargs):
                MockManager.called['stop'].append(kwargs)
                return 0
            def start(self, **kwargs):
                MockManager.called['start'].append(kwargs)
                return 0
        _orig_manager = manager.Manager
        try:
            m = _orig_manager(['auth'])
            for server in m.servers:
                self.assertTrue(server.server in
                                manager.GRACEFUL_SHUTDOWN_SERVERS)
            manager.Manager = MockManager
            status = m.reload()
            self.assertEqual(status, 0)
            expected = {
                'start': [{'graceful': True}],
                'stop': [{'graceful': True}],
            }
            self.assertEqual(MockManager.called, expected)
            # test force graceful
            MockManager.reset_called()
            m = _orig_manager(['*-server'])
            self.assertEqual(len(m.servers), 4)
            for server in m.servers:
                self.assertTrue(server.server in
                                manager.GRACEFUL_SHUTDOWN_SERVERS)
            manager.Manager = MockManager
            # graceful=False is overridden: reload still stops/starts
            # gracefully for graceful-shutdown-capable servers
            status = m.reload(graceful=False)
            self.assertEqual(status, 0)
            expected = {
                'start': [{'graceful': True}] * 4,
                'stop': [{'graceful': True}] * 4,
            }
            self.assertEqual(MockManager.called, expected)
        finally:
            manager.Manager = _orig_manager
def test_force_reload(self):
m = manager.Manager(['test'])
m.reload_was_called = False
def mock_reload(*args, **kwargs):
m.reload_was_called = True
return 0
m.reload = mock_reload
status = m.force_reload()
self.assertEqual(status, 0)
self.assertEqual(m.reload_was_called, True)
def test_get_command(self):
m = manager.Manager(['test'])
self.assertEqual(m.start, m.get_command('start'))
self.assertEqual(m.force_reload, m.get_command('force-reload'))
self.assertEqual(m.get_command('force-reload'),
m.get_command('force_reload'))
self.assertRaises(manager.UnknownCommandError, m.get_command,
'no_command')
self.assertRaises(manager.UnknownCommandError, m.get_command,
'__init__')
def test_list_commands(self):
for cmd, help in manager.Manager.list_commands():
method = getattr(manager.Manager, cmd.replace('-', '_'), None)
self.assertTrue(method, '%s is not a command' % cmd)
self.assertTrue(getattr(method, 'publicly_accessible', False))
self.assertEqual(method.__doc__.strip(), help)
def test_run_command(self):
m = manager.Manager(['test'])
m.cmd_was_called = False
def mock_cmd(*args, **kwargs):
m.cmd_was_called = True
expected = {'kw1': True, 'kw2': False}
self.assertEqual(kwargs, expected)
return 0
mock_cmd.publicly_accessible = True
m.mock_cmd = mock_cmd
kwargs = {'kw1': True, 'kw2': False}
status = m.run_command('mock_cmd', **kwargs)
self.assertEqual(status, 0)
self.assertEqual(m.cmd_was_called, True)
if __name__ == '__main__':
    # allow running this test module directly via the unittest runner
    unittest.main()
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from test.unit import temptree
import os
import sys
import resource
import signal
import errno
from collections import defaultdict
from time import sleep, time
from six.moves import reload_module
from swift.common import manager
from swift.common.exceptions import InvalidPidFileException
import eventlet
# eventlet monkey-patches the stdlib; grab the original, unpatched threading
threading = eventlet.patcher.original('threading')
# arbitrary non-default signal number used by tests that send fake signals
DUMMY_SIG = 1
class MockOs(object):
    """Partial stand-in for the ``os`` module.

    Only ``kill`` is faked: signals sent to pids in ``running_pids`` are
    recorded in ``pid_sigs`` (pid -> ordered list of signals); anything else
    raises ``OSError(ESRCH)``.  All other attribute access falls through to
    the real ``os`` module via ``__getattr__``.
    """

    # sending this signal number makes kill() raise EPERM, letting tests
    # exercise the permission-denied path
    RAISE_EPERM_SIG = 99

    def __init__(self, pids):
        # pids considered alive; kill() on anything else raises ESRCH
        self.running_pids = pids
        # pid -> list of signals delivered, in delivery order
        self.pid_sigs = defaultdict(list)
        self.closed_fds = []
        self.child_pid = 9999  # fork defaults to test parent process path
        self.execlp_called = False

    def kill(self, pid, sig):
        """Record *sig* for *pid*, mimicking os.kill error behavior."""
        if sig == self.RAISE_EPERM_SIG:
            raise OSError(errno.EPERM, 'Operation not permitted')
        if pid not in self.running_pids:
            # symbolic errno (== 3 on POSIX) instead of a bare magic number
            raise OSError(errno.ESRCH, 'No such process')
        self.pid_sigs[pid].append(sig)

    def __getattr__(self, name):
        # I only over-ride portions of the os module
        try:
            # __getattr__ is only invoked after normal lookup failed, so
            # this re-raises AttributeError and we fall through to os.
            # (the original called the nonexistent object.__getattr__,
            # which only worked by accident)
            return object.__getattribute__(self, name)
        except AttributeError:
            return getattr(os, name)
def pop_stream(f):
    """Drain a seekable file: flush it, return everything written so far,
    and leave the file empty for the next writer."""
    f.flush()
    f.seek(0)
    drained = f.read()
    # rewind and drop the contents so the next pop starts from a clean file
    f.seek(0)
    f.truncate()
    return drained
class TestManagerModule(unittest.TestCase):
    """Tests for module-level helpers in swift.common.manager."""

    def test_servers(self):
        """ALL_SERVERS must be exactly MAIN_SERVERS + REST_SERVERS."""
        main_plus_rest = set(manager.MAIN_SERVERS + manager.REST_SERVERS)
        self.assertEqual(set(manager.ALL_SERVERS), main_plus_rest)
        # make sure there's no server listed in both
        self.assertEqual(len(main_plus_rest), len(manager.MAIN_SERVERS) +
                         len(manager.REST_SERVERS))

    def test_setup_env(self):
        """setup_env() raises rlimits and sets PYTHON_EGG_CACHE; rlimit
        failures (ValueError) are tolerated, but an OSError propagates
        before the environment is touched."""
        class MockResource(object):
            # Stand-in for the resource module; records setrlimit calls, or
            # raises the configured error instead.
            def __init__(self, error=None):
                self.error = error
                self.called_with_args = []
            def setrlimit(self, resource, limits):
                if self.error:
                    raise self.error
                self.called_with_args.append((resource, limits))
            def __getattr__(self, name):
                # I only over-ride portions of the resource module
                try:
                    return object.__getattr__(self, name)
                except AttributeError:
                    return getattr(resource, name)
        _orig_resource = manager.resource
        _orig_environ = os.environ
        try:
            manager.resource = MockResource()
            manager.os.environ = {}
            manager.setup_env()
            expected = [
                (resource.RLIMIT_NOFILE, (manager.MAX_DESCRIPTORS,
                                          manager.MAX_DESCRIPTORS)),
                (resource.RLIMIT_DATA, (manager.MAX_MEMORY,
                                        manager.MAX_MEMORY)),
                (resource.RLIMIT_NPROC, (manager.MAX_PROCS,
                                         manager.MAX_PROCS)),
            ]
            self.assertEqual(manager.resource.called_with_args, expected)
            self.assertTrue(
                manager.os.environ['PYTHON_EGG_CACHE'].startswith('/tmp'))
            # test error condition
            manager.resource = MockResource(error=ValueError())
            manager.os.environ = {}
            manager.setup_env()
            # a ValueError from setrlimit is swallowed; no limit recorded
            self.assertEqual(manager.resource.called_with_args, [])
            self.assertTrue(
                manager.os.environ['PYTHON_EGG_CACHE'].startswith('/tmp'))
            manager.resource = MockResource(error=OSError())
            manager.os.environ = {}
            self.assertRaises(OSError, manager.setup_env)
            # on OSError, the egg cache must not have been set
            self.assertEqual(manager.os.environ.get('PYTHON_EGG_CACHE'), None)
        finally:
            manager.resource = _orig_resource
            os.environ = _orig_environ

    def test_command_wrapper(self):
        """The @command decorator coerces return values to int, preserves
        the docstring, and marks the function publicly accessible."""
        @manager.command
        def myfunc(arg1):
            """test doc
            """
            return arg1
        self.assertEqual(myfunc.__doc__.strip(), 'test doc')
        self.assertEqual(myfunc(1), 1)
        self.assertEqual(myfunc(0), 0)
        self.assertEqual(myfunc(True), 1)
        self.assertEqual(myfunc(False), 0)
        self.assertTrue(hasattr(myfunc, 'publicly_accessible'))
        self.assertTrue(myfunc.publicly_accessible)

    def test_watch_server_pids(self):
        """watch_server_pids yields (server, pid) as processes die, honoring
        the watch interval and surfacing unexpected OSErrors from waitpid."""
        class MockOs(object):
            # Fake os.waitpid driven by a per-pid script of return values;
            # exhausting the script means the child is gone (ECHILD).
            WNOHANG = os.WNOHANG
            def __init__(self, pid_map=None):
                if pid_map is None:
                    pid_map = {}
                self.pid_map = {}
                for pid, v in pid_map.items():
                    self.pid_map[pid] = (x for x in v)
            def waitpid(self, pid, options):
                try:
                    rv = next(self.pid_map[pid])
                except StopIteration:
                    raise OSError(errno.ECHILD, os.strerror(errno.ECHILD))
                except KeyError:
                    raise OSError(errno.ESRCH, os.strerror(errno.ESRCH))
                if isinstance(rv, Exception):
                    raise rv
                else:
                    return rv
        class MockTime(object):
            # Fake clock: each time() call advances by the next scripted
            # tick (default 1s), so interval expiry is deterministic.
            def __init__(self, ticks=None):
                self.tock = time()
                if not ticks:
                    ticks = []
                self.ticks = (t for t in ticks)
            def time(self):
                try:
                    self.tock += next(self.ticks)
                except StopIteration:
                    self.tock += 1
                return self.tock
            def sleep(*args):
                # *args absorbs self and the duration; no real sleeping
                return
        class MockServer(object):
            # get_running_pids() reports the pids alive for `zombie` polls,
            # then reports none (the server has died)
            def __init__(self, pids, run_dir=manager.RUN_DIR, zombie=0):
                self.heartbeat = (pids for _ in range(zombie))
            def get_running_pids(self):
                try:
                    rv = next(self.heartbeat)
                    return rv
                except StopIteration:
                    return {}
        _orig_os = manager.os
        _orig_time = manager.time
        _orig_server = manager.Server
        try:
            manager.time = MockTime()
            manager.os = MockOs()
            # this server always says it's dead when you ask for running pids
            server = MockServer([1])
            # list of pids keyed on servers to watch
            server_pids = {
                server: [1],
            }
            # basic test, server dies
            gen = manager.watch_server_pids(server_pids)
            expected = [(server, 1)]
            self.assertEqual([x for x in gen], expected)
            # start long running server and short interval
            server = MockServer([1], zombie=15)
            server_pids = {
                server: [1],
            }
            gen = manager.watch_server_pids(server_pids)
            self.assertEqual([x for x in gen], [])
            # wait a little longer
            gen = manager.watch_server_pids(server_pids, interval=15)
            self.assertEqual([x for x in gen], [(server, 1)])
            # zombie process
            server = MockServer([1], zombie=200)
            server_pids = {
                server: [1],
            }
            # test weird os error
            manager.os = MockOs({1: [OSError()]})
            gen = manager.watch_server_pids(server_pids)
            self.assertRaises(OSError, lambda: [x for x in gen])
            # test multi-server
            server1 = MockServer([1, 10], zombie=200)
            server2 = MockServer([2, 20], zombie=8)
            server_pids = {
                server1: [1, 10],
                server2: [2, 20],
            }
            pid_map = {
                1: [None for _ in range(10)],
                2: [None for _ in range(8)],
                20: [None for _ in range(4)],
            }
            manager.os = MockOs(pid_map)
            gen = manager.watch_server_pids(server_pids,
                                            interval=manager.KILL_WAIT)
            expected = [
                (server2, 2),
                (server2, 20),
            ]
            self.assertEqual([x for x in gen], expected)
        finally:
            manager.os = _orig_os
            manager.time = _orig_time
            manager.Server = _orig_server

    def test_safe_kill(self):
        """safe_kill must refuse to signal a pid whose /proc cmdline does
        not match the expected process name."""
        manager.os = MockOs([1, 2, 3, 4])
        proc_files = (
            ('1/cmdline', 'same-procname'),
            ('2/cmdline', 'another-procname'),
            ('4/cmdline', 'another-procname'),
        )
        files, contents = zip(*proc_files)
        with temptree(files, contents) as t:
            manager.PROC_DIR = t
            manager.safe_kill(1, signal.SIG_DFL, 'same-procname')
            # pid 2 belongs to a different process -> refuse to kill
            self.assertRaises(InvalidPidFileException, manager.safe_kill,
                              2, signal.SIG_DFL, 'same-procname')
            manager.safe_kill(3, signal.SIG_DFL, 'same-procname')
            manager.safe_kill(4, signal.SIGHUP, 'same-procname')

    def test_exc(self):
        # UnknownCommandError must be a real Exception subclass
        self.assertTrue(issubclass(manager.UnknownCommandError, Exception))
class TestServer(unittest.TestCase):
    def tearDown(self):
        # many tests in this class overwrite manager's module globals
        # (SWIFT_DIR, RUN_DIR, os, ...); re-import to restore pristine state
        reload_module(manager)
def join_swift_dir(self, path):
return os.path.join(manager.SWIFT_DIR, path)
def join_run_dir(self, path):
return os.path.join(manager.RUN_DIR, path)
def test_create_server(self):
server = manager.Server('proxy')
self.assertEqual(server.server, 'proxy-server')
self.assertEqual(server.type, 'proxy')
self.assertEqual(server.cmd, 'swift-proxy-server')
server = manager.Server('object-replicator')
self.assertEqual(server.server, 'object-replicator')
self.assertEqual(server.type, 'object')
self.assertEqual(server.cmd, 'swift-object-replicator')
def test_server_to_string(self):
server = manager.Server('Proxy')
self.assertEqual(str(server), 'proxy-server')
server = manager.Server('object-replicator')
self.assertEqual(str(server), 'object-replicator')
def test_server_repr(self):
server = manager.Server('proxy')
self.assertTrue(server.__class__.__name__ in repr(server))
self.assertTrue(str(server) in repr(server))
def test_server_equality(self):
server1 = manager.Server('Proxy')
server2 = manager.Server('proxy-server')
self.assertEqual(server1, server2)
# it is NOT a string
self.assertNotEqual(server1, 'proxy-server')
def test_get_pid_file_name(self):
server = manager.Server('proxy')
conf_file = self.join_swift_dir('proxy-server.conf')
pid_file = self.join_run_dir('proxy-server.pid')
self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
server = manager.Server('object-replicator')
conf_file = self.join_swift_dir('object-server/1.conf')
pid_file = self.join_run_dir('object-replicator/1.pid')
self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
server = manager.Server('container-auditor')
conf_file = self.join_swift_dir(
'container-server/1/container-auditor.conf')
pid_file = self.join_run_dir(
'container-auditor/1/container-auditor.pid')
self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
    def test_get_custom_pid_file_name(self):
        """With a custom run_dir, pid file paths are rooted there instead of
        the default RUN_DIR."""
        random_run_dir = "/random/dir"
        get_random_run_dir = lambda x: os.path.join(random_run_dir, x)
        server = manager.Server('proxy', run_dir=random_run_dir)
        conf_file = self.join_swift_dir('proxy-server.conf')
        pid_file = get_random_run_dir('proxy-server.pid')
        self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
        server = manager.Server('object-replicator', run_dir=random_run_dir)
        conf_file = self.join_swift_dir('object-server/1.conf')
        pid_file = get_random_run_dir('object-replicator/1.pid')
        self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
        server = manager.Server('container-auditor', run_dir=random_run_dir)
        conf_file = self.join_swift_dir(
            'container-server/1/container-auditor.conf')
        pid_file = get_random_run_dir(
            'container-auditor/1/container-auditor.pid')
        self.assertEqual(pid_file, server.get_pid_file_name(conf_file))
    def test_get_conf_file_name(self):
        """get_conf_file_name is the inverse of get_pid_file_name: a pid
        file path maps back to the conf file path under SWIFT_DIR."""
        server = manager.Server('proxy')
        conf_file = self.join_swift_dir('proxy-server.conf')
        pid_file = self.join_run_dir('proxy-server.pid')
        self.assertEqual(conf_file, server.get_conf_file_name(pid_file))
        server = manager.Server('object-replicator')
        conf_file = self.join_swift_dir('object-server/1.conf')
        pid_file = self.join_run_dir('object-replicator/1.pid')
        self.assertEqual(conf_file, server.get_conf_file_name(pid_file))
        server = manager.Server('container-auditor')
        conf_file = self.join_swift_dir(
            'container-server/1/container-auditor.conf')
        pid_file = self.join_run_dir(
            'container-auditor/1/container-auditor.pid')
        self.assertEqual(conf_file, server.get_conf_file_name(pid_file))
        # standalone servers keep their own name in both paths
        server_name = manager.STANDALONE_SERVERS[0]
        server = manager.Server(server_name)
        conf_file = self.join_swift_dir(server_name + '.conf')
        pid_file = self.join_run_dir(server_name + '.pid')
        self.assertEqual(conf_file, server.get_conf_file_name(pid_file))
    def test_conf_files(self):
        """Server.conf_files() discovery: single file, per-number files
        under a server-type directory, numbered/named filtering, warning
        output, and standalone servers."""
        # test get single conf file
        conf_files = (
            'proxy-server.conf',
            'proxy-server.ini',
            'auth-server.conf',
        )
        with temptree(conf_files) as t:
            manager.SWIFT_DIR = t
            server = manager.Server('proxy')
            conf_files = server.conf_files()
            # only the .conf file counts; .ini and other servers ignored
            self.assertEqual(len(conf_files), 1)
            conf_file = conf_files[0]
            proxy_conf = self.join_swift_dir('proxy-server.conf')
            self.assertEqual(conf_file, proxy_conf)
        # test multi server conf files & grouping of server-type config
        conf_files = (
            'object-server1.conf',
            'object-server/2.conf',
            'object-server/object3.conf',
            'object-server/conf/server4.conf',
            'object-server.txt',
            'proxy-server.conf',
        )
        with temptree(conf_files) as t:
            manager.SWIFT_DIR = t
            server = manager.Server('object-replicator')
            conf_files = server.conf_files()
            self.assertEqual(len(conf_files), 4)
            c1 = self.join_swift_dir('object-server1.conf')
            c2 = self.join_swift_dir('object-server/2.conf')
            c3 = self.join_swift_dir('object-server/object3.conf')
            c4 = self.join_swift_dir('object-server/conf/server4.conf')
            for c in [c1, c2, c3, c4]:
                self.assertTrue(c in conf_files)
            # test configs returned sorted
            sorted_confs = sorted([c1, c2, c3, c4])
            self.assertEqual(conf_files, sorted_confs)
        # test get single numbered conf
        conf_files = (
            'account-server/1.conf',
            'account-server/2.conf',
            'account-server/3.conf',
            'account-server/4.conf',
        )
        with temptree(conf_files) as t:
            manager.SWIFT_DIR = t
            server = manager.Server('account')
            conf_files = server.conf_files(number=2)
            self.assertEqual(len(conf_files), 1)
            conf_file = conf_files[0]
            self.assertEqual(conf_file,
                             self.join_swift_dir('account-server/2.conf'))
            # test missing config number
            conf_files = server.conf_files(number=5)
            self.assertFalse(conf_files)
        # test getting specific conf
        conf_files = (
            'account-server/1.conf',
            'account-server/2.conf',
            'account-server/3.conf',
            'account-server/4.conf',
        )
        with temptree(conf_files) as t:
            manager.SWIFT_DIR = t
            # a 'name.N' server selects config number N
            server = manager.Server('account.2')
            conf_files = server.conf_files()
            self.assertEqual(len(conf_files), 1)
            conf_file = conf_files[0]
            self.assertEqual(conf_file,
                             self.join_swift_dir('account-server/2.conf'))
        # test verbose & quiet
        conf_files = (
            'auth-server.ini',
            'container-server/1.conf',
        )
        with temptree(conf_files) as t:
            manager.SWIFT_DIR = t
            old_stdout = sys.stdout
            try:
                # capture warnings printed to stdout
                with open(os.path.join(t, 'output'), 'w+') as f:
                    sys.stdout = f
                    server = manager.Server('auth')
                    # check warn "unable to locate"
                    conf_files = server.conf_files()
                    self.assertFalse(conf_files)
                    self.assertTrue('unable to locate config for auth'
                                    in pop_stream(f).lower())
                    # check quiet will silence warning
                    conf_files = server.conf_files(verbose=True, quiet=True)
                    self.assertEqual(pop_stream(f), '')
                    # check found config no warning
                    server = manager.Server('container-auditor')
                    conf_files = server.conf_files()
                    self.assertEqual(pop_stream(f), '')
                    # check missing config number warn "unable to locate"
                    conf_files = server.conf_files(number=2)
                    self.assertTrue(
                        'unable to locate config number 2 for ' +
                        'container-auditor' in pop_stream(f).lower())
                    # check verbose lists configs
                    conf_files = server.conf_files(number=2, verbose=True)
                    c1 = self.join_swift_dir('container-server/1.conf')
                    self.assertTrue(c1 in pop_stream(f))
            finally:
                sys.stdout = old_stdout
        # test standalone conf file
        server_name = manager.STANDALONE_SERVERS[0]
        conf_files = (server_name + '.conf',)
        with temptree(conf_files) as t:
            manager.SWIFT_DIR = t
            server = manager.Server(server_name)
            conf_files = server.conf_files()
            self.assertEqual(len(conf_files), 1)
            conf_file = conf_files[0]
            conf = self.join_swift_dir(server_name + '.conf')
            self.assertEqual(conf_file, conf)
def test_proxy_conf_dir(self):
conf_files = (
'proxy-server.conf.d/00.conf',
'proxy-server.conf.d/01.conf',
)
with temptree(conf_files) as t:
manager.SWIFT_DIR = t
server = manager.Server('proxy')
conf_dirs = server.conf_files()
self.assertEqual(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
proxy_conf_dir = self.join_swift_dir('proxy-server.conf.d')
self.assertEqual(proxy_conf_dir, conf_dir)
    def test_named_conf_dir(self):
        """A 'type.name' server selects only its matching *.conf.d dir; with
        no name filter, every named conf.d dir for the type is returned."""
        conf_files = (
            'object-server/base.conf-template',
            'object-server/object-server.conf.d/00_base.conf',
            'object-server/object-server.conf.d/10_server.conf',
            'object-server/object-replication.conf.d/00_base.conf',
            'object-server/object-replication.conf.d/10_server.conf',
        )
        with temptree(conf_files) as t:
            manager.SWIFT_DIR = t
            server = manager.Server('object.replication')
            conf_dirs = server.conf_files()
            self.assertEqual(len(conf_dirs), 1)
            conf_dir = conf_dirs[0]
            replication_server_conf_dir = self.join_swift_dir(
                'object-server/object-replication.conf.d')
            self.assertEqual(replication_server_conf_dir, conf_dir)
            # and again with no named filter
            server = manager.Server('object')
            conf_dirs = server.conf_files()
            self.assertEqual(len(conf_dirs), 2)
            for named_conf in ('server', 'replication'):
                conf_dir = self.join_swift_dir(
                    'object-server/object-%s.conf.d' % named_conf)
                self.assertTrue(conf_dir in conf_dirs)
    def test_conf_dir(self):
        """Numbered N.conf.d directories are each reported as one conf
        entry, returned sorted; stray non-.conf.d files are ignored."""
        conf_files = (
            'object-server/object-server.conf-base',
            'object-server/1.conf.d/base.conf',
            'object-server/1.conf.d/1.conf',
            'object-server/2.conf.d/base.conf',
            'object-server/2.conf.d/2.conf',
            'object-server/3.conf.d/base.conf',
            'object-server/3.conf.d/3.conf',
            'object-server/4.conf.d/base.conf',
            'object-server/4.conf.d/4.conf',
        )
        with temptree(conf_files) as t:
            manager.SWIFT_DIR = t
            server = manager.Server('object-replicator')
            conf_dirs = server.conf_files()
            self.assertEqual(len(conf_dirs), 4)
            c1 = self.join_swift_dir('object-server/1.conf.d')
            c2 = self.join_swift_dir('object-server/2.conf.d')
            c3 = self.join_swift_dir('object-server/3.conf.d')
            c4 = self.join_swift_dir('object-server/4.conf.d')
            for c in [c1, c2, c3, c4]:
                self.assertTrue(c in conf_dirs)
            # test configs returned sorted
            sorted_confs = sorted([c1, c2, c3, c4])
            self.assertEqual(conf_dirs, sorted_confs)
    def test_named_conf_dir_pid_files(self):
        """pid_files() honors the same 'type.name' filtering as conf files
        for *.pid.d entries under RUN_DIR."""
        conf_files = (
            'object-server/object-server.pid.d',
            'object-server/object-replication.pid.d',
        )
        with temptree(conf_files) as t:
            manager.RUN_DIR = t
            server = manager.Server('object.replication', run_dir=t)
            pid_files = server.pid_files()
            self.assertEqual(len(pid_files), 1)
            pid_file = pid_files[0]
            replication_server_pid = self.join_run_dir(
                'object-server/object-replication.pid.d')
            self.assertEqual(replication_server_pid, pid_file)
            # and again with no named filter
            server = manager.Server('object', run_dir=t)
            pid_files = server.pid_files()
            self.assertEqual(len(pid_files), 2)
            for named_pid in ('server', 'replication'):
                pid_file = self.join_run_dir(
                    'object-server/object-%s.pid.d' % named_pid)
                self.assertTrue(pid_file in pid_files)
    def test_iter_pid_files(self):
        """
        Server.iter_pid_files is kinda boring, test the
        Server.pid_files stuff here as well

        Covers: one pid file per server, invalid pid-file contents yielding
        a None pid, server-type isolation, multi-pid iteration, and
        filtering by config number / 'type.N' server name.
        """
        pid_files = (
            ('proxy-server.pid', 1),
            ('auth-server.pid', 'blah'),
            ('object-replicator/1.pid', 11),
            ('object-replicator/2.pid', 12),
        )
        files, contents = zip(*pid_files)
        with temptree(files, contents) as t:
            manager.RUN_DIR = t
            server = manager.Server('proxy', run_dir=t)
            # test get one file
            iterator = server.iter_pid_files()
            pid_file, pid = next(iterator)
            self.assertEqual(pid_file, self.join_run_dir('proxy-server.pid'))
            self.assertEqual(pid, 1)
            # ... and only one file
            self.assertRaises(StopIteration, next, iterator)
            # test invalid value in pid file
            server = manager.Server('auth', run_dir=t)
            pid_file, pid = next(server.iter_pid_files())
            # non-integer pid file contents yield pid None
            self.assertIsNone(pid)
            # test object-server doesn't steal pids from object-replicator
            server = manager.Server('object', run_dir=t)
            self.assertRaises(StopIteration, next, server.iter_pid_files())
            # test multi-pid iter
            server = manager.Server('object-replicator', run_dir=t)
            real_map = {
                11: self.join_run_dir('object-replicator/1.pid'),
                12: self.join_run_dir('object-replicator/2.pid'),
            }
            pid_map = {}
            for pid_file, pid in server.iter_pid_files():
                pid_map[pid] = pid_file
            self.assertEqual(pid_map, real_map)
        # test get pid_files by number
        conf_files = (
            'object-server/1.conf',
            'object-server/2.conf',
            'object-server/3.conf',
            'object-server/4.conf',
        )
        pid_files = (
            ('object-server/1.pid', 1),
            ('object-server/2.pid', 2),
            ('object-server/5.pid', 5),
        )
        with temptree(conf_files) as swift_dir:
            manager.SWIFT_DIR = swift_dir
            files, pids = zip(*pid_files)
            with temptree(files, pids) as t:
                manager.RUN_DIR = t
                server = manager.Server('object', run_dir=t)
                # test get all pid files
                real_map = {
                    1: self.join_run_dir('object-server/1.pid'),
                    2: self.join_run_dir('object-server/2.pid'),
                    5: self.join_run_dir('object-server/5.pid'),
                }
                pid_map = {}
                for pid_file, pid in server.iter_pid_files():
                    pid_map[pid] = pid_file
                self.assertEqual(pid_map, real_map)
                # test get pid with matching conf
                pids = list(server.iter_pid_files(number=2))
                self.assertEqual(len(pids), 1)
                pid_file, pid = pids[0]
                self.assertEqual(pid, 2)
                pid_two = self.join_run_dir('object-server/2.pid')
                self.assertEqual(pid_file, pid_two)
                # try to iter on a pid number with a matching conf but no pid
                pids = list(server.iter_pid_files(number=3))
                self.assertFalse(pids)
                # test get pids w/o matching conf
                pids = list(server.iter_pid_files(number=5))
                self.assertFalse(pids)
        # test get pid_files by conf name
        conf_files = (
            'object-server/1.conf',
            'object-server/2.conf',
            'object-server/3.conf',
            'object-server/4.conf',
        )
        pid_files = (
            ('object-server/1.pid', 1),
            ('object-server/2.pid', 2),
            ('object-server/5.pid', 5),
        )
        with temptree(conf_files) as swift_dir:
            manager.SWIFT_DIR = swift_dir
            files, pids = zip(*pid_files)
            with temptree(files, pids) as t:
                manager.RUN_DIR = t
                # 'object.2' restricts iteration to config number 2
                server = manager.Server('object.2', run_dir=t)
                # test get pid with matching conf
                pids = list(server.iter_pid_files())
                self.assertEqual(len(pids), 1)
                pid_file, pid = pids[0]
                self.assertEqual(pid, 2)
                pid_two = self.join_run_dir('object-server/2.pid')
                self.assertEqual(pid_file, pid_two)
def test_signal_pids(self):
    """signal_pids() should signal only live pids belonging to this
    server, print details for real signals (stay quiet on SIG_DFL),
    and clean up pid files that are stale, unparseable, or that name
    no running process.
    """
    temp_files = (
        ('var/run/zero-server.pid', 0),
        ('var/run/proxy-server.pid', 1),
        ('var/run/auth-server.pid', 2),
        ('var/run/one-server.pid', 3),
        ('var/run/object-server.pid', 4),
        # non-integer pid file contents exercise the invalid-pid path
        ('var/run/invalid-server.pid', 'Forty-Two'),
        ('proc/3/cmdline', 'swift-another-server')
    )
    with temptree(*zip(*temp_files)) as t:
        manager.RUN_DIR = os.path.join(t, 'var/run')
        manager.PROC_DIR = os.path.join(t, 'proc')
        # mock os with so both the first and second are running
        manager.os = MockOs([1, 2])
        server = manager.Server('proxy', run_dir=manager.RUN_DIR)
        pids = server.signal_pids(DUMMY_SIG)
        self.assertEqual(len(pids), 1)
        self.assertTrue(1 in pids)
        self.assertEqual(manager.os.pid_sigs[1], [DUMMY_SIG])
        # make sure other process not signaled
        self.assertFalse(2 in pids)
        self.assertFalse(2 in manager.os.pid_sigs)
        # capture stdio
        old_stdout = sys.stdout
        try:
            with open(os.path.join(t, 'output'), 'w+') as f:
                sys.stdout = f
                # test print details
                pids = server.signal_pids(DUMMY_SIG)
                output = pop_stream(f)
                self.assertTrue('pid: %s' % 1 in output)
                self.assertTrue('signal: %s' % DUMMY_SIG in output)
                # test no details on signal.SIG_DFL
                pids = server.signal_pids(signal.SIG_DFL)
                self.assertEqual(pop_stream(f), '')
                # reset mock os so only the second server is running
                manager.os = MockOs([2])
                # test pid not running
                pids = server.signal_pids(signal.SIG_DFL)
                self.assertNotIn(1, pids)
                self.assertNotIn(1, manager.os.pid_sigs)
                # test remove stale pid file
                self.assertFalse(os.path.exists(
                    self.join_run_dir('proxy-server.pid')))
                # reset mock os with no running pids
                manager.os = MockOs([])
                server = manager.Server('auth', run_dir=manager.RUN_DIR)
                # test verbose warns on removing stale pid file
                pids = server.signal_pids(signal.SIG_DFL, verbose=True)
                output = pop_stream(f)
                self.assertTrue('stale pid' in output.lower())
                auth_pid = self.join_run_dir('auth-server.pid')
                self.assertTrue(auth_pid in output)
                # reset mock os so only the third server is running
                manager.os = MockOs([3])
                server = manager.Server('one', run_dir=manager.RUN_DIR)
                # test verbose warns on removing invalid pid file
                # (pid 3 is alive per MockOs, but proc/3/cmdline names a
                # different server, so the pid file is bogus for 'one')
                pids = server.signal_pids(signal.SIG_DFL, verbose=True)
                output = pop_stream(f)
                old_stdout.write('output %s' % output)
                self.assertTrue('removing pid file' in output.lower())
                one_pid = self.join_run_dir('one-server.pid')
                self.assertTrue(one_pid in output)
                server = manager.Server('zero', run_dir=manager.RUN_DIR)
                self.assertTrue(os.path.exists(
                    self.join_run_dir('zero-server.pid')))  # sanity
                # test verbose warns on removing pid file with invalid pid
                # (the file contains pid 0)
                pids = server.signal_pids(signal.SIG_DFL, verbose=True)
                output = pop_stream(f)
                old_stdout.write('output %s' % output)
                self.assertTrue('with invalid pid' in output.lower())
                self.assertFalse(os.path.exists(
                    self.join_run_dir('zero-server.pid')))
                server = manager.Server('invalid-server',
                                        run_dir=manager.RUN_DIR)
                self.assertTrue(os.path.exists(
                    self.join_run_dir('invalid-server.pid')))  # sanity
                # test verbose warns on removing pid file with invalid pid
                # (the file contains the non-numeric string 'Forty-Two')
                pids = server.signal_pids(signal.SIG_DFL, verbose=True)
                output = pop_stream(f)
                old_stdout.write('output %s' % output)
                self.assertTrue('with invalid pid' in output.lower())
                self.assertFalse(os.path.exists(
                    self.join_run_dir('invalid-server.pid')))
                # reset mock os with no running pids
                manager.os = MockOs([])
                # test warning with insufficient permissions
                server = manager.Server('object', run_dir=manager.RUN_DIR)
                pids = server.signal_pids(manager.os.RAISE_EPERM_SIG)
                output = pop_stream(f)
                self.assertTrue('no permission to signal pid 4' in
                                output.lower(), output)
        finally:
            sys.stdout = old_stdout
def test_get_running_pids(self):
    """get_running_pids() returns only pids that are alive AND belong to
    this server, and removes stale pid files as a side effect.

    Fixes two broken assertions in the original:
      * the stale-pid checks built paths with join_swift_dir() even
        though the pid files live under RUN_DIR (see the join_run_dir
        use for test-server1.pid below) — they passed trivially;
      * assertTrue(os.path.exists, path) passed the function object as
        the condition, so it always succeeded.
    """
    # test only gets running pids
    temp_files = (
        ('var/run/test-server1.pid', 1),
        ('var/run/test-server2.pid', 2),
        ('var/run/test-server3.pid', 3),
        ('proc/1/cmdline', 'swift-test-server'),
        ('proc/3/cmdline', 'swift-another-server')
    )
    with temptree(*zip(*temp_files)) as t:
        manager.RUN_DIR = os.path.join(t, 'var/run')
        manager.PROC_DIR = os.path.join(t, 'proc')
        server = manager.Server(
            'test-server', run_dir=manager.RUN_DIR)
        # mock os, only pid '1' is running
        manager.os = MockOs([1, 3])
        running_pids = server.get_running_pids()
        self.assertEqual(len(running_pids), 1)
        self.assertTrue(1 in running_pids)
        self.assertNotIn(2, running_pids)
        self.assertNotIn(3, running_pids)
        # test persistent running pid files
        self.assertTrue(os.path.exists(
            os.path.join(manager.RUN_DIR, 'test-server1.pid')))
        # test clean up stale pids: pid 2 is not running and pid 3
        # belongs to another server, so both pid files must be removed
        # from RUN_DIR (was join_swift_dir, which checked the wrong dir)
        pid_two = self.join_run_dir('test-server2.pid')
        self.assertFalse(os.path.exists(pid_two))
        pid_three = self.join_run_dir('test-server3.pid')
        self.assertFalse(os.path.exists(pid_three))
        # reset mock os, no pids running
        manager.os = MockOs([])
        running_pids = server.get_running_pids()
        self.assertFalse(running_pids)
        # and now all pid files are cleaned out
        pid_one = self.join_run_dir('test-server1.pid')
        self.assertFalse(os.path.exists(pid_one))
        all_pids = os.listdir(manager.RUN_DIR)
        self.assertEqual(len(all_pids), 0)

    # test only get pids for right server
    pid_files = (
        ('thing-doer.pid', 1),
        ('thing-sayer.pid', 2),
        ('other-doer.pid', 3),
        ('other-sayer.pid', 4),
    )
    files, pids = zip(*pid_files)
    with temptree(files, pids) as t:
        manager.RUN_DIR = t
        # all pids are running
        manager.os = MockOs(pids)
        server = manager.Server('thing-doer', run_dir=t)
        running_pids = server.get_running_pids()
        # only thing-doer.pid, 1
        self.assertEqual(len(running_pids), 1)
        self.assertTrue(1 in running_pids)
        # no other pids returned
        for n in (2, 3, 4):
            self.assertNotIn(n, running_pids)
        # assert stale pids for other servers ignored
        manager.os = MockOs([1])  # only thing-doer is running
        running_pids = server.get_running_pids()
        for f in ('thing-sayer.pid', 'other-doer.pid', 'other-sayer.pid'):
            # other server pid files persist (was
            # assertTrue(os.path.exists, ...), which always passed)
            self.assertTrue(os.path.exists(os.path.join(t, f)))
        # verify that servers are in fact not running
        for server_name in ('thing-sayer', 'other-doer', 'other-sayer'):
            server = manager.Server(server_name, run_dir=t)
            running_pids = server.get_running_pids()
            self.assertFalse(running_pids)
        # and now all OTHER pid files are cleaned out
        all_pids = os.listdir(t)
        self.assertEqual(len(all_pids), 1)
        self.assertTrue(os.path.exists(os.path.join(t, 'thing-doer.pid')))
def test_kill_running_pids(self):
    """kill_running_pids() SIGTERMs running pids of this server, and
    upgrades to SIGHUP (graceful shutdown) only for servers listed in
    GRACEFUL_SHUTDOWN_SERVERS.
    """
    pid_files = (
        ('object-server.pid', 1),
        ('object-replicator1.pid', 11),
        ('object-replicator2.pid', 12),
    )
    files, running_pids = zip(*pid_files)
    with temptree(files, running_pids) as t:
        manager.RUN_DIR = t
        server = manager.Server('object', run_dir=t)
        # test no servers running
        manager.os = MockOs([])
        pids = server.kill_running_pids()
        self.assertFalse(pids, pids)
    # fresh tree: the previous pass removed the stale pid files
    files, running_pids = zip(*pid_files)
    with temptree(files, running_pids) as t:
        manager.RUN_DIR = t
        server.run_dir = t
        # start up pid
        manager.os = MockOs([1])
        server = manager.Server('object', run_dir=t)
        # test kill one pid
        pids = server.kill_running_pids()
        self.assertEqual(len(pids), 1)
        self.assertTrue(1 in pids)
        self.assertEqual(manager.os.pid_sigs[1], [signal.SIGTERM])
        # reset os mock
        manager.os = MockOs([1])
        # test shutdown
        self.assertTrue('object-server' in
                        manager.GRACEFUL_SHUTDOWN_SERVERS)
        pids = server.kill_running_pids(graceful=True)
        self.assertEqual(len(pids), 1)
        self.assertTrue(1 in pids)
        # graceful servers get SIGHUP instead of SIGTERM
        self.assertEqual(manager.os.pid_sigs[1], [signal.SIGHUP])
        # start up other servers
        manager.os = MockOs([11, 12])
        # test multi server kill & ignore graceful on unsupported server
        self.assertFalse('object-replicator' in
                         manager.GRACEFUL_SHUTDOWN_SERVERS)
        server = manager.Server('object-replicator', run_dir=t)
        pids = server.kill_running_pids(graceful=True)
        self.assertEqual(len(pids), 2)
        for pid in (11, 12):
            self.assertTrue(pid in pids)
            self.assertEqual(manager.os.pid_sigs[pid],
                             [signal.SIGTERM])
        # and the other pid is of course not signaled
        self.assertNotIn(1, manager.os.pid_sigs)
def test_status(self):
    """Server.status() prints one 'running' line per live instance and
    returns 0, or reports 'not running' / 'no ... running' and returns
    non-zero; a caller-supplied pids mapping bypasses get_running_pids.
    """
    conf_files = (
        'test-server/1.conf',
        'test-server/2.conf',
        'test-server/3.conf',
        'test-server/4.conf',
    )
    pid_files = (
        ('test-server/1.pid', 1),
        ('test-server/2.pid', 2),
        ('test-server/3.pid', 3),
        ('test-server/4.pid', 4),
    )
    with temptree(conf_files) as swift_dir:
        manager.SWIFT_DIR = swift_dir
        files, pids = zip(*pid_files)
        with temptree(files, pids) as t:
            manager.RUN_DIR = t
            # setup running servers
            server = manager.Server('test', run_dir=t)
            # capture stdio
            old_stdout = sys.stdout
            try:
                with open(os.path.join(t, 'output'), 'w+') as f:
                    sys.stdout = f
                    # test status for all running
                    manager.os = MockOs(pids)
                    proc_files = (
                        ('1/cmdline', 'swift-test-server'),
                        ('2/cmdline', 'swift-test-server'),
                        ('3/cmdline', 'swift-test-server'),
                        ('4/cmdline', 'swift-test-server'),
                    )
                    files, contents = zip(*proc_files)
                    with temptree(files, contents) as t:
                        manager.PROC_DIR = t
                        self.assertEqual(server.status(), 0)
                        output = pop_stream(f).strip().splitlines()
                        self.assertEqual(len(output), 4)
                        for line in output:
                            self.assertTrue('test-server running' in line)
                    # test get single server by number
                    with temptree([], []) as t:
                        manager.PROC_DIR = t
                        self.assertEqual(server.status(number=4), 0)
                        output = pop_stream(f).strip().splitlines()
                        self.assertEqual(len(output), 1)
                        line = output[0]
                        self.assertTrue('test-server running' in line)
                        conf_four = self.join_swift_dir(conf_files[3])
                        self.assertTrue('4 - %s' % conf_four in line)
                    # test some servers not running
                    manager.os = MockOs([1, 2, 3])
                    proc_files = (
                        ('1/cmdline', 'swift-test-server'),
                        ('2/cmdline', 'swift-test-server'),
                        ('3/cmdline', 'swift-test-server'),
                    )
                    files, contents = zip(*proc_files)
                    with temptree(files, contents) as t:
                        manager.PROC_DIR = t
                        self.assertEqual(server.status(), 0)
                        output = pop_stream(f).strip().splitlines()
                        self.assertEqual(len(output), 3)
                        for line in output:
                            self.assertTrue('test-server running' in line)
                    # test single server not running
                    manager.os = MockOs([1, 2])
                    proc_files = (
                        ('1/cmdline', 'swift-test-server'),
                        ('2/cmdline', 'swift-test-server'),
                    )
                    files, contents = zip(*proc_files)
                    with temptree(files, contents) as t:
                        manager.PROC_DIR = t
                        self.assertEqual(server.status(number=3), 1)
                        output = pop_stream(f).strip().splitlines()
                        self.assertEqual(len(output), 1)
                        line = output[0]
                        self.assertTrue('not running' in line)
                        conf_three = self.join_swift_dir(conf_files[2])
                        self.assertTrue(conf_three in line)
                    # test no running pids
                    manager.os = MockOs([])
                    with temptree([], []) as t:
                        manager.PROC_DIR = t
                        self.assertEqual(server.status(), 1)
                        output = pop_stream(f).lower()
                        self.assertTrue('no test-server running' in output)
                    # test use provided pids
                    pids = {
                        1: '1.pid',
                        2: '2.pid',
                    }
                    # shouldn't call get_running_pids
                    called = []

                    def mock(*args, **kwargs):
                        called.append(True)
                    server.get_running_pids = mock
                    status = server.status(pids=pids)
                    self.assertEqual(status, 0)
                    self.assertFalse(called)
                    output = pop_stream(f).strip().splitlines()
                    self.assertEqual(len(output), 2)
                    for line in output:
                        self.assertTrue('test-server running' in line)
            finally:
                sys.stdout = old_stdout
def test_spawn(self):
    """Server.spawn() writes a pid file, records the Popen proc, and
    assembles the argv / stdio redirection implied by the once, daemon
    and wait keyword args.

    Fixes the original's 'run once' case, which built expected_args but
    never asserted it, and closes pid files via context managers.
    """
    # mocks
    class MockProcess(object):
        """Stand-in for the subprocess module: Popen() hands back a
        MockProc with the next scripted pid."""
        NOTHING = 'default besides None'
        STDOUT = 'stdout'
        PIPE = 'pipe'

        def __init__(self, pids=None):
            if pids is None:
                pids = []
            self.pids = (p for p in pids)

        def Popen(self, args, **kwargs):
            return MockProc(next(self.pids), args, **kwargs)

    class MockProc(object):
        """Records the argv and stdio targets a spawn() call produced."""

        def __init__(self, pid, args, stdout=MockProcess.NOTHING,
                     stderr=MockProcess.NOTHING):
            self.pid = pid
            self.args = args
            self.stdout = stdout
            if stderr == MockProcess.STDOUT:
                self.stderr = self.stdout
            else:
                self.stderr = stderr

    # setup running servers
    server = manager.Server('test')
    with temptree(['test-server.conf']) as swift_dir:
        manager.SWIFT_DIR = swift_dir
        with temptree([]) as t:
            manager.RUN_DIR = t
            server.run_dir = t
            old_subprocess = manager.subprocess
            try:
                # test single server process calls spawn once
                manager.subprocess = MockProcess([1])
                conf_file = self.join_swift_dir('test-server.conf')
                # spawn server no kwargs
                server.spawn(conf_file)
                # test pid file
                pid_file = self.join_run_dir('test-server.pid')
                self.assertTrue(os.path.exists(pid_file))
                with open(pid_file) as fh:
                    pid_on_disk = int(fh.read().strip())
                self.assertEqual(pid_on_disk, 1)
                # assert procs args
                self.assertTrue(server.procs)
                self.assertEqual(len(server.procs), 1)
                proc = server.procs[0]
                expected_args = [
                    'swift-test-server',
                    conf_file,
                ]
                self.assertEqual(proc.args, expected_args)
                # assert stdout is piped
                self.assertEqual(proc.stdout, MockProcess.PIPE)
                self.assertEqual(proc.stderr, proc.stdout)
                # test multi server process calls spawn multiple times
                manager.subprocess = MockProcess([11, 12, 13, 14])
                conf1 = self.join_swift_dir('test-server/1.conf')
                conf2 = self.join_swift_dir('test-server/2.conf')
                conf3 = self.join_swift_dir('test-server/3.conf')
                conf4 = self.join_swift_dir('test-server/4.conf')
                server = manager.Server('test', run_dir=t)
                # test server run once
                server.spawn(conf1, once=True)
                self.assertTrue(server.procs)
                self.assertEqual(len(server.procs), 1)
                proc = server.procs[0]
                expected_args = ['swift-test-server', conf1, 'once']
                # missing in the original: expected_args was never checked
                self.assertEqual(proc.args, expected_args)
                # assert stdout is piped
                self.assertEqual(proc.stdout, MockProcess.PIPE)
                self.assertEqual(proc.stderr, proc.stdout)
                # test server not daemon
                server.spawn(conf2, daemon=False)
                self.assertTrue(server.procs)
                self.assertEqual(len(server.procs), 2)
                proc = server.procs[1]
                expected_args = ['swift-test-server', conf2, 'verbose']
                self.assertEqual(proc.args, expected_args)
                # assert stdout is not changed
                self.assertEqual(proc.stdout, None)
                self.assertEqual(proc.stderr, None)
                # test server wait
                server.spawn(conf3, wait=False)
                self.assertTrue(server.procs)
                self.assertEqual(len(server.procs), 3)
                proc = server.procs[2]
                # assert stdout is /dev/null
                with open('/dev/null', 'wb+') as fp:
                    self.assertTrue(isinstance(proc.stdout, type(fp)))
                self.assertEqual(proc.stdout.name, os.devnull)
                self.assertIn('b', proc.stdout.mode)
                self.assertTrue(any(x in proc.stdout.mode for x in 'aw+'),
                                'mode must be writable, not %r' %
                                proc.stdout.mode)
                self.assertEqual(proc.stderr, proc.stdout)
                # test not daemon over-rides wait
                server.spawn(conf4, wait=False, daemon=False, once=True)
                self.assertTrue(server.procs)
                self.assertEqual(len(server.procs), 4)
                proc = server.procs[3]
                expected_args = ['swift-test-server', conf4, 'once',
                                 'verbose']
                self.assertEqual(proc.args, expected_args)
                # daemon behavior should trump wait, once shouldn't matter
                self.assertEqual(proc.stdout, None)
                self.assertEqual(proc.stderr, None)
                # assert pids
                for i, proc in enumerate(server.procs):
                    pid_file = self.join_run_dir('test-server/%d.pid' %
                                                 (i + 1))
                    with open(pid_file) as fh:
                        pid_on_disk = int(fh.read().strip())
                    self.assertEqual(pid_on_disk, proc.pid)
            finally:
                manager.subprocess = old_subprocess
def test_wait(self):
    """Server.wait() returns once every proc's stdout pipe closes
    (i.e. after setup), before the process itself exits; a non-zero
    returncode from a proc is reported, and output=True echoes the
    subprocess output.

    Fix: Thread.isAlive() was removed in Python 3.9 — use is_alive().
    """
    server = manager.Server('test')
    self.assertEqual(server.wait(), 0)

    class MockProcess(threading.Thread):
        """Fake subprocess: a thread that writes scripted lines to a
        real pipe, then redirects its write end to /dev/null so the
        reader unblocks."""

        def __init__(self, delay=0.1, fail_to_start=False):
            threading.Thread.__init__(self)
            # setup pipe
            rfd, wfd = os.pipe()
            # subprocess connection to read stdout
            self.stdout = os.fdopen(rfd)
            # real process connection to write stdout
            self._stdout = os.fdopen(wfd, 'w')
            self.delay = delay
            self.finished = False
            self.returncode = None
            if fail_to_start:
                self._returncode = 1
                self.run = self.fail
            else:
                self._returncode = 0

        def __enter__(self):
            self.start()
            return self

        def __exit__(self, *args):
            # join on exit so 'finished' is settled for the caller
            if self.is_alive():
                self.join()

        def close_stdout(self):
            self._stdout.flush()
            with open(os.devnull, 'wb') as nullfile:
                try:
                    os.dup2(nullfile.fileno(), self._stdout.fileno())
                except OSError:
                    pass

        def fail(self):
            print('mock process started', file=self._stdout)
            sleep(self.delay)  # perform setup processing
            print('mock process failed to start', file=self._stdout)
            self.close_stdout()

        def poll(self):
            self.returncode = self._returncode
            return self.returncode or None

        def run(self):
            print('mock process started', file=self._stdout)
            sleep(self.delay)  # perform setup processing
            print('setup complete!', file=self._stdout)
            self.close_stdout()
            sleep(self.delay)  # do some more processing
            print('mock process finished', file=self._stdout)
            self.finished = True

    class MockTime(object):
        """Real clock, but sleep is a no-op to keep the test fast."""

        def time(self):
            return time()

        def sleep(self, *args, **kwargs):
            pass

    with temptree([]) as t:
        old_stdout = sys.stdout
        old_wait = manager.WARNING_WAIT
        old_time = manager.time
        try:
            manager.WARNING_WAIT = 0.01
            manager.time = MockTime()
            with open(os.path.join(t, 'output'), 'w+') as f:
                # actually capture the read stdout (for prints)
                sys.stdout = f
                # test closing pipe in subprocess unblocks read
                with MockProcess() as proc:
                    server.procs = [proc]
                    status = server.wait()
                    self.assertEqual(status, 0)
                    # wait should return before process exits
                    self.assertTrue(proc.is_alive())
                    self.assertFalse(proc.finished)
                self.assertTrue(proc.finished)  # make sure it did finish
                # test output kwarg prints subprocess output
                with MockProcess() as proc:
                    server.procs = [proc]
                    status = server.wait(output=True)
                output = pop_stream(f)
                self.assertTrue('mock process started' in output)
                self.assertTrue('setup complete' in output)
                # make sure we don't get prints after stdout was closed
                self.assertNotIn('mock process finished', output)
                # test process which fails to start
                with MockProcess(fail_to_start=True) as proc:
                    server.procs = [proc]
                    status = server.wait()
                self.assertEqual(status, 1)
                self.assertTrue('failed' in pop_stream(f))
                # test multiple procs
                procs = [MockProcess(delay=.5) for i in range(3)]
                for proc in procs:
                    proc.start()
                server.procs = procs
                status = server.wait()
                self.assertEqual(status, 0)
                for proc in procs:
                    self.assertTrue(proc.is_alive())
                for proc in procs:
                    proc.join()
        finally:
            sys.stdout = old_stdout
            manager.WARNING_WAIT = old_wait
            manager.time = old_time
def test_interact(self):
    """Server.interact() returns 0 only when every proc exits cleanly;
    any failing proc makes the overall status non-zero."""

    class FakeProc(object):
        """Minimal Popen stand-in whose exit status is fixed up front."""

        def __init__(self, fail=False):
            self.returncode = None
            self._returncode = 1 if fail else 0

        def communicate(self):
            # interact() drains output, which settles the return code
            self.returncode = self._returncode
            return '', ''

    server = manager.Server('test')
    # one clean proc -> success
    server.procs = [FakeProc()]
    self.assertEqual(server.interact(), 0)
    # one failing proc -> its non-zero status propagates
    server.procs = [FakeProc(fail=True)]
    self.assertEqual(server.interact(), 1)
    # a mix of clean and failing procs -> overall status is non-zero
    server.procs = [FakeProc(fail=fail) for fail in (False, True, True)]
    self.assertTrue(server.interact() > 0)
def test_launch(self):
    """Server.launch() refuses to start without a conf or when already
    running, maps conf files to spawned pids (honoring once/number
    kwargs), and reports a missing server binary."""
    # stubs
    conf_files = (
        'proxy-server.conf',
        'auth-server.conf',
        'object-server/1.conf',
        'object-server/2.conf',
        'object-server/3.conf',
        'object-server/4.conf',
    )
    pid_files = (
        ('proxy-server.pid', 1),
        ('proxy-server/2.pid', 2),
    )

    # mocks
    class MockSpawn(object):
        """Replaces Server.spawn: records conf files / kwargs and hands
        back scripted pids (an Exception in the script is raised)."""

        def __init__(self, pids=None):
            self.conf_files = []
            self.kwargs = []
            if not pids:
                def one_forever():
                    while True:
                        yield 1
                self.pids = one_forever()
            else:
                self.pids = (x for x in pids)

        def __call__(self, conf_file, **kwargs):
            self.conf_files.append(conf_file)
            self.kwargs.append(kwargs)
            rv = next(self.pids)
            if isinstance(rv, Exception):
                raise rv
            else:
                return rv

    with temptree(conf_files) as swift_dir:
        manager.SWIFT_DIR = swift_dir
        files, pids = zip(*pid_files)
        with temptree(files, pids) as t:
            manager.RUN_DIR = t
            old_stdout = sys.stdout
            try:
                with open(os.path.join(t, 'output'), 'w+') as f:
                    sys.stdout = f
                    # can't start server w/o an conf
                    server = manager.Server('test', run_dir=t)
                    self.assertFalse(server.launch())
                    # start mock os running all pids
                    manager.os = MockOs(pids)
                    proc_files = (
                        ('1/cmdline', 'swift-proxy-server'),
                        ('2/cmdline', 'swift-proxy-server'),
                    )
                    files, contents = zip(*proc_files)
                    with temptree(files, contents) as proc_dir:
                        manager.PROC_DIR = proc_dir
                        server = manager.Server('proxy', run_dir=t)
                        # can't start server if it's already running
                        self.assertFalse(server.launch())
                        output = pop_stream(f)
                        self.assertTrue('running' in output)
                        conf_file = self.join_swift_dir(
                            'proxy-server.conf')
                        self.assertTrue(conf_file in output)
                        pid_file = self.join_run_dir('proxy-server/2.pid')
                        self.assertTrue(pid_file in output)
                        self.assertTrue('already started' in output)
                    # no running pids
                    manager.os = MockOs([])
                    with temptree([], []) as proc_dir:
                        manager.PROC_DIR = proc_dir
                        # test ignore once for non-start-once server
                        mock_spawn = MockSpawn([1])
                        server.spawn = mock_spawn
                        conf_file = self.join_swift_dir(
                            'proxy-server.conf')
                        expected = {
                            1: conf_file,
                        }
                        self.assertEqual(server.launch(once=True),
                                         expected)
                        self.assertEqual(mock_spawn.conf_files,
                                         [conf_file])
                        # once is forced off for servers that don't
                        # support run-once
                        expected = {
                            'once': False,
                        }
                        self.assertEqual(mock_spawn.kwargs, [expected])
                        output = pop_stream(f)
                        self.assertTrue('Starting' in output)
                        self.assertNotIn('once', output)
                    # test multi-server kwarg once
                    server = manager.Server('object-replicator')
                    with temptree([], []) as proc_dir:
                        manager.PROC_DIR = proc_dir
                        mock_spawn = MockSpawn([1, 2, 3, 4])
                        server.spawn = mock_spawn
                        conf1 = self.join_swift_dir('object-server/1.conf')
                        conf2 = self.join_swift_dir('object-server/2.conf')
                        conf3 = self.join_swift_dir('object-server/3.conf')
                        conf4 = self.join_swift_dir('object-server/4.conf')
                        expected = {
                            1: conf1,
                            2: conf2,
                            3: conf3,
                            4: conf4,
                        }
                        self.assertEqual(server.launch(once=True),
                                         expected)
                        self.assertEqual(mock_spawn.conf_files, [
                            conf1, conf2, conf3, conf4])
                        expected = {
                            'once': True,
                        }
                        self.assertEqual(len(mock_spawn.kwargs), 4)
                        for kwargs in mock_spawn.kwargs:
                            self.assertEqual(kwargs, expected)
                        # test number kwarg
                        mock_spawn = MockSpawn([4])
                        manager.PROC_DIR = proc_dir
                        server.spawn = mock_spawn
                        expected = {
                            4: conf4,
                        }
                        self.assertEqual(server.launch(number=4),
                                         expected)
                        self.assertEqual(mock_spawn.conf_files, [conf4])
                        expected = {
                            'number': 4
                        }
                        self.assertEqual(mock_spawn.kwargs, [expected])
                    # test cmd does not exist
                    server = manager.Server('auth')
                    with temptree([], []) as proc_dir:
                        manager.PROC_DIR = proc_dir
                        mock_spawn = MockSpawn([OSError(errno.ENOENT,
                                                        'blah')])
                        server.spawn = mock_spawn
                        self.assertEqual(server.launch(), {})
                        self.assertTrue(
                            'swift-auth-server does not exist' in
                            pop_stream(f))
            finally:
                sys.stdout = old_stdout
def test_stop(self):
    """Server.stop() SIGTERMs every running pid of the server and
    returns the set of pids signaled; a number kwarg limits the stop
    to one numbered instance.
    """
    # NOTE(review): conf_files are 'account-server/*' while the server
    # under test is 'account-reaper' — the conf paths built below with
    # join_swift_dir never exist, so the os.path.exists checks pass
    # trivially.  Presumably the run-dir pid files were the intended
    # target; confirm against swift.common.manager before changing.
    conf_files = (
        'account-server/1.conf',
        'account-server/2.conf',
        'account-server/3.conf',
        'account-server/4.conf',
    )
    pid_files = (
        ('account-reaper/1.pid', 1),
        ('account-reaper/2.pid', 2),
        ('account-reaper/3.pid', 3),
        ('account-reaper/4.pid', 4),
    )
    with temptree(conf_files) as swift_dir:
        manager.SWIFT_DIR = swift_dir
        files, pids = zip(*pid_files)
        with temptree(files, pids) as t:
            manager.RUN_DIR = t
            # start all pids in mock os
            manager.os = MockOs(pids)
            server = manager.Server('account-reaper', run_dir=t)
            # test kill all running pids
            pids = server.stop()
            self.assertEqual(len(pids), 4)
            for pid in (1, 2, 3, 4):
                self.assertTrue(pid in pids)
                self.assertEqual(manager.os.pid_sigs[pid],
                                 [signal.SIGTERM])
            conf1 = self.join_swift_dir('account-reaper/1.conf')
            conf2 = self.join_swift_dir('account-reaper/2.conf')
            conf3 = self.join_swift_dir('account-reaper/3.conf')
            conf4 = self.join_swift_dir('account-reaper/4.conf')
            # reset mock os with only 2 running pids
            manager.os = MockOs([3, 4])
            pids = server.stop()
            self.assertEqual(len(pids), 2)
            for pid in (3, 4):
                self.assertTrue(pid in pids)
                self.assertEqual(manager.os.pid_sigs[pid],
                                 [signal.SIGTERM])
            self.assertFalse(os.path.exists(conf1))
            self.assertFalse(os.path.exists(conf2))
            # test number kwarg
            manager.os = MockOs([3, 4])
            pids = server.stop(number=3)
            self.assertEqual(len(pids), 1)
            expected = {
                3: conf3,
            }
            # NOTE(review): assertTrue(pids, expected) only checks
            # truthiness (expected is the msg arg); an assertEqual may
            # have been intended — verify before tightening.
            self.assertTrue(pids, expected)
            self.assertEqual(manager.os.pid_sigs[3], [signal.SIGTERM])
            self.assertFalse(os.path.exists(conf4))
            self.assertFalse(os.path.exists(conf3))
class TestManager(unittest.TestCase):
def test_create(self):
    """Manager() resolves server names, aliases ('all', 'main', 'rest')
    and glob patterns into a deduplicated set of Server instances."""
    m = manager.Manager(['test'])
    self.assertEqual(len(m.servers), 1)
    server = m.servers.pop()
    self.assertTrue(isinstance(server, manager.Server))
    # bare names get the '-server' suffix
    self.assertEqual(server.server, 'test-server')
    # test multi-server and simple dedupe
    servers = ['object-replicator', 'object-auditor', 'object-replicator']
    m = manager.Manager(servers)
    self.assertEqual(len(m.servers), 2)
    for server in m.servers:
        self.assertTrue(server.server in servers)
    # test all
    m = manager.Manager(['all'])
    self.assertEqual(len(m.servers), len(manager.ALL_SERVERS))
    for server in m.servers:
        self.assertTrue(server.server in manager.ALL_SERVERS)
    # test main
    m = manager.Manager(['main'])
    self.assertEqual(len(m.servers), len(manager.MAIN_SERVERS))
    for server in m.servers:
        self.assertTrue(server.server in manager.MAIN_SERVERS)
    # test rest
    m = manager.Manager(['rest'])
    self.assertEqual(len(m.servers), len(manager.REST_SERVERS))
    for server in m.servers:
        self.assertTrue(server.server in manager.REST_SERVERS)
    # test main + rest == all
    m = manager.Manager(['main', 'rest'])
    self.assertEqual(len(m.servers), len(manager.ALL_SERVERS))
    for server in m.servers:
        self.assertTrue(server.server in manager.ALL_SERVERS)
    # test dedupe
    m = manager.Manager(['main', 'rest', 'proxy', 'object',
                         'container', 'account'])
    self.assertEqual(len(m.servers), len(manager.ALL_SERVERS))
    for server in m.servers:
        self.assertTrue(server.server in manager.ALL_SERVERS)
    # test glob
    m = manager.Manager(['object-*'])
    object_servers = [s for s in manager.ALL_SERVERS if
                      s.startswith('object')]
    self.assertEqual(len(m.servers), len(object_servers))
    for s in m.servers:
        self.assertTrue(str(s) in object_servers)
    m = manager.Manager(['*-replicator'])
    replicators = [s for s in manager.ALL_SERVERS if
                   s.endswith('replicator')]
    for s in m.servers:
        self.assertTrue(str(s) in replicators)
def test_iter(self):
    """Iterating a Manager yields one Server per entry in ALL_SERVERS."""
    mgr = manager.Manager(['all'])
    yielded = list(mgr)
    self.assertEqual(len(yielded), len(manager.ALL_SERVERS))
    for srv in mgr:
        self.assertTrue(srv.server in manager.ALL_SERVERS)
def test_default_strict(self):
    """_default_strict is True only for explicitly named servers."""
    # a concrete server name is strict by default
    self.assertEqual(manager.Manager(['proxy'])._default_strict, True)
    # an alias such as 'main' is not
    self.assertEqual(manager.Manager(['main'])._default_strict, False)
    # neither is a glob pattern
    self.assertEqual(manager.Manager(['proxy*'])._default_strict, False)
def test_status(self):
    """Manager.status() aggregates per-server statuses: any non-zero
    server result makes the overall result non-zero, and kwargs are
    forwarded unchanged to every server."""

    class FakeServer(object):
        """Records status() calls; fails iff 'error' is in its name."""

        def __init__(self, server, run_dir=manager.RUN_DIR):
            self.server = server
            self.called_kwargs = []

        def status(self, **kwargs):
            self.called_kwargs.append(kwargs)
            return 1 if 'error' in self.server else 0

    saved_server_cls = manager.Server
    try:
        manager.Server = FakeServer
        # a healthy server reports 0
        self.assertEqual(manager.Manager(['test']).status(), 0)
        # a failing server reports 1
        self.assertEqual(manager.Manager(['error']).status(), 1)
        # multi-server: worst status wins, kwargs reach every server
        mgr = manager.Manager(['test', 'error'])
        kwargs = {'key': 'value'}
        self.assertEqual(mgr.status(**kwargs), 1)
        for srv in mgr.servers:
            self.assertEqual(srv.called_kwargs, [kwargs])
    finally:
        manager.Server = saved_server_cls
def test_start(self):
    """Manager.start() launches every server, waits (or interacts when
    daemon=False), and reports failures; strict/non-strict and alias
    selection decide whether a server with no config counts as an
    error."""
    def mock_setup_env():
        # NOTE(review): getattr with a list default builds a fresh list
        # each call when 'called' isn't set, so the append is discarded
        # — presumably just a call marker; confirm intent.
        getattr(mock_setup_env, 'called', []).append(True)

    class MockServer(object):
        """Scripted Server: behavior keyed off substrings of its name
        ('error', 'noconfig', 'somerunning', 'raise')."""

        def __init__(self, server, run_dir=manager.RUN_DIR):
            self.server = server
            self.called = defaultdict(list)

        def launch(self, **kwargs):
            self.called['launch'].append(kwargs)
            if 'noconfig' in self.server:
                return {}
            elif 'somerunning' in self.server:
                return {}
            else:
                return {1: self.server[0]}

        def wait(self, **kwargs):
            self.called['wait'].append(kwargs)
            return int('error' in self.server)

        def stop(self, **kwargs):
            self.called['stop'].append(kwargs)

        def interact(self, **kwargs):
            self.called['interact'].append(kwargs)
            if 'raise' in self.server:
                raise KeyboardInterrupt
            elif 'error' in self.server:
                return 1
            else:
                return 0

    old_setup_env = manager.setup_env
    old_swift_server = manager.Server
    try:
        manager.setup_env = mock_setup_env
        manager.Server = MockServer
        # test no errors on launch
        m = manager.Manager(['proxy'])
        status = m.start()
        self.assertEqual(status, 0)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{}])
        # test error on launch
        m = manager.Manager(['proxy', 'error'])
        status = m.start()
        self.assertEqual(status, 1)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{}])
            self.assertEqual(server.called['wait'], [{}])
        # test interact
        m = manager.Manager(['proxy', 'error'])
        kwargs = {'daemon': False}
        status = m.start(**kwargs)
        self.assertEqual(status, 1)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [kwargs])
            self.assertEqual(server.called['interact'], [kwargs])
        # KeyboardInterrupt from interact must not propagate
        m = manager.Manager(['raise'])
        kwargs = {'daemon': False}
        status = m.start(**kwargs)
        # test no config
        m = manager.Manager(['proxy', 'noconfig'])
        status = m.start()
        self.assertEqual(status, 1)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{}])
            self.assertEqual(server.called['wait'], [{}])
        # test no config with --non-strict
        m = manager.Manager(['proxy', 'noconfig'])
        status = m.start(strict=False)
        self.assertEqual(status, 0)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{'strict': False}])
            self.assertEqual(server.called['wait'], [{'strict': False}])
        # test no config --strict
        m = manager.Manager(['proxy', 'noconfig'])
        status = m.start(strict=True)
        self.assertEqual(status, 1)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{'strict': True}])
            self.assertEqual(server.called['wait'], [{'strict': True}])
        # test no config with alias
        m = manager.Manager(['main', 'noconfig'])
        status = m.start()
        self.assertEqual(status, 0)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{}])
            self.assertEqual(server.called['wait'], [{}])
        # test no config with alias and --non-strict
        m = manager.Manager(['main', 'noconfig'])
        status = m.start(strict=False)
        self.assertEqual(status, 0)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{'strict': False}])
            self.assertEqual(server.called['wait'], [{'strict': False}])
        # test no config with alias and --strict
        m = manager.Manager(['main', 'noconfig'])
        status = m.start(strict=True)
        self.assertEqual(status, 1)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{'strict': True}])
            self.assertEqual(server.called['wait'], [{'strict': True}])
        # test already all running
        m = manager.Manager(['proxy', 'somerunning'])
        status = m.start()
        self.assertEqual(status, 1)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{}])
            self.assertEqual(server.called['wait'], [{}])
        # test already all running --non-strict
        m = manager.Manager(['proxy', 'somerunning'])
        status = m.start(strict=False)
        self.assertEqual(status, 0)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{'strict': False}])
            self.assertEqual(server.called['wait'], [{'strict': False}])
        # test already all running --strict
        m = manager.Manager(['proxy', 'somerunning'])
        status = m.start(strict=True)
        self.assertEqual(status, 1)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{'strict': True}])
            self.assertEqual(server.called['wait'], [{'strict': True}])
        # test already all running with alias
        m = manager.Manager(['main', 'somerunning'])
        status = m.start()
        self.assertEqual(status, 0)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{}])
            self.assertEqual(server.called['wait'], [{}])
        # test already all running with alias and --non-strict
        m = manager.Manager(['main', 'somerunning'])
        status = m.start(strict=False)
        self.assertEqual(status, 0)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{'strict': False}])
            self.assertEqual(server.called['wait'], [{'strict': False}])
        # test already all running with alias and --strict
        m = manager.Manager(['main', 'somerunning'])
        status = m.start(strict=True)
        self.assertEqual(status, 1)
        for server in m.servers:
            self.assertEqual(server.called['launch'], [{'strict': True}])
            self.assertEqual(server.called['wait'], [{'strict': True}])
    finally:
        manager.setup_env = old_setup_env
        manager.Server = old_swift_server
def test_no_wait(self):
    """Manager.no_wait() launches servers with wait=False, never calls
    wait(), and always returns 0 — even when a server would fail."""
    class MockServer(object):
        def __init__(self, server, run_dir=manager.RUN_DIR):
            self.server = server
            self.called = defaultdict(list)

        def launch(self, **kwargs):
            self.called['launch'].append(kwargs)
            # must return non-empty dict if launch succeeded
            return {1: self.server[0]}

        def wait(self, **kwargs):
            self.called['wait'].append(kwargs)
            return int('error' in self.server)

    orig_swift_server = manager.Server
    try:
        manager.Server = MockServer
        # test success
        init = manager.Manager(['proxy'])
        status = init.no_wait()
        self.assertEqual(status, 0)
        for server in init.servers:
            self.assertEqual(len(server.called['launch']), 1)
            called_kwargs = server.called['launch'][0]
            self.assertFalse(called_kwargs['wait'])
            self.assertFalse(server.called['wait'])
        # test no error-code status even on error
        init = manager.Manager(['error'])
        status = init.no_wait()
        self.assertEqual(status, 0)
        for server in init.servers:
            self.assertEqual(len(server.called['launch']), 1)
            called_kwargs = server.called['launch'][0]
            self.assertTrue('wait' in called_kwargs)
            self.assertFalse(called_kwargs['wait'])
            self.assertFalse(server.called['wait'])
        # test wait with once option
        init = manager.Manager(['updater', 'replicator-error'])
        status = init.no_wait(once=True)
        self.assertEqual(status, 0)
        for server in init.servers:
            self.assertEqual(len(server.called['launch']), 1)
            called_kwargs = server.called['launch'][0]
            self.assertTrue('wait' in called_kwargs)
            self.assertFalse(called_kwargs['wait'])
            self.assertTrue('once' in called_kwargs)
            self.assertTrue(called_kwargs['once'])
            self.assertFalse(server.called['wait'])
    finally:
        manager.Server = orig_swift_server
def test_no_daemon(self):
    """no_daemon() launches servers in the foreground via interact() and
    propagates a non-zero status when any server errors."""
    class MockServer(object):
        def __init__(self, server, run_dir=manager.RUN_DIR):
            self.server = server
            self.called = defaultdict(list)

        def launch(self, **kwargs):
            self.called['launch'].append(kwargs)
            # must return non-empty dict if launch succeeded
            return {1: self.server[0]}

        def interact(self, **kwargs):
            self.called['interact'].append(kwargs)
            return int('error' in self.server)

    orig_swift_server = manager.Server
    try:
        manager.Server = MockServer
        # test success
        init = manager.Manager(['proxy'])
        stats = init.no_daemon()
        self.assertEqual(stats, 0)
        # test error
        init = manager.Manager(['proxy', 'object-error'])
        stats = init.no_daemon()
        self.assertEqual(stats, 1)
        # test once
        # NOTE(review): this section is labeled "once" but no once kwarg is
        # passed to no_daemon() — confirm whether once=True was intended.
        init = manager.Manager(['proxy', 'object-error'])
        stats = init.no_daemon()
        for server in init.servers:
            self.assertEqual(len(server.called['launch']), 1)
            self.assertEqual(len(server.called['wait']), 0)
            self.assertEqual(len(server.called['interact']), 1)
    finally:
        manager.Server = orig_swift_server
def test_once(self):
    """once() passes once=True to launch, waits for completion, and
    returns the error status of the run."""
    class MockServer(object):
        def __init__(self, server, run_dir=manager.RUN_DIR):
            self.server = server
            self.called = defaultdict(list)

        def wait(self, **kwargs):
            self.called['wait'].append(kwargs)
            if 'error' in self.server:
                return 1
            else:
                return 0

        def launch(self, **kwargs):
            self.called['launch'].append(kwargs)
            return {1: 'account-reaper'}

    orig_swift_server = manager.Server
    try:
        manager.Server = MockServer
        # test no errors
        init = manager.Manager(['account-reaper'])
        status = init.once()
        self.assertEqual(status, 0)
        # test error code on error
        init = manager.Manager(['error-reaper'])
        status = init.once()
        self.assertEqual(status, 1)
        for server in init.servers:
            self.assertEqual(len(server.called['launch']), 1)
            called_kwargs = server.called['launch'][0]
            self.assertEqual(called_kwargs, {'once': True})
            self.assertEqual(len(server.called['wait']), 1)
            self.assertEqual(len(server.called['interact']), 0)
    finally:
        manager.Server = orig_swift_server
def test_stop(self):
    """stop() returns 0 when servers die, 1 when nothing was running or a
    server refuses to die; kill_group must never fire without
    kill_after_timeout."""
    class MockServerFactory(object):
        class MockServer(object):
            def __init__(self, pids, run_dir=manager.RUN_DIR):
                self.pids = pids

            def stop(self, **kwargs):
                return self.pids

            def status(self, **kwargs):
                return not self.pids

        def __init__(self, server_pids, run_dir=manager.RUN_DIR):
            self.server_pids = server_pids

        def __call__(self, server, run_dir=manager.RUN_DIR):
            return MockServerFactory.MockServer(self.server_pids[server])

    def mock_watch_server_pids(server_pids, **kwargs):
        # yield only real pids; None marks a pid that never exits
        for server, pids in server_pids.items():
            for pid in pids:
                if pid is None:
                    continue
                yield server, pid

    def mock_kill_group(pid, sig):
        self.fail('kill_group should not be called')

    _orig_server = manager.Server
    _orig_watch_server_pids = manager.watch_server_pids
    _orig_kill_group = manager.kill_group
    try:
        manager.watch_server_pids = mock_watch_server_pids
        manager.kill_group = mock_kill_group
        # test stop one server
        server_pids = {
            'test': {1: "dummy.pid"}
        }
        manager.Server = MockServerFactory(server_pids)
        m = manager.Manager(['test'])
        status = m.stop()
        self.assertEqual(status, 0)
        # test not running
        server_pids = {
            'test': {}
        }
        manager.Server = MockServerFactory(server_pids)
        m = manager.Manager(['test'])
        status = m.stop()
        self.assertEqual(status, 1)
        # test kill not running
        server_pids = {
            'test': {}
        }
        manager.Server = MockServerFactory(server_pids)
        m = manager.Manager(['test'])
        status = m.kill()
        self.assertEqual(status, 0)
        # test won't die
        server_pids = {
            'test': {None: None}
        }
        manager.Server = MockServerFactory(server_pids)
        m = manager.Manager(['test'])
        status = m.stop()
        self.assertEqual(status, 1)
    finally:
        manager.Server = _orig_server
        manager.watch_server_pids = _orig_watch_server_pids
        manager.kill_group = _orig_kill_group
def test_stop_kill_after_timeout(self):
    """With kill_after_timeout=True, a lingering server is SIGKILLed;
    OSError from kill_group propagates unless it is ESRCH (process
    already gone)."""
    class MockServerFactory(object):
        class MockServer(object):
            def __init__(self, pids, run_dir=manager.RUN_DIR):
                self.pids = pids

            def stop(self, **kwargs):
                return self.pids

            def status(self, **kwargs):
                return not self.pids

        def __init__(self, server_pids, run_dir=manager.RUN_DIR):
            self.server_pids = server_pids

        def __call__(self, server, run_dir=manager.RUN_DIR):
            return MockServerFactory.MockServer(self.server_pids[server])

    def mock_watch_server_pids(server_pids, **kwargs):
        # yield only real pids; None marks a pid that never exits
        for server, pids in server_pids.items():
            for pid in pids:
                if pid is None:
                    continue
                yield server, pid

    mock_kill_group_called = []

    def mock_kill_group(*args):
        mock_kill_group_called.append(args)

    def mock_kill_group_oserr(*args):
        raise OSError()

    def mock_kill_group_oserr_ESRCH(*args):
        raise OSError(errno.ESRCH, 'No such process')

    _orig_server = manager.Server
    _orig_watch_server_pids = manager.watch_server_pids
    _orig_kill_group = manager.kill_group
    try:
        manager.watch_server_pids = mock_watch_server_pids
        manager.kill_group = mock_kill_group
        # test stop one server
        server_pids = {
            'test': {None: None}
        }
        manager.Server = MockServerFactory(server_pids)
        m = manager.Manager(['test'])
        status = m.stop(kill_after_timeout=True)
        self.assertEqual(status, 1)
        # the hung pid (None) is killed with signal 9 (SIGKILL)
        self.assertEqual(mock_kill_group_called, [(None, 9)])
        manager.kill_group = mock_kill_group_oserr
        # test stop one server - OSError
        server_pids = {
            'test': {None: None}
        }
        manager.Server = MockServerFactory(server_pids)
        m = manager.Manager(['test'])
        with self.assertRaises(OSError):
            status = m.stop(kill_after_timeout=True)
        manager.kill_group = mock_kill_group_oserr_ESRCH
        # test stop one server - OSError: No such process
        server_pids = {
            'test': {None: None}
        }
        manager.Server = MockServerFactory(server_pids)
        m = manager.Manager(['test'])
        status = m.stop(kill_after_timeout=True)
        self.assertEqual(status, 1)
    finally:
        manager.Server = _orig_server
        manager.watch_server_pids = _orig_watch_server_pids
        manager.kill_group = _orig_kill_group
def test_shutdown(self):
m = manager.Manager(['test'])
m.stop_was_called = False
def mock_stop(*args, **kwargs):
m.stop_was_called = True
expected = {'graceful': True}
self.assertEqual(kwargs, expected)
return 0
m.stop = mock_stop
status = m.shutdown()
self.assertEqual(status, 0)
self.assertEqual(m.stop_was_called, True)
def test_restart(self):
m = manager.Manager(['test'])
m.stop_was_called = False
def mock_stop(*args, **kwargs):
m.stop_was_called = True
return 0
m.start_was_called = False
def mock_start(*args, **kwargs):
m.start_was_called = True
return 0
m.stop = mock_stop
m.start = mock_start
status = m.restart()
self.assertEqual(status, 0)
self.assertEqual(m.stop_was_called, True)
self.assertEqual(m.start_was_called, True)
def test_reload(self):
class MockManager(object):
called = defaultdict(list)
def __init__(self, servers):
pass
@classmethod
def reset_called(cls):
cls.called = defaultdict(list)
def stop(self, **kwargs):
MockManager.called['stop'].append(kwargs)
return 0
def start(self, **kwargs):
MockManager.called['start'].append(kwargs)
return 0
_orig_manager = manager.Manager
try:
m = _orig_manager(['auth'])
for server in m.servers:
self.assertTrue(server.server in
manager.GRACEFUL_SHUTDOWN_SERVERS)
manager.Manager = MockManager
status = m.reload()
self.assertEqual(status, 0)
expected = {
'start': [{'graceful': True}],
'stop': [{'graceful': True}],
}
self.assertEqual(MockManager.called, expected)
# test force graceful
MockManager.reset_called()
m = _orig_manager(['*-server'])
self.assertEqual(len(m.servers), 4)
for server in m.servers:
self.assertTrue(server.server in
manager.GRACEFUL_SHUTDOWN_SERVERS)
manager.Manager = MockManager
status = m.reload(graceful=False)
self.assertEqual(status, 0)
expected = {
'start': [{'graceful': True}] * 4,
'stop': [{'graceful': True}] * 4,
}
self.assertEqual(MockManager.called, expected)
finally:
manager.Manager = _orig_manager
def test_force_reload(self):
m = manager.Manager(['test'])
m.reload_was_called = False
def mock_reload(*args, **kwargs):
m.reload_was_called = True
return 0
m.reload = mock_reload
status = m.force_reload()
self.assertEqual(status, 0)
self.assertEqual(m.reload_was_called, True)
def test_get_command(self):
m = manager.Manager(['test'])
self.assertEqual(m.start, m.get_command('start'))
self.assertEqual(m.force_reload, m.get_command('force-reload'))
self.assertEqual(m.get_command('force-reload'),
m.get_command('force_reload'))
self.assertRaises(manager.UnknownCommandError, m.get_command,
'no_command')
self.assertRaises(manager.UnknownCommandError, m.get_command,
'__init__')
def test_list_commands(self):
for cmd, help in manager.Manager.list_commands():
method = getattr(manager.Manager, cmd.replace('-', '_'), None)
self.assertTrue(method, '%s is not a command' % cmd)
self.assertTrue(getattr(method, 'publicly_accessible', False))
self.assertEqual(method.__doc__.strip(), help)
def test_run_command(self):
m = manager.Manager(['test'])
m.cmd_was_called = False
def mock_cmd(*args, **kwargs):
m.cmd_was_called = True
expected = {'kw1': True, 'kw2': False}
self.assertEqual(kwargs, expected)
return 0
mock_cmd.publicly_accessible = True
m.mock_cmd = mock_cmd
kwargs = {'kw1': True, 'kw2': False}
status = m.run_command('mock_cmd', **kwargs)
self.assertEqual(status, 0)
self.assertEqual(m.cmd_was_called, True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
en
| 0.787095
|
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # fork defaults to test parent process path # I only over-ride portions of the os module read everything out of file from the top and clear it out # make sure there's no server listed in both # I only over-ride portions of the resource module # test error condition test doc # this server always says it's dead when you ask for running pids # list of pids keyed on servers to watch # basic test, server dies # start long running server and short interval # wait a little longer # zombie process # test weird os error # test multi-server # it is NOT a string # test get single conf file # test multi server conf files & grouping of server-type config # test configs returned sorted # test get single numbered conf # test missing config number # test getting specific conf # test verbose & quiet # check warn "unable to locate" # check quiet will silence warning # check found config no warning # check missing config number warn "unable to locate" # check verbose lists configs # test standalone conf file # and again with no named filter # test configs returned sorted # and again with no named filter Server.iter_pid_files is kinda boring, test the Server.pid_files stuff here as well # test get one file # ... 
and only one file # test invalid value in pid file # test object-server doesn't steal pids from object-replicator # test multi-pid iter # test get pid_files by number # test get all pid files # test get pid with matching conf # try to iter on a pid number with a matching conf but no pid # test get pids w/o matching conf # test get pid_files by conf name # test get pid with matching conf # mock os with so both the first and second are running # make sure other process not signaled # capture stdio # test print details # test no details on signal.SIG_DFL # reset mock os so only the second server is running # test pid not running # test remove stale pid file # reset mock os with no running pids # test verbose warns on removing stale pid file # reset mock os so only the third server is running # test verbose warns on removing invalid pid file # sanity # test verbose warns on removing pid file with invalid pid # sanity # test verbose warns on removing pid file with invalid pid # reset mock os with no running pids # test warning with insufficient permissions # test only gets running pids # mock os, only pid '1' is running # test persistent running pid files # test clean up stale pids # reset mock os, no pids running # and now all pid files are cleaned out # test only get pids for right server # all pids are running # only thing-doer.pid, 1 # no other pids returned # assert stale pids for other servers ignored # only thing-doer is running # other server pid files persist # verify that servers are in fact not running # and now all OTHER pid files are cleaned out # test no servers running # start up pid # test kill one pid # reset os mock # test shutdown # start up other servers # test multi server kill & ignore graceful on unsupported server # and the other pid is of course not signaled # setup running servers # capture stdio # test status for all running # test get single server by number # test some servers not running # test single server not running # test no running 
pids # test use provided pids # shouldn't call get_running_pids # mocks # setup running servers # test single server process calls spawn once # spawn server no kwargs # test pid file # assert procs args # assert stdout is piped # test multi server process calls spawn multiple times # test server run once # assert stdout is piped # test server not daemon # assert stdout is not changed # test server wait # assert stdout is /dev/null # test not daemon over-rides wait # daemon behavior should trump wait, once shouldn't matter # assert pids # setup pipe # subprocess connection to read stdout # real process connection to write stdout # perform setup processing # perform setup processing # do some more processing # actually capture the read stdout (for prints) # test closing pipe in subprocess unblocks read # wait should return before process exits # make sure it did finish # test output kwarg prints subprocess output # make sure we don't get prints after stdout was closed # test process which fails to start # test multiple procs # stubs # mocks # can't start server w/o an conf # start mock os running all pids # can't start server if it's already running # no running pids # test ignore once for non-start-once server # test multi-server kwarg once # test number kwarg # test cmd does not exist # start all pids in mock os # test kill all running pids # reset mock os with only 2 running pids # test number kwarg # test multi-server and simple dedupe # test all # test main # test rest # test main + rest == all # test dedupe # test glob # test default strict # aliases # test multi-server # test no errors on launch # test error on launch # test interact # test no config # test no config with --non-strict # test no config --strict # test no config with alias # test no config with alias and --non-strict # test no config with alias and --strict # test already all running # test already all running --non-strict # test already all running --strict # test already all running with alias 
# test already all running with alias and --non-strict # test already all running with alias and --strict # must return non-empty dict if launch succeeded # test success # test no errocode status even on error # test wait with once option # must return non-empty dict if launch succeeded # test success # test error # test once # test no errors # test error code on error # test stop one server # test not running # test kill not running # test won't die # test stop one server # test stop one server - OSError # test stop one server - OSError: No such process # test force graceful
| 1.758685
| 2
|
web3d/web3d.py
|
romain-li/Web3dXBlock
| 0
|
6626227
|
<reponame>romain-li/Web3dXBlock<gh_stars>0
"""TO-DO: Write a description of what this XBlock is."""
import pkg_resources
from xblock.core import XBlock
from xblock.fields import Scope, String
from xblock.fragment import Fragment
class Web3dXBlock(XBlock):
    """
    An XBlock that renders a 3D model (OBJ + MTL) in the browser with
    three.js.
    """

    # Shown in the horizontal course navigation.
    display_name = String(display_name="Display name", default="web3d", scope=Scope.settings,
                          help="This name appears in the horizontal navigation at the top of the page.")
    # URLs of the model files; when empty, a bundled demo model is used.
    obj = String(default="", scope=Scope.content, help="URL for obj file.")
    mtl = String(default="", scope=Scope.content, help="URL for mtl file.")

    def resource_string(self, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def student_view(self, context=None):
        """
        The primary view of the Web3dXBlock, shown to students
        when viewing courses.
        """
        html = self.resource_string("static/html/web3d.html")
        # Fall back to the bundled Skylab demo model when no URLs are set.
        frag = Fragment(
            html.format(obj=self.obj or self.runtime.local_resource_url(self, "public/skylab/skylab_carbajal.obj"),
                        mtl=self.mtl or self.runtime.local_resource_url(self, "public/skylab/skylab_carbajal.mtl")))
        frag.add_javascript(self.resource_string("static/js/src/web3d.js"))
        frag.add_javascript(self.resource_string("static/js/lib/three.min.js"))
        frag.add_javascript(self.resource_string("static/js/lib/loaders/DDSLoader.js"))
        frag.add_javascript(self.resource_string("static/js/lib/loaders/MTLLoader.js"))
        frag.add_javascript(self.resource_string("static/js/lib/loaders/OBJMTLLoader.js"))
        frag.add_javascript(self.resource_string("static/js/lib/controls/TrackballControls.js"))
        frag.initialize_js('Web3dXBlock')
        return frag

    def studio_view(self, context=None):
        """Editing form shown to course authors in Studio."""
        html = self.resource_string("static/html/web3d_studio.html")
        frag = Fragment(
            html.format(display_name=self.display_name,
                        obj=self.obj,
                        mtl=self.mtl))
        frag.add_javascript(self.resource_string("static/js/src/web3d_studio.js"))
        frag.initialize_js("Web3dXBlockStudio")
        return frag

    @XBlock.json_handler
    def studio_submit(self, data, suffix=''):
        """Persist the values submitted from the Studio editing form."""
        # BUG FIX: the submitted display name was assigned to self.obj and
        # then immediately overwritten by the obj URL, so editing the display
        # name in Studio had no effect. Store it in display_name instead.
        self.display_name = data.get('display_name')
        self.obj = data.get('obj')
        self.mtl = data.get('mtl')
        return {'result': 'success'}

    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("Web3dXBlock",
             """<vertical_demo>
<web3d/>
</vertical_demo>
"""),
        ]
|
"""TO-DO: Write a description of what this XBlock is."""
import pkg_resources
from xblock.core import XBlock
from xblock.fields import Scope, String
from xblock.fragment import Fragment
class Web3dXBlock(XBlock):
    """
    An XBlock that renders a 3D model (OBJ + MTL) in the browser with
    three.js.
    """

    # Shown in the horizontal course navigation.
    display_name = String(display_name="Display name", default="web3d", scope=Scope.settings,
                          help="This name appears in the horizontal navigation at the top of the page.")
    # URLs of the model files; when empty, a bundled demo model is used.
    obj = String(default="", scope=Scope.content, help="URL for obj file.")
    mtl = String(default="", scope=Scope.content, help="URL for mtl file.")

    def resource_string(self, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def student_view(self, context=None):
        """
        The primary view of the Web3dXBlock, shown to students
        when viewing courses.
        """
        html = self.resource_string("static/html/web3d.html")
        # Fall back to the bundled Skylab demo model when no URLs are set.
        frag = Fragment(
            html.format(obj=self.obj or self.runtime.local_resource_url(self, "public/skylab/skylab_carbajal.obj"),
                        mtl=self.mtl or self.runtime.local_resource_url(self, "public/skylab/skylab_carbajal.mtl")))
        frag.add_javascript(self.resource_string("static/js/src/web3d.js"))
        frag.add_javascript(self.resource_string("static/js/lib/three.min.js"))
        frag.add_javascript(self.resource_string("static/js/lib/loaders/DDSLoader.js"))
        frag.add_javascript(self.resource_string("static/js/lib/loaders/MTLLoader.js"))
        frag.add_javascript(self.resource_string("static/js/lib/loaders/OBJMTLLoader.js"))
        frag.add_javascript(self.resource_string("static/js/lib/controls/TrackballControls.js"))
        frag.initialize_js('Web3dXBlock')
        return frag

    def studio_view(self, context=None):
        """Editing form shown to course authors in Studio."""
        html = self.resource_string("static/html/web3d_studio.html")
        frag = Fragment(
            html.format(display_name=self.display_name,
                        obj=self.obj,
                        mtl=self.mtl))
        frag.add_javascript(self.resource_string("static/js/src/web3d_studio.js"))
        frag.initialize_js("Web3dXBlockStudio")
        return frag

    @XBlock.json_handler
    def studio_submit(self, data, suffix=''):
        """Persist the values submitted from the Studio editing form."""
        # BUG FIX: the submitted display name was assigned to self.obj and
        # then immediately overwritten by the obj URL, so editing the display
        # name in Studio had no effect. Store it in display_name instead.
        self.display_name = data.get('display_name')
        self.obj = data.get('obj')
        self.mtl = data.get('mtl')
        return {'result': 'success'}

    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("Web3dXBlock",
             """<vertical_demo>
<web3d/>
</vertical_demo>
"""),
        ]
|
en
| 0.749601
|
TO-DO: Write a description of what this XBlock is. A web 3D Xblock. Handy helper for getting resources from our kit. The primary view of the Web3dXBlock, shown to students when viewing courses. A canned scenario for display in the workbench. <vertical_demo> <web3d/> </vertical_demo>
| 2.601725
| 3
|
src/DroneVision/DroneVision_src/hardware/ImageLink.py
|
swipswaps/Wind-Blade-Inspection
| 0
|
6626228
|
<reponame>swipswaps/Wind-Blade-Inspection
'''
Author: <NAME>
Email: <EMAIL>
Project: Master's Thesis - Autonomous Inspection Of Wind Blades
Repository: Master's Thesis - CV (Computer Vision)
'''
import os, glob
from imageTools import GetImage
from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import GetShape
'''
@brief Set up image linke.
@param folder (folder where the files are located)
@param image_filenames (Single or multiple (list) images without structured light)
@param sl_image_filenames (Single or multiple (list) images with structured light)
'''
class ImageLink():
    """
    Frame source that mimics the CameraLink interface but reads image
    pairs (normal + structured-light) from disk.
    """

    def __init__(self, folder, image_filenames, sl_image_filenames):
        '''CONSTRUCTOR'''
        self.CheckFilenames(folder, image_filenames, sl_image_filenames)
        self.OpenImage()

    def CheckManualTriggering(self):
        '''
        @brief Check if manual triggering is turned ON
        @return True/False (always False for a disk-backed source)
        '''
        return False

    def CheckFilenames(self, folder, image_filenames, sl_image_filenames):
        '''
        @brief Check if filenames are list of files, a directory with consistent files, or a single file.
        @param folder
        @param image_filenames
        @param sl_image_filenames
        '''
        if isinstance(image_filenames, list) or isinstance(sl_image_filenames, list):
            if not(isinstance(image_filenames, list) and isinstance(sl_image_filenames, list)):
                raise Exception('Both sl filenams and normal filenams must be a list')
            self.__image_filenames = image_filenames
            self.__sl_image_filenames = sl_image_filenames
        else:
            if (os.path.isdir(folder + image_filenames) or os.path.isdir(folder + sl_image_filenames)) or (image_filenames[-1:][0] == '/' or sl_image_filenames[-1:][0] == '/'):
                image_filenames = folder + image_filenames
                sl_image_filenames = folder + sl_image_filenames
                folder = ''
                if not(os.path.isdir(image_filenames) and os.path.isdir(sl_image_filenames)):
                    raise Exception('Both sl filenams and normal filenams must be a directory')
                # BUG FIX: glob.glob() returns files in arbitrary order, so
                # normal and structured-light frames could be paired out of
                # order. Sorting both listings keeps the pairing
                # deterministic across platforms.
                self.__image_filenames = sorted(glob.glob(image_filenames + '/*.*'))
                self.__sl_image_filenames = sorted(glob.glob(sl_image_filenames + '/*.*'))
            else:
                self.__image_filenames = [image_filenames]
                self.__sl_image_filenames = [sl_image_filenames]
        self.__folder = folder

    def StartCamera(self):
        '''
        @brief Method for syncronizing with the CameraLink class.
        '''
        self.OpenImage()

    def OpenImage(self):
        '''
        @brief Initialize the frame sequence and prime frame properties.
        '''
        self.__frame_i = 0
        self.__n_frames = len(self.__image_filenames)
        self.GetFrame()  # Get frames for property possibilities
        self.__frame_i = 0  # reset

    def GetTotalFrames(self):
        '''
        @brief Get total frames in video
        @return n_frames
        '''
        return self.__n_frames

    def GetFrameProperties(self):
        '''
        @brief Get frame properties such as fps, width, height
        @return fps, width, height (fps is fixed at 1.0 for still images)
        '''
        dim = GetShape(self.__frame)
        return 1.0, dim[0], dim[1]

    def GetFrame(self, get_normal_frame_only=False):
        '''
        @brief Pull image and structured light image
        @param get_normal_frame_only (Only implemented to fit with the CameraLink)
        @return frame, sl_frame
        '''
        self.__frame = GetImage(self.__folder + self.__image_filenames[self.__frame_i], gray=False)
        self.__sl_frame = GetImage(self.__folder + self.__sl_image_filenames[self.__frame_i], gray=False)
        if not(GetShape(self.__frame)[0] == GetShape(self.__sl_frame)[0]) or not(GetShape(self.__frame)[1] == GetShape(self.__sl_frame)[1]):
            raise Exception('Normal image and sl image dimensions are not consistent.')
        self.__frame_i += 1
        return self.__frame, self.__sl_frame

    def GetFrameNumber(self):
        '''
        @brief Get current frame number
        @return frame_i
        '''
        return self.__frame_i

    def StopCamera(self):
        '''
        @brief Method for syncronizing with the CameraLink class.
        '''
        self.StopImage()

    def StopImage(self):
        '''
        @brief Stop image (no resources to release for a disk source)
        '''
        pass

    def RestartCamera(self):
        '''
        @brief Simulating restart
        '''
        pass

    def __del__(self):
        '''
        @brief Stop video
        '''
        self.StopImage()
|
'''
Author: <NAME>
Email: <EMAIL>
Project: Master's Thesis - Autonomous Inspection Of Wind Blades
Repository: Master's Thesis - CV (Computer Vision)
'''
import os, glob
from imageTools import GetImage
from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import GetShape
'''
@brief Set up image linke.
@param folder (folder where the files are located)
@param image_filenames (Single or multiple (list) images without structured light)
@param sl_image_filenames (Single or multiple (list) images with structured light)
'''
class ImageLink():
    """
    Frame source that mimics the CameraLink interface but reads image
    pairs (normal + structured-light) from disk.
    """

    def __init__(self, folder, image_filenames, sl_image_filenames):
        '''CONSTRUCTOR'''
        self.CheckFilenames(folder, image_filenames, sl_image_filenames)
        self.OpenImage()

    def CheckManualTriggering(self):
        '''
        @brief Check if manual triggering is turned ON
        @return True/False (always False for a disk-backed source)
        '''
        return False

    def CheckFilenames(self, folder, image_filenames, sl_image_filenames):
        '''
        @brief Check if filenames are list of files, a directory with consistent files, or a single file.
        @param folder
        @param image_filenames
        @param sl_image_filenames
        '''
        if isinstance(image_filenames, list) or isinstance(sl_image_filenames, list):
            if not(isinstance(image_filenames, list) and isinstance(sl_image_filenames, list)):
                raise Exception('Both sl filenams and normal filenams must be a list')
            self.__image_filenames = image_filenames
            self.__sl_image_filenames = sl_image_filenames
        else:
            if (os.path.isdir(folder + image_filenames) or os.path.isdir(folder + sl_image_filenames)) or (image_filenames[-1:][0] == '/' or sl_image_filenames[-1:][0] == '/'):
                image_filenames = folder + image_filenames
                sl_image_filenames = folder + sl_image_filenames
                folder = ''
                if not(os.path.isdir(image_filenames) and os.path.isdir(sl_image_filenames)):
                    raise Exception('Both sl filenams and normal filenams must be a directory')
                # BUG FIX: glob.glob() returns files in arbitrary order, so
                # normal and structured-light frames could be paired out of
                # order. Sorting both listings keeps the pairing
                # deterministic across platforms.
                self.__image_filenames = sorted(glob.glob(image_filenames + '/*.*'))
                self.__sl_image_filenames = sorted(glob.glob(sl_image_filenames + '/*.*'))
            else:
                self.__image_filenames = [image_filenames]
                self.__sl_image_filenames = [sl_image_filenames]
        self.__folder = folder

    def StartCamera(self):
        '''
        @brief Method for syncronizing with the CameraLink class.
        '''
        self.OpenImage()

    def OpenImage(self):
        '''
        @brief Initialize the frame sequence and prime frame properties.
        '''
        self.__frame_i = 0
        self.__n_frames = len(self.__image_filenames)
        self.GetFrame()  # Get frames for property possibilities
        self.__frame_i = 0  # reset

    def GetTotalFrames(self):
        '''
        @brief Get total frames in video
        @return n_frames
        '''
        return self.__n_frames

    def GetFrameProperties(self):
        '''
        @brief Get frame properties such as fps, width, height
        @return fps, width, height (fps is fixed at 1.0 for still images)
        '''
        dim = GetShape(self.__frame)
        return 1.0, dim[0], dim[1]

    def GetFrame(self, get_normal_frame_only=False):
        '''
        @brief Pull image and structured light image
        @param get_normal_frame_only (Only implemented to fit with the CameraLink)
        @return frame, sl_frame
        '''
        self.__frame = GetImage(self.__folder + self.__image_filenames[self.__frame_i], gray=False)
        self.__sl_frame = GetImage(self.__folder + self.__sl_image_filenames[self.__frame_i], gray=False)
        if not(GetShape(self.__frame)[0] == GetShape(self.__sl_frame)[0]) or not(GetShape(self.__frame)[1] == GetShape(self.__sl_frame)[1]):
            raise Exception('Normal image and sl image dimensions are not consistent.')
        self.__frame_i += 1
        return self.__frame, self.__sl_frame

    def GetFrameNumber(self):
        '''
        @brief Get current frame number
        @return frame_i
        '''
        return self.__frame_i

    def StopCamera(self):
        '''
        @brief Method for syncronizing with the CameraLink class.
        '''
        self.StopImage()

    def StopImage(self):
        '''
        @brief Stop image (no resources to release for a disk source)
        '''
        pass

    def RestartCamera(self):
        '''
        @brief Simulating restart
        '''
        pass

    def __del__(self):
        '''
        @brief Stop video
        '''
        self.StopImage()
|
en
| 0.669579
|
Author: <NAME> Email: <EMAIL> Project: Master's Thesis - Autonomous Inspection Of Wind Blades Repository: Master's Thesis - CV (Computer Vision) @brief Set up image linke. @param folder (folder where the files are located) @param image_filenames (Single or multiple (list) images without structured light) @param sl_image_filenames (Single or multiple (list) images with structured light) CONSTRUCTOR @brief Check if manual triggering is turned ON @return True/False @brief Check if filenames are list of files, a directory with consistent files, or a single file. @param image_filenames @param sl_image_filenames @brief Method for syncronizing with the CameraLink class. @brief Initialize video # Get frames for property possibilities #reset @brief Get total frames in video @return n_frames @brief Get frame properties such as fps, width, length @return fps, width, height @brief Pull image and structured light image @param get_normal_frame_only (Only implemented to fit with the CameraLink) @return frame, sl_frame @brief Get current frame number @return frame_i @brief Method for syncronizing with the CameraLink class. @brief Stop image (do nothing actually..) @brief Simulating restart @brief Stop video
| 2.469517
| 2
|
othertests/recurse.py
|
SebastianOpiyo/algorithms
| 6
|
6626229
|
<reponame>SebastianOpiyo/algorithms<filename>othertests/recurse.py
def recurse(level):
    """Print the current depth, then recurse until level reaches 0.

    Returns None; the recursion bottoms out when level is falsy (0).
    """
    # BUG FIX: print() is not printf-style — the original passed the format
    # string and the value as two separate arguments, producing output like
    # "recurse(%s) 3" instead of "recurse(3)". Apply the % operator instead.
    print('recurse(%s)' % level)
    if level:
        recurse(level - 1)
    return
def not_called():
    """Dead-code demo: nothing in this script ever invokes this function."""
    message = 'This function is never called.'
    print(message)
print(recurse(8))
|
def recurse(level):
    """Print the current depth, then recurse until level reaches 0.

    Returns None; the recursion bottoms out when level is falsy (0).
    """
    # BUG FIX: print() is not printf-style — the original passed the format
    # string and the value as two separate arguments, producing output like
    # "recurse(%s) 3" instead of "recurse(3)". Apply the % operator instead.
    print('recurse(%s)' % level)
    if level:
        recurse(level - 1)
    return
def not_called():
    """Dead-code demo: nothing in this script ever invokes this function."""
    message = 'This function is never called.'
    print(message)
print(recurse(8))
|
none
| 1
| 3.540471
| 4
|
|
python/common.py
|
VictorAtPL/random-global-optimization-methods
| 1
|
6626230
|
from enum import Enum
from hyperopt import hp
class Functions(Enum):
    """Benchmark objective functions supported by the optimizers."""

    RASTRIGIN = "RASTRIGIN"
    ROSENBROCK = "ROSENBROCK"

    def __str__(self):
        # Lower-case form is used on the command line and in output.
        return self.name.lower()

    def __repr__(self):
        return str(self)

    @staticmethod
    def argparse(s):
        """Map a (case-insensitive) CLI string to a member; on an unknown
        name, hand the raw string back so argparse reports the error."""
        return Functions.__members__.get(s.upper(), s)
class Algorithms(Enum):
    """Global optimization algorithms; values are their abbreviations."""

    MONTE_CARLO = "MC"
    GRID_SEARCH = "GS"
    HILL_CLIMBING = "HC"
    HILL_CLIMBING_AD_SS = "HC + AdSS"
    BIT_SWITCH_HILL_CLIMBING = "BSHC"
    SIMULATED_ANNEALING = "SA"
    BIT_SWITCH_HILL_CLIMBING_VNS = "BSHC + VNS"
    EVOLUTION_STRATEGY = "ES"
    BIOLOGICAL_EVOLUTION = "BE"
    PARTICLE_SWARM_OPTIMIZATION = "PSO"
    DIFFERENCE_EVOLUTION = "DE"

    def __str__(self):
        # Upper-case member name is the canonical display form.
        return self.name.upper()

    def __repr__(self):
        return str(self).upper()

    @staticmethod
    def argparse(s):
        """Map a (case-insensitive) CLI string to a member; on an unknown
        name, hand the raw string back so argparse reports the error."""
        return Algorithms.__members__.get(s.upper(), s)
# Iteration budget per objective function.
STEPS = {
    Functions.RASTRIGIN: 1000,
    Functions.ROSENBROCK: 600,
}
# Symmetric search-space bound per objective function
# (presumably the domain is [-b, b] -- TODO confirm against the runners).
BOUNDARIES = {
    Functions.RASTRIGIN: 5.12,
    Functions.ROSENBROCK: 2.048,
}
# Fixed RNG seed for reproducible experiment runs.
SEED = 1
def get_opt_space(algorithm, function):
    """Return the hyperopt search space for *algorithm* on *function*.

    Falls through (implicitly returning None) for algorithms with no
    tunable hyper-parameters (e.g. MONTE_CARLO, GRID_SEARCH).
    """
    if algorithm == Algorithms.HILL_CLIMBING:
        return {
            '--failures-to-reset': hp.uniformint("x_failures_to_reset", 1, 50),
            '--step': hp.uniform("x_step", 0., BOUNDARIES[function] / 3),
            '--reset-resets-failures-counter': hp.choice("x_reset_resets_failures_counter", [True, False]),
        }
    if algorithm == Algorithms.HILL_CLIMBING_AD_SS:
        # NOTE(review): unlike the other branches this returns a
        # (space, 50) tuple -- confirm that callers expect the extra value.
        return {
            '--number-of-particles': hp.uniformint("x_number_of_particles", 5, 40)
        }, 50
    if algorithm == Algorithms.BIT_SWITCH_HILL_CLIMBING:
        return {
            '--failures-to-reset': hp.uniformint("x_failures_to_reset", 1, 50),
            '--step': hp.uniformint("x_step", 1, 8),
            '--reset-resets-failures-counter': hp.choice("x_reset_resets_failures_counter", [True, False]),
            '--no-of-bits-for-grid-mapping-per-dim': hp.uniformint("x_no_of_bits_for_grid_mapping_per_dim", 1, 32)
        }
    if algorithm == Algorithms.BIT_SWITCH_HILL_CLIMBING_VNS:
        return {
            '--no-of-bits-for-grid-mapping-per-dim': hp.uniformint("x_no_of_bits_for_grid_mapping_per_dim", 1, 32),
            '--neighbour-looks': hp.quniform("x_neighbour_looks", 50, STEPS[function], 50)
        }
    if algorithm == Algorithms.EVOLUTION_STRATEGY:
        return {
            '--improvements-loop-iteration': hp.quniform("x_improvements_loop_iteration", 50, STEPS[function], 50),
            '--step': hp.uniform("x_step", 0., BOUNDARIES[function] / 3),
            '--step-mutation-coefficient': hp.uniform("x_step_mutation_coefficient", 0.85, 0.99)
        }
    if algorithm == Algorithms.BIOLOGICAL_EVOLUTION:
        return {
            '--step': hp.uniform("x_step", 0., BOUNDARIES[function] / 3),
            '--population': hp.uniformint("x_population", 5, 40),
            '--crossover-population': hp.uniformint("x_crossover_population", 5, 20),
            '--mutation-probability': hp.quniform("x_mutation_probability", 0., 1., .1)
        }
    if algorithm == Algorithms.PARTICLE_SWARM_OPTIMIZATION:
        return {
            '--number-of-particles': hp.uniformint("x_number_of_particles", 5, 40),
            '--omega': hp.uniform("x_omega", -0.1, 0.837),
            '--c-1': hp.uniform("x_c_1", 0.875, 2.0412),
            '--c-2': hp.uniform("x_c_2", 0.9477, 2.85),
        }
    if algorithm == Algorithms.DIFFERENCE_EVOLUTION:
        return {
            '--population': hp.uniformint("x_population", 5, 40),
            '--f': hp.uniform("x_f", 0., 1.),
        }
|
from enum import Enum
from hyperopt import hp
class Functions(Enum):
    """Benchmark objective functions selectable from the command line."""
    RASTRIGIN = "RASTRIGIN"
    ROSENBROCK = "ROSENBROCK"
    def __str__(self):
        # Lower-cased member name, e.g. "rastrigin".
        return self.name.lower()
    def __repr__(self):
        return str(self)
    @staticmethod
    def argparse(s):
        """Resolve an argparse string to a member; echo *s* back if unknown."""
        try:
            return Functions[s.upper()]
        except KeyError:
            return s
class Algorithms(Enum):
    """Optimization algorithms, keyed by the short labels used in reports."""
    MONTE_CARLO = "MC"
    GRID_SEARCH = "GS"
    HILL_CLIMBING = "HC"
    HILL_CLIMBING_AD_SS = "HC + AdSS"
    BIT_SWITCH_HILL_CLIMBING = "BSHC"
    SIMULATED_ANNEALING = "SA"
    BIT_SWITCH_HILL_CLIMBING_VNS = "BSHC + VNS"
    EVOLUTION_STRATEGY = "ES"
    BIOLOGICAL_EVOLUTION = "BE"
    PARTICLE_SWARM_OPTIMIZATION = "PSO"
    DIFFERENCE_EVOLUTION = "DE"
    def __str__(self):
        # Upper-cased member name (names are already upper case).
        return self.name.upper()
    def __repr__(self):
        return str(self).upper()
    @staticmethod
    def argparse(s):
        """Resolve an argparse string to a member; echo *s* back if unknown."""
        try:
            return Algorithms[s.upper()]
        except KeyError:
            return s
# Iteration budget per objective function.
STEPS = {
    Functions.RASTRIGIN: 1000,
    Functions.ROSENBROCK: 600,
}
# Symmetric search-space bound per objective function
# (presumably the domain is [-b, b] -- TODO confirm against the runners).
BOUNDARIES = {
    Functions.RASTRIGIN: 5.12,
    Functions.ROSENBROCK: 2.048,
}
# Fixed RNG seed for reproducible experiment runs.
SEED = 1
def get_opt_space(algorithm, function):
    """Return the hyperopt search space for *algorithm* on *function*.

    Falls through (implicitly returning None) for algorithms with no
    tunable hyper-parameters (e.g. MONTE_CARLO, GRID_SEARCH).
    """
    if algorithm == Algorithms.HILL_CLIMBING:
        return {
            '--failures-to-reset': hp.uniformint("x_failures_to_reset", 1, 50),
            '--step': hp.uniform("x_step", 0., BOUNDARIES[function] / 3),
            '--reset-resets-failures-counter': hp.choice("x_reset_resets_failures_counter", [True, False]),
        }
    if algorithm == Algorithms.HILL_CLIMBING_AD_SS:
        # NOTE(review): unlike the other branches this returns a
        # (space, 50) tuple -- confirm that callers expect the extra value.
        return {
            '--number-of-particles': hp.uniformint("x_number_of_particles", 5, 40)
        }, 50
    if algorithm == Algorithms.BIT_SWITCH_HILL_CLIMBING:
        return {
            '--failures-to-reset': hp.uniformint("x_failures_to_reset", 1, 50),
            '--step': hp.uniformint("x_step", 1, 8),
            '--reset-resets-failures-counter': hp.choice("x_reset_resets_failures_counter", [True, False]),
            '--no-of-bits-for-grid-mapping-per-dim': hp.uniformint("x_no_of_bits_for_grid_mapping_per_dim", 1, 32)
        }
    if algorithm == Algorithms.BIT_SWITCH_HILL_CLIMBING_VNS:
        return {
            '--no-of-bits-for-grid-mapping-per-dim': hp.uniformint("x_no_of_bits_for_grid_mapping_per_dim", 1, 32),
            '--neighbour-looks': hp.quniform("x_neighbour_looks", 50, STEPS[function], 50)
        }
    if algorithm == Algorithms.EVOLUTION_STRATEGY:
        return {
            '--improvements-loop-iteration': hp.quniform("x_improvements_loop_iteration", 50, STEPS[function], 50),
            '--step': hp.uniform("x_step", 0., BOUNDARIES[function] / 3),
            '--step-mutation-coefficient': hp.uniform("x_step_mutation_coefficient", 0.85, 0.99)
        }
    if algorithm == Algorithms.BIOLOGICAL_EVOLUTION:
        return {
            '--step': hp.uniform("x_step", 0., BOUNDARIES[function] / 3),
            '--population': hp.uniformint("x_population", 5, 40),
            '--crossover-population': hp.uniformint("x_crossover_population", 5, 20),
            '--mutation-probability': hp.quniform("x_mutation_probability", 0., 1., .1)
        }
    if algorithm == Algorithms.PARTICLE_SWARM_OPTIMIZATION:
        return {
            '--number-of-particles': hp.uniformint("x_number_of_particles", 5, 40),
            '--omega': hp.uniform("x_omega", -0.1, 0.837),
            '--c-1': hp.uniform("x_c_1", 0.875, 2.0412),
            '--c-2': hp.uniform("x_c_2", 0.9477, 2.85),
        }
    if algorithm == Algorithms.DIFFERENCE_EVOLUTION:
        return {
            '--population': hp.uniformint("x_population", 5, 40),
            '--f': hp.uniform("x_f", 0., 1.),
        }
|
none
| 1
| 2.975575
| 3
|
|
setup_run.py
|
opteroncx/MoePhoto
| 192
|
6626231
|
# Build/deploy helper for MoePhoto: runs npm, compiles Python to .pyc,
# optionally fetches ffmpeg, and copies the result into ../build.
import os
import json
from python import moe_utils
from python.updater import update_ffmpeg, isWindows
import shutil
import argparse
# Output location of the generated build manifest.
manifestPath = './manifest.json'
parser = argparse.ArgumentParser(description='Moe build')
# NOTE(review): type=eval executes the flag value as Python. Acceptable
# only because this is a developer-run build script, never untrusted input.
parser.add_argument('--npm', default=True, nargs='?', const=True, type=eval,
                    help='install and update npm packages (default: %(default)s)')
parser.add_argument('--compile', default=True, nargs='?', const=True, type=eval,
                    help='recompile files (default: %(default)s)')
parser.add_argument('--clean', default=True, nargs='?', const=True, type=eval,
                    help='clean old built files (default: %(default)s)')
parser.add_argument('--ffmpeg', default=False, nargs='?', const=True, type=eval,
                    help='download latest ffmpeg from Internet (default: %(default)s)')
parser.add_argument('--copy', default=True, nargs='?', const=True, type=eval,
                    help='copy built files to deploy folder (default: %(default)s)')
parser.add_argument('--platform', default='native', choices=['native', 'win', 'linux'],
                    help='destination platform to deploy (default: %(default)s)')
# Version comes from package.json, stripped of any pre-release suffix.
with open('package.json','r',encoding='utf-8') as manifest:
    version = json.load(manifest)['version'].split('-')[0]
parser.add_argument('--version', action='version', version=version)
args = parser.parse_args()
# Manifest describing where updates and ffmpeg builds are fetched from.
manifest = {
    'version': version,
    'releases': 'https://moephoto.tech/moephoto/version.html',
    'ufile': 'https://moephoto.tech/moephoto/files/',
    'ffmpeg-win': 'https://ffmpeg.zeranoe.com/builds/win64/static/ffmpeg-latest-win64-static.zip',
    'ffmpeg-linux': 'https://johnvansickle.com/ffmpeg/builds/ffmpeg-git-amd64-static.tar.xz'
}
with open(manifestPath, 'w') as f:
    json.dump(manifest, f, ensure_ascii=False)
if args.npm:
    os.system('npm install --no-save --no-audit')
    os.system('npm update --no-save')
if args.compile:
    os.system('npm run build')
    moe_utils.compile_pyc()
# Artifacts to ship, keyed by a human-readable label -> relative path.
files = {
    'presets': '.user',
    'model': 'model',
    'python scripts': 'pyc',
    'site-packages': 'site-packages',
    'ffmpeg': 'ffmpeg',
    'static': 'static',
    'templates': 'templates',
    'manifest': 'manifest.json',
    'update_log': 'update_log.txt',
}
# 'download' is cleaned (and copied) but not part of the compile step above.
cleanFiles = { 'download': 'download' }
cleanFiles.update(files)
getBuild = lambda v: '../build/{}'.format(v)
getDev = lambda v: './{}'.format(v)
if args.clean:
    for key in cleanFiles:
        moe_utils.delete_files(getBuild(cleanFiles[key]))
if args.ffmpeg:
    # 'native' means: pick the build matching the machine we run on.
    platform = isWindows if args.platform == 'native' else (args.platform == 'win')
    try:
        update_ffmpeg(manifest, platform)
    except Exception:
        print('update ffmpeg failed')
if args.copy:
    for key in cleanFiles:
        print('copying {}'.format(key))
        v = cleanFiles[key]
        try:
            # copytree handles directories; fall back to copy for plain files.
            shutil.copytree(src=getDev(v),dst=getBuild(v))
        except:
            shutil.copy(src=getDev(v),dst=getBuild(v))
|
# Build/deploy helper for MoePhoto: runs npm, compiles Python to .pyc,
# optionally fetches ffmpeg, and copies the result into ../build.
import os
import json
from python import moe_utils
from python.updater import update_ffmpeg, isWindows
import shutil
import argparse
# Output location of the generated build manifest.
manifestPath = './manifest.json'
parser = argparse.ArgumentParser(description='Moe build')
# NOTE(review): type=eval executes the flag value as Python. Acceptable
# only because this is a developer-run build script, never untrusted input.
parser.add_argument('--npm', default=True, nargs='?', const=True, type=eval,
                    help='install and update npm packages (default: %(default)s)')
parser.add_argument('--compile', default=True, nargs='?', const=True, type=eval,
                    help='recompile files (default: %(default)s)')
parser.add_argument('--clean', default=True, nargs='?', const=True, type=eval,
                    help='clean old built files (default: %(default)s)')
parser.add_argument('--ffmpeg', default=False, nargs='?', const=True, type=eval,
                    help='download latest ffmpeg from Internet (default: %(default)s)')
parser.add_argument('--copy', default=True, nargs='?', const=True, type=eval,
                    help='copy built files to deploy folder (default: %(default)s)')
parser.add_argument('--platform', default='native', choices=['native', 'win', 'linux'],
                    help='destination platform to deploy (default: %(default)s)')
# Version comes from package.json, stripped of any pre-release suffix.
with open('package.json','r',encoding='utf-8') as manifest:
    version = json.load(manifest)['version'].split('-')[0]
parser.add_argument('--version', action='version', version=version)
args = parser.parse_args()
# Manifest describing where updates and ffmpeg builds are fetched from.
manifest = {
    'version': version,
    'releases': 'https://moephoto.tech/moephoto/version.html',
    'ufile': 'https://moephoto.tech/moephoto/files/',
    'ffmpeg-win': 'https://ffmpeg.zeranoe.com/builds/win64/static/ffmpeg-latest-win64-static.zip',
    'ffmpeg-linux': 'https://johnvansickle.com/ffmpeg/builds/ffmpeg-git-amd64-static.tar.xz'
}
with open(manifestPath, 'w') as f:
    json.dump(manifest, f, ensure_ascii=False)
if args.npm:
    os.system('npm install --no-save --no-audit')
    os.system('npm update --no-save')
if args.compile:
    os.system('npm run build')
    moe_utils.compile_pyc()
# Artifacts to ship, keyed by a human-readable label -> relative path.
files = {
    'presets': '.user',
    'model': 'model',
    'python scripts': 'pyc',
    'site-packages': 'site-packages',
    'ffmpeg': 'ffmpeg',
    'static': 'static',
    'templates': 'templates',
    'manifest': 'manifest.json',
    'update_log': 'update_log.txt',
}
# 'download' is cleaned (and copied) but not part of the compile step above.
cleanFiles = { 'download': 'download' }
cleanFiles.update(files)
getBuild = lambda v: '../build/{}'.format(v)
getDev = lambda v: './{}'.format(v)
if args.clean:
    for key in cleanFiles:
        moe_utils.delete_files(getBuild(cleanFiles[key]))
if args.ffmpeg:
    # 'native' means: pick the build matching the machine we run on.
    platform = isWindows if args.platform == 'native' else (args.platform == 'win')
    try:
        update_ffmpeg(manifest, platform)
    except Exception:
        print('update ffmpeg failed')
if args.copy:
    for key in cleanFiles:
        print('copying {}'.format(key))
        v = cleanFiles[key]
        try:
            # copytree handles directories; fall back to copy for plain files.
            shutil.copytree(src=getDev(v),dst=getBuild(v))
        except:
            shutil.copy(src=getDev(v),dst=getBuild(v))
|
none
| 1
| 2.312636
| 2
|
|
minecraftinventario/users/models.py
|
E2PC/MineChest
| 4
|
6626232
|
<gh_stars>1-10
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    """Site user model extending Django's built-in auth user."""
    # Free-form profile text; optional (form validation allows it empty).
    bio = models.TextField(blank=True)
|
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    """Site user model extending Django's built-in auth user."""
    # Free-form profile text; optional (form validation allows it empty).
    bio = models.TextField(blank=True)
|
none
| 1
| 1.90344
| 2
|
|
hw/vendor/pulp_platform_register_interface/vendor/lowrisc_opentitan/util/topgen/__init__.py
|
diorga/snitch
| 62
|
6626233
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
# noqa: F401 These functions are used in topgen.py
from .merge import merge_top, amend_clocks # noqa: F401
from .validate import validate_top # noqa: F401
from .lib import search_ips, get_hjsonobj_xbars # noqa: F401
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
# noqa: F401 These functions are used in topgen.py
from .merge import merge_top, amend_clocks # noqa: F401
from .validate import validate_top # noqa: F401
from .lib import search_ips, get_hjsonobj_xbars # noqa: F401
|
en
| 0.530735
|
# Copyright lowRISC contributors. # Licensed under the Apache License, Version 2.0, see LICENSE for details. # SPDX-License-Identifier: Apache-2.0 # noqa: F401 These functions are used in topgen.py # noqa: F401 # noqa: F401 # noqa: F401
| 1.381234
| 1
|
migrations/versions/20200705_16-09-04__add_slack_user_table.py
|
alysivji/github-adapter
| 55
|
6626234
|
<filename>migrations/versions/20200705_16-09-04__add_slack_user_table.py
"""add slack_user table
Revision ID: eddd9fbf0db6
Revises: <PASSWORD>
Create Date: 2020-07-05 16:09:04.051330
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Identifier of this migration; the next migration's down_revision points here.
revision = "<KEY>6"
# The migration this one is applied on top of.
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
    """Create the slack_user table, its foreign key, and its two indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "slack_user",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("date_created", sa.DateTime(), nullable=True),
        sa.Column("date_modified", sa.DateTime(), nullable=True),
        sa.Column("installation_id", sa.Integer(), nullable=False),
        sa.Column("slack_id", sa.String(length=30), nullable=False),
        sa.Column("slack_oauth_state", sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(
            ["installation_id"], ["slack_installation.id"], name="fk_installation_id"
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        op.f("ix_slack_user_installation_id"),
        "slack_user",
        ["installation_id"],
        unique=False,
    )
    op.create_index(
        op.f("ix_slack_user_slack_id"), "slack_user", ["slack_id"], unique=False
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the slack_user indexes and table (exact reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f("ix_slack_user_slack_id"), table_name="slack_user")
    op.drop_index(op.f("ix_slack_user_installation_id"), table_name="slack_user")
    op.drop_table("slack_user")
    # ### end Alembic commands ###
|
<filename>migrations/versions/20200705_16-09-04__add_slack_user_table.py
"""add slack_user table
Revision ID: eddd9fbf0db6
Revises: <PASSWORD>
Create Date: 2020-07-05 16:09:04.051330
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Identifier of this migration; the next migration's down_revision points here.
revision = "<KEY>6"
# The migration this one is applied on top of.
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
    """Create the slack_user table, its foreign key, and its two indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "slack_user",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("date_created", sa.DateTime(), nullable=True),
        sa.Column("date_modified", sa.DateTime(), nullable=True),
        sa.Column("installation_id", sa.Integer(), nullable=False),
        sa.Column("slack_id", sa.String(length=30), nullable=False),
        sa.Column("slack_oauth_state", sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(
            ["installation_id"], ["slack_installation.id"], name="fk_installation_id"
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        op.f("ix_slack_user_installation_id"),
        "slack_user",
        ["installation_id"],
        unique=False,
    )
    op.create_index(
        op.f("ix_slack_user_slack_id"), "slack_user", ["slack_id"], unique=False
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the slack_user indexes and table (exact reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f("ix_slack_user_slack_id"), table_name="slack_user")
    op.drop_index(op.f("ix_slack_user_installation_id"), table_name="slack_user")
    op.drop_table("slack_user")
    # ### end Alembic commands ###
|
en
| 0.46171
|
add slack_user table Revision ID: eddd9fbf0db6 Revises: <PASSWORD> Create Date: 2020-07-05 16:09:04.051330 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ###
| 1.540864
| 2
|
proto-tests/test_tim.py
|
kaushikcfd/loopy
| 0
|
6626235
|
from __future__ import division
import numpy as np
import pyopencl as cl
import loopy as lp
from pyopencl.tools import pytest_generate_tests_for_pyopencl \
as pytest_generate_tests
1/0 # see sem_reagan?
def test_tim2d(ctx_factory):
    """Build a 2D spectral-element Laplacian kernel in loopy, apply a chain
    of schedule transformations, and auto-test it against the untransformed
    reference kernel."""
    dtype = np.float32
    ctx = ctx_factory()
    order = "C"
    n = 8
    from pymbolic import var
    K_sym = var("K")
    field_shape = (K_sym, n, n)
    # K - run-time symbolic
    knl = lp.make_kernel(ctx.devices[0],
            "[K] -> {[i,j,e,m,o,gi]: 0<=i,j,m,o<%d and 0<=e<K and 0<=gi<3}" % n,
            [
                "ur(a,b) := sum_float32(@o, D[a,o]*u[e,o,b])",
                "us(a,b) := sum_float32(@o, D[b,o]*u[e,a,o])",
                "lap[e,i,j] = "
                " sum_float32(m, D[m,i]*(G[0,e,m,j]*ur(m,j) + G[1,e,m,j]*us(m,j)))"
                "+ sum_float32(m, D[m,j]*(G[1,e,i,m]*ur(i,m) + G[2,e,i,m]*us(i,m)))"
                ],
            [
                lp.ArrayArg("u", dtype, shape=field_shape, order=order),
                lp.ArrayArg("lap", dtype, shape=field_shape, order=order),
                lp.ArrayArg("G", dtype, shape=(3,)+field_shape, order=order),
                # lp.ConstantArrayArg("D", dtype, shape=(n, n), order=order),
                lp.ArrayArg("D", dtype, shape=(n, n), order=order),
                # lp.ImageArg("D", dtype, shape=(n, n)),
                lp.ValueArg("K", np.int32, approximately=1000),
                ],
            name="semlap2D", assumptions="K>=1")
    unroll = 32
    # Keep the untransformed kernel as the correctness reference.
    seq_knl = knl
    knl = lp.add_prefetch(knl, "D", ["m", "j", "i","o"])
    knl = lp.add_prefetch(knl, "u", ["i", "j", "o"])
    knl = lp.precompute(knl, "ur", np.float32, ["a", "b"])
    knl = lp.precompute(knl, "us", np.float32, ["a", "b"])
    knl = lp.split_iname(knl, "e", 1, outer_tag="g.0")#, slabs=(0, 1))
    knl = lp.tag_inames(knl, dict(i="l.0", j="l.1"))
    knl = lp.tag_inames(knl, dict(o="unr"))
    knl = lp.tag_inames(knl, dict(m="unr"))
    # knl = lp.add_prefetch(knl, "G", [2,3], default_tag=None) # axis/argument indices on G
    knl = lp.add_prefetch(knl, "G", [2,3]) # axis/argument indices on G
    kernel_gen = lp.generate_loop_schedules(knl)
    kernel_gen = lp.check_kernels(kernel_gen, dict(K=1000))
    K = 1000
    lp.auto_test_vs_ref(seq_knl, ctx, kernel_gen,
            op_count=K*(n*n*n*2*2 + n*n*2*3 + n**3 * 2*2)/1e9,
            op_label="GFlops",
            parameters={"K": K})
    #TW: ^^^^^^^^^^^^^^^ TypeError: auto_test_vs_ref() got an unexpected keyword argument 'print_seq_code'
def test_red2d(ctx_factory):
    """Variant of test_tim2d at n=16 that also precomputes the input
    field 'ue' before forming the derivatives."""
    dtype = np.float32
    ctx = ctx_factory()
    order = "C"
    n = 16
    from pymbolic import var
    K_sym = var("K")
    field_shape = (K_sym, n, n)
    # K - run-time symbolic
    knl = lp.make_kernel(ctx.devices[0],
            "[K] -> {[i,j,e,m,o,gi]: 0<=i,j,m,o<%d and 0<=e<K and 0<=gi<3}" % n,
            [
                "ue(a,b) := u[e,a,b]",
                "ur(a,b) := sum_float32(@o, D[a,o]*ue(o,b))",
                "us(a,b) := sum_float32(@o, D[b,o]*ue(a,o))",
                "lap[e,i,j] = "
                " sum_float32(m, D[m,i]*(G[0,e,m,j]*ur(m,j)+G[1,e,m,j]*us(m,j)))"
                "+ sum_float32(m, D[m,j]*(G[1,e,i,m]*ur(i,m)+G[2,e,i,m]*us(i,m)))"
                ],
            [
                lp.ArrayArg("u", dtype, shape=field_shape, order=order),
                lp.ArrayArg("lap", dtype, shape=field_shape, order=order),
                lp.ArrayArg("G", dtype, shape=(3,)+field_shape, order=order),
                lp.ArrayArg("D", dtype, shape=(n, n), order=order),
                lp.ValueArg("K", np.int32, approximately=1000),
                ],
            name="semlap2D", assumptions="K>=1")
    unroll = 32
    # Keep the untransformed kernel as the correctness reference.
    seq_knl = knl
    knl = lp.add_prefetch(knl, "D", ["m", "j", "i","o"])
    knl = lp.add_prefetch(knl, "u", ["i", "j", "o"])
    knl = lp.precompute(knl, "ue", np.float32, ["a", "b", "m"])
    knl = lp.precompute(knl, "ur", np.float32, ["a", "b"])
    knl = lp.precompute(knl, "us", np.float32, ["a", "b"])
    knl = lp.split_iname(knl, "e", 2, outer_tag="g.0")
    knl = lp.split_iname(knl, "j", n, inner_tag="l.0")#, slabs=(0, 1))
    knl = lp.split_iname(knl, "i", n, inner_tag="l.1")#, slabs=(0, 1))
    knl = lp.tag_inames(knl, dict(o="unr"))
    knl = lp.tag_inames(knl, dict(m="unr"))
    knl = lp.add_prefetch(knl, "G", [2,3]) # axis/argument indices on G
    kernel_gen = lp.generate_loop_schedules(knl)
    kernel_gen = lp.check_kernels(kernel_gen, dict(K=1000))
    K = 1000
    lp.auto_test_vs_ref(seq_knl, ctx, kernel_gen,
            op_count=K*((n**3)*2*2 + n*n*2*3 + (n**3)*2*2)/1e9,
            op_label="GFlops",
            parameters={"K": K})
    #TW: ^^^^^^^^^^^^^^^ TypeError: auto_test_vs_ref() got an unexpected keyword argument 'print_seq_code'
def test_tim3d(ctx_factory):
    """3D analogue of test_tim2d: spectral-element Laplacian with three
    derivative directions (ur/us/ut) and a 6-entry metric tensor G."""
    dtype = np.float32
    ctx = ctx_factory()
    order = "C"
    n = 8
    from pymbolic import var
    K_sym = var("K")
    field_shape = (K_sym, n, n, n)
    # K - run-time symbolic
    knl = lp.make_kernel(ctx.devices[0],
            "[K] -> {[i,j,k,e,m,o,gi]: 0<=i,j,k,m,o<%d and 0<=e<K and 0<=gi<6}" % n,
            [
                "ur(a,b,c) := sum_float32(@o, D[a,o]*u[e,o,b,c])",
                "us(a,b,c) := sum_float32(@o, D[b,o]*u[e,a,o,c])",
                "ut(a,b,c) := sum_float32(@o, D[c,o]*u[e,a,b,o])",
                "lap[e,i,j,k] = "
                " sum_float32(m, D[m,i]*(G[0,e,m,j,k]*ur(m,j,k) + G[1,e,m,j,k]*us(m,j,k) + G[2,e,m,j,k]*ut(m,j,k)))"
                " + sum_float32(m, D[m,j]*(G[1,e,i,m,k]*ur(i,m,k) + G[3,e,i,m,k]*us(i,m,k) + G[4,e,i,m,k]*ut(i,m,k)))"
                " + sum_float32(m, D[m,k]*(G[2,e,i,j,m]*ur(i,j,m) + G[4,e,i,j,m]*us(i,j,m) + G[5,e,i,j,m]*ut(i,j,m)))"
                ],
            [
                lp.ArrayArg("u", dtype, shape=field_shape, order=order),
                lp.ArrayArg("lap", dtype, shape=field_shape, order=order),
                lp.ArrayArg("G", dtype, shape=(6,)+field_shape, order=order),
                # lp.ConstantArrayArg("D", dtype, shape=(n, n), order=order),
                lp.ArrayArg("D", dtype, shape=(n, n), order=order),
                # lp.ImageArg("D", dtype, shape=(n, n)),
                lp.ValueArg("K", np.int32, approximately=1000),
                ],
            name="semlap3D", assumptions="K>=1")
    # Keep the untransformed kernel as the correctness reference.
    seq_knl = knl
    knl = lp.add_prefetch(knl, "D", ["m", "j", "i", "k","o"])
    knl = lp.add_prefetch(knl, "u", ["i", "j", "o", "k"])
    knl = lp.precompute(knl, "ur", np.float32, ["a", "b", "c"])
    knl = lp.precompute(knl, "us", np.float32, ["a", "b", "c"])
    knl = lp.precompute(knl, "ut", np.float32, ["a", "b", "c"])
    knl = lp.split_iname(knl, "e", 1, outer_tag="g.0")#, slabs=(0, 1))
    knl = lp.split_iname(knl, "k", n, inner_tag="l.2")#, slabs=(0, 1))
    knl = lp.split_iname(knl, "j", n, inner_tag="l.1")#, slabs=(0, 1))
    knl = lp.split_iname(knl, "i", n, inner_tag="l.0")#, slabs=(0, 1))
    # knl = lp.tag_inames(knl, dict(k_nner="unr"))
    knl = lp.tag_inames(knl, dict(o="unr"))
    knl = lp.tag_inames(knl, dict(m="unr"))
    # knl = lp.tag_inames(knl, dict(i="unr"))
    knl = lp.add_prefetch(knl, "G", [2,3,4]) # axis/argument indices on G
    kernel_gen = lp.generate_loop_schedules(knl)
    kernel_gen = lp.check_kernels(kernel_gen, dict(K=1000))
    K = 4000
    lp.auto_test_vs_ref(seq_knl, ctx, kernel_gen,
            op_count=K*((n**4)*3*2 + (n**3)*5*3 + (n**4)*3*2)/1e9,
            op_label="GFlops",
            parameters={"K": K})
    #TW: ^^^^^^^^^^^^^^^ TypeError: auto_test_vs_ref() got an unexpected keyword argument 'print_seq_code'
if __name__ == "__main__":
    import sys
    # With an argument, exec() it (e.g. a single test call); otherwise
    # run this whole file under py.test. exec of argv is developer-only.
    if len(sys.argv) > 1:
        exec(sys.argv[1])
    else:
        from py.test.cmdline import main
        main([__file__])
|
from __future__ import division
import numpy as np
import pyopencl as cl
import loopy as lp
from pyopencl.tools import pytest_generate_tests_for_pyopencl \
as pytest_generate_tests
1/0 # see sem_reagan?
def test_tim2d(ctx_factory):
    """Build a 2D spectral-element Laplacian kernel in loopy, apply a chain
    of schedule transformations, and auto-test it against the untransformed
    reference kernel."""
    dtype = np.float32
    ctx = ctx_factory()
    order = "C"
    n = 8
    from pymbolic import var
    K_sym = var("K")
    field_shape = (K_sym, n, n)
    # K - run-time symbolic
    knl = lp.make_kernel(ctx.devices[0],
            "[K] -> {[i,j,e,m,o,gi]: 0<=i,j,m,o<%d and 0<=e<K and 0<=gi<3}" % n,
            [
                "ur(a,b) := sum_float32(@o, D[a,o]*u[e,o,b])",
                "us(a,b) := sum_float32(@o, D[b,o]*u[e,a,o])",
                "lap[e,i,j] = "
                " sum_float32(m, D[m,i]*(G[0,e,m,j]*ur(m,j) + G[1,e,m,j]*us(m,j)))"
                "+ sum_float32(m, D[m,j]*(G[1,e,i,m]*ur(i,m) + G[2,e,i,m]*us(i,m)))"
                ],
            [
                lp.ArrayArg("u", dtype, shape=field_shape, order=order),
                lp.ArrayArg("lap", dtype, shape=field_shape, order=order),
                lp.ArrayArg("G", dtype, shape=(3,)+field_shape, order=order),
                # lp.ConstantArrayArg("D", dtype, shape=(n, n), order=order),
                lp.ArrayArg("D", dtype, shape=(n, n), order=order),
                # lp.ImageArg("D", dtype, shape=(n, n)),
                lp.ValueArg("K", np.int32, approximately=1000),
                ],
            name="semlap2D", assumptions="K>=1")
    unroll = 32
    # Keep the untransformed kernel as the correctness reference.
    seq_knl = knl
    knl = lp.add_prefetch(knl, "D", ["m", "j", "i","o"])
    knl = lp.add_prefetch(knl, "u", ["i", "j", "o"])
    knl = lp.precompute(knl, "ur", np.float32, ["a", "b"])
    knl = lp.precompute(knl, "us", np.float32, ["a", "b"])
    knl = lp.split_iname(knl, "e", 1, outer_tag="g.0")#, slabs=(0, 1))
    knl = lp.tag_inames(knl, dict(i="l.0", j="l.1"))
    knl = lp.tag_inames(knl, dict(o="unr"))
    knl = lp.tag_inames(knl, dict(m="unr"))
    # knl = lp.add_prefetch(knl, "G", [2,3], default_tag=None) # axis/argument indices on G
    knl = lp.add_prefetch(knl, "G", [2,3]) # axis/argument indices on G
    kernel_gen = lp.generate_loop_schedules(knl)
    kernel_gen = lp.check_kernels(kernel_gen, dict(K=1000))
    K = 1000
    lp.auto_test_vs_ref(seq_knl, ctx, kernel_gen,
            op_count=K*(n*n*n*2*2 + n*n*2*3 + n**3 * 2*2)/1e9,
            op_label="GFlops",
            parameters={"K": K})
    #TW: ^^^^^^^^^^^^^^^ TypeError: auto_test_vs_ref() got an unexpected keyword argument 'print_seq_code'
def test_red2d(ctx_factory):
    """Variant of test_tim2d at n=16 that also precomputes the input
    field 'ue' before forming the derivatives."""
    dtype = np.float32
    ctx = ctx_factory()
    order = "C"
    n = 16
    from pymbolic import var
    K_sym = var("K")
    field_shape = (K_sym, n, n)
    # K - run-time symbolic
    knl = lp.make_kernel(ctx.devices[0],
            "[K] -> {[i,j,e,m,o,gi]: 0<=i,j,m,o<%d and 0<=e<K and 0<=gi<3}" % n,
            [
                "ue(a,b) := u[e,a,b]",
                "ur(a,b) := sum_float32(@o, D[a,o]*ue(o,b))",
                "us(a,b) := sum_float32(@o, D[b,o]*ue(a,o))",
                "lap[e,i,j] = "
                " sum_float32(m, D[m,i]*(G[0,e,m,j]*ur(m,j)+G[1,e,m,j]*us(m,j)))"
                "+ sum_float32(m, D[m,j]*(G[1,e,i,m]*ur(i,m)+G[2,e,i,m]*us(i,m)))"
                ],
            [
                lp.ArrayArg("u", dtype, shape=field_shape, order=order),
                lp.ArrayArg("lap", dtype, shape=field_shape, order=order),
                lp.ArrayArg("G", dtype, shape=(3,)+field_shape, order=order),
                lp.ArrayArg("D", dtype, shape=(n, n), order=order),
                lp.ValueArg("K", np.int32, approximately=1000),
                ],
            name="semlap2D", assumptions="K>=1")
    unroll = 32
    # Keep the untransformed kernel as the correctness reference.
    seq_knl = knl
    knl = lp.add_prefetch(knl, "D", ["m", "j", "i","o"])
    knl = lp.add_prefetch(knl, "u", ["i", "j", "o"])
    knl = lp.precompute(knl, "ue", np.float32, ["a", "b", "m"])
    knl = lp.precompute(knl, "ur", np.float32, ["a", "b"])
    knl = lp.precompute(knl, "us", np.float32, ["a", "b"])
    knl = lp.split_iname(knl, "e", 2, outer_tag="g.0")
    knl = lp.split_iname(knl, "j", n, inner_tag="l.0")#, slabs=(0, 1))
    knl = lp.split_iname(knl, "i", n, inner_tag="l.1")#, slabs=(0, 1))
    knl = lp.tag_inames(knl, dict(o="unr"))
    knl = lp.tag_inames(knl, dict(m="unr"))
    knl = lp.add_prefetch(knl, "G", [2,3]) # axis/argument indices on G
    kernel_gen = lp.generate_loop_schedules(knl)
    kernel_gen = lp.check_kernels(kernel_gen, dict(K=1000))
    K = 1000
    lp.auto_test_vs_ref(seq_knl, ctx, kernel_gen,
            op_count=K*((n**3)*2*2 + n*n*2*3 + (n**3)*2*2)/1e9,
            op_label="GFlops",
            parameters={"K": K})
    #TW: ^^^^^^^^^^^^^^^ TypeError: auto_test_vs_ref() got an unexpected keyword argument 'print_seq_code'
def test_tim3d(ctx_factory):
    """3D analogue of test_tim2d: spectral-element Laplacian with three
    derivative directions (ur/us/ut) and a 6-entry metric tensor G."""
    dtype = np.float32
    ctx = ctx_factory()
    order = "C"
    n = 8
    from pymbolic import var
    K_sym = var("K")
    field_shape = (K_sym, n, n, n)
    # K - run-time symbolic
    knl = lp.make_kernel(ctx.devices[0],
            "[K] -> {[i,j,k,e,m,o,gi]: 0<=i,j,k,m,o<%d and 0<=e<K and 0<=gi<6}" % n,
            [
                "ur(a,b,c) := sum_float32(@o, D[a,o]*u[e,o,b,c])",
                "us(a,b,c) := sum_float32(@o, D[b,o]*u[e,a,o,c])",
                "ut(a,b,c) := sum_float32(@o, D[c,o]*u[e,a,b,o])",
                "lap[e,i,j,k] = "
                " sum_float32(m, D[m,i]*(G[0,e,m,j,k]*ur(m,j,k) + G[1,e,m,j,k]*us(m,j,k) + G[2,e,m,j,k]*ut(m,j,k)))"
                " + sum_float32(m, D[m,j]*(G[1,e,i,m,k]*ur(i,m,k) + G[3,e,i,m,k]*us(i,m,k) + G[4,e,i,m,k]*ut(i,m,k)))"
                " + sum_float32(m, D[m,k]*(G[2,e,i,j,m]*ur(i,j,m) + G[4,e,i,j,m]*us(i,j,m) + G[5,e,i,j,m]*ut(i,j,m)))"
                ],
            [
                lp.ArrayArg("u", dtype, shape=field_shape, order=order),
                lp.ArrayArg("lap", dtype, shape=field_shape, order=order),
                lp.ArrayArg("G", dtype, shape=(6,)+field_shape, order=order),
                # lp.ConstantArrayArg("D", dtype, shape=(n, n), order=order),
                lp.ArrayArg("D", dtype, shape=(n, n), order=order),
                # lp.ImageArg("D", dtype, shape=(n, n)),
                lp.ValueArg("K", np.int32, approximately=1000),
                ],
            name="semlap3D", assumptions="K>=1")
    # Keep the untransformed kernel as the correctness reference.
    seq_knl = knl
    knl = lp.add_prefetch(knl, "D", ["m", "j", "i", "k","o"])
    knl = lp.add_prefetch(knl, "u", ["i", "j", "o", "k"])
    knl = lp.precompute(knl, "ur", np.float32, ["a", "b", "c"])
    knl = lp.precompute(knl, "us", np.float32, ["a", "b", "c"])
    knl = lp.precompute(knl, "ut", np.float32, ["a", "b", "c"])
    knl = lp.split_iname(knl, "e", 1, outer_tag="g.0")#, slabs=(0, 1))
    knl = lp.split_iname(knl, "k", n, inner_tag="l.2")#, slabs=(0, 1))
    knl = lp.split_iname(knl, "j", n, inner_tag="l.1")#, slabs=(0, 1))
    knl = lp.split_iname(knl, "i", n, inner_tag="l.0")#, slabs=(0, 1))
    # knl = lp.tag_inames(knl, dict(k_nner="unr"))
    knl = lp.tag_inames(knl, dict(o="unr"))
    knl = lp.tag_inames(knl, dict(m="unr"))
    # knl = lp.tag_inames(knl, dict(i="unr"))
    knl = lp.add_prefetch(knl, "G", [2,3,4]) # axis/argument indices on G
    kernel_gen = lp.generate_loop_schedules(knl)
    kernel_gen = lp.check_kernels(kernel_gen, dict(K=1000))
    K = 4000
    lp.auto_test_vs_ref(seq_knl, ctx, kernel_gen,
            op_count=K*((n**4)*3*2 + (n**3)*5*3 + (n**4)*3*2)/1e9,
            op_label="GFlops",
            parameters={"K": K})
    #TW: ^^^^^^^^^^^^^^^ TypeError: auto_test_vs_ref() got an unexpected keyword argument 'print_seq_code'
if __name__ == "__main__":
    import sys
    # With an argument, exec() it (e.g. a single test call); otherwise
    # run this whole file under py.test. exec of argv is developer-only.
    if len(sys.argv) > 1:
        exec(sys.argv[1])
    else:
        from py.test.cmdline import main
        main([__file__])
|
en
| 0.226134
|
# see sem_reagan? # K - run-time symbolic # lp.ConstantArrayArg("D", dtype, shape=(n, n), order=order), # lp.ImageArg("D", dtype, shape=(n, n)), #, slabs=(0, 1)) # knl = lp.add_prefetch(knl, "G", [2,3], default_tag=None) # axis/argument indices on G # axis/argument indices on G #TW: ^^^^^^^^^^^^^^^ TypeError: auto_test_vs_ref() got an unexpected keyword argument 'print_seq_code' # K - run-time symbolic #, slabs=(0, 1)) #, slabs=(0, 1)) # axis/argument indices on G #TW: ^^^^^^^^^^^^^^^ TypeError: auto_test_vs_ref() got an unexpected keyword argument 'print_seq_code' # K - run-time symbolic # lp.ConstantArrayArg("D", dtype, shape=(n, n), order=order), # lp.ImageArg("D", dtype, shape=(n, n)), #, slabs=(0, 1)) #, slabs=(0, 1)) #, slabs=(0, 1)) #, slabs=(0, 1)) # knl = lp.tag_inames(knl, dict(k_nner="unr")) # knl = lp.tag_inames(knl, dict(i="unr")) # axis/argument indices on G #TW: ^^^^^^^^^^^^^^^ TypeError: auto_test_vs_ref() got an unexpected keyword argument 'print_seq_code'
| 2.036205
| 2
|
setup.py
|
bumplzz69/vyper
| 3
|
6626236
|
<reponame>bumplzz69/vyper
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Dependencies needed only for running the test suite.
test_deps = [
    'pytest',
    'pytest-cov',
    'py-evm==0.2.0a34',
    'eth-tester==0.1.0b33',
    'web3==4.8.2',
]
# Optional extras: install with `pip install vyper[test]`.
extras = {
    'test': test_deps
}
setup(
    name='vyper',
    # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
    version='0.1.0-beta.6',
    description='Vyper Programming Language for Ethereum',
    long_description_markdown_filename='README.md',
    author='<NAME>',
    author_email='',
    url='https://github.com/ethereum/vyper',
    license="MIT",
    keywords='ethereum',
    include_package_data=True,
    packages=find_packages(exclude=('tests', 'docs')),
    python_requires='>=3.6',
    py_modules=['vyper'],
    install_requires=[
        'pycryptodome>=3.5.1,<4',
    ],
    setup_requires=[
        'pytest-runner',
        'setuptools-markdown'
    ],
    tests_require=test_deps,
    extras_require=extras,
    # Command-line entry points shipped as plain scripts.
    scripts=[
        'bin/vyper',
        'bin/vyper-serve',
        'bin/vyper-lll'
    ],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ]
)
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Dependencies needed only for running the test suite.
test_deps = [
    'pytest',
    'pytest-cov',
    'py-evm==0.2.0a34',
    'eth-tester==0.1.0b33',
    'web3==4.8.2',
]
# Optional extras: install with `pip install vyper[test]`.
extras = {
    'test': test_deps
}
setup(
    name='vyper',
    # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
    version='0.1.0-beta.6',
    description='Vyper Programming Language for Ethereum',
    long_description_markdown_filename='README.md',
    author='<NAME>',
    author_email='',
    url='https://github.com/ethereum/vyper',
    license="MIT",
    keywords='ethereum',
    include_package_data=True,
    packages=find_packages(exclude=('tests', 'docs')),
    python_requires='>=3.6',
    py_modules=['vyper'],
    install_requires=[
        'pycryptodome>=3.5.1,<4',
    ],
    setup_requires=[
        'pytest-runner',
        'setuptools-markdown'
    ],
    tests_require=test_deps,
    extras_require=extras,
    # Command-line entry points shipped as plain scripts.
    scripts=[
        'bin/vyper',
        'bin/vyper-serve',
        'bin/vyper-lll'
    ],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ]
)
|
en
| 0.74374
|
# -*- coding: utf-8 -*- # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
| 1.2187
| 1
|
tests/test_reload_conf.py
|
tac0x2a/o-namazu
| 1
|
6626237
|
from unittest.mock import MagicMock, call, ANY
from pathlib import Path
from . import conftest as ct
from onamazu.onamazu import ONamazu
from onamazu.watcher import NamazuEvent
def test_return_empty_when_no_config_file():
o = ONamazu(ct.ROOT_DIR, 60)
o.event_handler = MagicMock(name="event_handler")
o.click()
o.stop()
o.event_handler.assert_not_called()
class TestReload:
def test_reload_on_create(self):
ct.place_config_file("", {"pattern": "*.csv"})
o = ONamazu(ct.ROOT_DIR, 60)
o.event_handler = MagicMock(name="event_handler")
o.click()
file01 = ct.place_file("sub", "sample01.csv", "hello,world1")
o.click()
o.event_handler.assert_not_called()
ct.place_config_file("sub", {"pattern": "*.csv"})
o.click(); o.click(); o.click() # apply
file02 = ct.place_file("sub", "sample02.csv", "hello,world2")
o.click()
o.stop()
actual = [args.args[0].src_path for args in o.event_handler.call_args_list]
assert [str(file01), str(file02)] == actual
def test_reload_on_modified(self):
ct.place_config_file("", {"pattern": "*.jsonl"})
o = ONamazu(ct.ROOT_DIR, 60)
o.event_handler = MagicMock(name="event_handler")
o.click()
file01 = ct.place_file("", "sample01.csv", "hello,world1")
o.click()
o.event_handler.assert_not_called()
ct.place_config_file("", {"pattern": "*.csv"})
o.click(); o.click(); o.click() # apply
file02 = ct.place_file("", "sample02.csv", "hello,world2")
o.click()
o.stop()
actual = [args.args[0].src_path for args in o.event_handler.call_args_list]
assert [str(file01), str(file02)] == actual
def test_reload_on_delete(self):
ct.place_config_file("", {"pattern": "*.csv"})
ct.place_config_file("sub", {"pattern": "*.csv"})
o = ONamazu(ct.ROOT_DIR, 60)
o.event_handler = MagicMock(name="event_handler")
o.click()
ct.place_file("sub", "sample01.csv", "hello,world1")
o.click()
ct.delete_config_file("sub")
o.click(); o.click(); o.click() # apply
ct.place_file("sub", "sample02.csv", "hello,world2")
o.click()
o.stop()
o.event_handler.assert_called_once_with(ANY)
ev, = o.event_handler.call_args.args
assert ev.src_path == "/".join([ct.ROOT_DIR, "sub", "sample01.csv"])
|
from unittest.mock import MagicMock, call, ANY
from pathlib import Path
from . import conftest as ct
from onamazu.onamazu import ONamazu
from onamazu.watcher import NamazuEvent
def test_return_empty_when_no_config_file():
o = ONamazu(ct.ROOT_DIR, 60)
o.event_handler = MagicMock(name="event_handler")
o.click()
o.stop()
o.event_handler.assert_not_called()
class TestReload:
def test_reload_on_create(self):
ct.place_config_file("", {"pattern": "*.csv"})
o = ONamazu(ct.ROOT_DIR, 60)
o.event_handler = MagicMock(name="event_handler")
o.click()
file01 = ct.place_file("sub", "sample01.csv", "hello,world1")
o.click()
o.event_handler.assert_not_called()
ct.place_config_file("sub", {"pattern": "*.csv"})
o.click(); o.click(); o.click() # apply
file02 = ct.place_file("sub", "sample02.csv", "hello,world2")
o.click()
o.stop()
actual = [args.args[0].src_path for args in o.event_handler.call_args_list]
assert [str(file01), str(file02)] == actual
def test_reload_on_modified(self):
ct.place_config_file("", {"pattern": "*.jsonl"})
o = ONamazu(ct.ROOT_DIR, 60)
o.event_handler = MagicMock(name="event_handler")
o.click()
file01 = ct.place_file("", "sample01.csv", "hello,world1")
o.click()
o.event_handler.assert_not_called()
ct.place_config_file("", {"pattern": "*.csv"})
o.click(); o.click(); o.click() # apply
file02 = ct.place_file("", "sample02.csv", "hello,world2")
o.click()
o.stop()
actual = [args.args[0].src_path for args in o.event_handler.call_args_list]
assert [str(file01), str(file02)] == actual
def test_reload_on_delete(self):
ct.place_config_file("", {"pattern": "*.csv"})
ct.place_config_file("sub", {"pattern": "*.csv"})
o = ONamazu(ct.ROOT_DIR, 60)
o.event_handler = MagicMock(name="event_handler")
o.click()
ct.place_file("sub", "sample01.csv", "hello,world1")
o.click()
ct.delete_config_file("sub")
o.click(); o.click(); o.click() # apply
ct.place_file("sub", "sample02.csv", "hello,world2")
o.click()
o.stop()
o.event_handler.assert_called_once_with(ANY)
ev, = o.event_handler.call_args.args
assert ev.src_path == "/".join([ct.ROOT_DIR, "sub", "sample01.csv"])
|
en
| 0.22234
|
# apply # apply # apply
| 2.400609
| 2
|
webcomix/tests/test_search.py
|
J-CPelletier/WebComicToCBZ
| 29
|
6626238
|
<filename>webcomix/tests/test_search.py
from webcomix.comic import Comic
from webcomix.search import discovery
from webcomix.tests.fake_websites.fixture import (
one_webpage_searchable_uri,
three_webpages_uri,
three_webpages_classes_uri,
)
def test_search_searchable_website(mocker, three_webpages_classes_uri):
expected = Comic(
"Blindsprings",
three_webpages_classes_uri,
"//*[contains(translate(@class, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ','abcdefghijklmnopqrstuvwxyz'), 'comic')]//@src",
"//*[contains(translate(@class, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ','abcdefghijklmnopqrstuvwxyz'), 'next')]//@href",
)
mocker.patch("webcomix.search.possible_image_xpath", ["comic"])
mocker.patch("webcomix.search.possible_next_page_xpath", ["next"])
mocker.patch("webcomix.search.possible_tags_image", ["*"])
mocker.patch("webcomix.search.possible_tags_next", ["*"])
mocker.patch("webcomix.search.possible_attributes_image", ["@class"])
mocker.patch("webcomix.search.possible_attributes_next", ["@class"])
mocker.patch("webcomix.util.check_first_pages")
comic, result = discovery("Blindsprings", three_webpages_classes_uri)
three_webpages_classes_folder = three_webpages_classes_uri.strip("1.html")
assert result == [
{
"page": 1,
"url": three_webpages_classes_uri,
"image_urls": [three_webpages_classes_folder + "1.jpeg"],
"alt_text": None,
},
{
"page": 2,
"url": three_webpages_classes_folder + "2.html",
"image_urls": [three_webpages_classes_folder + "2.jpeg"],
"alt_text": None,
},
{
"page": 3,
"url": three_webpages_classes_folder + "3.html",
"image_urls": [three_webpages_classes_folder + "3.jpeg"],
"alt_text": None,
},
]
assert comic.start_url == expected.start_url
assert comic.next_page_selector == expected.next_page_selector
assert comic.comic_image_selector == expected.comic_image_selector
def test_search_unsearchable_website(mocker, three_webpages_uri):
mocker.patch("webcomix.search.possible_image_xpath", ["comic"])
mocker.patch("webcomix.search.possible_next_page_xpath", ["next"])
mocker.patch("webcomix.search.possible_tags_image", ["*"])
mocker.patch("webcomix.search.possible_tags_next", ["*"])
mocker.patch("webcomix.search.possible_attributes_image", ["@class"])
mocker.patch("webcomix.search.possible_attributes_next", ["@class"])
assert discovery("test", three_webpages_uri) == (None, None)
def test_can_stop_searching(mocker, three_webpages_classes_uri):
mocker.patch("webcomix.search.possible_image_xpath", ["comic"])
mocker.patch("webcomix.search.possible_next_page_xpath", ["next"])
mocker.patch("webcomix.search.possible_tags_image", ["div"])
mocker.patch("webcomix.search.possible_tags_next", ["div"])
mocker.patch("webcomix.search.possible_attributes_image", ["@rel"])
mocker.patch("webcomix.search.possible_attributes_next", ["@class"])
exit_called = mocker.patch("sys.exit")
mocker.patch("webcomix.comic.Comic.verify_xpath", side_effect=KeyboardInterrupt)
result = discovery("test", three_webpages_classes_uri)
assert exit_called.call_count == 1
assert result == (None, None)
def test_can_find_single_page_correctly_while_searching(
mocker, one_webpage_searchable_uri
):
mocker.patch("webcomix.search.possible_image_xpath", ["image"])
mocker.patch("webcomix.search.possible_next_page_xpath", ["next"])
mocker.patch("webcomix.search.possible_tags_image", ["*"])
mocker.patch("webcomix.search.possible_tags_next", ["*"])
mocker.patch("webcomix.search.possible_attributes_image", ["@class"])
mocker.patch("webcomix.search.possible_attributes_next", ["."])
comic, result = discovery("test", one_webpage_searchable_uri, single_page=True)
validation = comic.verify_xpath()
assert len(result) == 1
assert result == validation
assert len(result[0]["image_urls"]) == 2
|
<filename>webcomix/tests/test_search.py
from webcomix.comic import Comic
from webcomix.search import discovery
from webcomix.tests.fake_websites.fixture import (
one_webpage_searchable_uri,
three_webpages_uri,
three_webpages_classes_uri,
)
def test_search_searchable_website(mocker, three_webpages_classes_uri):
expected = Comic(
"Blindsprings",
three_webpages_classes_uri,
"//*[contains(translate(@class, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ','abcdefghijklmnopqrstuvwxyz'), 'comic')]//@src",
"//*[contains(translate(@class, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ','abcdefghijklmnopqrstuvwxyz'), 'next')]//@href",
)
mocker.patch("webcomix.search.possible_image_xpath", ["comic"])
mocker.patch("webcomix.search.possible_next_page_xpath", ["next"])
mocker.patch("webcomix.search.possible_tags_image", ["*"])
mocker.patch("webcomix.search.possible_tags_next", ["*"])
mocker.patch("webcomix.search.possible_attributes_image", ["@class"])
mocker.patch("webcomix.search.possible_attributes_next", ["@class"])
mocker.patch("webcomix.util.check_first_pages")
comic, result = discovery("Blindsprings", three_webpages_classes_uri)
three_webpages_classes_folder = three_webpages_classes_uri.strip("1.html")
assert result == [
{
"page": 1,
"url": three_webpages_classes_uri,
"image_urls": [three_webpages_classes_folder + "1.jpeg"],
"alt_text": None,
},
{
"page": 2,
"url": three_webpages_classes_folder + "2.html",
"image_urls": [three_webpages_classes_folder + "2.jpeg"],
"alt_text": None,
},
{
"page": 3,
"url": three_webpages_classes_folder + "3.html",
"image_urls": [three_webpages_classes_folder + "3.jpeg"],
"alt_text": None,
},
]
assert comic.start_url == expected.start_url
assert comic.next_page_selector == expected.next_page_selector
assert comic.comic_image_selector == expected.comic_image_selector
def test_search_unsearchable_website(mocker, three_webpages_uri):
mocker.patch("webcomix.search.possible_image_xpath", ["comic"])
mocker.patch("webcomix.search.possible_next_page_xpath", ["next"])
mocker.patch("webcomix.search.possible_tags_image", ["*"])
mocker.patch("webcomix.search.possible_tags_next", ["*"])
mocker.patch("webcomix.search.possible_attributes_image", ["@class"])
mocker.patch("webcomix.search.possible_attributes_next", ["@class"])
assert discovery("test", three_webpages_uri) == (None, None)
def test_can_stop_searching(mocker, three_webpages_classes_uri):
mocker.patch("webcomix.search.possible_image_xpath", ["comic"])
mocker.patch("webcomix.search.possible_next_page_xpath", ["next"])
mocker.patch("webcomix.search.possible_tags_image", ["div"])
mocker.patch("webcomix.search.possible_tags_next", ["div"])
mocker.patch("webcomix.search.possible_attributes_image", ["@rel"])
mocker.patch("webcomix.search.possible_attributes_next", ["@class"])
exit_called = mocker.patch("sys.exit")
mocker.patch("webcomix.comic.Comic.verify_xpath", side_effect=KeyboardInterrupt)
result = discovery("test", three_webpages_classes_uri)
assert exit_called.call_count == 1
assert result == (None, None)
def test_can_find_single_page_correctly_while_searching(
mocker, one_webpage_searchable_uri
):
mocker.patch("webcomix.search.possible_image_xpath", ["image"])
mocker.patch("webcomix.search.possible_next_page_xpath", ["next"])
mocker.patch("webcomix.search.possible_tags_image", ["*"])
mocker.patch("webcomix.search.possible_tags_next", ["*"])
mocker.patch("webcomix.search.possible_attributes_image", ["@class"])
mocker.patch("webcomix.search.possible_attributes_next", ["."])
comic, result = discovery("test", one_webpage_searchable_uri, single_page=True)
validation = comic.verify_xpath()
assert len(result) == 1
assert result == validation
assert len(result[0]["image_urls"]) == 2
|
none
| 1
| 2.328262
| 2
|
|
dm_pix/_src/augment_test.py
|
SupreethRao99/dm_pix
| 0
|
6626239
|
<reponame>SupreethRao99/dm_pix
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_pix._src.augment."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from dm_pix._src import augment
import jax
import numpy as np
import tensorflow as tf
_IMG_SHAPE = (131, 111, 3)
_RAND_FLOATS_IN_RANGE = list(
np.random.uniform(0., 1., size=(10,) + _IMG_SHAPE).astype(np.float32))
_RAND_FLOATS_OUT_OF_RANGE = list(
np.random.uniform(-0.5, 1.5, size=(10,) + _IMG_SHAPE).astype(np.float32))
_KERNEL_SIZE = _IMG_SHAPE[0] / 10.
class _ImageAugmentationTest(parameterized.TestCase):
"""Runs tests for the various augments with the correct arguments."""
def _test_fn_with_random_arg(self, images_list, jax_fn, tf_fn, **kw_range):
pass
def _test_fn(self, images_list, jax_fn, tf_fn):
pass
def assertAllCloseTolerant(self, x, y):
# Increase tolerance on TPU due to lower precision.
tol = 1e-2 if jax.local_devices()[0].platform == "tpu" else 1e-4
np.testing.assert_allclose(x, y, rtol=tol, atol=tol)
self.assertEqual(x.dtype, y.dtype)
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_adjust_brightness(self, images_list):
self._test_fn_with_random_arg(
images_list,
jax_fn=augment.adjust_brightness,
tf_fn=tf.image.adjust_brightness,
delta=(-0.5, 0.5))
key = jax.random.PRNGKey(0)
self._test_fn_with_random_arg(
images_list,
jax_fn=functools.partial(augment.random_brightness, key),
tf_fn=None,
max_delta=(0, 0.5))
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_adjust_contrast(self, images_list):
self._test_fn_with_random_arg(
images_list,
jax_fn=augment.adjust_contrast,
tf_fn=tf.image.adjust_contrast,
factor=(0.5, 1.5))
key = jax.random.PRNGKey(0)
self._test_fn_with_random_arg(
images_list,
jax_fn=functools.partial(augment.random_contrast, key, upper=1),
tf_fn=None,
lower=(0, 0.9))
# Doesn't make sense outside of [0, 1].
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE))
def test_adjust_gamma(self, images_list):
self._test_fn_with_random_arg(
images_list,
jax_fn=augment.adjust_gamma,
tf_fn=tf.image.adjust_gamma,
gamma=(0.5, 1.5))
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_adjust_saturation(self, images_list):
# tf.image.adjust_saturation has a buggy implementation when the green and
# blue channels have very close values that don't match the red channel.
# This is due to a rounding error in http://shortn/_ETSJsEwUj5
# if (g - b) < 0 but small enough that (hh + 1) == 1.
# Eg: tf.image.adjust_saturation([[[0.75, 0.0369078, 0.0369079]]], 1.0)
# -> [[[0.03690779, 0.03690779, 0.03690779]]]
# Perturb the inputs slightly so that this doesn't happen.
def perturb(rgb):
rgb_new = np.copy(rgb)
rgb_new[..., 1] += 0.001 * (np.abs(rgb[..., 2] - rgb[..., 1]) < 1e-3)
return rgb_new
images_list = list(map(perturb, images_list))
self._test_fn_with_random_arg(
images_list,
jax_fn=augment.adjust_saturation,
tf_fn=tf.image.adjust_saturation,
factor=(0.5, 1.5))
key = jax.random.PRNGKey(0)
self._test_fn_with_random_arg(
images_list,
jax_fn=functools.partial(augment.random_saturation, key, upper=1),
tf_fn=None,
lower=(0, 0.9))
# CPU TF uses a different hue adjustment method outside of the [0, 1] range.
# Disable out-of-range tests.
@parameterized.named_parameters(
("in_range", _RAND_FLOATS_IN_RANGE),)
def test_adjust_hue(self, images_list):
self._test_fn_with_random_arg(
images_list,
jax_fn=augment.adjust_hue,
tf_fn=tf.image.adjust_hue,
delta=(-0.5, 0.5))
key = jax.random.PRNGKey(0)
self._test_fn_with_random_arg(
images_list,
jax_fn=functools.partial(augment.random_hue, key),
tf_fn=None,
max_delta=(0, 0.5))
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_rot90(self, images_list):
self._test_fn(
images_list,
jax_fn=lambda img: augment.rot90(img, k=1),
tf_fn=lambda img: tf.image.rot90(img, k=1))
self._test_fn(
images_list,
jax_fn=lambda img: augment.rot90(img, k=2),
tf_fn=lambda img: tf.image.rot90(img, k=2))
self._test_fn(
images_list,
jax_fn=lambda img: augment.rot90(img, k=3),
tf_fn=lambda img: tf.image.rot90(img, k=3))
# The functions below don't have a TF equivalent to compare to, we just check
# that they run.
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_flip(self, images_list):
self._test_fn(
images_list,
jax_fn=augment.flip_left_right,
tf_fn=tf.image.flip_left_right)
self._test_fn(
images_list, jax_fn=augment.flip_up_down, tf_fn=tf.image.flip_up_down)
key = jax.random.PRNGKey(0)
self._test_fn(
images_list,
jax_fn=functools.partial(augment.random_flip_left_right, key),
tf_fn=None)
self._test_fn(
images_list,
jax_fn=functools.partial(augment.random_flip_up_down, key),
tf_fn=None)
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_solarize(self, images_list):
self._test_fn_with_random_arg(
images_list, jax_fn=augment.solarize, tf_fn=None, threshold=(0., 1.))
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_gaussian_blur(self, images_list):
blur_fn = functools.partial(augment.gaussian_blur, kernel_size=_KERNEL_SIZE)
self._test_fn_with_random_arg(
images_list, jax_fn=blur_fn, tf_fn=None, sigma=(0.1, 2.0))
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_random_crop(self, images_list):
key = jax.random.PRNGKey(43)
crop_fn = lambda img: augment.random_crop(key, img, (100, 100, 3))
self._test_fn(images_list, jax_fn=crop_fn, tf_fn=None)
class TestMatchTensorflow(_ImageAugmentationTest):
def _test_fn_with_random_arg(self, images_list, jax_fn, tf_fn, **kw_range):
if tf_fn is None:
return
assert len(kw_range) == 1
kw_name, (random_min, random_max) = list(kw_range.items())[0]
for image_rgb in images_list:
argument = np.random.uniform(random_min, random_max, size=())
adjusted_jax = jax_fn(image_rgb, **{kw_name: argument})
adjusted_tf = tf_fn(image_rgb, argument).numpy()
self.assertAllCloseTolerant(adjusted_jax, adjusted_tf)
def _test_fn(self, images_list, jax_fn, tf_fn):
if tf_fn is None:
return
for image_rgb in images_list:
adjusted_jax = jax_fn(image_rgb)
adjusted_tf = tf_fn(image_rgb).numpy()
self.assertAllCloseTolerant(adjusted_jax, adjusted_tf)
class TestVmap(_ImageAugmentationTest):
def _test_fn_with_random_arg(self, images_list, jax_fn, tf_fn, **kw_range):
del tf_fn # unused.
assert len(kw_range) == 1
kw_name, (random_min, random_max) = list(kw_range.items())[0]
arguments = [
np.random.uniform(random_min, random_max, size=()) for _ in images_list
]
fn_vmap = jax.vmap(jax_fn)
outputs_vmaped = list(
fn_vmap(np.stack(images_list, axis=0), np.stack(arguments, axis=0)))
assert len(images_list) == len(outputs_vmaped)
assert len(images_list) == len(arguments)
for image_rgb, argument, adjusted_vmap in zip(images_list, arguments,
outputs_vmaped):
adjusted_jax = jax_fn(image_rgb, **{kw_name: argument})
self.assertAllCloseTolerant(adjusted_jax, adjusted_vmap)
def _test_fn(self, images_list, jax_fn, tf_fn):
del tf_fn # unused.
fn_vmap = jax.vmap(jax_fn)
outputs_vmaped = list(fn_vmap(np.stack(images_list, axis=0)))
assert len(images_list) == len(outputs_vmaped)
for image_rgb, adjusted_vmap in zip(images_list, outputs_vmaped):
adjusted_jax = jax_fn(image_rgb)
self.assertAllCloseTolerant(adjusted_jax, adjusted_vmap)
class TestJit(_ImageAugmentationTest):
def _test_fn_with_random_arg(self, images_list, jax_fn, tf_fn, **kw_range):
del tf_fn # unused.
assert len(kw_range) == 1
kw_name, (random_min, random_max) = list(kw_range.items())[0]
jax_fn_jitted = jax.jit(jax_fn)
for image_rgb in images_list:
argument = np.random.uniform(random_min, random_max, size=())
adjusted_jax = jax_fn(image_rgb, argument)
adjusted_jit = jax_fn_jitted(image_rgb, **{kw_name: argument})
self.assertAllCloseTolerant(adjusted_jax, adjusted_jit)
def _test_fn(self, images_list, jax_fn, tf_fn):
del tf_fn # unused.
jax_fn_jitted = jax.jit(jax_fn)
for image_rgb in images_list:
adjusted_jax = jax_fn(image_rgb)
adjusted_jit = jax_fn_jitted(image_rgb)
self.assertAllCloseTolerant(adjusted_jax, adjusted_jit)
if __name__ == "__main__":
absltest.main()
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_pix._src.augment."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from dm_pix._src import augment
import jax
import numpy as np
import tensorflow as tf
_IMG_SHAPE = (131, 111, 3)
_RAND_FLOATS_IN_RANGE = list(
np.random.uniform(0., 1., size=(10,) + _IMG_SHAPE).astype(np.float32))
_RAND_FLOATS_OUT_OF_RANGE = list(
np.random.uniform(-0.5, 1.5, size=(10,) + _IMG_SHAPE).astype(np.float32))
_KERNEL_SIZE = _IMG_SHAPE[0] / 10.
class _ImageAugmentationTest(parameterized.TestCase):
"""Runs tests for the various augments with the correct arguments."""
def _test_fn_with_random_arg(self, images_list, jax_fn, tf_fn, **kw_range):
pass
def _test_fn(self, images_list, jax_fn, tf_fn):
pass
def assertAllCloseTolerant(self, x, y):
# Increase tolerance on TPU due to lower precision.
tol = 1e-2 if jax.local_devices()[0].platform == "tpu" else 1e-4
np.testing.assert_allclose(x, y, rtol=tol, atol=tol)
self.assertEqual(x.dtype, y.dtype)
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_adjust_brightness(self, images_list):
self._test_fn_with_random_arg(
images_list,
jax_fn=augment.adjust_brightness,
tf_fn=tf.image.adjust_brightness,
delta=(-0.5, 0.5))
key = jax.random.PRNGKey(0)
self._test_fn_with_random_arg(
images_list,
jax_fn=functools.partial(augment.random_brightness, key),
tf_fn=None,
max_delta=(0, 0.5))
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_adjust_contrast(self, images_list):
self._test_fn_with_random_arg(
images_list,
jax_fn=augment.adjust_contrast,
tf_fn=tf.image.adjust_contrast,
factor=(0.5, 1.5))
key = jax.random.PRNGKey(0)
self._test_fn_with_random_arg(
images_list,
jax_fn=functools.partial(augment.random_contrast, key, upper=1),
tf_fn=None,
lower=(0, 0.9))
# Doesn't make sense outside of [0, 1].
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE))
def test_adjust_gamma(self, images_list):
self._test_fn_with_random_arg(
images_list,
jax_fn=augment.adjust_gamma,
tf_fn=tf.image.adjust_gamma,
gamma=(0.5, 1.5))
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_adjust_saturation(self, images_list):
# tf.image.adjust_saturation has a buggy implementation when the green and
# blue channels have very close values that don't match the red channel.
# This is due to a rounding error in http://shortn/_ETSJsEwUj5
# if (g - b) < 0 but small enough that (hh + 1) == 1.
# Eg: tf.image.adjust_saturation([[[0.75, 0.0369078, 0.0369079]]], 1.0)
# -> [[[0.03690779, 0.03690779, 0.03690779]]]
# Perturb the inputs slightly so that this doesn't happen.
def perturb(rgb):
rgb_new = np.copy(rgb)
rgb_new[..., 1] += 0.001 * (np.abs(rgb[..., 2] - rgb[..., 1]) < 1e-3)
return rgb_new
images_list = list(map(perturb, images_list))
self._test_fn_with_random_arg(
images_list,
jax_fn=augment.adjust_saturation,
tf_fn=tf.image.adjust_saturation,
factor=(0.5, 1.5))
key = jax.random.PRNGKey(0)
self._test_fn_with_random_arg(
images_list,
jax_fn=functools.partial(augment.random_saturation, key, upper=1),
tf_fn=None,
lower=(0, 0.9))
# CPU TF uses a different hue adjustment method outside of the [0, 1] range.
# Disable out-of-range tests.
@parameterized.named_parameters(
("in_range", _RAND_FLOATS_IN_RANGE),)
def test_adjust_hue(self, images_list):
self._test_fn_with_random_arg(
images_list,
jax_fn=augment.adjust_hue,
tf_fn=tf.image.adjust_hue,
delta=(-0.5, 0.5))
key = jax.random.PRNGKey(0)
self._test_fn_with_random_arg(
images_list,
jax_fn=functools.partial(augment.random_hue, key),
tf_fn=None,
max_delta=(0, 0.5))
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_rot90(self, images_list):
self._test_fn(
images_list,
jax_fn=lambda img: augment.rot90(img, k=1),
tf_fn=lambda img: tf.image.rot90(img, k=1))
self._test_fn(
images_list,
jax_fn=lambda img: augment.rot90(img, k=2),
tf_fn=lambda img: tf.image.rot90(img, k=2))
self._test_fn(
images_list,
jax_fn=lambda img: augment.rot90(img, k=3),
tf_fn=lambda img: tf.image.rot90(img, k=3))
# The functions below don't have a TF equivalent to compare to, we just check
# that they run.
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_flip(self, images_list):
self._test_fn(
images_list,
jax_fn=augment.flip_left_right,
tf_fn=tf.image.flip_left_right)
self._test_fn(
images_list, jax_fn=augment.flip_up_down, tf_fn=tf.image.flip_up_down)
key = jax.random.PRNGKey(0)
self._test_fn(
images_list,
jax_fn=functools.partial(augment.random_flip_left_right, key),
tf_fn=None)
self._test_fn(
images_list,
jax_fn=functools.partial(augment.random_flip_up_down, key),
tf_fn=None)
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_solarize(self, images_list):
self._test_fn_with_random_arg(
images_list, jax_fn=augment.solarize, tf_fn=None, threshold=(0., 1.))
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_gaussian_blur(self, images_list):
blur_fn = functools.partial(augment.gaussian_blur, kernel_size=_KERNEL_SIZE)
self._test_fn_with_random_arg(
images_list, jax_fn=blur_fn, tf_fn=None, sigma=(0.1, 2.0))
@parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE),
("out_of_range", _RAND_FLOATS_OUT_OF_RANGE))
def test_random_crop(self, images_list):
key = jax.random.PRNGKey(43)
crop_fn = lambda img: augment.random_crop(key, img, (100, 100, 3))
self._test_fn(images_list, jax_fn=crop_fn, tf_fn=None)
class TestMatchTensorflow(_ImageAugmentationTest):
def _test_fn_with_random_arg(self, images_list, jax_fn, tf_fn, **kw_range):
if tf_fn is None:
return
assert len(kw_range) == 1
kw_name, (random_min, random_max) = list(kw_range.items())[0]
for image_rgb in images_list:
argument = np.random.uniform(random_min, random_max, size=())
adjusted_jax = jax_fn(image_rgb, **{kw_name: argument})
adjusted_tf = tf_fn(image_rgb, argument).numpy()
self.assertAllCloseTolerant(adjusted_jax, adjusted_tf)
def _test_fn(self, images_list, jax_fn, tf_fn):
if tf_fn is None:
return
for image_rgb in images_list:
adjusted_jax = jax_fn(image_rgb)
adjusted_tf = tf_fn(image_rgb).numpy()
self.assertAllCloseTolerant(adjusted_jax, adjusted_tf)
class TestVmap(_ImageAugmentationTest):
def _test_fn_with_random_arg(self, images_list, jax_fn, tf_fn, **kw_range):
del tf_fn # unused.
assert len(kw_range) == 1
kw_name, (random_min, random_max) = list(kw_range.items())[0]
arguments = [
np.random.uniform(random_min, random_max, size=()) for _ in images_list
]
fn_vmap = jax.vmap(jax_fn)
outputs_vmaped = list(
fn_vmap(np.stack(images_list, axis=0), np.stack(arguments, axis=0)))
assert len(images_list) == len(outputs_vmaped)
assert len(images_list) == len(arguments)
for image_rgb, argument, adjusted_vmap in zip(images_list, arguments,
outputs_vmaped):
adjusted_jax = jax_fn(image_rgb, **{kw_name: argument})
self.assertAllCloseTolerant(adjusted_jax, adjusted_vmap)
def _test_fn(self, images_list, jax_fn, tf_fn):
del tf_fn # unused.
fn_vmap = jax.vmap(jax_fn)
outputs_vmaped = list(fn_vmap(np.stack(images_list, axis=0)))
assert len(images_list) == len(outputs_vmaped)
for image_rgb, adjusted_vmap in zip(images_list, outputs_vmaped):
adjusted_jax = jax_fn(image_rgb)
self.assertAllCloseTolerant(adjusted_jax, adjusted_vmap)
class TestJit(_ImageAugmentationTest):
def _test_fn_with_random_arg(self, images_list, jax_fn, tf_fn, **kw_range):
del tf_fn # unused.
assert len(kw_range) == 1
kw_name, (random_min, random_max) = list(kw_range.items())[0]
jax_fn_jitted = jax.jit(jax_fn)
for image_rgb in images_list:
argument = np.random.uniform(random_min, random_max, size=())
adjusted_jax = jax_fn(image_rgb, argument)
adjusted_jit = jax_fn_jitted(image_rgb, **{kw_name: argument})
self.assertAllCloseTolerant(adjusted_jax, adjusted_jit)
def _test_fn(self, images_list, jax_fn, tf_fn):
del tf_fn # unused.
jax_fn_jitted = jax.jit(jax_fn)
for image_rgb in images_list:
adjusted_jax = jax_fn(image_rgb)
adjusted_jit = jax_fn_jitted(image_rgb)
self.assertAllCloseTolerant(adjusted_jax, adjusted_jit)
if __name__ == "__main__":
absltest.main()
|
en
| 0.841524
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for dm_pix._src.augment. Runs tests for the various augments with the correct arguments. # Increase tolerance on TPU due to lower precision. # Doesn't make sense outside of [0, 1]. # tf.image.adjust_saturation has a buggy implementation when the green and # blue channels have very close values that don't match the red channel. # This is due to a rounding error in http://shortn/_ETSJsEwUj5 # if (g - b) < 0 but small enough that (hh + 1) == 1. # Eg: tf.image.adjust_saturation([[[0.75, 0.0369078, 0.0369079]]], 1.0) # -> [[[0.03690779, 0.03690779, 0.03690779]]] # Perturb the inputs slightly so that this doesn't happen. # CPU TF uses a different hue adjustment method outside of the [0, 1] range. # Disable out-of-range tests. # The functions below don't have a TF equivalent to compare to, we just check # that they run. # unused. # unused. # unused. # unused.
| 2.050374
| 2
|
mc-sema/protobuf-2.5.0/python/google/protobuf/descriptor_database.py
|
randolphwong/mcsema
| 252
|
6626240
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a container for DescriptorProtos."""
__author__ = '<EMAIL> (<NAME>)'
class DescriptorDatabase(object):
  """A container accepting FileDescriptorProtos and maps DescriptorProtos."""

  def __init__(self):
    # Maps a .proto file name to its FileDescriptorProto.
    self._file_desc_protos_by_file = {}
    # Maps a fully qualified symbol name to the FileDescriptorProto of the
    # file that defines it.
    self._file_desc_protos_by_symbol = {}

  def Add(self, file_desc_proto):
    """Adds the FileDescriptorProto and its types to this database.

    Args:
      file_desc_proto: The FileDescriptorProto to add.
    """
    self._file_desc_protos_by_file[file_desc_proto.name] = file_desc_proto
    package = file_desc_proto.package
    for message_type in file_desc_proto.message_type:
      # Every message (and anything nested inside it) maps back to this file.
      for symbol in _ExtractSymbols(message_type, package):
        self._file_desc_protos_by_symbol[symbol] = file_desc_proto
    for enum_type in file_desc_proto.enum_type:
      enum_symbol = '.'.join((package, enum_type.name))
      self._file_desc_protos_by_symbol[enum_symbol] = file_desc_proto

  def FindFileByName(self, name):
    """Finds the file descriptor proto by file name.

    Typically the file name is a relative path ending to a .proto file. The
    proto with the given name will have to have been added to this database
    using the Add method or else an error will be raised.

    Args:
      name: The file name to find.

    Returns:
      The file descriptor proto matching the name.

    Raises:
      KeyError if no file by the given name was added.
    """
    return self._file_desc_protos_by_file[name]

  def FindFileContainingSymbol(self, symbol):
    """Finds the file descriptor proto containing the specified symbol.

    The symbol should be a fully qualified name including the file
    descriptor's package and any containing messages. Some examples:

    'some.package.name.Message'
    'some.package.name.Message.NestedEnum'

    The file descriptor proto containing the specified symbol must be added
    to this database using the Add method or else an error will be raised.

    Args:
      symbol: The fully qualified symbol name.

    Returns:
      The file descriptor proto containing the symbol.

    Raises:
      KeyError if no file contains the specified symbol.
    """
    return self._file_desc_protos_by_symbol[symbol]
def _ExtractSymbols(desc_proto, package):
  """Pulls out all the symbols from a descriptor proto.

  Args:
    desc_proto: The proto to extract symbols from.
    package: The package containing the descriptor type.

  Yields:
    The fully qualified name found in the descriptor.
  """
  qualified_name = '.'.join((package, desc_proto.name))
  yield qualified_name
  for nested in desc_proto.nested_type:
    # Recurse with the message itself acting as the enclosing "package".
    for nested_symbol in _ExtractSymbols(nested, qualified_name):
      yield nested_symbol
  for enum in desc_proto.enum_type:
    yield '.'.join((qualified_name, enum.name))
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a container for DescriptorProtos."""
__author__ = '<EMAIL> (<NAME>)'
class DescriptorDatabase(object):
  """A container accepting FileDescriptorProtos and maps DescriptorProtos."""

  def __init__(self):
    # File name -> FileDescriptorProto.
    self._file_desc_protos_by_file = {}
    # Fully qualified symbol name -> defining FileDescriptorProto.
    self._file_desc_protos_by_symbol = {}

  def Add(self, file_desc_proto):
    """Adds the FileDescriptorProto and its types to this database.

    Args:
      file_desc_proto: The FileDescriptorProto to add.
    """
    self._file_desc_protos_by_file[file_desc_proto.name] = file_desc_proto
    package = file_desc_proto.package
    # Collect every symbol the file defines, then index them all at once.
    symbols = []
    for message_proto in file_desc_proto.message_type:
      symbols.extend(_ExtractSymbols(message_proto, package))
    for enum_proto in file_desc_proto.enum_type:
      symbols.append('.'.join((package, enum_proto.name)))
    self._file_desc_protos_by_symbol.update(
        (symbol, file_desc_proto) for symbol in symbols)

  def FindFileByName(self, name):
    """Finds the file descriptor proto by file name.

    Typically the file name is a relative path ending to a .proto file. The
    proto with the given name will have to have been added to this database
    using the Add method or else an error will be raised.

    Args:
      name: The file name to find.

    Returns:
      The file descriptor proto matching the name.

    Raises:
      KeyError if no file by the given name was added.
    """
    return self._file_desc_protos_by_file[name]

  def FindFileContainingSymbol(self, symbol):
    """Finds the file descriptor proto containing the specified symbol.

    The symbol should be a fully qualified name including the file
    descriptor's package and any containing messages, e.g.
    'some.package.name.Message' or 'some.package.name.Message.NestedEnum'.
    The file containing the symbol must have been registered via Add.

    Args:
      symbol: The fully qualified symbol name.

    Returns:
      The file descriptor proto containing the symbol.

    Raises:
      KeyError if no file contains the specified symbol.
    """
    return self._file_desc_protos_by_symbol[symbol]
def _ExtractSymbols(desc_proto, package):
  """Pulls out all the symbols from a descriptor proto.

  Args:
    desc_proto: The proto to extract symbols from.
    package: The package containing the descriptor type.

  Yields:
    The fully qualified name found in the descriptor.
  """
  prefix = '.'.join((package, desc_proto.name))
  yield prefix
  # Nested messages are qualified by their enclosing message name.
  for child in desc_proto.nested_type:
    for sym in _ExtractSymbols(child, prefix):
      yield sym
  for enum_proto in desc_proto.enum_type:
    yield prefix + '.' + enum_proto.name
|
en
| 0.704856
|
# Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # http://code.google.com/p/protobuf/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Provides a container for DescriptorProtos. A container accepting FileDescriptorProtos and maps DescriptorProtos. Adds the FileDescriptorProto and its types to this database. Args: file_desc_proto: The FileDescriptorProto to add. Finds the file descriptor proto by file name. Typically the file name is a relative path ending to a .proto file. 
The proto with the given name will have to have been added to this database using the Add method or else an error will be raised. Args: name: The file name to find. Returns: The file descriptor proto matching the name. Raises: KeyError if no file by the given name was added. Finds the file descriptor proto containing the specified symbol. The symbol should be a fully qualified name including the file descriptor's package and any containing messages. Some examples: 'some.package.name.Message' 'some.package.name.Message.NestedEnum' The file descriptor proto containing the specified symbol must be added to this database using the Add method or else an error will be raised. Args: symbol: The fully qualified symbol name. Returns: The file descriptor proto containing the symbol. Raises: KeyError if no file contains the specified symbol. Pulls out all the symbols from a descriptor proto. Args: desc_proto: The proto to extract symbols from. package: The package containing the descriptor type. Yields: The fully qualified name found in the descriptor.
| 1.573636
| 2
|
gipsy/dashboard/__init__.py
|
marwahaha/gipsy-1
| 10
|
6626241
|
<filename>gipsy/dashboard/__init__.py
default_app_config = "gipsy.dashboard.apps.DefaultAppConfig"
|
<filename>gipsy/dashboard/__init__.py
default_app_config = "gipsy.dashboard.apps.DefaultAppConfig"
|
none
| 1
| 1.176349
| 1
|
|
measure_mate/migrations/0037_auto_20161211_2345.py
|
niche-tester/measure-mate
| 15
|
6626242
|
<reponame>niche-tester/measure-mate<gh_stars>10-100
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-11 23:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.4 (see file header). Sets db_index=True on
    # the enabled/rank fields and pins Team.id as an explicit AutoField.

    dependencies = [
        ('measure_mate', '0036_auto_20161208_0108'),
    ]

    operations = [
        # NOTE(review): default=1 on a BooleanField is truthy and works, but
        # default=True would be the conventional spelling.
        migrations.AlterField(
            model_name='announcement',
            name='enabled',
            field=models.BooleanField(db_index=True, default=1),
        ),
        migrations.AlterField(
            model_name='attribute',
            name='rank',
            field=models.IntegerField(db_index=True, default=1),
        ),
        migrations.AlterField(
            model_name='rating',
            name='rank',
            field=models.IntegerField(db_index=True, default=1),
        ),
        migrations.AlterField(
            model_name='team',
            name='id',
            field=models.AutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='template',
            name='enabled',
            field=models.BooleanField(db_index=True, default=1),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-11 23:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.4 (see file header). Sets db_index=True on
    # the enabled/rank fields and pins Team.id as an explicit AutoField.

    dependencies = [
        ('measure_mate', '0036_auto_20161208_0108'),
    ]

    operations = [
        # NOTE(review): default=1 on a BooleanField is truthy and works, but
        # default=True would be the conventional spelling.
        migrations.AlterField(
            model_name='announcement',
            name='enabled',
            field=models.BooleanField(db_index=True, default=1),
        ),
        migrations.AlterField(
            model_name='attribute',
            name='rank',
            field=models.IntegerField(db_index=True, default=1),
        ),
        migrations.AlterField(
            model_name='rating',
            name='rank',
            field=models.IntegerField(db_index=True, default=1),
        ),
        migrations.AlterField(
            model_name='team',
            name='id',
            field=models.AutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='template',
            name='enabled',
            field=models.BooleanField(db_index=True, default=1),
        ),
    ]
|
en
| 0.813481
|
# -*- coding: utf-8 -*- # Generated by Django 1.10.4 on 2016-12-11 23:45
| 1.567537
| 2
|
inflation_forecasting/config.py
|
fornasari12/time-series-forecasting
| 4
|
6626243
|
import logging
import yaml
# Configure root logging once at import time: timestamped messages with
# millisecond precision at INFO level.
logging.basicConfig(
    format="%(asctime)s.%(msecs)03d %(message)s",
    datefmt="%Y-%m-%d,%H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger()  # module-level handle to the root logger
logger.setLevel(logging.INFO)
def load_config(path):
    """Read the YAML file at *path* and return its parsed contents."""
    # NOTE(review): yaml.Loader can construct arbitrary Python objects;
    # prefer yaml.safe_load if the config file is not fully trusted.
    with open(path) as config_file:
        return yaml.load(config_file, Loader=yaml.Loader)
|
import logging
import yaml
logging.basicConfig(
format="%(asctime)s.%(msecs)03d %(message)s",
datefmt="%Y-%m-%d,%H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def load_config(path):
with open(path) as f:
config = yaml.load(f, Loader=yaml.Loader)
return config
|
none
| 1
| 2.552885
| 3
|
|
run.py
|
Kystalll/ProxyPool
| 0
|
6626244
|
from ProxyPool.proxypool.scheduler import Scheduler
import sys
import io
# Force UTF-8 console output (avoids UnicodeEncodeError on consoles whose
# default encoding cannot represent the scraped data, e.g. GBK on Windows).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
def main():
    """Run the proxy-pool scheduler, restarting it whenever it crashes.

    The original restarted via ``except: main()`` — a bare except that also
    swallows KeyboardInterrupt/SystemExit, and a recursive call that grows
    the stack on every crash until RecursionError. A loop with a narrower
    handler keeps the restart-forever behavior while fixing both problems.
    """
    while True:
        try:
            s = Scheduler()
            s.run()
        except Exception:
            # Scheduler died; loop around and start a fresh one.
            continue
        else:
            # run() returned normally; stop as the original did.
            break


if __name__ == '__main__':
    main()
|
from ProxyPool.proxypool.scheduler import Scheduler
import sys
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
def main():
try:
s = Scheduler()
s.run()
except:
main()
if __name__ == '__main__':
main()
|
none
| 1
| 1.96078
| 2
|
|
trio_guest_win32.py
|
richardsheridan/trio-guest
| 10
|
6626245
|
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import traceback
import trio
import win32api
import win32con
import win32gui
import win32ui
from outcome import Error
from pywin.mfc import dialog
import example_tasks
# Private window-message id used to wake the trio scheduler from the win32 pump.
TRIO_MSG = win32con.WM_APP + 3
# FIFO of pending trio callbacks; one TRIO_MSG is posted per enqueued function.
trio_functions = collections.deque()

# @cffi.def_extern() # if your mainloop is in C/C++
def do_trio():
    # Pop and run exactly one scheduled trio callback (one per TRIO_MSG received).
    trio_functions.popleft()()
class Win32Host:
    """Hosts a trio guest run on top of a win32 message pump.

    Trio callbacks are queued in the module-level ``trio_functions`` deque
    and a TRIO_MSG message is posted so ``mainloop`` knows to drain one entry.
    """

    def __init__(self, display):
        self.display = display
        # Thread id of the message-loop thread; run_sync_soon_threadsafe
        # targets it with PostThreadMessage.
        self.mainthreadid = win32api.GetCurrentThreadId()
        # create event queue with null op
        win32gui.PeekMessage(
            win32con.NULL, win32con.WM_USER, win32con.WM_USER, win32con.PM_NOREMOVE
        )

    def run_sync_soon_threadsafe(self, func):
        """Use PostThreadMessage to schedule a callback.

        https://docs.microsoft.com/en-us/windows/win32/winmsg/about-messages-and-message-queues
        """
        # NOTE(review): the message is posted *before* func is appended; if the
        # pump thread handles TRIO_MSG in between, do_trio() pops an empty
        # deque. Appending first would be the safer order — confirm.
        win32api.PostThreadMessage(self.mainthreadid, TRIO_MSG, win32con.NULL, win32con.NULL)
        trio_functions.append(func)

    def run_sync_soon_not_threadsafe(self, func):
        """Use PostMessage to schedule a callback.

        https://docs.microsoft.com/en-us/windows/win32/winmsg/about-messages-and-message-queues
        This doesn't provide any real efficiency over threadsafe.
        """
        win32api.PostMessage(win32con.NULL, TRIO_MSG, win32con.NULL, win32con.NULL)
        trio_functions.append(func)

    def done_callback(self, outcome):
        """Non-blocking request to end the main loop.

        Prints the trio outcome (with traceback on error), closes the dialog,
        and posts WM_QUIT with an exit code of 1 on error, 0 otherwise.
        """
        print(f"Outcome: {outcome}")
        if isinstance(outcome, Error):
            exc = outcome.error
            traceback.print_exception(type(exc), exc, exc.__traceback__)
            exitcode = 1
        else:
            exitcode = 0
        self.display.dialog.PostMessage(win32con.WM_CLOSE, 0, 0)
        self.display.dialog.close()
        win32gui.PostQuitMessage(exitcode)

    def mainloop(self):
        # Standard GetMessage pump: 0 means WM_QUIT, negative means error.
        while True:
            code, msg = win32gui.GetMessage(0, 0, 0)
            if not code:
                break
            if code < 0:
                error = win32api.GetLastError()
                raise RuntimeError(error)
            #######################################
            ### Trio specific part of main loop ###
            #######################################
            # Thread messages arrive with a NULL hwnd; TRIO_MSG ones are ours.
            hwnd, msgid, lparam, wparam, time, point = msg
            if hwnd == win32con.NULL and msgid == TRIO_MSG:
                do_trio()
                continue
            ###############################
            ### Trio specific part ends ###
            ###############################
            win32gui.TranslateMessage(msg)
            win32gui.DispatchMessage(msg)
def MakeDlgTemplate():
    """Build the pywin dialog template: a captioned popup with a Cancel button."""
    width, height = 300, 21
    dialog_style = (
        win32con.DS_MODALFRAME
        | win32con.WS_POPUP
        | win32con.WS_VISIBLE
        | win32con.WS_CAPTION
        | win32con.WS_SYSMENU
        | win32con.DS_SETFONT
    )
    button_style = (
        win32con.WS_TABSTOP
        | win32con.WS_CHILD
        | win32con.WS_VISIBLE
        | win32con.BS_PUSHBUTTON
    )
    return [
        # Dialog header: caption, rect, style, menu, font.
        ["...", (0, 0, width, height), dialog_style, None, (8, "MS Sans Serif")],
        # Cancel button anchored at the bottom-right corner.
        [128, "Cancel", win32con.IDCANCEL,
         (width - 60, height - 18, 50, 14), button_style],
    ]
class PBarDialog(dialog.Dialog):
    """Dialog with an embedded progress bar and a cancel hook.

    The ``cancelfn`` attribute must be assigned (see Win32Display.set_cancel)
    before the user can cancel or close the window.
    """

    def OnInitDialog(self):
        code = super().OnInitDialog()
        # Create the progress bar as a child control once the dialog exists.
        self.pbar = win32ui.CreateProgressCtrl()
        self.pbar.CreateWindow(
            win32con.WS_CHILD | win32con.WS_VISIBLE, (10, 10, 310, 24), self, 3000
        )
        return code

    def OnCancel(self):
        # also window close response
        self.cancelfn()
class Win32Display:
    """Progress UI backed by a pywin dialog; the adapter the guest task drives."""

    def __init__(self):
        self.dialog = PBarDialog(MakeDlgTemplate())
        self.dialog.CreateWindow()
        # self.display.DoModal()

    def set_title(self, title):
        self.dialog.SetWindowText(title)

    def set_max(self, maximum):
        # hack around uint16 issue: the progress-bar range is set to the full
        # 16-bit span and the true maximum is remembered so set_value can
        # rescale onto 0..65535.
        self.realmax = maximum
        self.dialog.pbar.SetRange(0, 65535)

    def set_value(self, downloaded):
        self.dialog.pbar.SetPos(int((downloaded / self.realmax * 65535)))

    def set_cancel(self, fn):
        # Invoked by PBarDialog.OnCancel when the user cancels/closes.
        self.dialog.cancelfn = fn
def main(task):
    """Start *task* as a trio guest run driven by the win32 message loop."""
    progress_ui = Win32Display()
    message_pump = Win32Host(progress_ui)
    trio.lowlevel.start_guest_run(
        task,
        progress_ui,
        run_sync_soon_threadsafe=message_pump.run_sync_soon_threadsafe,
        run_sync_soon_not_threadsafe=message_pump.run_sync_soon_not_threadsafe,
        done_callback=message_pump.done_callback,
    )
    # Block pumping messages until done_callback posts WM_QUIT.
    message_pump.mainloop()
if __name__ == "__main__":
    # Demo entry point; the guest task lives in example_tasks.
    print("Known bug: Dragging the window freezes everything.")
    print("For now only click buttons!")
    main(example_tasks.count)
|
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import traceback
import trio
import win32api
import win32con
import win32gui
import win32ui
from outcome import Error
from pywin.mfc import dialog
import example_tasks
# Private window-message id used to wake the trio scheduler from the win32 pump.
TRIO_MSG = win32con.WM_APP + 3
# FIFO of pending trio callbacks; one TRIO_MSG is posted per enqueued function.
trio_functions = collections.deque()

# @cffi.def_extern() # if your mainloop is in C/C++
def do_trio():
    # Pop and run exactly one scheduled trio callback (one per TRIO_MSG received).
    trio_functions.popleft()()
class Win32Host:
    """Hosts a trio guest run on top of a win32 message pump.

    Trio callbacks are queued in the module-level ``trio_functions`` deque
    and a TRIO_MSG message is posted so ``mainloop`` knows to drain one entry.
    """

    def __init__(self, display):
        self.display = display
        # Thread id of the message-loop thread; run_sync_soon_threadsafe
        # targets it with PostThreadMessage.
        self.mainthreadid = win32api.GetCurrentThreadId()
        # create event queue with null op
        win32gui.PeekMessage(
            win32con.NULL, win32con.WM_USER, win32con.WM_USER, win32con.PM_NOREMOVE
        )

    def run_sync_soon_threadsafe(self, func):
        """Use PostThreadMessage to schedule a callback.

        https://docs.microsoft.com/en-us/windows/win32/winmsg/about-messages-and-message-queues
        """
        # NOTE(review): the message is posted *before* func is appended; if the
        # pump thread handles TRIO_MSG in between, do_trio() pops an empty
        # deque. Appending first would be the safer order — confirm.
        win32api.PostThreadMessage(self.mainthreadid, TRIO_MSG, win32con.NULL, win32con.NULL)
        trio_functions.append(func)

    def run_sync_soon_not_threadsafe(self, func):
        """Use PostMessage to schedule a callback.

        https://docs.microsoft.com/en-us/windows/win32/winmsg/about-messages-and-message-queues
        This doesn't provide any real efficiency over threadsafe.
        """
        win32api.PostMessage(win32con.NULL, TRIO_MSG, win32con.NULL, win32con.NULL)
        trio_functions.append(func)

    def done_callback(self, outcome):
        """Non-blocking request to end the main loop.

        Prints the trio outcome (with traceback on error), closes the dialog,
        and posts WM_QUIT with an exit code of 1 on error, 0 otherwise.
        """
        print(f"Outcome: {outcome}")
        if isinstance(outcome, Error):
            exc = outcome.error
            traceback.print_exception(type(exc), exc, exc.__traceback__)
            exitcode = 1
        else:
            exitcode = 0
        self.display.dialog.PostMessage(win32con.WM_CLOSE, 0, 0)
        self.display.dialog.close()
        win32gui.PostQuitMessage(exitcode)

    def mainloop(self):
        # Standard GetMessage pump: 0 means WM_QUIT, negative means error.
        while True:
            code, msg = win32gui.GetMessage(0, 0, 0)
            if not code:
                break
            if code < 0:
                error = win32api.GetLastError()
                raise RuntimeError(error)
            #######################################
            ### Trio specific part of main loop ###
            #######################################
            # Thread messages arrive with a NULL hwnd; TRIO_MSG ones are ours.
            hwnd, msgid, lparam, wparam, time, point = msg
            if hwnd == win32con.NULL and msgid == TRIO_MSG:
                do_trio()
                continue
            ###############################
            ### Trio specific part ends ###
            ###############################
            win32gui.TranslateMessage(msg)
            win32gui.DispatchMessage(msg)
def MakeDlgTemplate():
    """Return the pywin dialog template for the progress popup with Cancel."""
    w, h = 300, 21
    popup = (
        win32con.DS_MODALFRAME
        | win32con.WS_POPUP
        | win32con.WS_VISIBLE
        | win32con.WS_CAPTION
        | win32con.WS_SYSMENU
        | win32con.DS_SETFONT
    )
    child_visible = win32con.WS_CHILD | win32con.WS_VISIBLE
    template = [["...", (0, 0, w, h), popup, None, (8, "MS Sans Serif")]]
    # Cancel button, tab-stoppable, anchored at the bottom-right corner.
    template.append(
        [128, "Cancel", win32con.IDCANCEL, (w - 60, h - 18, 50, 14),
         win32con.WS_TABSTOP | child_visible | win32con.BS_PUSHBUTTON]
    )
    return template
class PBarDialog(dialog.Dialog):
    """Dialog with an embedded progress bar and a cancel hook.

    The ``cancelfn`` attribute must be assigned (see Win32Display.set_cancel)
    before the user can cancel or close the window.
    """

    def OnInitDialog(self):
        code = super().OnInitDialog()
        # Create the progress bar as a child control once the dialog exists.
        self.pbar = win32ui.CreateProgressCtrl()
        self.pbar.CreateWindow(
            win32con.WS_CHILD | win32con.WS_VISIBLE, (10, 10, 310, 24), self, 3000
        )
        return code

    def OnCancel(self):
        # also window close response
        self.cancelfn()
class Win32Display:
    """Progress UI backed by a pywin dialog; the adapter the guest task drives."""

    def __init__(self):
        self.dialog = PBarDialog(MakeDlgTemplate())
        self.dialog.CreateWindow()
        # self.display.DoModal()

    def set_title(self, title):
        self.dialog.SetWindowText(title)

    def set_max(self, maximum):
        # hack around uint16 issue: the progress-bar range is set to the full
        # 16-bit span and the true maximum is remembered so set_value can
        # rescale onto 0..65535.
        self.realmax = maximum
        self.dialog.pbar.SetRange(0, 65535)

    def set_value(self, downloaded):
        self.dialog.pbar.SetPos(int((downloaded / self.realmax * 65535)))

    def set_cancel(self, fn):
        # Invoked by PBarDialog.OnCancel when the user cancels/closes.
        self.dialog.cancelfn = fn
def main(task):
    """Start *task* as a trio guest run driven by the win32 message loop."""
    ui = Win32Display()
    pump = Win32Host(ui)
    trio.lowlevel.start_guest_run(
        task,
        ui,
        run_sync_soon_threadsafe=pump.run_sync_soon_threadsafe,
        run_sync_soon_not_threadsafe=pump.run_sync_soon_not_threadsafe,
        done_callback=pump.done_callback,
    )
    # Block pumping messages until done_callback posts WM_QUIT.
    pump.mainloop()
if __name__ == "__main__":
    # Demo entry point; the guest task lives in example_tasks.
    print("Known bug: Dragging the window freezes everything.")
    print("For now only click buttons!")
    main(example_tasks.count)
|
en
| 0.612896
|
# # Copyright 2020 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @cffi.def_extern() # if your mainloop is in C/C++ # create event queue with null op Use use PostThreadMessage to schedule a callback https://docs.microsoft.com/en-us/windows/win32/winmsg/about-messages-and-message-queues Use use PostMessage to schedule a callback https://docs.microsoft.com/en-us/windows/win32/winmsg/about-messages-and-message-queues This doesn't provide any real efficiency over threadsafe. non-blocking request to end the main loop ####################################### ### Trio specific part of main loop ### ####################################### ############################### ### Trio specific part ends ### ############################### # also window close response # self.display.DoModal() # hack around uint16 issue
| 1.839124
| 2
|
server/misc/ocr.py
|
SarthakRout/gradeLess
| 0
|
6626246
|
<filename>server/misc/ocr.py
import sys

# First CLI argument: the image path (captured but not yet used below).
imgpath = sys.argv[1]

# Read reference coordinates, one "x1 y1 x2 y2" quadruple per line, and echo
# them. A context manager guarantees the file is closed even if a line is
# malformed (the original left the handle open when an exception escaped
# before fin.close()).
with open('coord.txt', 'r') as fin:
    for line in fin:
        coord = line.split(' ')
        xref11 = coord[0]
        yref11 = coord[1]
        xref21 = coord[2]
        yref21 = coord[3]
        print('xref11:' + xref11)
        print('yref11:' + yref11)
        print('xref21:' + xref21)
        print('yref21:' + yref21)
|
<filename>server/misc/ocr.py
import sys
imgpath = sys.argv[1]
fin = open('coord.txt', 'r')
lines_list = fin.readlines()
for line in lines_list:
coord = line.split(' ')
xref11 = coord[0]
yref11 = coord[1]
xref21 = coord[2]
yref21 = coord[3]
print('xref11:' + xref11)
print('yref11:' + yref11)
print('xref21:' + xref21)
print('yref21:' + yref21)
fin.close()
|
none
| 1
| 2.821782
| 3
|
|
library/binance/bsswap.py
|
danyanyam/ftx
| 2
|
6626247
|
<reponame>danyanyam/ftx
from base import BaseApiClass
import datetime as dt
# TODO Enums, Raises
class BSSwap(BaseApiClass):
    """Thin wrappers for the Binance Liquid Swap ("BSwap") REST endpoints.

    Each method mirrors one /sapi/v1/bswap/* endpoint and forwards its keyword
    arguments to the request helpers inherited from BaseApiClass.
    (The original class docstring linked to #margin-account-trade, an
    unrelated endpoint group.)
    """

    def __init__(self, api_key: str, secret_key: str):
        super().__init__(api_key, secret_key)

    def list_all_swap_pools(self,
                            recvWindow: int = None,
                            time_req: bool = True,
                            sign: bool = True):
        """List metadata of all swap pools.

        https://binance-docs.github.io/apidocs/spot/en/#list-all-swap-pools-user_data
        (the original docstring linked to an unrelated BLVT endpoint)
        """
        # NOTE(review): recvWindow/time_req/sign are accepted but not forwarded
        # to self.get here, unlike every other method -- confirm intentional.
        return self.get('/sapi/v1/bswap/pools')

    def get_liquidity_information_of_a_pool(self,
                                            poolId: str = None,
                                            recvWindow: int = None,
                                            time_req: bool = True,
                                            sign: bool = True):
        """Get liquidity information and the user's share of a pool.

        https://binance-docs.github.io/apidocs/spot/en/#get-liquidity-information-of-a-pool-user_data
        """
        return self.get('/sapi/v1/bswap/liquidity',
                        poolId=poolId,
                        recvWindow=recvWindow,
                        time_req=time_req,
                        sign=sign)

    def add_liquidity(self,
                      poolId: str = None,
                      asset: str = None,
                      quantity: float = None,
                      recvWindow: int = None,
                      time_req: bool = True,
                      sign: bool = True):
        """Add liquidity to a pool.

        https://binance-docs.github.io/apidocs/spot/en/#add-liquidity-trade
        """
        return self.post('/sapi/v1/bswap/liquidityAdd',
                         poolId=poolId,
                         asset=asset,
                         quantity=quantity,
                         recvWindow=recvWindow,
                         time_req=time_req,
                         sign=sign)

    def remove_liquidity(self,
                         poolId: str = None,
                         type: str = None,
                         asset: str = None,
                         shareAmount: float = None,
                         recvWindow: int = None,
                         time_req: bool = True,
                         sign: bool = True):
        """Remove liquidity from a pool.

        https://binance-docs.github.io/apidocs/spot/en/#remove-liquidity-trade

        Note: ``type`` shadows the builtin; kept as-is to match the API's
        request-parameter name.
        """
        return self.post('/sapi/v1/bswap/liquidityRemove',
                         poolId=poolId,
                         type=type,
                         asset=asset,
                         shareAmount=shareAmount,
                         recvWindow=recvWindow,
                         time_req=time_req,
                         sign=sign)

    def get_liquidity_operation_record(self,
                                       operationId: str = None,
                                       poolId: str = None,
                                       operation: str = None,
                                       start_time: dt.datetime = None,
                                       end_time: dt.datetime = None,
                                       limit: int = None,
                                       recvWindow: int = None,
                                       time_req: bool = True,
                                       sign: bool = True):
        """Get the record of liquidity add/remove operations.

        https://binance-docs.github.io/apidocs/spot/en/#get-liquidity-operation-record-user_data
        """
        return self.get('/sapi/v1/bswap/liquidityOps',
                        operationId=operationId,
                        poolId=poolId,
                        operation=operation,
                        start_time=start_time,
                        end_time=end_time,
                        limit=limit,
                        recvWindow=recvWindow,
                        time_req=time_req,
                        sign=sign)

    def request_quote(self,
                      quoteAsset: str = None,
                      baseAsset: str = None,
                      quoteQty: float = None,
                      recvWindow: int = None,
                      time_req: bool = True,
                      sign: bool = True):
        """Request a quote for swapping quoteAsset for baseAsset.

        https://binance-docs.github.io/apidocs/spot/en/#request-quote-user_data
        """
        return self.get('/sapi/v1/bswap/quote',
                        quoteAsset=quoteAsset,
                        baseAsset=baseAsset,
                        quoteQty=quoteQty,
                        recvWindow=recvWindow,
                        time_req=time_req,
                        sign=sign)

    def swap(self,
             quoteAsset: str = None,
             baseAsset: str = None,
             quoteQty: float = None,
             recvWindow: int = None,
             time_req: bool = True,
             sign: bool = True):
        """Execute a swap.

        https://binance-docs.github.io/apidocs/spot/en/#swap-trade
        """
        return self.post('/sapi/v1/bswap/swap',
                         quoteAsset=quoteAsset,
                         baseAsset=baseAsset,
                         quoteQty=quoteQty,
                         recvWindow=recvWindow,
                         time_req=time_req,
                         sign=sign)

    def get_swap_history(self,
                         swapId: int = None,
                         start_time: dt.datetime = None,
                         end_time: dt.datetime = None,
                         status: int = None,
                         quoteAsset: str = None,
                         baseAsset: str = None,
                         limit: int = None,
                         recvWindow: int = None,
                         time_req: bool = True,
                         sign: bool = True):
        """Get past swaps (GET on the same path the POST ``swap`` uses).

        https://binance-docs.github.io/apidocs/spot/en/#get-swap-history-user_data
        """
        return self.get('/sapi/v1/bswap/swap',
                        swapId=swapId,
                        start_time=start_time,
                        end_time=end_time,
                        status=status,
                        baseAsset=baseAsset,
                        quoteAsset=quoteAsset,
                        limit=limit,
                        recvWindow=recvWindow,
                        time_req=time_req,
                        sign=sign)
|
from base import BaseApiClass
import datetime as dt
# TODO Enums, Raises
class BSSwap(BaseApiClass):
    """Thin wrappers for the Binance Liquid Swap ("BSwap") REST endpoints.

    Each method mirrors one /sapi/v1/bswap/* endpoint and forwards its keyword
    arguments to the request helpers inherited from BaseApiClass.
    (The original class docstring linked to #margin-account-trade, an
    unrelated endpoint group.)
    """

    def __init__(self, api_key: str, secret_key: str):
        super().__init__(api_key, secret_key)

    def list_all_swap_pools(self,
                            recvWindow: int = None,
                            time_req: bool = True,
                            sign: bool = True):
        """List metadata of all swap pools.

        https://binance-docs.github.io/apidocs/spot/en/#list-all-swap-pools-user_data
        (the original docstring linked to an unrelated BLVT endpoint)
        """
        # NOTE(review): recvWindow/time_req/sign are accepted but not forwarded
        # to self.get here, unlike every other method -- confirm intentional.
        return self.get('/sapi/v1/bswap/pools')

    def get_liquidity_information_of_a_pool(self,
                                            poolId: str = None,
                                            recvWindow: int = None,
                                            time_req: bool = True,
                                            sign: bool = True):
        """Get liquidity information and the user's share of a pool.

        https://binance-docs.github.io/apidocs/spot/en/#get-liquidity-information-of-a-pool-user_data
        """
        return self.get('/sapi/v1/bswap/liquidity',
                        poolId=poolId,
                        recvWindow=recvWindow,
                        time_req=time_req,
                        sign=sign)

    def add_liquidity(self,
                      poolId: str = None,
                      asset: str = None,
                      quantity: float = None,
                      recvWindow: int = None,
                      time_req: bool = True,
                      sign: bool = True):
        """Add liquidity to a pool.

        https://binance-docs.github.io/apidocs/spot/en/#add-liquidity-trade
        """
        return self.post('/sapi/v1/bswap/liquidityAdd',
                         poolId=poolId,
                         asset=asset,
                         quantity=quantity,
                         recvWindow=recvWindow,
                         time_req=time_req,
                         sign=sign)

    def remove_liquidity(self,
                         poolId: str = None,
                         type: str = None,
                         asset: str = None,
                         shareAmount: float = None,
                         recvWindow: int = None,
                         time_req: bool = True,
                         sign: bool = True):
        """Remove liquidity from a pool.

        https://binance-docs.github.io/apidocs/spot/en/#remove-liquidity-trade

        Note: ``type`` shadows the builtin; kept as-is to match the API's
        request-parameter name.
        """
        return self.post('/sapi/v1/bswap/liquidityRemove',
                         poolId=poolId,
                         type=type,
                         asset=asset,
                         shareAmount=shareAmount,
                         recvWindow=recvWindow,
                         time_req=time_req,
                         sign=sign)

    def get_liquidity_operation_record(self,
                                       operationId: str = None,
                                       poolId: str = None,
                                       operation: str = None,
                                       start_time: dt.datetime = None,
                                       end_time: dt.datetime = None,
                                       limit: int = None,
                                       recvWindow: int = None,
                                       time_req: bool = True,
                                       sign: bool = True):
        """Get the record of liquidity add/remove operations.

        https://binance-docs.github.io/apidocs/spot/en/#get-liquidity-operation-record-user_data
        """
        return self.get('/sapi/v1/bswap/liquidityOps',
                        operationId=operationId,
                        poolId=poolId,
                        operation=operation,
                        start_time=start_time,
                        end_time=end_time,
                        limit=limit,
                        recvWindow=recvWindow,
                        time_req=time_req,
                        sign=sign)

    def request_quote(self,
                      quoteAsset: str = None,
                      baseAsset: str = None,
                      quoteQty: float = None,
                      recvWindow: int = None,
                      time_req: bool = True,
                      sign: bool = True):
        """Request a quote for swapping quoteAsset for baseAsset.

        https://binance-docs.github.io/apidocs/spot/en/#request-quote-user_data
        """
        return self.get('/sapi/v1/bswap/quote',
                        quoteAsset=quoteAsset,
                        baseAsset=baseAsset,
                        quoteQty=quoteQty,
                        recvWindow=recvWindow,
                        time_req=time_req,
                        sign=sign)

    def swap(self,
             quoteAsset: str = None,
             baseAsset: str = None,
             quoteQty: float = None,
             recvWindow: int = None,
             time_req: bool = True,
             sign: bool = True):
        """Execute a swap.

        https://binance-docs.github.io/apidocs/spot/en/#swap-trade
        """
        return self.post('/sapi/v1/bswap/swap',
                         quoteAsset=quoteAsset,
                         baseAsset=baseAsset,
                         quoteQty=quoteQty,
                         recvWindow=recvWindow,
                         time_req=time_req,
                         sign=sign)

    def get_swap_history(self,
                         swapId: int = None,
                         start_time: dt.datetime = None,
                         end_time: dt.datetime = None,
                         status: int = None,
                         quoteAsset: str = None,
                         baseAsset: str = None,
                         limit: int = None,
                         recvWindow: int = None,
                         time_req: bool = True,
                         sign: bool = True):
        """Get past swaps (GET on the same path the POST ``swap`` uses).

        https://binance-docs.github.io/apidocs/spot/en/#get-swap-history-user_data
        """
        return self.get('/sapi/v1/bswap/swap',
                        swapId=swapId,
                        start_time=start_time,
                        end_time=end_time,
                        status=status,
                        baseAsset=baseAsset,
                        quoteAsset=quoteAsset,
                        limit=limit,
                        recvWindow=recvWindow,
                        time_req=time_req,
                        sign=sign)
|
en
| 0.602611
|
# TODO Enums, Raises https://binance-docs.github.io/apidocs/spot/en/#margin-account-trade https://binance-docs.github.io/apidocs/spot/en/#blvt-nav-kline-candlestick-streams https://binance-docs.github.io/apidocs/spot/en/#get-liquidity-information-of-a-pool-user_data https://binance-docs.github.io/apidocs/spot/en/#add-liquidity-trade https://binance-docs.github.io/apidocs/spot/en/#remove-liquidity-trade https://binance-docs.github.io/apidocs/spot/en/#get-liquidity-operation-record-user_data https://binance-docs.github.io/apidocs/spot/en/#request-quote-user_data https://binance-docs.github.io/apidocs/spot/en/#swap-trade https://binance-docs.github.io/apidocs/spot/en/#get-swap-history-user_data
| 2.4446
| 2
|
ops.py
|
yahsieh37/Visual-Saliency-Prediction
| 0
|
6626248
|
import tensorflow as tf
import tensorflow.contrib as tf_contrib
import numpy as np
# Xavier : tf_contrib.layers.xavier_initializer()
# He : tf_contrib.layers.variance_scaling_initializer()
# Normal : tf.random_normal_initializer(mean=0.0, stddev=0.02)
# l2_decay : tf_contrib.layers.l2_regularizer(0.0001)
weight_init = tf_contrib.layers.xavier_initializer()  # default kernel initializer for all layers below
weight_regularizer = None  # no weight decay on conv/deconv kernels
weight_regularizer_fully = None  # no weight decay on dense kernels
##################################################################################
# Layer
##################################################################################
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'):
    """2-D convolution layer.

    When ``pad > 0`` the input is first padded ('zero' or 'reflect'); then
    either a spectrally-normalized ``tf.nn.conv2d`` (``sn=True``) or a plain
    ``tf.layers.conv2d`` (``sn=False``) is applied.

    NOTE(review): in the ``sn=False`` branch only ``channels`` and ``kernel``
    are forwarded, so ``stride``/``use_bias``/initializers fall back to the
    ``tf.layers.conv2d`` defaults (stride 1, bias on) — this reproduces the
    original behavior exactly.
    """
    with tf.variable_scope(scope):
        if pad > 0:
            height = x.get_shape().as_list()[1]
            # Total padding: double the requested pad when the stride divides
            # the height; otherwise pad just enough for a full kernel window.
            if height % stride == 0:
                total = pad * 2
            else:
                total = max(kernel - (height % stride), 0)
            top = total // 2
            bottom = total - top
            left = total // 2
            right = total - left
            paddings = [[0, 0], [top, bottom], [left, right], [0, 0]]
            if pad_type == 'zero':
                x = tf.pad(x, paddings)
            if pad_type == 'reflect':
                x = tf.pad(x, paddings, mode='REFLECT')

        if sn:
            w = tf.get_variable("kernel",
                                shape=[kernel, kernel, x.get_shape()[-1], channels],
                                initializer=weight_init,
                                regularizer=weight_regularizer)
            x = tf.nn.conv2d(input=x, filter=spectral_norm(w),
                             strides=[1, stride, stride, 1], padding='VALID')
            if use_bias:
                bias = tf.get_variable("bias", [channels],
                                       initializer=tf.constant_initializer(0.0))
                x = tf.nn.bias_add(x, bias)
        else:
            x = tf.layers.conv2d(x, channels, kernel)

        return x
def deconv(x, channels, kernel=4, stride=2, padding='SAME', use_bias=True, sn=False, scope='deconv_0'):
    """Transposed 2-D convolution (spatial upsampling by ``stride``).

    With ``sn=True`` the kernel is spectrally normalized and the op is built
    from ``tf.nn.conv2d_transpose``; otherwise ``tf.layers.conv2d_transpose``
    is used with the module-level initializer/regularizer.
    """
    with tf.variable_scope(scope):
        in_shape = x.get_shape().as_list()
        # Output spatial size: exact multiple for 'SAME'; for 'VALID' add the
        # kernel overhang beyond one stride.
        if padding == 'SAME':
            out_shape = [in_shape[0], in_shape[1] * stride, in_shape[2] * stride, channels]
        else:
            extra = max(kernel - stride, 0)
            out_shape = [in_shape[0], in_shape[1] * stride + extra,
                         in_shape[2] * stride + extra, channels]

        if sn:
            w = tf.get_variable("kernel",
                                shape=[kernel, kernel, channels, x.get_shape()[-1]],
                                initializer=weight_init,
                                regularizer=weight_regularizer)
            x = tf.nn.conv2d_transpose(x, filter=spectral_norm(w), output_shape=out_shape,
                                       strides=[1, stride, stride, 1], padding=padding)
            if use_bias:
                bias = tf.get_variable("bias", [channels],
                                       initializer=tf.constant_initializer(0.0))
                x = tf.nn.bias_add(x, bias)
        else:
            x = tf.layers.conv2d_transpose(inputs=x, filters=channels,
                                           kernel_size=kernel,
                                           kernel_initializer=weight_init,
                                           kernel_regularizer=weight_regularizer,
                                           strides=stride, padding=padding,
                                           use_bias=use_bias)

        return x
def fully_connected(x, units, use_bias=True, sn=False, scope='linear'):
    """Dense layer applied to the flattened input.

    ``sn=True`` builds an explicit ``matmul`` with a spectrally-normalized
    kernel (plus optional bias); otherwise ``tf.layers.dense`` is used.
    """
    with tf.variable_scope(scope):
        x = flatten(x)
        in_channels = x.get_shape().as_list()[-1]

        if sn:
            w = tf.get_variable("kernel", [in_channels, units], tf.float32,
                                initializer=weight_init,
                                regularizer=weight_regularizer_fully)
            if use_bias:
                bias = tf.get_variable("bias", [units],
                                       initializer=tf.constant_initializer(0.0))
                x = tf.matmul(x, spectral_norm(w)) + bias
            else:
                x = tf.matmul(x, spectral_norm(w))
        else:
            x = tf.layers.dense(x, units=units, kernel_initializer=weight_init,
                                kernel_regularizer=weight_regularizer_fully,
                                use_bias=use_bias)

        return x
def flatten(x):
    """Collapse all non-batch dimensions of *x* into one."""
    return tf.layers.flatten(x)
def hw_flatten(x):
    """Reshape (B, H, W, C) -> (B, H*W, C), merging the spatial axes only."""
    return tf.reshape(x, shape=[tf.shape(x)[0], -1, tf.shape(x)[-1]])
def max_pooling(x):
    """2x2 max pooling with stride 2 and 'SAME' padding (halves H and W)."""
    return tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding='SAME')
|
import tensorflow as tf
import tensorflow.contrib as tf_contrib
import numpy as np
# Xavier : tf_contrib.layers.xavier_initializer()
# He : tf_contrib.layers.variance_scaling_initializer()
# Normal : tf.random_normal_initializer(mean=0.0, stddev=0.02)
# l2_decay : tf_contrib.layers.l2_regularizer(0.0001)
weight_init = tf_contrib.layers.xavier_initializer()
weight_regularizer = None
weight_regularizer_fully = None
##################################################################################
# Layer
##################################################################################
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'):
with tf.variable_scope(scope):
if pad > 0:
h = x.get_shape().as_list()[1]
if h % stride == 0:
pad = pad * 2
else:
pad = max(kernel - (h % stride), 0)
pad_top = pad // 2
pad_bottom = pad - pad_top
pad_left = pad // 2
pad_right = pad - pad_left
if pad_type == 'zero':
x = tf.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]])
if pad_type == 'reflect':
x = tf.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]], mode='REFLECT')
if sn:
w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
regularizer=weight_regularizer)
x = tf.nn.conv2d(input=x, filter=spectral_norm(w),
strides=[1, stride, stride, 1], padding='VALID')
if use_bias:
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
else:
#x = tf.layers.conv2d(x, channels,
# kernel, kernel_initializer=weight_init,
# kernel_regularizer=weight_regularizer,
# strides=stride, use_bias=use_bias)
x = tf.layers.conv2d(x, channels, kernel)
return x
def deconv(x, channels, kernel=4, stride=2, padding='SAME', use_bias=True, sn=False, scope='deconv_0'):
with tf.variable_scope(scope):
x_shape = x.get_shape().as_list()
if padding == 'SAME':
output_shape = [x_shape[0], x_shape[1] * stride, x_shape[2] * stride, channels]
else:
output_shape = [x_shape[0], x_shape[1] * stride + max(kernel - stride, 0),
x_shape[2] * stride + max(kernel - stride, 0), channels]
if sn:
w = tf.get_variable("kernel", shape=[kernel, kernel, channels, x.get_shape()[-1]], initializer=weight_init,
regularizer=weight_regularizer)
x = tf.nn.conv2d_transpose(x, filter=spectral_norm(w), output_shape=output_shape,
strides=[1, stride, stride, 1], padding=padding)
if use_bias:
bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
else:
x = tf.layers.conv2d_transpose(inputs=x, filters=channels,
kernel_size=kernel, kernel_initializer=weight_init,
kernel_regularizer=weight_regularizer,
strides=stride, padding=padding, use_bias=use_bias)
return x
def fully_connected(x, units, use_bias=True, sn=False, scope='linear'):
with tf.variable_scope(scope):
x = flatten(x)
shape = x.get_shape().as_list()
channels = shape[-1]
if sn:
w = tf.get_variable("kernel", [channels, units], tf.float32,
initializer=weight_init, regularizer=weight_regularizer_fully)
if use_bias:
bias = tf.get_variable("bias", [units],
initializer=tf.constant_initializer(0.0))
x = tf.matmul(x, spectral_norm(w)) + bias
else:
x = tf.matmul(x, spectral_norm(w))
else:
x = tf.layers.dense(x, units=units, kernel_initializer=weight_init,
kernel_regularizer=weight_regularizer_fully,
use_bias=use_bias)
return x
def flatten(x) :
return tf.layers.flatten(x)
def hw_flatten(x) :
return tf.reshape(x, shape=[tf.shape(x)[0], -1, tf.shape(x)[-1]])
#return tf.reshape(x, shape=[tf.shape(x)[0], -1, x.shape[-1]])
def max_pooling(x) :
return tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding='SAME')
|
de
| 0.207595
|
# Xavier : tf_contrib.layers.xavier_initializer() # He : tf_contrib.layers.variance_scaling_initializer() # Normal : tf.random_normal_initializer(mean=0.0, stddev=0.02) # l2_decay : tf_contrib.layers.l2_regularizer(0.0001) ################################################################################## # Layer ################################################################################## #x = tf.layers.conv2d(x, channels, # kernel, kernel_initializer=weight_init, # kernel_regularizer=weight_regularizer, # strides=stride, use_bias=use_bias) #return tf.reshape(x, shape=[tf.shape(x)[0], -1, x.shape[-1]])
| 2.444705
| 2
|
car/sensors/objcenter_ssd.py
|
zhijiahu/gopigo-car
| 2
|
6626249
|
from collections import deque
from collections import namedtuple
import numpy as np
import imutils
import cv2
from .sensorbase import SensorBase
class ObjCenterSSD(SensorBase):
    """Sensor that detects one configured object class with a MobileNet-SSD
    Caffe model and steers the robot toward it by setting left/right motor
    multipliers so the object ends up centered in the frame.

    Expected ``args`` keys: ``object`` (class label to track), ``prototxt``
    and ``model`` (Caffe files), ``confidence`` (detection threshold).
    """

    def __init__(self, args):
        super(ObjCenterSSD, self).__init__(args)

        # the object class name we steer toward; must be one of CLASSES
        self.objType = args["object"]

        # initialize the list of class labels MobileNet SSD was trained to
        # detect, then generate one random bounding-box color per class
        self.CLASSES = ["background", "aeroplane", "bicycle", "bird",
            "boat", "bottle", "bus", "car", "cat", "chair", "cow",
            "diningtable", "dog", "horse", "motorbike", "person",
            "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
        self.COLORS = np.random.uniform(0, 255,
            size=(len(self.CLASSES), 3))

        # check if the object type is a part of class labels
        # MobileNet SSD was trained to detect
        if self.objType in self.CLASSES:
            # load our serialized model from disk and run inference on the
            # Movidius (Myriad) accelerator
            print("[INFO] loading model...")
            self.net = cv2.dnn.readNetFromCaffe(args["prototxt"],
                args["model"])
            self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

        # otherwise, alert the user regarding this and stop the application:
        # with an unknown label nothing could ever be detected
        else:
            print("[ERROR] object type not part of class labels" \
                " model was trained on...")
            exit(0)

    def update_internal(self, frame):
        """Run one detection pass over *frame*.

        Draws a rectangle around the first matching detection, sets
        ``self.l_multiplier`` / ``self.r_multiplier`` to veer toward it,
        and returns True; returns False when nothing matched.
        """
        # grab the frame dimensions and convert it to a blob
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
            0.007843, (300, 300), 127.5)

        # pass the blob through the network and obtain the detections
        self.net.setInput(blob)
        detections = self.net.forward()

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated
            # with the prediction and filter out weak detections
            confidence = detections[0, 0, i, 2]
            if confidence > self.args["confidence"]:
                # filter for only the object type we are interested in
                idx = int(detections[0, 0, i, 1])
                if self.CLASSES[idx] != self.objType:
                    continue

                # compute the (x, y)-coordinates of the bounding box and
                # draw it on the frame
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                    self.COLORS[idx], 2)

                # BUG FIX: the original computed
                #   objX = int((endX - startX / 2) + startX)
                # which, by operator precedence, is endX + startX/2 — a point
                # to the *right* of the box, not its center. Use the true
                # horizontal midpoint of the box instead.
                objX = int((startX + endX) / 2)

                # when the object is left of the frame center, veer left;
                # right of center, veer right; otherwise go straight
                if objX < w // 2:
                    self.l_multiplier = 0.50
                    self.r_multiplier = 1.50
                elif objX > w // 2:
                    self.l_multiplier = 1.50
                    self.r_multiplier = 0.50
                else:
                    self.l_multiplier = 0.9
                    self.r_multiplier = 0.9

                self.motor_duration = 0
                print("[INFO] Detected object")
                return True

        return False
|
from collections import deque
from collections import namedtuple
import numpy as np
import imutils
import cv2
from .sensorbase import SensorBase
class ObjCenterSSD(SensorBase):
def __init__(self, args):
super(ObjCenterSSD, self).__init__(args)
# set instance variables
self.objType = args["object"]
# initialize the list of class labels MobileNet SSD was
# trained to detect, then generate a set of bounding box
# colors for each class
self.CLASSES = ["background", "aeroplane", "bicycle", "bird",
"boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person",
"pottedplant", "sheep", "sofa", "train", "tvmonitor"]
self.COLORS = np.random.uniform(0, 255,
size=(len(self.CLASSES), 3))
# check if the object type is a part of class labels
# MobileNet SDD was trained to detect
if self.objType in self.CLASSES:
# load our serialized model from disk
print("[INFO] loading model...")
self.net = cv2.dnn.readNetFromCaffe(args["prototxt"],
args["model"])
self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
# otherwise, alert the user regarding this and stop the
# application
else:
print("[ERROR] object type not part of class labels" \
" model was trained on...")
exit(0)
def update_internal(self, frame):
# grab the frame dimensions and convert it to a blob
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
0.007843, (300, 300), 127.5)
# pass the blob through the network and obtain the detections
# and predictions
self.net.setInput(blob)
detections = self.net.forward()
# loop over the detections
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated
# with the prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the `confidence`
# is greater than the minimum confidence
if confidence > self.args["confidence"]:
# extract the index of the class label from the
# `detections`
idx = int(detections[0, 0, i, 1])
# filter for only object type we are interested in
if self.CLASSES[idx] != self.objType:
continue
# compute the (x, y)-coordinates of the bounding
# box for the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# draw the prediction on the frame
label = "{}: {:.2f}%".format(self.CLASSES[idx],
confidence * 100)
cv2.rectangle(frame, (startX, startY), (endX, endY),
self.COLORS[idx], 2)
# calculate the center (x, y)-coordinates and width
# of the object
objX = int((endX - startX / 2) + startX)
objY = int((endY - startY / 2) + startY)
width = endX - startX
# when the object is on the left, the robot needs to
# veer left until the next update
if objX < w // 2:
self.l_multiplier = 0.50
self.r_multiplier = 1.50
# when the object is on the right, the robot should
# veer right until the next update
elif objX > w // 2:
self.l_multiplier = 1.50
self.r_multiplier = 0.50
# otherwise, the object is in the center, so the robot
# should go straight until the next update
else:
self.l_multiplier = 0.9
self.r_multiplier = 0.9
self.motor_duration = 0
print("[INFO] Detected object")
return True
return False
|
en
| 0.857192
|
# set instance variables # initialize the list of class labels MobileNet SSD was # trained to detect, then generate a set of bounding box # colors for each class # check if the object type is a part of class labels # MobileNet SDD was trained to detect # load our serialized model from disk # otherwise, alert the user regarding this and stop the # application # grab the frame dimensions and convert it to a blob # pass the blob through the network and obtain the detections # and predictions # loop over the detections # extract the confidence (i.e., probability) associated # with the prediction # filter out weak detections by ensuring the `confidence` # is greater than the minimum confidence # extract the index of the class label from the # `detections` # filter for only object type we are interested in # compute the (x, y)-coordinates of the bounding # box for the object # draw the prediction on the frame # calculate the center (x, y)-coordinates and width # of the object # when the object is on the left, the robot needs to # veer left until the next update # when the object is on the right, the robot should # veer right until the next update # otherwise, the object is in the center, so the robot # should go straight until the next update
| 2.857825
| 3
|
tests/unit/modules/test_pkgutil.py
|
xiaowei582648206/saltx
| 1
|
6626250
|
<reponame>xiaowei582648206/saltx<gh_stars>1-10
# -*- coding: utf-8 -*-
'''
:codeauthor: <NAME> <<EMAIL>>
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
Mock,
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.pkgutil as pkgutil
from salt.exceptions import CommandExecutionError, MinionError
import salt.utils.pkg
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PkgutilTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Unit tests for salt.modules.pkgutil. Every external command is mocked
    through __salt__ / __context__, so no real pkgutil binary is required.
    '''
    def setup_loader_modules(self):
        # Start with an empty loader context; each test patches in exactly
        # the dunder entries it needs.
        return {pkgutil: {}}

    # 'refresh_db' function tests: 1

    def test_refresh_db(self):
        '''
        refresh_db() reports success when `pkgutil -U` exits with 0.
        '''
        mock = MagicMock(return_value=0)
        with patch.dict(pkgutil.__salt__, {'cmd.retcode': mock}):
            with patch.object(salt.utils.pkg, 'clear_rtag', Mock()):
                self.assertTrue(pkgutil.refresh_db())

    # 'upgrade_available' function tests: 1

    def test_upgrade_available(self):
        '''
        upgrade_available() returns the candidate version only when it
        differs from the installed one, and '' otherwise.
        '''
        mock = MagicMock(return_value='A\n B\n SAME')
        with patch.dict(pkgutil.__salt__, {'cmd.run_stdout': mock}):
            self.assertEqual(pkgutil.upgrade_available('CSWpython'), '')

        mock = MagicMock(side_effect=['A\n B\n SALT', None])
        with patch.dict(pkgutil.__salt__, {'cmd.run_stdout': mock}):
            self.assertEqual(pkgutil.upgrade_available('CSWpython'), 'SALT')
            self.assertEqual(pkgutil.upgrade_available('CSWpython'), '')

    # 'list_upgrades' function tests: 1

    def test_list_upgrades(self):
        '''
        list_upgrades() parses the tab-separated command output into a
        {package: candidate-version} dict.
        '''
        mock_run = MagicMock(return_value='A\t B\t SAME')
        mock_ret = MagicMock(return_value=0)
        with patch.dict(pkgutil.__salt__, {'cmd.run_stdout': mock_run,
                                           'cmd.retcode': mock_ret}):
            with patch.object(salt.utils.pkg, 'clear_rtag', Mock()):
                self.assertDictEqual(pkgutil.list_upgrades(), {'A': ' B'})

    # 'upgrade' function tests: 1

    def test_upgrade(self):
        '''
        upgrade() returns an empty diff when the package list does not
        change across the upgrade run.
        '''
        mock_run = MagicMock(return_value='A\t B\t SAME')
        mock_ret = MagicMock(return_value=0)
        mock_pkg = MagicMock(return_value='')
        with patch.dict(pkgutil.__salt__,
                        {'cmd.run_stdout': mock_run,
                         'cmd.retcode': mock_ret,
                         'pkg_resource.stringify': mock_pkg,
                         'pkg_resource.sort_pkglist': mock_pkg,
                         'cmd.run_all': mock_ret, 'cmd.run': mock_run}):
            with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': mock_ret}):
                with patch.object(salt.utils.pkg, 'clear_rtag', Mock()):
                    self.assertDictEqual(pkgutil.upgrade(), {})

    # 'list_pkgs' function tests: 1

    def test_list_pkgs(self):
        '''
        list_pkgs() honors the removed/versions_as_list flags and serves
        cached results from __context__ when present.
        '''
        mock_run = MagicMock(return_value='A\t B\t SAME')
        mock_ret = MagicMock(return_value=True)
        mock_pkg = MagicMock(return_value='')
        with patch.dict(pkgutil.__salt__,
                        {'cmd.run_stdout': mock_run,
                         'cmd.retcode': mock_ret,
                         'pkg_resource.stringify': mock_pkg,
                         'pkg_resource.sort_pkglist': mock_pkg,
                         'cmd.run': mock_run}):
            with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': mock_ret}):
                self.assertDictEqual(pkgutil.list_pkgs(versions_as_list=True,
                                                       removed=True), {})

                self.assertDictEqual(pkgutil.list_pkgs(), {})

        with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': True}):
            self.assertTrue(pkgutil.list_pkgs(versions_as_list=True))

            mock_pkg = MagicMock(return_value=True)
            with patch.dict(pkgutil.__salt__,
                            {'pkg_resource.stringify': mock_pkg}):
                self.assertTrue(pkgutil.list_pkgs())

    # 'version' function tests: 1

    def test_version(self):
        '''
        version() delegates to pkg_resource.version and returns its result.
        '''
        mock_ret = MagicMock(return_value=True)
        with patch.dict(pkgutil.__salt__, {'pkg_resource.version': mock_ret}):
            self.assertTrue(pkgutil.version('CSWpython'))

    # 'latest_version' function tests: 1

    def test_latest_version(self):
        '''
        latest_version() returns '' for no/unknown packages, and a dict of
        results when queried with several names at once.
        '''
        self.assertEqual(pkgutil.latest_version(), '')

        mock_run_all = MagicMock(return_value='A\t B\t SAME')
        mock_run = MagicMock(return_value={'stdout': ''})
        mock_ret = MagicMock(return_value=True)
        mock_pkg = MagicMock(return_value='')
        with patch.dict(pkgutil.__salt__,
                        {'cmd.retcode': mock_ret,
                         'pkg_resource.stringify': mock_pkg,
                         'pkg_resource.sort_pkglist': mock_pkg,
                         'cmd.run_all': mock_run, 'cmd.run': mock_run_all}):
            with patch.object(salt.utils.pkg, 'clear_rtag', Mock()):
                self.assertEqual(pkgutil.latest_version('CSWpython'), '')

                self.assertDictEqual(pkgutil.latest_version('CSWpython', 'Python'),
                                     {'Python': '', 'CSWpython': ''})

    # 'install' function tests: 1

    def test_install(self):
        '''
        install() raises CommandExecutionError on a target-parsing failure
        and returns an empty diff when nothing changes.
        '''
        mock_pkg = MagicMock(side_effect=MinionError)
        with patch.dict(pkgutil.__salt__,
                        {'pkg_resource.parse_targets': mock_pkg}):
            self.assertRaises(CommandExecutionError, pkgutil.install)

        mock_ret = MagicMock(return_value=True)
        mock_pkg = MagicMock(return_value=[''])
        with patch.dict(pkgutil.__salt__,
                        {'pkg_resource.parse_targets': mock_pkg}):
            with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': mock_ret}):
                self.assertDictEqual(pkgutil.install(), {})

        mock_run = MagicMock(return_value='A\t B\t SAME')
        mock_run_all = MagicMock(return_value={'stdout': ''})
        mock_pkg = MagicMock(return_value=[{"bar": "1.2.3"}])
        with patch.dict(pkgutil.__salt__,
                        {'pkg_resource.parse_targets': mock_pkg,
                         'pkg_resource.stringify': mock_pkg,
                         'pkg_resource.sort_pkglist': mock_pkg,
                         'cmd.run_all': mock_run_all, 'cmd.run': mock_run}):
            with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': mock_ret}):
                self.assertDictEqual(pkgutil.install
                                     (pkgs='["foo", {"bar": "1.2.3"}]'), {})

    # 'remove' function tests: 1

    def test_remove(self):
        '''
        remove() raises CommandExecutionError on a target-parsing failure
        and returns an empty diff when the package list is unchanged.
        '''
        mock_pkg = MagicMock(side_effect=MinionError)
        with patch.dict(pkgutil.__salt__,
                        {'pkg_resource.parse_targets': mock_pkg}):
            self.assertRaises(CommandExecutionError, pkgutil.remove)

        mock_ret = MagicMock(return_value=True)
        mock_run = MagicMock(return_value='A\t B\t SAME')
        mock_run_all = MagicMock(return_value={'stdout': ''})
        mock_pkg = MagicMock(return_value=[''])
        with patch.dict(pkgutil.__salt__,
                        {'pkg_resource.parse_targets': mock_pkg,
                         'pkg_resource.stringify': mock_pkg,
                         'pkg_resource.sort_pkglist': mock_pkg,
                         'cmd.run_all': mock_run_all, 'cmd.run': mock_run}):
            with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': mock_ret}):
                self.assertDictEqual(pkgutil.remove(), {})

        mock_pkg = MagicMock(return_value=[{"bar": "1.2.3"}])
        with patch.dict(pkgutil.__salt__,
                        {'pkg_resource.parse_targets': mock_pkg,
                         'pkg_resource.stringify': mock_pkg,
                         'pkg_resource.sort_pkglist': mock_pkg,
                         'cmd.run_all': mock_run_all, 'cmd.run': mock_run}):
            with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': mock_ret}):
                with patch.object(pkgutil, 'list_pkgs',
                                  return_value={"bar": "1.2.3"}):
                    self.assertDictEqual(pkgutil.remove(pkgs='["foo", "bar"]'),
                                         {})

    # 'purge' function tests: 1

    def test_purge(self):
        '''
        purge() is an alias of remove(), so it propagates the same
        CommandExecutionError on a target-parsing failure.
        '''
        mock_pkg = MagicMock(side_effect=MinionError)
        with patch.dict(pkgutil.__salt__,
                        {'pkg_resource.parse_targets': mock_pkg}):
            self.assertRaises(CommandExecutionError, pkgutil.purge)
|
# -*- coding: utf-8 -*-
'''
:codeauthor: <NAME> <<EMAIL>>
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
Mock,
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.pkgutil as pkgutil
from salt.exceptions import CommandExecutionError, MinionError
import salt.utils.pkg
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PkgutilTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.pkgutil
'''
def setup_loader_modules(self):
return {pkgutil: {}}
# 'refresh_db' function tests: 1
def test_refresh_db(self):
'''
Test if it updates the pkgutil repo database (pkgutil -U).
'''
mock = MagicMock(return_value=0)
with patch.dict(pkgutil.__salt__, {'cmd.retcode': mock}):
with patch.object(salt.utils.pkg, 'clear_rtag', Mock()):
self.assertTrue(pkgutil.refresh_db())
# 'upgrade_available' function tests: 1
def test_upgrade_available(self):
'''
Test if there is an upgrade available for a certain package.
'''
mock = MagicMock(return_value='A\n B\n SAME')
with patch.dict(pkgutil.__salt__, {'cmd.run_stdout': mock}):
self.assertEqual(pkgutil.upgrade_available('CSWpython'), '')
mock = MagicMock(side_effect=['A\n B\n SALT', None])
with patch.dict(pkgutil.__salt__, {'cmd.run_stdout': mock}):
self.assertEqual(pkgutil.upgrade_available('CSWpython'), 'SALT')
self.assertEqual(pkgutil.upgrade_available('CSWpython'), '')
# 'list_upgrades' function tests: 1
def test_list_upgrades(self):
'''
Test if it list all available package upgrades on this system.
'''
mock_run = MagicMock(return_value='A\t B\t SAME')
mock_ret = MagicMock(return_value=0)
with patch.dict(pkgutil.__salt__, {'cmd.run_stdout': mock_run,
'cmd.retcode': mock_ret}):
with patch.object(salt.utils.pkg, 'clear_rtag', Mock()):
self.assertDictEqual(pkgutil.list_upgrades(), {'A': ' B'})
# 'upgrade' function tests: 1
def test_upgrade(self):
'''
Test if it upgrade all of the packages to the latest available version.
'''
mock_run = MagicMock(return_value='A\t B\t SAME')
mock_ret = MagicMock(return_value=0)
mock_pkg = MagicMock(return_value='')
with patch.dict(pkgutil.__salt__,
{'cmd.run_stdout': mock_run,
'cmd.retcode': mock_ret,
'pkg_resource.stringify': mock_pkg,
'pkg_resource.sort_pkglist': mock_pkg,
'cmd.run_all': mock_ret, 'cmd.run': mock_run}):
with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': mock_ret}):
with patch.object(salt.utils.pkg, 'clear_rtag', Mock()):
self.assertDictEqual(pkgutil.upgrade(), {})
# 'list_pkgs' function tests: 1
def test_list_pkgs(self):
'''
Test if it list the packages currently installed as a dict.
'''
mock_run = MagicMock(return_value='A\t B\t SAME')
mock_ret = MagicMock(return_value=True)
mock_pkg = MagicMock(return_value='')
with patch.dict(pkgutil.__salt__,
{'cmd.run_stdout': mock_run,
'cmd.retcode': mock_ret,
'pkg_resource.stringify': mock_pkg,
'pkg_resource.sort_pkglist': mock_pkg,
'cmd.run': mock_run}):
with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': mock_ret}):
self.assertDictEqual(pkgutil.list_pkgs(versions_as_list=True,
removed=True), {})
self.assertDictEqual(pkgutil.list_pkgs(), {})
with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': True}):
self.assertTrue(pkgutil.list_pkgs(versions_as_list=True))
mock_pkg = MagicMock(return_value=True)
with patch.dict(pkgutil.__salt__,
{'pkg_resource.stringify': mock_pkg}):
self.assertTrue(pkgutil.list_pkgs())
# 'version' function tests: 1
def test_version(self):
'''
Test if it returns a version if the package is installed.
'''
mock_ret = MagicMock(return_value=True)
with patch.dict(pkgutil.__salt__, {'pkg_resource.version': mock_ret}):
self.assertTrue(pkgutil.version('CSWpython'))
# 'latest_version' function tests: 1
def test_latest_version(self):
'''
Test if it return the latest version of the named package
available for upgrade or installation.
'''
self.assertEqual(pkgutil.latest_version(), '')
mock_run_all = MagicMock(return_value='A\t B\t SAME')
mock_run = MagicMock(return_value={'stdout': ''})
mock_ret = MagicMock(return_value=True)
mock_pkg = MagicMock(return_value='')
with patch.dict(pkgutil.__salt__,
{'cmd.retcode': mock_ret,
'pkg_resource.stringify': mock_pkg,
'pkg_resource.sort_pkglist': mock_pkg,
'cmd.run_all': mock_run, 'cmd.run': mock_run_all}):
with patch.object(salt.utils.pkg, 'clear_rtag', Mock()):
self.assertEqual(pkgutil.latest_version('CSWpython'), '')
self.assertDictEqual(pkgutil.latest_version('CSWpython', 'Python'),
{'Python': '', 'CSWpython': ''})
# 'install' function tests: 1
def test_install(self):
'''
Test if it install packages using the pkgutil tool.
'''
mock_pkg = MagicMock(side_effect=MinionError)
with patch.dict(pkgutil.__salt__,
{'pkg_resource.parse_targets': mock_pkg}):
self.assertRaises(CommandExecutionError, pkgutil.install)
mock_ret = MagicMock(return_value=True)
mock_pkg = MagicMock(return_value=[''])
with patch.dict(pkgutil.__salt__,
{'pkg_resource.parse_targets': mock_pkg}):
with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': mock_ret}):
self.assertDictEqual(pkgutil.install(), {})
mock_run = MagicMock(return_value='A\t B\t SAME')
mock_run_all = MagicMock(return_value={'stdout': ''})
mock_pkg = MagicMock(return_value=[{"bar": "1.2.3"}])
with patch.dict(pkgutil.__salt__,
{'pkg_resource.parse_targets': mock_pkg,
'pkg_resource.stringify': mock_pkg,
'pkg_resource.sort_pkglist': mock_pkg,
'cmd.run_all': mock_run_all, 'cmd.run': mock_run}):
with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': mock_ret}):
self.assertDictEqual(pkgutil.install
(pkgs='["foo", {"bar": "1.2.3"}]'), {})
# 'remove' function tests: 1
def test_remove(self):
'''
Test if it remove a package and all its dependencies
which are not in use by other packages.
'''
mock_pkg = MagicMock(side_effect=MinionError)
with patch.dict(pkgutil.__salt__,
{'pkg_resource.parse_targets': mock_pkg}):
self.assertRaises(CommandExecutionError, pkgutil.remove)
mock_ret = MagicMock(return_value=True)
mock_run = MagicMock(return_value='A\t B\t SAME')
mock_run_all = MagicMock(return_value={'stdout': ''})
mock_pkg = MagicMock(return_value=[''])
with patch.dict(pkgutil.__salt__,
{'pkg_resource.parse_targets': mock_pkg,
'pkg_resource.stringify': mock_pkg,
'pkg_resource.sort_pkglist': mock_pkg,
'cmd.run_all': mock_run_all, 'cmd.run': mock_run}):
with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': mock_ret}):
self.assertDictEqual(pkgutil.remove(), {})
mock_pkg = MagicMock(return_value=[{"bar": "1.2.3"}])
with patch.dict(pkgutil.__salt__,
{'pkg_resource.parse_targets': mock_pkg,
'pkg_resource.stringify': mock_pkg,
'pkg_resource.sort_pkglist': mock_pkg,
'cmd.run_all': mock_run_all, 'cmd.run': mock_run}):
with patch.dict(pkgutil.__context__, {'pkg.list_pkgs': mock_ret}):
with patch.object(pkgutil, 'list_pkgs',
return_value={"bar": "1.2.3"}):
self.assertDictEqual(pkgutil.remove(pkgs='["foo", "bar"]'),
{})
# 'purge' function tests: 1
def test_purge(self):
'''
Test if it package purges are not supported,
this function is identical to ``remove()``.
'''
mock_pkg = MagicMock(side_effect=MinionError)
with patch.dict(pkgutil.__salt__,
{'pkg_resource.parse_targets': mock_pkg}):
self.assertRaises(CommandExecutionError, pkgutil.purge)
|
en
| 0.641839
|
# -*- coding: utf-8 -*- :codeauthor: <NAME> <<EMAIL>> # Import Python Libs # Import Salt Testing Libs # Import Salt Libs Test cases for salt.modules.pkgutil # 'refresh_db' function tests: 1 Test if it updates the pkgutil repo database (pkgutil -U). # 'upgrade_available' function tests: 1 Test if there is an upgrade available for a certain package. # 'list_upgrades' function tests: 1 Test if it list all available package upgrades on this system. # 'upgrade' function tests: 1 Test if it upgrade all of the packages to the latest available version. # 'list_pkgs' function tests: 1 Test if it list the packages currently installed as a dict. # 'version' function tests: 1 Test if it returns a version if the package is installed. # 'latest_version' function tests: 1 Test if it return the latest version of the named package available for upgrade or installation. # 'install' function tests: 1 Test if it install packages using the pkgutil tool. # 'remove' function tests: 1 Test if it remove a package and all its dependencies which are not in use by other packages. # 'purge' function tests: 1 Test if it package purges are not supported, this function is identical to ``remove()``.
| 2.186774
| 2
|