| text (stringlengths 4–1.02M) | meta (dict) |
|---|---|
"""
Not mine, found over there:
http://code.activestate.com/recipes/576819-logging-to-console-without-surprises/
Licensed under the MIT License and slightly modified.
"""
import logging
import sys
class LevelHandler(logging.StreamHandler):
"""A handler that logs to console in the sensible way.
StreamHandler can log to *one of* sys.stdout or sys.stderr.
It is more sensible to log to sys.stdout by default with only error
(logging.ERROR and above) messages going to sys.stderr. This is how
ConsoleHandler behaves.
"""
def __init__(self, stream_greater_or_equal=sys.stderr,
stream_lower=sys.stdout,
level=logging.ERROR):
"""
Initialize handler
"""
        # can't use the line below, because in Python 2 StreamHandler is
        # not a new-style class
        #super(LevelHandler, self).__init__()
logging.StreamHandler.__init__(self)
self._level = level
# keep it set as None
self.stream = None
self._stream_lower = stream_lower
self._stream_greater_or_equal = stream_greater_or_equal
def emit(self, record):
"""
Overriden emit.
"""
if record.levelno >= self._level:
return self._emit(record, self._stream_greater_or_equal)
else:
return self._emit(record, self._stream_lower)
    def _emit(self, record, stream):
        self.stream = stream
        try:
            # can't use the line below, because in Python 2 StreamHandler is
            # not a new-style class
            #return super(LevelHandler, self).emit(record)
            return logging.StreamHandler.emit(self, record)
        finally:
            # always reset the stream, even when emit() raises
            self.stream = None
def flush(self):
# Workaround a bug in logging module
# See:
# http://bugs.python.org/issue6333
if self.stream and hasattr(self.stream, 'flush') and not self.stream.closed:
logging.StreamHandler.flush(self)
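A minimal usage sketch (logger name and messages are illustrative):

    import logging
    log = logging.getLogger("demo")
    log.setLevel(logging.DEBUG)
    log.addHandler(LevelHandler())
    log.info("routed to sys.stdout")
    log.error("routed to sys.stderr")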
|
{
"content_hash": "79ed69008c936f94ee5e55ccdb07545f",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 84,
"avg_line_length": 32.950819672131146,
"alnum_prop": 0.609950248756219,
"repo_name": "pbenas/smoker",
"id": "4567633fd9c41d9956dd2c80ab68f27624c017ed",
"size": "2032",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "smoker/logger/level_handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "195585"
},
{
"name": "Shell",
"bytes": "4345"
}
],
"symlink_target": ""
}
|
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.permutations import Permutation
from sympy.utilities.iterables import uniq
_af_new = Permutation._af_new
def DirectProduct(*groups):
"""
Returns the direct product of several groups as a permutation group.
This is implemented much like the __mul__ procedure for taking the direct
product of two permutation groups, but the idea of shifting the
generators is realized in the case of an arbitrary number of groups.
    A call to DirectProduct(G1, G2, ..., Gn) is generally expected to be
    faster than a call to G1*G2*...*Gn (hence the need for this function).
Examples
========
>>> from sympy.combinatorics.group_constructs import DirectProduct
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> C = CyclicGroup(4)
>>> G = DirectProduct(C,C,C)
>>> G.order()
64
See Also
========
__mul__
"""
degrees = []
gens_count = []
total_degree = 0
total_gens = 0
for group in groups:
current_deg = group.degree
current_num_gens = len(group.generators)
degrees.append(current_deg)
total_degree += current_deg
gens_count.append(current_num_gens)
total_gens += current_num_gens
array_gens = []
    for i in range(total_gens):
        array_gens.append(list(range(total_degree)))
    current_gen = 0
    current_deg = 0
    for i in range(len(gens_count)):
        for j in range(current_gen, current_gen + gens_count[i]):
            gen = ((groups[i].generators)[j - current_gen]).array_form
            array_gens[j][current_deg:current_deg + degrees[i]] = \
                [x + current_deg for x in gen]
current_gen += gens_count[i]
current_deg += degrees[i]
perm_gens = list(uniq([_af_new(list(a)) for a in array_gens]))
return PermutationGroup(perm_gens, dups=False)
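A quick sanity-check sketch (groups chosen only for illustration): the order of the direct product is the product of the component orders.

    from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup
    G = DirectProduct(SymmetricGroup(3), CyclicGroup(2))
    assert G.order() == SymmetricGroup(3).order() * CyclicGroup(2).order()  # 6 * 2 == 12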
|
{
"content_hash": "f9af05de01807ce717bceb3661779047",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 33.91228070175438,
"alnum_prop": 0.6409725814795655,
"repo_name": "amitjamadagni/sympy",
"id": "89fe888cc8b04709241619042d4d163a32502a84",
"size": "1933",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/combinatorics/group_constructs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12199014"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "287"
},
{
"name": "TeX",
"bytes": "8789"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
from driven.vizualization.plotting import plotting
__all__ = ['plotting']
|
{
"content_hash": "d2249bf8825dfbc0c44240fc2f08b112",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 50,
"avg_line_length": 25,
"alnum_prop": 0.7466666666666667,
"repo_name": "biosustain/driven",
"id": "ef27ed9718fe160776b849a3cae8622c628ff10e",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "driven/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6705"
},
{
"name": "Python",
"bytes": "100376"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
from datetime import date
from datetime import datetime
import gzip
from shutil import copyfileobj
from os.path import isfile
from os.path import getsize
from os.path import getmtime
from os.path import join
import csv
from report_data.utils import find_data
from report_data.utils import overwrite_if_changed
from report_data.utils import read_from_file
from acumen import conf as AC
from acumen.utils.log import info, error
from acumen.utils import monthly_shards
from acumen.utils.configs import load_data_config
from acumen.utils.configs import save_data_config
def update_csv_meta(uri,
name=None,
description=None,
last_successful_update=None,
last_failed_update=None,
uncompressed_size=None,
compressed_size=None,
valid_rows=None,
invalid_rows=None,
):
return None
def update_csv_meta_from_files(uri, filename, compressed_filename):
"""Creates the meta-data JSON format for data files
# JSON format for the data upload
{
"URI": "samples.csv",
"name": "name",
"description": "description",
"update": {
"success": "ts",
"fail": "ts"
},
"size": {
"uncompressed": 123,
"compressed": 12
},
"rows": {
"valid": 1234,
"error": 0
}
}
- calculate full download: samples.csv
- calculate valid download: samples__valid.csv
- calculate errors download: samples__errors.csv
- column name, value, error message
"""
    meta = load_data_config(uri)
# The dict ultimately storing all meta-data
meta['URI'] = uri
# Both files must exist, otherwise exit.
for required_file in [filename, compressed_filename]:
if not isfile(required_file):
raise AssertionError('Required file %s does not exist!' % required_file)
# Set the sizes.
meta['size'] = {
'compressed': getsize(compressed_filename),
'uncompressed': getsize(filename),
}
# Parse and track valid rows, error rows, and errors
valid_rows = 0
invalid_rows = 0
all_errors = []
valid_filename = filename.replace('.csv', '__valid.csv')
error_filename = filename.replace('.csv', '__error.csv')
error_log_filename = filename.replace('.csv', '__error_log.csv')
with open(valid_filename, 'w') as vf, open(error_filename, 'w') as ef, open(error_log_filename, 'w') as elf:
# Writers for the two partitions of data and the error log file.
vfw = csv.writer(vf)
efw = csv.writer(ef)
elfw = csv.writer(elf)
# Make sure the partitions have a header.
_, _, _, _, row = find_data(uri + '.csv')
vfw.writerow(row.get_header())
efw.writerow(row.get_header())
elfw.writerow(['Row', 'Error'])
# Validate each row.
row_num = 0
for row in read_from_file(row.__class__, filename=filename):
row_num += 1
formatted, raw, errors = row.pop()
if not errors:
valid_rows += 1
vfw.writerow(formatted)
else:
invalid_rows += 1
all_errors.append(errors)
efw.writerow(raw)
for error in errors:
elfw.writerow([row_num, error])
meta['rows'] = {
'valid': valid_rows,
'error': invalid_rows,
}
# Populate the timestamps.
if 'update' not in meta:
meta['update'] = {}
meta['update']['success'] = datetime.fromtimestamp(getmtime(filename)).strftime('%Y-%m-%d %H:%M:%S')
return meta
def update_data_meta(filename):
# Update the meta-information.
stripped_uri = filename.split('/')[-1].replace('.csv', '').replace('.json', '')
data_filename = filename.replace('conf/data', 'data').replace('.json', '.csv')
data_compressed_filename = data_filename + '.gz'
meta = update_csv_meta_from_files(stripped_uri, data_filename, data_compressed_filename)
save_data_config(stripped_uri, meta)
def write_data():
"""Writes all the data and related config files
This is most helpful for putting a snapshot of all exported data to S3 so
that the rest of metrics, events and alerts can be generated. All steps
post data export do not rely on Counsyl's production codebase, namely
dev/website.
"""
# Write all the data files. Update meta-info in configs.
write_all(AC.BASE_DIR, 'data', lambda x: x.endswith('.csv'), lambda x: 'text/csv')
# Write all data config files.
write_all(AC.BASE_DIR, 'conf/data', lambda x: x.endswith('.json'), lambda x: 'application/json', pre_hook=update_data_meta)
def read_data(use_cache=True):
"""Reads all the data and related config files"""
read_all(AC.BASE_DIR, 'data', use_cache=use_cache)
read_all(AC.BASE_DIR, 'conf/data', use_cache=use_cache)
def write_metric():
"""Writes all the metrics and related config files
    This is most helpful for putting a snapshot of all exported metrics to S3 so
that they are available for general use and use in the HTML/JS-based reports.
"""
# Write all the metric files. Update meta-info in configs.
write_all(AC.BASE_DIR, 'metric', lambda x: x.endswith('.csv'), lambda x: 'text/csv')
# Write all metric config files.
write_all(AC.BASE_DIR, 'conf/metric', lambda x: x.endswith('.json'), lambda x: 'application/json')
def write_all(base_dir, sub_dir, match_func, content_type_func, pre_hook=None):
"""Helper to write potentially many files to S3
e.g.
- match_func = lambda x: x.endswith('.json')
- content_type = lambda x: 'text/json'
- pre_hook = allows for processing before S3 upload. Helpful for calculating
info such as valid and invalid rows in data feeds.
"""
from os import listdir
directory = join(base_dir, sub_dir) if sub_dir else base_dir
onlyfiles = [f for f in listdir(directory) if isfile(join(directory, f)) and match_func(f)]
for f in onlyfiles:
try:
if pre_hook:
pre_hook(join(directory, f))
write(
join(sub_dir, f),
join(directory, f),
content_type_func(f))
except Exception as ex:
error("Can't write %s" % f)
error(ex)
def write(uri, filename=None, content_type='text/csv'):
"""Writes data to S3"""
# If no destination file, save in the default cache.
if not filename:
filename = AC.DATA_DIR + uri
compressed_filename = filename + '.gz'
# Compress locally before S3 upload.
with open(filename, 'rb') as uncompressed, gzip.open(compressed_filename, 'wb') as compressed:
copyfileobj(uncompressed, compressed)
# Upload to S3.
info('S3 upload: %s as %s' % (compressed_filename, uri))
from boto.s3.connection import S3Connection
from boto.s3.key import Key
conn = S3Connection()
bucket = conn.get_bucket(AC.S3_BUCKET)
key = bucket.get_key(uri)
    # Create the key in the bucket if it doesn't exist yet.
if not key:
key = Key(bucket, name=uri)
key.set_contents_from_filename(
compressed_filename,
        headers={
'Content-Encoding': 'gzip',
'Content-Type': content_type,
})
def read_all(base_dir, prefix, use_cache=True):
from boto.s3.connection import S3Connection
from boto.s3.key import Key
conn = S3Connection()
bucket = conn.get_bucket(AC.S3_BUCKET)
for key in bucket.list():
keyname = key.name.encode('utf-8')
if keyname.startswith(prefix):
read(keyname,
filename=join(base_dir, keyname),
use_cache=use_cache)
def read(uri, filename=None, use_cache=True):
# If no destination file, save in the default cache.
if not filename:
filename = AC.DATA_DIR + uri
compressed_filename = filename + '.gz'
    # Skip the S3 download if the cache is valid.
if not use_cache or not isfile(compressed_filename):
# Download from S3
info('S3 download: %s' % compressed_filename)
from boto.s3.connection import S3Connection
from boto.s3.key import Key
conn = S3Connection()
bucket = conn.get_bucket(AC.S3_BUCKET)
key = bucket.get_key(uri)
key.get_contents_to_filename(compressed_filename)
else:
info('Using compressed cache: ' + compressed_filename)
# Decompress and save to the destination.
if not use_cache or not isfile(filename):
info('Decompressing: %s to %s' % (compressed_filename, filename))
with open(filename, 'wb') as uncompressed, gzip.open(compressed_filename, 'rb') as compressed:
copyfileobj(compressed, uncompressed)
else:
info('Using file cache: ' + filename)
# Return the file location of the CSV.
return filename
def list_csv():
from datetime import datetime
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.utils import parse_ts
conn = S3Connection()
bucket = conn.get_bucket(AC.S3_BUCKET)
for key in bucket.list(prefix='csv/'):
yield key.name[4:], parse_ts(key.last_modified), key.size
def read_metric(uri, filename=None, use_cache=True):
# If no destination file, save in the default cache.
if not filename:
filename = AC.METRIC_DIR + uri
# Do a type check. Then parse accordingly.
# Auto-add .csv since this metric is saved as a csv.
with open(filename + '.csv', 'r') as f:
reader = csv.reader(f)
        header = next(reader)
for row in reader:
yield dict(zip(header, row))
def partial_name(uri, shard_key):
return uri.replace('.csv', '_%s.csv' % shard_key)
def combine_csv(uri, output_dir=None, shard_dir=None, shard_func=monthly_shards):
"""Combines multiple CSV files to make an aggregate"""
if not output_dir:
output_dir = AC.DATA_DIR
if not shard_dir:
shard_dir = get_dir(AC.DATA_DIR, uri)
add_header = True
temp_output_file = output_dir + uri + '.temp'
with open(temp_output_file, 'w') as agg:
for shard_key, start, end in shard_func():
output_file = shard_dir + partial_name(uri, shard_key)
try:
with open(output_file, 'r') as data:
                    # Keep the header from the first shard only.
                    if add_header:
                        add_header = False
                    else:
                        next(data)
# Copy all other lines as-is.
for line in data:
if line:
agg.write(line)
except Exception as ex:
error("Can't aggregate CSV line")
error(ex)
# Overwrite if the contents changed.
old_file = output_dir + uri
if overwrite_if_changed(old_file, temp_output_file):
# Upload to S3 and overwrite the meta-data.
pass
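A local sketch of the gzip round trip that write() and read() perform around the S3 transfer (filenames are hypothetical; no S3 involved):

    import gzip
    from shutil import copyfileobj

    # compress, as write() does before uploading
    with open('sample.csv', 'rb') as src, gzip.open('sample.csv.gz', 'wb') as dst:
        copyfileobj(src, dst)
    # decompress, as read() does after downloading
    with gzip.open('sample.csv.gz', 'rb') as src, open('sample_copy.csv', 'wb') as dst:
        copyfileobj(src, dst)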
|
{
"content_hash": "09e24157a49cb50f66d36737e95b16f9",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 127,
"avg_line_length": 34.40184049079755,
"alnum_prop": 0.6092732946946054,
"repo_name": "jfalkner/acumen",
"id": "6d50758415a6f0b8a65821f19b3f64b6faa1a247",
"size": "11215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acumen/io.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1626"
},
{
"name": "HTML",
"bytes": "8386"
},
{
"name": "JavaScript",
"bytes": "68725"
},
{
"name": "Python",
"bytes": "62524"
}
],
"symlink_target": ""
}
|
import numpy as np
from collections import defaultdict
import itertools
from utils import plotting
from utils.policy import make_epsilon_greedy_policy
def sarsa(env, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1):
"""
SARSA algorithm: On-policy TD control. Finds the optimal epsilon-greedy policy.
Args:
env: OpenAI environment.
num_episodes: Number of episodes to run for.
        discount_factor: Gamma discount factor.
        alpha: TD learning rate.
        epsilon: Chance to sample a random action. Float between 0 and 1.
Returns:
A tuple (Q, stats).
Q is the optimal action-value function, a dictionary mapping state -> action values.
stats is an EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
"""
# The final action-value function.
# A nested dictionary that maps state -> (action -> action-value).
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# Keeps track of useful statistics
stats = plotting.EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes))
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
for i_episode in range(num_episodes):
current_state = env.reset()
        # choose the action based on the epsilon-greedy policy
probs = policy(current_state)
action = np.random.choice(np.arange(len(probs)), p=probs)
        # keep track of the number of time-steps per episode, only for plotting
for t in itertools.count():
next_state, reward, done, _ = env.step(action)
# choose next action
next_probs = policy(next_state)
next_action = np.random.choice(np.arange(len(next_probs)), p=next_probs)
# evaluate Q using estimated action value of (next_state, next_action)
td_target = reward + discount_factor * Q[next_state][next_action]
Q[current_state][action] += alpha * (td_target - Q[current_state][action])
            # improve the policy using the newly evaluated Q
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
# Update statistics
stats.episode_rewards[i_episode] += reward
stats.episode_lengths[i_episode] = t
if done:
break
else:
current_state = next_state
action = next_action
return Q, stats
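A hedged usage sketch: any environment exposing the classic pre-0.26 Gym API (reset() -> state, step(a) -> (state, reward, done, info)) with a discrete action space works; CliffWalking-v0 is one such example.

    import gym  # assumes classic gym with the 4-tuple step API
    env = gym.make("CliffWalking-v0")
    Q, stats = sarsa(env, num_episodes=500)
    print(stats.episode_rewards[-10:].mean())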
|
{
"content_hash": "5ba6ac1f5d1f386ee8aad29225ebca8a",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 102,
"avg_line_length": 38.261538461538464,
"alnum_prop": 0.6397265782066747,
"repo_name": "transedward/ml-playground",
"id": "bbf08f5ea45c51c0a1265e21561477edfd0e8dcb",
"size": "2487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reinforcement/sarsa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3167015"
},
{
"name": "Python",
"bytes": "76537"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from unicore.languages.constants import LANGUAGES
class LanguagesTestCase(TestCase):
def test_total_languages(self):
self.assertEqual(len(LANGUAGES), 438)
|
{
"content_hash": "f170bd241c274bb99b55a0b9c214fe89",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 49,
"avg_line_length": 28.428571428571427,
"alnum_prop": 0.7788944723618091,
"repo_name": "universalcore/unicore.languages",
"id": "6faee2b90bd87bb456a9499dfb5c9827a570cac5",
"size": "199",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "unicore/languages/tests/test_languages.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "18098"
}
],
"symlink_target": ""
}
|
"""
sphinxcontrib.odfbuilder
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Open Document Format builder
:copyright: Copyright 2017 by Christopher Hoskin <christopher.hoskin@gmail.com>
:license: BSD, see LICENSE for details.
"""
import pbr.version
__version__ = pbr.version.VersionInfo(
'sphinxcontrib-odfbuilder').version_string()
|
{
"content_hash": "f20f80d7fd9e01d9a28803ae7796a45c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 83,
"avg_line_length": 23.533333333333335,
"alnum_prop": 0.6345609065155807,
"repo_name": "mans0954/odfbuilder",
"id": "400d7a93995c2a8e2457b17848741a593623440e",
"size": "353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sphinxcontrib/builders/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "807"
},
{
"name": "Makefile",
"bytes": "616"
},
{
"name": "Python",
"bytes": "12849"
}
],
"symlink_target": ""
}
|
from typing import List


class Solution:
    def canDistribute(self, nums: List[int], quantity: List[int]) -> bool:
        # Count occurrences of each distinct value in nums.
        numbersMap = {}
        for x in nums:
            if x not in numbersMap:
                numbersMap[x] = 0
            numbersMap[x] += 1
        index = 0
        numbers = [0] * len(numbersMap)
        for k, v in numbersMap.items():
            numbers[index] = v
            index += 1
        # Serve the largest orders first for better pruning.
        numbers = sorted(numbers, reverse=True)
        quantity = sorted(quantity, reverse=True)
        valid_numbers = 0
        while valid_numbers < len(numbers) and numbers[valid_numbers] != 0:
            valid_numbers += 1
        # Backtracking: try to serve order `start` from each remaining count.
        def distribute(start):
            if start >= len(quantity):
                return True
            for j in range(valid_numbers):
                if numbers[j] >= quantity[start]:
                    numbers[j] -= quantity[start]
                    if distribute(start + 1):
                        return True
                    numbers[j] += quantity[start]
            return False
        return distribute(0)
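A quick check against the examples from the problem statement (quoted from memory, so treat as illustrative):

    s = Solution()
    print(s.canDistribute([1, 2, 3, 4], [2]))      # False: no value occurs twice
    print(s.canDistribute([1, 1, 2, 2], [2, 2]))   # True: give out the 1s and the 2s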
|
{
"content_hash": "3c1c6360417f5513635cf1fd3c215d78",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 74,
"avg_line_length": 36.06896551724138,
"alnum_prop": 0.5,
"repo_name": "Magic07/online-judge-solutions",
"id": "d83df9dc27a4a23393ca206b1e9977b8a3aecaf1",
"size": "1046",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "leetcode/1758-distribute-repeating-integers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "34617"
}
],
"symlink_target": ""
}
|
import call_go
print("Output is:")
print(call_go.add(1,4))
|
{
"content_hash": "29e0c0796aa46a02116430f11d78e9e4",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 23,
"avg_line_length": 15,
"alnum_prop": 0.6833333333333333,
"repo_name": "swt02026/golang-with-other-langauage-example",
"id": "0f4736360481728cc9b3d34aa1a520282cd15dd7",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cython_call_golang/call_go_in_python.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "176"
},
{
"name": "Go",
"bytes": "283"
},
{
"name": "Julia",
"bytes": "199"
},
{
"name": "Makefile",
"bytes": "747"
},
{
"name": "Python",
"bytes": "179"
}
],
"symlink_target": ""
}
|
"""Declarative class for defining hnadling of preferences.
"""
from atom.api import List, Str
from enaml.core.api import Declarative, d_, d_func
class Preferences(Declarative):
""" Declarative class for defining a workbench preference contribution.
    Preferences objects can be contributed as children of extensions to the
    'plugin' extension point of a preference plugin.
"""
    #: Name of the method of the plugin contributing this extension to call
    #: when the preference plugin needs to save the preferences.
    saving_method = d_(Str('preferences_from_members'))
    #: Name of the method of the plugin contributing this extension to call
    #: when the preference plugin needs to load the preferences.
    loading_method = d_(Str('update_members_from_preferences'))
    #: The list of plugin members whose values should be observed and whose
    #: update should cause an automatic update of the preferences.
auto_save = d_(List())
    #: A callable taking the plugin id and the preference declaration as
    #: arguments and returning an autonomous enaml view (Container) used to
    #: edit the preferences.
@d_func
def edit_view(self, workbench, id):
"""Create a view to edit the preferences.
Parameters
----------
workbench :
Reference to the application workbench.
id : unicode
Id of the plugin for which to generate the view.
Returns
-------
view : enaml.widgets.api.Container
            View used to edit the preferences. It should have a model
            attribute. The model members must correspond to the tagged members
            of the plugin; their values will be used to update the preferences.
"""
pass
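A hedged sketch of how a plugin might contribute this declaration in an enaml manifest (the ids, extension point, and member names below are assumptions for illustration, not taken from this file):

    # manifest.enaml (enaml syntax)
    # enamldef MyPluginManifest(PluginManifest):
    #     id = 'my_plugin'
    #     Extension:
    #         id = 'preferences'
    #         point = 'exopy.app.preferences.plugin'  # assumed extension point id
    #         Preferences:
    #             auto_save = ['some_tagged_member']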
|
{
"content_hash": "09934336121ea7cbda78785a01eab83c",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 35.08,
"alnum_prop": 0.6721778791334093,
"repo_name": "Ecpy/ecpy",
"id": "db5c7940f0f5f25c740b1d5e1b9bc5a032ef6059",
"size": "2137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exopy/app/preferences/preferences.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "162"
},
{
"name": "Python",
"bytes": "1344669"
},
{
"name": "Shell",
"bytes": "420"
}
],
"symlink_target": ""
}
|
import glob
import os
import sys
import numpy as np
sys.path.insert(0, os.pardir)
from testing_harness import TestHarness
from openmc.statepoint import StatePoint
class FixedSourceTestHarness(TestHarness):
def _get_results(self):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
sp = StatePoint(statepoint)
# Write out tally data.
outstr = ''
if self._tallies:
tally_num = 1
for tally_ind in sp.tallies:
tally = sp.tallies[tally_ind]
results = np.zeros((tally.sum.size*2, ))
results[0::2] = tally.sum.ravel()
results[1::2] = tally.sum_sq.ravel()
results = ['{0:12.6E}'.format(x) for x in results]
outstr += 'tally ' + str(tally_num) + ':\n'
outstr += '\n'.join(results) + '\n'
tally_num += 1
gt = sp.global_tallies
outstr += 'leakage:\n'
outstr += '{0:12.6E}'.format(gt[gt['name'] == b'leakage'][0]['sum']) + '\n'
outstr += '{0:12.6E}'.format(gt[gt['name'] == b'leakage'][0]['sum_sq']) + '\n'
return outstr
if __name__ == '__main__':
harness = FixedSourceTestHarness('statepoint.10.*', True)
harness.main()
|
{
"content_hash": "750ca4b9c1cc249b7cdd34b2b7f1fc93",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 86,
"avg_line_length": 32.904761904761905,
"alnum_prop": 0.5463096960926194,
"repo_name": "mjlong/openmc",
"id": "12dbb8d7693d52bcadf0356cfd8a16d92c883975",
"size": "1405",
"binary": false,
"copies": "4",
"ref": "refs/heads/mjl-ds",
"path": "tests/test_fixed_source/test_fixed_source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "30306"
},
{
"name": "Fortran",
"bytes": "1309375"
},
{
"name": "Makefile",
"bytes": "220"
},
{
"name": "Python",
"bytes": "806830"
},
{
"name": "Shell",
"bytes": "4490"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python Ethereum Blockchain Explorer'
copyright = u'2012, Christoph Mussenbrock'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v0.0.1'
# The full version, including alpha/beta/rc tags.
release = 'v0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sampledoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sample.tex', u'sample Documentation',
u'Kenneth Reitz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sample', u'sample Documentation',
[u'Kenneth Reitz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sample', u'sample Documentation',
u'Kenneth Reitz', 'sample', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
{
"content_hash": "ac2f9e0dd0dc3feab48636b9b2121861",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 32.06113537117904,
"alnum_prop": 0.7017161536366113,
"repo_name": "christoph2806/py-ethscan",
"id": "25b061ab5ed22c6c526ab2dde0a34fb447405424",
"size": "7759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "11807"
}
],
"symlink_target": ""
}
|
"""Script for running all test files (except memory leaks tests)."""
import os
import sys
from psutilcli.test import unittest
from psutilcli.test import VERBOSITY
def get_suite():
HERE = os.path.abspath(os.path.dirname(__file__))
testmodules = [os.path.splitext(x)[0] for x in os.listdir(HERE)
if x.endswith('.py') and x.startswith('test_')]
suite = unittest.TestSuite()
for tm in testmodules:
suite.addTest(unittest.defaultTestLoader.loadTestsFromName(tm))
return suite
def main():
# run tests
result = unittest.TextTestRunner(verbosity=VERBOSITY).run(get_suite())
success = result.wasSuccessful()
sys.exit(0 if success else 1)
if __name__ == '__main__':
main()
|
{
"content_hash": "a2750336bfe84d496c6c44d0f53f8c58",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 26.357142857142858,
"alnum_prop": 0.6680216802168022,
"repo_name": "giampaolo/psutil-cli",
"id": "1891b0090b58998b4966917c2feb92d5c463648c",
"size": "922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psutilcli/test/runner.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3270"
},
{
"name": "Python",
"bytes": "26186"
},
{
"name": "Shell",
"bytes": "2174"
}
],
"symlink_target": ""
}
|
from rest_framework import viewsets, mixins
from rest_framework.views import APIView
from robocrm.models import RoboUser
from projects.models import Project
from officers.models import Officer
from webcams.models import Webcam
from social_media.models import SocialMedia
from sponsors.models import Sponsor
from faq.models import Category, QA
from robocrm.models import Machine
from rest_framework.response import Response
from channels.models import Channel
from rest_framework.parsers import JSONParser
from rest_framework.decorators import detail_route
from rest_framework.decorators import *
from .serializers import *
from rest_framework.decorators import api_view
from rest_framework.exceptions import APIException, ParseError, NotAuthenticated, AuthenticationFailed, PermissionDenied
from .errno import *
from .google_api import get_calendar_events
import dateutil.parser
from django.conf import settings
from django.utils import timezone
from .filters import *
from rest_framework.viewsets import GenericViewSet
from tshirts.models import TShirt
from django.core.mail import send_mail
import logging
from posters.models import Poster
from .models import APIRequest
from rest_framework_extensions.cache.decorators import cache_response
from django.utils.text import slugify
from django.contrib.contenttypes.models import ContentType
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from .permissions import IsAPIRequesterOrReadOnlyPermission, UserBalancePermission, UserRFIDPermission, UserEmailPermission, IsTooltronOrReadOnlyPermission
from django.db import IntegrityError
from rest_framework.generics import GenericAPIView, CreateAPIView
from django.shortcuts import redirect
from rest_framework import status
from datetime import datetime
from upcs.models import UPCItem
from upcs.remote_lookup import remote_lookup as upc_remote_lookup
from upcs.format_upc import format_upc
logger = logging.getLogger(__name__)
def create_api_request(request, serializer):
"""
Helper function to construct
an APIRequest and populate it's fields
that are constructed from the request
object and serializer 'meta' field.
"""
# Fix so updater_id maps correctly since
# API exposes RoboUser IDs not User IDs
user = request.user
if hasattr(user, 'robouser'):
user = user.robouser
return APIRequest(
endpoint = request.path.replace("/api", ""),
updater_object = user,
meta = serializer.validated_data.get('meta', ""),
api_client = request.META.get('HTTP_API_CLIENT', "")
)
# APIRequestViewSet is a ModelViewSet without create and destroy abilities
class APIRequestViewSet(
#mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
#mixins.DestroyModelMixin,
mixins.ListModelMixin,
GenericViewSet):
"""
A APIRequest is created whenever a sucessfully authenticated POST request
is made to '/rfid/', '/magnetic/', '/users/:id/rfid/', '/users/:id/email/',
or '/users/:id/balance/'.
Users can successfully make POST requests to '/rfid/' and '/magnetic/' through
the HTML API viewer however those requests are not shown here(only those made
by projects are shown here).
"""
permission_classes = (IsAPIRequesterOrReadOnlyPermission, )
queryset = APIRequest.objects.all()
serializer_class = APIRequestSerializer
filter_class = APIRequestFilter
class WebcamViewSet(viewsets.ReadOnlyModelViewSet):
"""
The Club's Webcams.
"""
queryset = Webcam.objects.all()
serializer_class = WebcamSerializer
filter_fields = ('id', 'name', )
class DateTimeViewSet(viewsets.ViewSet):
"""
The current datetime. Exists so that projects
without a realtime clock can easily get the datetime.
The query parameter 'form' can be provided, returning
the response of the datetime through strftime with the
provided formatting string.
"""
# TODO: remove this once django-rest-swagger fixes the bug
# that requires these be set here
    paginate_by = None
    page_kwarg = None
    paginate_by_param = None
def list(self, request):
now = datetime.now()
form = request.query_params.get('form', None)
if form:
now = now.strftime(form)
return Response({
'datetime': now
})
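# Hedged example of the 'form' parameter (URL and date are illustrative):
#   GET /api/datetime/?form=%Y-%m-%d  ->  {"datetime": "2015-06-01"}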
class RoboUserViewSet(viewsets.ReadOnlyModelViewSet):
"""
The members of the Robotics Club.
"""
queryset = RoboUser.objects.all()
serializer_class = RoboUserSerializer
filter_class = RoboUserFilter
# rfid, email, and balance are special cases
def get_serializer_class(self):
path = self.request.path
tokens = path.rsplit('/')
tokens[:] = (x for x in tokens if x != "")
if len(tokens):
action = tokens[-1]
if action == 'email':
return EmailSerializer
elif action == 'rfid':
return RFIDSerializer
elif action == 'balance':
return BalanceSerializer
return super().get_serializer_class()
@detail_route(methods=['POST'], permission_classes=[UserBalancePermission, ])
def balance(self, request, pk):
"""
        Increments/decrements a User's balance (privileged operation).
---
serializer: api.serializers.BalanceSerializer
"""
u = self.get_object()
user = request._user
serializer = BalanceSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
amount = serializer.validated_data['amount']
u.balance += amount
u.save()
api_request = create_api_request(request, serializer)
api_request.user = u
api_request.extra = "Amount: ${0:.2f}, New Balance: ${1:.2f}".format(amount, u.balance)
api_request.save()
return Response({
"api_request": api_request.id
})
@detail_route(methods=['POST'], permission_classes=[UserRFIDPermission, ])
def rfid(self, request, pk):
"""
        Sets a User's RFID (privileged operation).
---
serializer: api.serializers.RFIDSerializer
"""
u = self.get_object()
user = request._user
serializer = RFIDSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
rfid = serializer.validated_data['rfid']
u.rfid = rfid
        # save() raises IntegrityError if the RFID already belongs to someone
try:
u.save()
except IntegrityError:
error = ParseError(detail="RFID already belongs to another member.")
error.errno = DUPLICATE
raise error
api_request = create_api_request(request, serializer)
api_request.user = u
api_request.save()
return Response({
"api_request": api_request.id
})
@detail_route(methods=['POST'], permission_classes=[UserEmailPermission, ])
def email(self, request, pk):
"""
        Sends a User an email (privileged operation).
---
serializer: api.serializers.EmailSerializer
"""
user = self.get_object()
project = request._user
from_address = "{}@roboticsclub.org".format(slugify(str(project)))
serializer = EmailSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
subject = serializer.validated_data['subject']
content = serializer.validated_data['content']
send_mail(subject, content, from_address, [user.user.email])
api_request = create_api_request(request, serializer)
api_request.extra = "Subject: {}, Body: {}".format(subject, content)
api_request.user = user
api_request.save()
return Response({
"api_request": api_request.id
})
class OfficerViewSet(viewsets.ReadOnlyModelViewSet):
"""
The officers of the Robotics Club.
"""
queryset = Officer.objects.all()
serializer_class = OfficerSerializer
filter_fields = ('id', 'position', 'user', 'order', )
class ProjectViewSet(viewsets.ReadOnlyModelViewSet):
"""
Club Projects
"""
queryset = Project.objects.all()
serializer_class = ProjectSerializer
filter_fields = ('id', 'name', 'display', 'leaders', )
# ChannelViewSet is a ModelViewSet without create and destroy abilities
class ChannelViewSet(
#mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
#mixins.DestroyModelMixin,
mixins.ListModelMixin,
GenericViewSet):
queryset = Channel.objects.all()
serializer_class = ChannelSerializer
filter_class = ChannelFilter
# Channels have anonymous read only access
permission_classes = (IsAuthenticatedOrReadOnly, )
class SponsorViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Sponsor.objects.all()
serializer_class = SponsorSerializer
filter_fields = ('id', 'name', 'active', )
class SocialMediaViewSet(viewsets.ReadOnlyModelViewSet):
queryset = SocialMedia.objects.all()
serializer_class = SocialMediaSerializer
filter_fields = ('id', 'name', )
class CalendarViewSet(viewsets.ViewSet):
"""
    Returns a list of events currently occurring on the club's calendar.
    Each event has the fields 'name', 'location', 'start_time', and 'end_time'.
The 'current time' can be changed by setting the URL parameter 'dt' to a specified datetime.
"""
# TODO: remove this once django-rest-swagger fixes the bug
# that requires these be set here
    paginate_by = None
    page_kwarg = None
    paginate_by_param = None
# TODO: key this to take into account dt
#@cache_response(30)
def list(self, request):
"""
List the calendar events.
"""
dt = self.request.query_params.get('dt', None)
if not dt:
# If no datetime specified use now
dt = datetime.now()
else:
dt = dateutil.parser.parse(dt)
events = get_calendar_events(dt)
return Response(events)
class MagneticView(GenericAPIView):
"""
Returns the RoboUser ID associated with the specified CMU Card ID
and the APIRequest ID.
"""
serializer_class = MagneticSerializer
def post(self, request, *args, **kwargs):
serializer = MagneticSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
magnetic = serializer.validated_data['magnetic']
logger.info("Magnetic lookup {}".format(magnetic))
api_request = create_api_request(request, serializer)
try:
robouser = RoboUser.objects.get(magnetic=magnetic)
api_request.user = robouser
api_request.save()
return Response({
"found": True,
"user": robouser.id,
"api_request": api_request.id
})
except RoboUser.DoesNotExist:
api_request.save()
return Response({
"found": False,
"user": None,
"api_request": api_request.id
})
class RFIDView(GenericAPIView):
"""
Returns the RoboUser ID associated with the specified CMU RFID tag
and the APIRequest ID.
"""
serializer_class = RFIDSerializer
def post(self, request, *args, **kwargs):
serializer = RFIDSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
rfid = serializer.validated_data['rfid']
logger.info("RFID lookup {}".format(rfid))
api_request = create_api_request(request, serializer)
try:
robouser = RoboUser.objects.get(rfid=rfid)
api_request.user = robouser
api_request.save()
return Response({
"found": True,
"user": robouser.id,
"api_request": api_request.id
})
except RoboUser.DoesNotExist:
api_request.save()
return Response({
"found": False,
"user": None,
"api_request": api_request.id
})
class CategoryViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Category.objects.all()
serializer_class = CategorySerializer
filter_fields = ('id', 'title', )
class TShirtViewSet(viewsets.ReadOnlyModelViewSet):
queryset = TShirt.objects.all()
serializer_class = TShirtSerializer
filter_fields = ('id', 'name', 'year', )
class PosterViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Poster.objects.all()
serializer_class = PosterSerializer
filter_fields = ('id', 'name', 'year', )
# MachineViewSet is a ModelViewSet without create and destroy abilities
class MachineViewSet(
#mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
#mixins.DestroyModelMixin,
mixins.ListModelMixin,
GenericViewSet):
queryset = Machine.objects.all()
serializer_class = MachineSerializer
filter_class = MachineFilter
    # Machines are read-only unless the client is Tooltron
permission_classes = (IsTooltronOrReadOnlyPermission, )
class UPCItemViewSet(viewsets.ReadOnlyModelViewSet):
"""
This endpoint is used to find out more information about a item with a UPC code.
At the time of this writing, the only UPC items used by the club are Fridge items
and thus why this model has a `cost` field.
When a UPC is retrieved a request should be made to `/upcs/?upc=<YOUR_UPC>` to find
the Item corresponding to the specified UPC. By making a request to this URL with the
`upc` query parameter the UPC is properly formatted to a 8 digit UPCE or 12 digit UPCA
and then looked up locally. If an item cannot be found a external database is queried,
and if a result is found it's result is saved locally.
"""
queryset = UPCItem.objects.all()
def get_queryset(self):
upc = self.request.query_params.get('upc', None)
if not upc:
return super().get_queryset()
upc = format_upc(upc)
items = UPCItem.objects.filter(upc=upc)
if items.count() > 0:
            # must return a QuerySet, not a list
return items
else:
name = upc_remote_lookup(upc)
if not name:
return UPCItem.objects.none()
else:
upcitem = UPCItem(name=name, upc=upc)
upcitem.save()
return UPCItem.objects.filter(id=upcitem.id)
serializer_class = UPCItemSerializer
    # UPC cannot be a filter field as it has to be used to determine
    # the queryset in `get_queryset`.
filter_fields = ('id', 'name', 'cost', )
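# A hedged client-side sketch of the lookup flow described above (host is
# hypothetical; '00012345' stands in for a real UPCE/UPCA code):
#
#   import requests
#   requests.get("https://example.org/api/upcs/", params={"upc": "00012345"}).json()
#
# A local hit returns the stored UPCItem; on a miss the external database is
# consulted and any result is cached as a new UPCItem.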
def login_redirect_docs(request):
return redirect('/admin/login/?next=/docs/')
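A hedged client sketch of the RFID lookup flow (host, credentials, and tag value are hypothetical; the 'API-CLIENT' request header is what create_api_request reads as HTTP_API_CLIENT):

    import requests
    resp = requests.post(
        "https://example.org/api/rfid/",
        data={"rfid": "0123456789", "meta": "door access"},
        headers={"API-CLIENT": "tooltron"},
        auth=("project", "secret"),  # whatever auth the deployment requires
    )
    print(resp.json())  # {"found": ..., "user": ..., "api_request": ...}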
|
{
"content_hash": "438fc3fe29e94eccec39bb88e55fb434",
"timestamp": "",
"source": "github",
"line_count": 491,
"max_line_length": 155,
"avg_line_length": 28.782077393075358,
"alnum_prop": 0.6918341352957826,
"repo_name": "sreidy/roboticsclub.org",
"id": "a611f61f0e0e40c3b03f664d2fda2419ba1ace69",
"size": "14132",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87807"
},
{
"name": "HTML",
"bytes": "32573"
},
{
"name": "JavaScript",
"bytes": "5052"
},
{
"name": "Python",
"bytes": "239652"
}
],
"symlink_target": ""
}
|
import os
import ddt
import mock
from oslo_config import cfg
import requests
import six
from six.moves.urllib import parse
from rally import exceptions
from rally.verification.tempest import config
from tests.unit import fakes
from tests.unit import test
CONF = cfg.CONF
CREDS = {
"admin": {
"username": "admin",
"tenant_name": "admin",
"password": "admin-12345",
"auth_url": "http://test:5000/v2.0/",
"permission": "admin",
"region_name": "test",
"admin_domain_name": "Default",
"https_insecure": False,
"https_cacert": "/path/to/cacert/file"
}
}
@ddt.ddt
class TempestConfigTestCase(test.TestCase):
def setUp(self):
super(TempestConfigTestCase, self).setUp()
mock.patch("rally.common.objects.deploy.db.deployment_get",
return_value=CREDS).start()
mock.patch("rally.osclients.Clients").start()
self.mock_isfile = mock.patch("os.path.isfile",
return_value=True).start()
self.tempest_conf = config.TempestConfig("fake_deployment")
@mock.patch("os.rename")
@mock.patch("six.moves.builtins.open", side_effect=mock.mock_open())
@mock.patch("requests.get", return_value=mock.MagicMock(status_code=200))
def test__download_image_success(self, mock_get,
mock_open, mock_rename):
self.mock_isfile.return_value = False
self.tempest_conf._download_image()
mock_get.assert_called_once_with(
CONF.tempest.img_url, stream=True)
@mock.patch("requests.get")
@ddt.data(404, 500)
def test__download_image_failure(self, status_code, mock_get):
self.mock_isfile.return_value = False
mock_get.return_value = mock.MagicMock(status_code=status_code)
self.assertRaises(exceptions.TempestConfigCreationFailure,
self.tempest_conf._download_image)
@mock.patch("requests.get", side_effect=requests.ConnectionError())
def test__download_image_connection_error(self, mock_requests_get):
self.mock_isfile.return_value = False
self.assertRaises(exceptions.TempestConfigCreationFailure,
self.tempest_conf._download_image)
@ddt.data({"publicURL": "test_url"},
{"interface": "public", "url": "test_url"})
def test__get_service_url(self, endpoint):
mock_catalog = mock.MagicMock()
mock_catalog.get_endpoints.return_value = {
"test_service_type": [endpoint]}
self.tempest_conf.keystone.service_catalog = mock_catalog
self.tempest_conf.clients.services.return_value = {
"test_service_type": "test_service"}
self.assertEqual(
self.tempest_conf._get_service_url("test_service"), "test_url")
def test__configure_auth(self):
self.tempest_conf._configure_auth()
expected = (
("admin_username", CREDS["admin"]["username"]),
("admin_password", CREDS["admin"]["password"]),
("admin_project_name", CREDS["admin"]["tenant_name"]),
("admin_domain_name", CREDS["admin"]["admin_domain_name"]))
result = self.tempest_conf.conf.items("auth")
for item in expected:
self.assertIn(item, result)
@ddt.data("data_processing", "data-processing")
def test__configure_data_processing(self, service_type):
self.tempest_conf.available_services = ["sahara"]
self.tempest_conf.clients.services.return_value = {
service_type: "sahara"}
self.tempest_conf._configure_data_processing()
self.assertEqual(
self.tempest_conf.conf.get(
"data-processing", "catalog_type"), service_type)
def test__configure_identity(self):
self.tempest_conf._configure_identity()
expected = (
("region", CREDS["admin"]["region_name"]),
("auth_version", "v2"),
("uri", CREDS["admin"]["auth_url"][:-1]),
("uri_v3", CREDS["admin"]["auth_url"].replace("/v2.0/", "/v3")),
("disable_ssl_certificate_validation",
str(CREDS["admin"]["https_insecure"])),
("ca_certificates_file", CREDS["admin"]["https_cacert"]))
result = self.tempest_conf.conf.items("identity")
for item in expected:
self.assertIn(item, result)
def test__configure_network_if_neutron(self):
self.tempest_conf.available_services = ["neutron"]
client = self.tempest_conf.clients.neutron()
client.list_networks.return_value = {
"networks": [
{
"status": "ACTIVE",
"id": "test_id",
"router:external": True
}
]
}
self.tempest_conf._configure_network()
self.assertEqual(
self.tempest_conf.conf.get("network",
"public_network_id"), "test_id")
def test__configure_network_if_nova(self):
self.tempest_conf.available_services = ["nova"]
client = self.tempest_conf.clients.nova()
client.networks.list.return_value = [
mock.MagicMock(human_id="fake-network")]
self.tempest_conf._configure_network()
expected = {"compute": ("fixed_network_name", "fake-network"),
"validation": ("network_for_ssh", "fake-network")}
for section, option in six.iteritems(expected):
result = self.tempest_conf.conf.items(section)
self.assertIn(option, result)
@ddt.data({}, {"version": "4.1.0", "args": ("extensions", "/extensions"),
"kwargs": {"retrieve_all": True}})
@ddt.unpack
def test__configure_network_feature_enabled(
self, version="4.0.0", args=("/extensions",), kwargs={}):
self.tempest_conf.available_services = ["neutron"]
client = self.tempest_conf.clients.neutron()
client.list_ext.return_value = {
"extensions": [
{"alias": "dvr"},
{"alias": "extra_dhcp_opt"},
{"alias": "extraroute"}
]
}
mock.patch("neutronclient.version.__version__", version).start()
self.tempest_conf._configure_network_feature_enabled()
client.list_ext.assert_called_once_with(*args, **kwargs)
self.assertEqual(self.tempest_conf.conf.get(
"network-feature-enabled", "api_extensions"),
"dvr,extra_dhcp_opt,extraroute")
@mock.patch("os.makedirs")
@mock.patch("os.path.exists", return_value=False)
def test__configure_oslo_concurrency(self, mock_exists, mock_makedirs):
self.tempest_conf._configure_oslo_concurrency()
lock_path = os.path.join(
self.tempest_conf.data_dir, "lock_files_fake_deployment")
mock_makedirs.assert_called_with(lock_path)
self.assertEqual(
self.tempest_conf.conf.get(
"oslo_concurrency", "lock_path"), lock_path)
def test__configure_object_storage(self):
self.tempest_conf._configure_object_storage()
expected = (
("operator_role", CONF.tempest.swift_operator_role),
("reseller_admin_role", CONF.tempest.swift_reseller_admin_role))
result = self.tempest_conf.conf.items("object-storage")
for item in expected:
self.assertIn(item, result)
def test__configure_orchestration(self):
self.tempest_conf._configure_orchestration()
expected = (
("stack_owner_role", CONF.tempest.heat_stack_owner_role),
("stack_user_role", CONF.tempest.heat_stack_user_role))
result = self.tempest_conf.conf.items("orchestration")
for item in expected:
self.assertIn(item, result)
def test__configure_scenario(self):
self.tempest_conf._configure_scenario()
image_name = parse.urlparse(
config.CONF.tempest.img_url).path.split("/")[-1]
expected = (("img_dir", self.tempest_conf.data_dir),
("img_file", image_name))
result = self.tempest_conf.conf.items("scenario")
for item in expected:
self.assertIn(item, result)
def test__configure_service_available(self):
available_services = ("nova", "cinder", "glance", "sahara")
self.tempest_conf.available_services = available_services
self.tempest_conf._configure_service_available()
expected = (
("neutron", "False"), ("heat", "False"), ("nova", "True"),
("swift", "False"), ("cinder", "True"), ("sahara", "True"),
("glance", "True"))
result = self.tempest_conf.conf.items("service_available")
for item in expected:
self.assertIn(item, result)
@ddt.data({}, {"service": "neutron", "connect_method": "floating"})
@ddt.unpack
def test__configure_validation(self, service="nova",
connect_method="fixed"):
self.tempest_conf.available_services = [service]
self.tempest_conf._configure_validation()
expected = (("run_validation", "True"),
("connect_method", connect_method))
result = self.tempest_conf.conf.items("validation")
for item in expected:
self.assertIn(item, result)
@mock.patch("rally.verification.tempest.config._write_config")
@mock.patch("inspect.getmembers")
def test_generate(self, mock_inspect_getmembers, mock__write_config):
configure_something_method = mock.MagicMock()
mock_inspect_getmembers.return_value = [("_configure_something",
configure_something_method)]
fake_extra_conf = mock.MagicMock()
fake_extra_conf.sections.return_value = ["section"]
fake_extra_conf.items.return_value = [("option", "value")]
self.tempest_conf.generate("/path/to/fake/conf", fake_extra_conf)
self.assertEqual(configure_something_method.call_count, 1)
self.assertIn(("option", "value"),
self.tempest_conf.conf.items("section"))
self.assertEqual(mock__write_config.call_count, 1)
@mock.patch("six.moves.builtins.open", side_effect=mock.mock_open())
def test__write_config(self, mock_open):
conf_path = "/path/to/fake/conf"
conf_data = mock.Mock()
config._write_config(conf_path, conf_data)
mock_open.assert_called_once_with(conf_path, "w+")
conf_data.write.assert_called_once_with(mock_open.side_effect())
class TempestResourcesContextTestCase(test.TestCase):
def setUp(self):
super(TempestResourcesContextTestCase, self).setUp()
mock.patch("rally.common.objects.deploy.db.deployment_get",
return_value=CREDS).start()
mock.patch("rally.osclients.Clients").start()
fake_verification = {"uuid": "uuid"}
self.context = config.TempestResourcesContext("fake_deployment",
fake_verification,
"/fake/path/to/config")
self.context.conf.add_section("compute")
self.context.conf.add_section("orchestration")
@mock.patch("rally.plugins.openstack.wrappers."
"network.NeutronWrapper.create_network")
@mock.patch("six.moves.builtins.open", side_effect=mock.mock_open())
def test_options_configured_manually(
self, mock_open, mock_neutron_wrapper_create_network):
self.context.available_services = ["glance", "heat", "nova", "neutron"]
self.context.conf.set("compute", "image_ref", "id1")
self.context.conf.set("compute", "image_ref_alt", "id2")
self.context.conf.set("compute", "flavor_ref", "id3")
self.context.conf.set("compute", "flavor_ref_alt", "id4")
self.context.conf.set("compute", "fixed_network_name", "name1")
self.context.conf.set("orchestration", "instance_type", "id5")
self.context.__enter__()
glanceclient = self.context.clients.glance()
novaclient = self.context.clients.nova()
self.assertEqual(glanceclient.images.create.call_count, 0)
self.assertEqual(novaclient.flavors.create.call_count, 0)
self.assertEqual(mock_neutron_wrapper_create_network.call_count, 0)
def test__create_tempest_roles(self):
role1 = CONF.tempest.swift_operator_role
role2 = CONF.tempest.swift_reseller_admin_role
role3 = CONF.tempest.heat_stack_owner_role
role4 = CONF.tempest.heat_stack_user_role
client = self.context.clients.verified_keystone()
client.roles.list.return_value = [fakes.FakeRole(name=role1),
fakes.FakeRole(name=role2)]
        client.roles.create.side_effect = [fakes.FakeRole(name=role3),
                                           fakes.FakeRole(name=role4)]
self.context._create_tempest_roles()
self.assertEqual(client.roles.create.call_count, 2)
created_roles = [role.name for role in self.context._created_roles]
self.assertIn(role3, created_roles)
self.assertIn(role4, created_roles)
# We can choose any option to test the '_configure_option' method. So let's
# configure the 'flavor_ref' option.
def test__configure_option(self):
create_method = mock.MagicMock()
create_method.side_effect = [fakes.FakeFlavor(id="id1")]
self.context.conf.set("compute", "flavor_ref", "")
self.context._configure_option("compute",
"flavor_ref", create_method, 64)
self.assertEqual(create_method.call_count, 1)
result = self.context.conf.get("compute", "flavor_ref")
self.assertEqual("id1", result)
@mock.patch("rally.plugins.openstack.wrappers.glance.wrap")
def test__discover_or_create_image_when_image_exists(self, mock_wrap):
client = mock_wrap.return_value
client.list_images.return_value = [fakes.FakeImage(name="CirrOS")]
image = self.context._discover_or_create_image()
self.assertEqual("CirrOS", image.name)
self.assertEqual(0, client.create_image.call_count)
self.assertEqual(0, len(self.context._created_images))
@mock.patch("rally.plugins.openstack.wrappers.glance.wrap")
def test__discover_or_create_image(self, mock_wrap):
client = mock_wrap.return_value
image = self.context._discover_or_create_image()
self.assertEqual(image, client.create_image.return_value)
self.assertEqual(self.context._created_images[0],
client.create_image.return_value)
mock_wrap.assert_called_once_with(self.context.clients.glance,
self.context)
client.create_image.assert_called_once_with(
container_format=CONF.tempest.img_container_format,
image_location=mock.ANY,
disk_format=CONF.tempest.img_disk_format,
name=mock.ANY,
visibility="public")
def test__discover_or_create_flavor_when_flavor_exists(self):
client = self.context.clients.nova()
client.flavors.list.return_value = [fakes.FakeFlavor(id="id1", ram=64,
vcpus=1, disk=0)]
flavor = self.context._discover_or_create_flavor(64)
self.assertEqual("id1", flavor.id)
self.assertEqual(0, len(self.context._created_flavors))
def test__discover_or_create_flavor(self):
client = self.context.clients.nova()
client.flavors.create.side_effect = [fakes.FakeFlavor(id="id1")]
flavor = self.context._discover_or_create_flavor(64)
self.assertEqual("id1", flavor.id)
self.assertEqual("id1", self.context._created_flavors[0].id)
def test__create_network_resources(self):
client = self.context.clients.neutron()
fake_network = {
"id": "nid1",
"name": "network",
"status": "status"}
client.create_network.side_effect = [{"network": fake_network}]
client.create_router.side_effect = [{"router": {"id": "rid1"}}]
client.create_subnet.side_effect = [{"subnet": {"id": "subid1"}}]
network = self.context._create_network_resources()
self.assertEqual("nid1", network["id"])
self.assertEqual("nid1", self.context._created_networks[0]["id"])
self.assertEqual("rid1",
self.context._created_networks[0]["router_id"])
self.assertEqual("subid1",
self.context._created_networks[0]["subnets"][0])
def test__cleanup_tempest_roles(self):
self.context._created_roles = [fakes.FakeRole(), fakes.FakeRole()]
self.context._cleanup_tempest_roles()
client = self.context.clients.keystone()
self.assertEqual(client.roles.delete.call_count, 2)
@mock.patch("rally.plugins.openstack.wrappers.glance.wrap")
def test__cleanup_images(self, mock_wrap):
self.context._created_images = [fakes.FakeImage(id="id1"),
fakes.FakeImage(id="id2")]
self.context.conf.set("compute", "image_ref", "id1")
self.context.conf.set("compute", "image_ref_alt", "id2")
wrapper = mock_wrap.return_value
wrapper.get_image.side_effect = [
fakes.FakeImage(id="id1", status="DELETED"),
fakes.FakeImage(id="id2"),
fakes.FakeImage(id="id2", status="DELETED")]
self.context._cleanup_images()
client = self.context.clients.glance()
client.images.delete.assert_has_calls([mock.call("id1"),
mock.call("id2")])
self.assertEqual("", self.context.conf.get("compute", "image_ref"))
self.assertEqual("", self.context.conf.get("compute", "image_ref_alt"))
def test__cleanup_flavors(self):
self.context._created_flavors = [fakes.FakeFlavor(id="id1"),
fakes.FakeFlavor(id="id2"),
fakes.FakeFlavor(id="id3")]
self.context.conf.set("compute", "flavor_ref", "id1")
self.context.conf.set("compute", "flavor_ref_alt", "id2")
self.context.conf.set("orchestration", "instance_type", "id3")
self.context._cleanup_flavors()
client = self.context.clients.nova()
self.assertEqual(client.flavors.delete.call_count, 3)
self.assertEqual("", self.context.conf.get("compute", "flavor_ref"))
self.assertEqual("", self.context.conf.get("compute",
"flavor_ref_alt"))
self.assertEqual("", self.context.conf.get("orchestration",
"instance_type"))
@mock.patch("rally.plugins.openstack.wrappers."
"network.NeutronWrapper.delete_network")
def test__cleanup_network_resources(
self, mock_neutron_wrapper_delete_network):
self.context._created_networks = [{"name": "net-12345"}]
self.context.conf.set("compute", "fixed_network_name", "net-12345")
self.context._cleanup_network_resources()
self.assertEqual(mock_neutron_wrapper_delete_network.call_count, 1)
self.assertEqual("", self.context.conf.get("compute",
"fixed_network_name"))
|
{
"content_hash": "b7044c7a8f3d37a669180f958a2db6d4",
"timestamp": "",
"source": "github",
"line_count": 465,
"max_line_length": 79,
"avg_line_length": 42.21935483870968,
"alnum_prop": 0.5960676446617766,
"repo_name": "varuntiwari27/rally",
"id": "2873cfb9f427bae5a6ac285cdf4f75476a83e1a9",
"size": "20262",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/verification/test_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "452687"
},
{
"name": "C++",
"bytes": "26115"
},
{
"name": "CSS",
"bytes": "6231"
},
{
"name": "HTML",
"bytes": "51546"
},
{
"name": "JavaScript",
"bytes": "14187"
},
{
"name": "Makefile",
"bytes": "68380"
},
{
"name": "Mako",
"bytes": "17949"
},
{
"name": "Python",
"bytes": "8478187"
},
{
"name": "Shell",
"bytes": "61579"
}
],
"symlink_target": ""
}
|
from django import forms
from django.forms.util import ErrorDict
FORM_MODEL_TRANSLATION = {
'billing__first_name': 'bill_first_name',
'billing__last_name': 'bill_last_name',
'billing__street_address': 'bill_street1',
'billing__extended_address': 'bill_street2',
'billing__locality': 'bill_city',
'billing__region': 'bill_state',
'billing__postal_code': 'bill_postal_code',
'billing__country_code_alpha2': 'bill_country',
'customer__phone': 'bill_phone',
'shipping__first_name': 'ship_first_name',
'shipping__last_name': 'ship_last_name',
'shipping__street_address': 'ship_street1',
'shipping__extended_address': 'ship_street2',
'shipping__locality': 'ship_city',
'shipping__region': 'ship_state',
'shipping__postal_code': 'ship_postal_code',
'shipping__country_code_alpha2': 'ship_country'
}
class PaymentForm(forms.Form):
billing__first_name = forms.CharField(max_length=255)
billing__last_name = forms.CharField(max_length=255)
billing__street_address = forms.CharField(max_length=80)
billing__extended_address = forms.CharField(max_length=80, required=False)
billing__locality = forms.CharField(max_length=50)
billing__region = forms.CharField(max_length=50)
billing__postal_code = forms.CharField(max_length=30)
billing__country_code_alpha2 = forms.CharField(max_length=2)
customer__phone = forms.CharField(max_length=30)
shipping__first_name = forms.CharField(max_length=255)
shipping__last_name = forms.CharField(max_length=255)
shipping__street_address = forms.CharField(max_length=80)
shipping__extended_address = forms.CharField(max_length=80, required=False)
shipping__locality = forms.CharField(max_length=50)
shipping__region = forms.CharField(max_length=50)
shipping__postal_code = forms.CharField(max_length=30)
shipping__country_code_alpha2 = forms.CharField(max_length=2)
def __getitem__(self, name):
try:
field = self.fields[name]
except KeyError:
raise KeyError("Key %r not found in Form" % name)
return forms.forms.BoundField(self, field, name)
def set_transaction(self, data):
self._submit_url = data.pop('submit_url')
for k, v in data.iteritems():
if self.is_bound:
self.data[k] = v
else:
self.fields[k].initial = v
def set_result(self, result):
self.is_bound = True
if result.errors:
self._errors = result.errors
else:
self._errors = ErrorDict()
if not result.success:
self._errors[forms.forms.NON_FIELD_ERRORS] = self.error_class([result.errors])
@property
def action(self):
"""
Action to post the form to.
"""
return self._submit_url
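    # Hedged usage sketch (illustrative only): the gateway data below is
    # hypothetical, but the keys follow the fields defined on this form.
    #
    #     form = PaymentForm()
    #     form.set_transaction({'submit_url': 'https://gateway.example.com/pay',
    #                           'billing__first_name': 'Ada'})
    #     form.action                                  # -> 'https://gateway.example.com/pay'
    #     form.fields['billing__first_name'].initial   # -> 'Ada'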
|
{
"content_hash": "67d810aeef2ad0d5cf52d1a3e2407cac",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 94,
"avg_line_length": 39.02739726027397,
"alnum_prop": 0.6433836433836434,
"repo_name": "hiidef/hiicart",
"id": "bb3b8130dfcd94b9c947bd93f4db45cd3e39fdfa",
"size": "2849",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hiicart/gateway/cash_on_delivery/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3735"
},
{
"name": "Python",
"bytes": "617340"
}
],
"symlink_target": ""
}
|
import ujson
from gnocchiclient.v1 import base
class ArchivePolicyManager(base.Manager):
url = "v1/archive_policy/"
def list(self):
"""List archive policies
"""
return self._get(self.url).json()
def get(self, name):
"""Get an archive policy
:param name: Name of the archive policy
:type name: str
"""
return self._get(self.url + name).json()
def create(self, archive_policy):
"""Create an archive policy
:param archive_policy: the archive policy
:type archive_policy: dict
"""
return self._post(
self.url, headers={'Content-Type': "application/json"},
data=ujson.dumps(archive_policy)).json()
def update(self, name, archive_policy):
"""Update an archive policy
:param name: the name of archive policy
:type name: str
:param archive_policy: the archive policy
:type archive_policy: dict
"""
return self._patch(
            self.url + name,
headers={'Content-Type': "application/json"},
data=ujson.dumps(archive_policy)).json()
def delete(self, name):
"""Delete an archive policy
:param name: Name of the archive policy
:type name: str
"""
self._delete(self.url + name)
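    # Hedged usage sketch: assumes `client` is an initialized gnocchiclient
    # v1 client exposing this manager as `client.archive_policy`; the policy
    # payload shape follows the Gnocchi REST API.
    #
    #     policy = {'name': 'low',
    #               'definition': [{'granularity': '1s', 'points': 3600}]}
    #     client.archive_policy.create(policy)
    #     client.archive_policy.get('low')
    #     client.archive_policy.delete('low')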
|
{
"content_hash": "4abc5d8fe137a0686f412cda9d6ae704",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 67,
"avg_line_length": 24.763636363636362,
"alnum_prop": 0.5697503671071953,
"repo_name": "sileht/python-gnocchiclient",
"id": "2f87f391935d0f76ca79cae6ae510e46492afef3",
"size": "1937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gnocchiclient/v1/archive_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "195737"
}
],
"symlink_target": ""
}
|
import os
import re
import librosa
import subprocess
from datetime import datetime
from decimal import Decimal
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
from ..acoustics import analyze_pitch, analyze_formant_tracks, analyze_formant_points, analyze_intensity, \
analyze_script, analyze_track_script, analyze_utterance_pitch, update_utterance_pitch_track, analyze_vot
from ..acoustics.formants.helper import save_formant_point_data
from ..acoustics.classes import Track, TimePoint
from .syllabic import SyllabicContext
from ..acoustics.utils import load_waveform, generate_spectrogram
def sanitize_value(value, type):
"""
    Ensure a given value is of the correct type; if the value is a list or tuple, the first element will be coerced
Parameters
----------
value : object
Value to be coerced
type : Type
One of ``int``, ``float``, ``str``, ``bool``
Returns
-------
object
Value coerced to specified type
"""
if not isinstance(value, type):
if isinstance(value, (list, tuple)):
value = value[0]
try:
value = type(value)
except (ValueError, TypeError):
value = None
return value
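# Example behaviour, following the coercion rules above:
#
#     >>> sanitize_value('3.5', float)
#     3.5
#     >>> sanitize_value(('4', 'ignored'), int)   # first element is coerced
#     4
#     >>> sanitize_value('oops', int) is None     # failed coercion -> None
#     True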
def generate_filter_string(discourse, begin, end, channel, num_points, kwargs):
"""
Constructs a filter string in InfluxDB query language (i.e., WHERE clause) based on relevant information from
the Neo4j database
Parameters
----------
discourse : str
Name of the audio file
begin : float
Beginning of the track in seconds
end : float
End of the track in seconds
channel : int
Which channel of the audio file
num_points : int
Number of points in the track to return, if 0 will return all raw measurements
kwargs : dict
Any extra filters
Returns
-------
str
InfluxDB query language WHERE clause to specify a track
"""
extra_filters = ['''"{}" = '{}' '''.format(k, v) for k, v in kwargs.items()]
filter_string = '''WHERE "discourse" = '{}'
AND "time" >= {}
AND "time" <= {}
AND "channel" = '{}'
'''
if extra_filters:
filter_string += '\nAND {}'.format('\nAND '.join(extra_filters))
if num_points:
duration = end - begin
time_step = duration / (num_points - 1)
begin -= time_step / 2
end += time_step / 2
time_step *= 1000
filter_string += '\ngroup by time({}ms) fill(null)'.format(int(time_step))
discourse = discourse.replace("'", r"\'")
filter_string = filter_string.format(discourse, s_to_nano(begin), s_to_nano(end), channel)
return filter_string
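# Illustrative output for generate_filter_string('speech1', 1.0, 2.0, 0, 0, {})
# with internal whitespace condensed; the time bounds are nanoseconds produced
# by s_to_nano, defined below:
#
#     WHERE "discourse" = 'speech1' AND "time" >= 1000000000
#     AND "time" <= 2000000000 AND "channel" = '0'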
def s_to_nano(seconds):
"""
Converts seconds (as a float or Decimal) to nanoseconds (as an int)
Parameters
----------
seconds : float or Decimal
Seconds
Returns
-------
int
Nanoseconds
"""
if not isinstance(seconds, Decimal):
seconds = Decimal(seconds).quantize(Decimal('0.001'))
return int(seconds * Decimal('1e9'))
def s_to_ms(seconds):
"""
Converts seconds (as a float or Decimal) to milliseconds (as an int)
Parameters
----------
seconds : float or Decimal
Seconds
Returns
-------
int
Milliseconds
"""
if not isinstance(seconds, Decimal):
seconds = Decimal(seconds).quantize(Decimal('0.001'))
return int(seconds * Decimal('1e3'))
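# Examples of the time conversions above:
#
#     >>> s_to_nano(0.5)
#     500000000
#     >>> s_to_ms(0.5)
#     500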
def to_seconds(time_string):
"""
    Converts a time string from InfluxDB into a number of seconds, used to generate a time point in an audio file
Parameters
----------
time_string : str
        Formatted time string (either ``%Y-%m-%dT%H:%M:%S.%fZ`` or ``%Y-%m-%dT%H:%M:%SZ``)
Returns
-------
Decimal
Time stamp quantized to the nearest millisecond
"""
"""Converts a string representing a date and time to a
decimal representing number of seconds into the day"""
try:
d = datetime.strptime(time_string, '%Y-%m-%dT%H:%M:%S.%fZ')
s = 60 * 60 * d.hour + 60 * d.minute + d.second + d.microsecond / 1e6
    except ValueError:
try:
d = datetime.strptime(time_string, '%Y-%m-%dT%H:%M:%SZ')
s = 60 * 60 * d.hour + 60 * d.minute + d.second + d.microsecond / 1e6
        except ValueError:
m = re.search(r'T(\d{2}):(\d{2}):(\d+)\.(\d+)?', time_string)
p = m.groups()
s = 60 * 60 * int(p[0]) + 60 * int(p[1]) + int(p[2]) + int(p[3][:6]) / 1e6
s = Decimal(s).quantize(Decimal('0.001'))
return s
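# Example: the InfluxDB timestamp is interpreted as a time of day and
# converted to seconds into the audio file:
#
#     >>> to_seconds('1970-01-01T00:01:30.500000Z')
#     Decimal('90.500')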
class AudioContext(SyllabicContext):
"""
Class that contains methods for dealing with audio files for corpora
"""
def load_audio(self, discourse, file_type):
"""
Loads a given audio file at the specified sampling rate type (``consonant``, ``vowel`` or ``low_freq``).
Consonant files have a sampling rate of 16 kHz, vowel files a sampling rate of 11 kHz, and low frequency files
a sampling rate of 1.2 kHz.
Parameters
----------
discourse : str
Name of the audio file to load
file_type : str
One of ``consonant``, ``vowel`` or ``low_freq``
Returns
-------
numpy.array
Audio signal
int
Sampling rate of the file
"""
sound_file = self.discourse_sound_file(discourse)
if file_type == 'consonant':
path = os.path.expanduser(sound_file.consonant_file_path)
elif file_type == 'vowel':
path = os.path.expanduser(sound_file.vowel_file_path)
elif file_type == 'low_freq':
path = os.path.expanduser(sound_file.low_freq_file_path)
else:
path = os.path.expanduser(sound_file.file_path)
signal, sr = librosa.load(path, sr=None)
return signal, sr
def load_waveform(self, discourse, file_type='consonant', begin=None, end=None):
"""
Loads a segment of a larger audio file. If ``begin`` is unspecified, the segment will start at the beginning of
the audio file, and if ``end`` is unspecified, the segment will end at the end of the audio file.
Parameters
----------
discourse : str
Name of the audio file to load
file_type : str
One of ``consonant``, ``vowel`` or ``low_freq``
begin : float, optional
Timestamp in seconds
end : float, optional
Timestamp in seconds
Returns
-------
numpy.array
Audio signal
int
Sampling rate of the file
"""
sf = self.discourse_sound_file(discourse)
if file_type == 'consonant':
file_path = sf['consonant_file_path']
elif file_type == 'vowel':
file_path = sf['vowel_file_path']
elif file_type == 'low_freq':
file_path = sf['low_freq_file_path']
else:
file_path = sf['file_path']
return load_waveform(file_path, begin, end)
def generate_spectrogram(self, discourse, file_type='consonant', begin=None, end=None):
"""
Generate a spectrogram from an audio file. If ``begin`` is unspecified, the segment will start at the beginning of
the audio file, and if ``end`` is unspecified, the segment will end at the end of the audio file.
Parameters
----------
discourse : str
Name of the audio file to load
file_type : str
One of ``consonant``, ``vowel`` or ``low_freq``
begin : float
Timestamp in seconds
end : float
Timestamp in seconds
Returns
-------
numpy.array
Spectrogram information
float
Time step between each window
float
Frequency step between each frequency bin
"""
signal, sr = self.load_waveform(discourse, file_type, begin, end)
return generate_spectrogram(signal, sr)
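    # Hedged usage sketch (the corpus object `c` and discourse name are
    # hypothetical; return values follow the docstring above):
    #
    #     spec, time_step, freq_step = c.generate_spectrogram(
    #         'speech1', file_type='vowel', begin=1.0, end=2.0)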
def analyze_pitch(self, source='praat', algorithm='base',
absolute_min_pitch=50, absolute_max_pitch=500, adjusted_octaves=1,
stop_check=None, call_back=None, multiprocessing=True):
"""
Analyze pitch tracks and save them to the database.
See :meth:`polyglotdb.acoustics.pitch.base.analyze_pitch` for more details.
Parameters
----------
source : str
Program to use for analyzing pitch, either ``praat`` or ``reaper``
algorithm : str
Algorithm to use, ``base``, ``gendered``, or ``speaker_adjusted``
absolute_min_pitch : int
Absolute pitch floor
absolute_max_pitch : int
Absolute pitch ceiling
adjusted_octaves : int
How many octaves around the speaker's mean pitch to set the speaker adjusted pitch floor and ceiling
stop_check : callable
Function to check whether processing should stop early
call_back : callable
Function to report progress
multiprocessing : bool
Flag whether to use multiprocessing or threading
"""
analyze_pitch(self, source, algorithm, stop_check=stop_check, call_back=call_back, multiprocessing=multiprocessing,
absolute_min_pitch=absolute_min_pitch, absolute_max_pitch=absolute_max_pitch, adjusted_octaves=adjusted_octaves)
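    # Hedged usage sketch, assuming a connected polyglotdb CorpusContext
    # (which mixes in AudioContext) and an imported corpus named 'my_corpus':
    #
    #     with CorpusContext('my_corpus') as c:
    #         c.analyze_pitch(source='praat', algorithm='speaker_adjusted')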
def analyze_utterance_pitch(self, utterance, source='praat', **kwargs):
"""
Analyze a single utterance's pitch track.
See :meth:`polyglotdb.acoustics.pitch.base.analyze_utterance_pitch` for more details.
Parameters
----------
utterance : str
Utterance ID from Neo4j
source : str
Program to use for analyzing pitch, either ``praat`` or ``reaper``
kwargs
Additional settings to use in analyzing pitch
Returns
-------
:class:`~polyglotdb.acoustics.classes.Track`
Pitch track
"""
return analyze_utterance_pitch(self, utterance, source, **kwargs)
def update_utterance_pitch_track(self, utterance, new_track):
"""
Save a pitch track for the specified utterance.
See :meth:`polyglotdb.acoustics.pitch.base.update_utterance_pitch_track` for more details.
Parameters
----------
utterance : str
Utterance ID from Neo4j
new_track : list or :class:`~polyglotdb.acoustics.classes.Track`
Pitch track
Returns
-------
int
Time stamp of update
"""
return update_utterance_pitch_track(self, utterance, new_track)
def analyze_vot(self, classifier,
stop_label="stops",
stop_check=None,
call_back=None,
multiprocessing=False,
overwrite_edited=False,
vot_min=5,
vot_max=100,
window_min=-30,
window_max=30):
"""
Compute VOTs for stops and save them to the database.
See :meth:`polyglotdb.acoustics.vot.base.analyze_vot` for more details.
Parameters
----------
classifier : str
Path to an AutoVOT classifier model
stop_label : str
Label of subset to analyze
vot_min : int
Minimum VOT in ms
vot_max : int
Maximum VOT in ms
window_min : int
Window minimum in ms
window_max : int
            Window maximum in ms
overwrite_edited : bool
            If True, overwrite VOTs that have the "edited" property set to true
call_back : callable
call back function, optional
stop_check : callable
stop check function, optional
multiprocessing : bool
Flag to use multiprocessing, otherwise will use threading
"""
analyze_vot(self, classifier, stop_label=stop_label, stop_check=stop_check,
call_back=call_back, multiprocessing=multiprocessing,
overwrite_edited=overwrite_edited,
vot_min=vot_min, vot_max=vot_max, window_min=window_min,
window_max=window_max)
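    # Hedged usage sketch (the classifier path is hypothetical; 'stops' must
    # be an already encoded phone subset, per the docstring above):
    #
    #     c.analyze_vot('/path/to/autovot_classifier', stop_label='stops',
    #                   vot_min=5, vot_max=100)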
def analyze_formant_points(self, stop_check=None, call_back=None, multiprocessing=True,
vowel_label=None):
"""
        Compute formant point measures and save them to the database
See :meth:`polyglotdb.acoustics.formants.base.analyze_formant_points` for more details.
Parameters
----------
stop_check : callable
Function to check whether to terminate early
call_back : callable
Function to report progress
multiprocessing : bool
Flag to use multiprocessing, defaults to True, if False uses threading
vowel_label : str, optional
Optional subset of phones to compute tracks over. If None, then tracks over utterances are computed.
"""
data = analyze_formant_points(self, stop_check=stop_check, call_back=call_back,
multiprocessing=multiprocessing, vowel_label=vowel_label)
save_formant_point_data(self, data)
def analyze_formant_tracks(self, source='praat', stop_check=None, call_back=None, multiprocessing=True,
vowel_label=None):
"""
Compute formant tracks and save them to the database
See :meth:`polyglotdb.acoustics.formants.base.analyze_formant_tracks` for more details.
Parameters
----------
source : str
Program to compute formants
stop_check : callable
Function to check whether to terminate early
call_back : callable
Function to report progress
multiprocessing : bool
Flag to use multiprocessing, defaults to True, if False uses threading
vowel_label : str, optional
Optional subset of phones to compute tracks over. If None, then tracks over utterances are computed.
"""
analyze_formant_tracks(self, source=source, stop_check=stop_check, call_back=call_back,
multiprocessing=multiprocessing, vowel_label=vowel_label)
def analyze_intensity(self, source='praat', stop_check=None, call_back=None, multiprocessing=True):
"""
Compute intensity tracks and save them to the database
        See :meth:`polyglotdb.acoustics.intensity.analyze_intensity` for more details.
Parameters
----------
source : str
Program to compute intensity (only ``praat`` is supported)
stop_check : callable
Function to check whether to terminate early
call_back : callable
Function to report progress
multiprocessing : bool
Flag to use multiprocessing, defaults to True, if False uses threading
"""
analyze_intensity(self, source, stop_check, call_back, multiprocessing=multiprocessing)
def analyze_script(self, phone_class=None, subset=None, annotation_type=None, script_path=None, duration_threshold=0.01, arguments=None, stop_check=None,
call_back=None, multiprocessing=True, file_type='consonant'):
"""
Use a Praat script to analyze annotation types in the corpus. The Praat script must return properties per phone (i.e.,
point measures, not a track), and these properties will be saved to the Neo4j database.
        See :meth:`polyglotdb.acoustics.other.analyze_script` for more details.
Parameters
----------
phone_class : str
DEPRECATED, the name of an already encoded subset of phones on which the analysis will be run
subset : str, optional
the name of an already encoded subset of an annotation type, on which the analysis will be run
annotation_type : str
the type of annotation that the analysis will go over
script_path : str
Path to the Praat script
duration_threshold : float
Minimum duration that phones should be to be analyzed
arguments : list
Arguments to pass to the Praat script
stop_check : callable
Function to check whether to terminate early
call_back : callable
Function to report progress
multiprocessing : bool
Flag to use multiprocessing, defaults to True, if False uses threading
file_type : str
Sampling rate type to use, one of ``consonant``, ``vowel``, or ``low_freq``
Returns
-------
list
List of the names of newly added properties to the Neo4j database
"""
return analyze_script(self, subset=subset, annotation_type=annotation_type, phone_class=phone_class, script_path=script_path, duration_threshold=duration_threshold,
arguments=arguments,
stop_check=stop_check, call_back=call_back, multiprocessing=multiprocessing)
def analyze_track_script(self, acoustic_name, properties, script_path, duration_threshold=0.01,phone_class=None,
arguments=None, stop_check=None, call_back=None, multiprocessing=True, file_type='consonant'):
"""
Use a Praat script to analyze phones in the corpus. The Praat script must return a track, and these tracks will
be saved to the InfluxDB database.
        See :meth:`polyglotdb.acoustics.other.analyze_track_script` for more details.
Parameters
----------
acoustic_name : str
Name of the acoustic measure
properties : list
List of tuples of the form (``property_name``, ``Type``)
script_path : str
Path to the Praat script
duration_threshold : float
Minimum duration that phones should be to be analyzed
phone_class : str
Name of the phone subset to analyze
arguments : list
Arguments to pass to the Praat script
stop_check : callable
Function to check whether to terminate early
call_back : callable
Function to report progress
multiprocessing : bool
Flag to use multiprocessing, defaults to True, if False uses threading
file_type : str
Sampling rate type to use, one of ``consonant``, ``vowel``, or ``low_freq``
"""
return analyze_track_script(self, acoustic_name, properties, script_path, duration_threshold=duration_threshold,
arguments=arguments, phone_class=phone_class,
stop_check=stop_check, call_back=call_back, multiprocessing=multiprocessing, file_type=file_type)
def reset_formant_points(self):
"""
Reset formant point measures encoded in the corpus
"""
encoded_props = []
for prop in ['F1', 'F2', 'F3', 'B1', 'B2', 'B3', 'A1', 'A2', 'A3']:
if self.hierarchy.has_token_property('phone', prop):
encoded_props.append(prop)
q = self.query_graph(getattr(self, self.phone_name)).set_properties(**{x: None for x in encoded_props})
def genders(self):
"""
Gets all values of speaker property named ``gender`` in the Neo4j database
Returns
-------
list
List of gender values
"""
res = self.execute_cypher(
'''MATCH (s:Speaker:{corpus_name}) RETURN s.gender as gender'''.format(corpus_name=self.cypher_safe_name))
genders = set()
for s in res:
g = s['gender']
if g is None:
g = ''
genders.add(g)
return sorted(genders)
def reset_acoustics(self):
"""
Reset all acoustic measures currently encoded
"""
self.acoustic_client().drop_database(self.corpus_name)
if self.hierarchy.acoustics:
self.hierarchy.acoustic_properties = {}
self.encode_hierarchy()
def reset_acoustic_measure(self, acoustic_type):
"""
Reset a given acoustic measure
Parameters
----------
acoustic_type : str
Name of the acoustic measurement to reset
"""
self.acoustic_client().query('''DROP MEASUREMENT "{}";'''.format(acoustic_type))
if acoustic_type in self.hierarchy.acoustics:
self.hierarchy.acoustic_properties = {k: v for k, v in self.hierarchy.acoustic_properties.items() if
k != acoustic_type}
self.encode_hierarchy()
def reset_vot(self):
"""
Reset all VOT measurements in the corpus
"""
self.execute_cypher('''MATCH (v:vot:{corpus_name}) DETACH DELETE v'''.format(corpus_name=self.cypher_safe_name))
if 'phone' in self.hierarchy.subannotations:
if 'vot' in self.hierarchy.subannotations["phone"]:
self.hierarchy.subannotation_properties.pop("vot")
self.hierarchy.subannotations["phone"].remove("vot")
self.encode_hierarchy()
def acoustic_client(self):
"""
Generate a client to connect to the InfluxDB for the corpus
Returns
-------
InfluxDBClient
Client through which to run queries and writes
"""
client = InfluxDBClient(**self.config.acoustic_connection_kwargs)
databases = client.get_list_database()
if self.corpus_name not in databases:
client.create_database(self.corpus_name)
return client
def discourse_audio_directory(self, discourse):
"""
Return the directory for the stored audio files for a discourse
"""
return os.path.join(self.config.audio_dir, discourse)
def discourse_sound_file(self, discourse):
"""
Get details for the audio file paths for a specified discourse.
Parameters
----------
discourse : str
Name of the audio file in the corpus
Returns
-------
dict
Information for the audio file path
"""
statement = '''MATCH (d:Discourse:{corpus_name}) WHERE d.name = $discourse_name return d'''.format(
corpus_name=self.cypher_safe_name)
results = self.execute_cypher(statement, discourse_name=discourse)
for r in results:
d = r['d']
break
else:
raise Exception('Could not find discourse {}'.format(discourse))
return d
def utterance_sound_file(self, utterance_id, file_type='consonant'):
"""
Generate an audio file just for a single utterance in an audio file.
Parameters
----------
utterance_id : str
Utterance ID from Neo4j
file_type : str
Sampling rate type to use, one of ``consonant``, ``vowel``, or ``low_freq``
Returns
-------
str
Path to the generated sound file
"""
q = self.query_graph(self.utterance).filter(self.utterance.id == utterance_id).columns(
self.utterance.begin.column_name('begin'),
self.utterance.end.column_name('end'),
self.utterance.discourse.name.column_name('discourse'))
utterance_info = q.all()[0]
path = os.path.join(self.discourse_audio_directory(utterance_info['discourse']),
'{}_{}.wav'.format(utterance_id, file_type))
if os.path.exists(path):
return path
fname = self.discourse_sound_file(utterance_info['discourse'])["{}_file_path".format(file_type)]
subprocess.call(['sox', fname, path, 'trim', str(utterance_info['begin']),
str(utterance_info['end'] - utterance_info['begin'])])
return path
def has_all_sound_files(self):
"""
Check whether all discourses have a sound file
Returns
-------
bool
True if a sound file exists for each discourse name in corpus,
False otherwise
"""
if self._has_all_sound_files is not None:
return self._has_all_sound_files
        discourses = self.discourses
        self._has_all_sound_files = False
        for d in discourses:
            sf = self.discourse_sound_file(d)
            if sf is None:
                break
            if not os.path.exists(sf['file_path']):
                break
        else:
            self._has_all_sound_files = True
        return self._has_all_sound_files
@property
def has_sound_files(self):
"""
Check whether any discourses have a sound file
Returns
-------
bool
            True if there are any sound files at all, False otherwise
"""
if self._has_sound_files is None:
self._has_sound_files = False
for d in self.discourses:
sf = self.discourse_sound_file(d)
if sf['file_path'] is not None:
self._has_sound_files = True
break
return self._has_sound_files
def execute_influxdb(self, query):
"""
Execute an InfluxDB query for the corpus
Parameters
----------
query : str
Query to run
Returns
-------
:class:`influxdb.resultset.ResultSet`
Results of the query
"""
client = self.acoustic_client()
try:
result = client.query(query)
except InfluxDBClientError:
print('There was an issue with the following query:')
print(query)
raise
return result
def get_utterance_acoustics(self, acoustic_name, utterance_id, discourse, speaker):
"""
        Get an acoustic track for a given utterance
Parameters
----------
acoustic_name : str
Name of acoustic track
utterance_id : str
ID of the utterance from the Neo4j database
discourse : str
Name of the discourse
speaker : str
Name of the speaker
Returns
-------
:class:`polyglotdb.acoustics.classes.Track`
Track object
"""
properties = [x[0] for x in self.hierarchy.acoustic_properties[acoustic_name]]
property_names = ["{}".format(x) for x in properties]
columns = '"time", {}'.format(', '.join(property_names))
speaker = speaker.replace("'", r"\'") # Escape apostrophes
discourse = discourse.replace("'", r"\'") # Escape apostrophes
query = '''select {} from "{}"
WHERE "utterance_id" = '{}'
AND "discourse" = '{}'
AND "speaker" = '{}';'''.format(columns, acoustic_name, utterance_id, discourse, speaker)
result = self.execute_influxdb(query)
track = Track()
for r in result.get_points(acoustic_name):
s = to_seconds(r['time'])
p = TimePoint(s)
for name in properties:
p.add_value(name, r[name])
track.add(p)
return track
def get_acoustic_measure(self, acoustic_name, discourse, begin, end, channel=0, relative_time=False, **kwargs):
"""
        Get an acoustic track for a given discourse and time range
Parameters
----------
acoustic_name : str
Name of acoustic track
discourse : str
Name of the discourse
begin : float
Beginning of time range
end : float
End of time range
channel : int, defaults to 0
Channel of the audio file
relative_time : bool, defaults to False
Flag for retrieving relative time instead of absolute time
kwargs : kwargs
Tags to filter on
Returns
-------
:class:`polyglotdb.acoustics.classes.Track`
Track object
"""
begin = Decimal(begin).quantize(Decimal('0.001'))
end = Decimal(end).quantize(Decimal('0.001'))
num_points = kwargs.pop('num_points', 0)
filter_string = generate_filter_string(discourse, begin, end, channel, num_points, kwargs)
properties = [x[0] for x in self.hierarchy.acoustic_properties[acoustic_name]]
property_names = ["{}".format(x) for x in properties]
if num_points:
columns = ', '.join(['mean({})'.format(x) for x in property_names])
else:
columns = '"time", {}'.format(', '.join(property_names))
query = '''select {} from "{}"
{};'''.format(columns, acoustic_name, filter_string)
result = self.execute_influxdb(query)
track = Track()
for r in result.get_points(acoustic_name):
s = to_seconds(r['time'])
if relative_time:
s = (s - begin) / (end - begin)
p = TimePoint(s)
for name in properties:
p.add_value(name, r[name])
track.add(p)
return track
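    # Hedged usage sketch, assuming a pitch track has been encoded under the
    # name 'pitch': passing num_points downsamples the raw track to evenly
    # spaced mean values (see generate_filter_string above).
    #
    #     track = c.get_acoustic_measure('pitch', 'speech1', 1.0, 2.0,
    #                                    num_points=10)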
def _save_measurement_tracks(self, acoustic_name, tracks, speaker):
data = []
measures = self.hierarchy.acoustic_properties[acoustic_name]
for seg, track in tracks.items():
if not len(track.keys()):
continue
file_path, begin, end, channel, utterance_id = seg.file_path, seg.begin, seg.end, seg.channel, seg[
'utterance_id']
res = self.execute_cypher(
'MATCH (d:Discourse:{corpus_name}) where d.low_freq_file_path = $file_path OR '
'd.vowel_file_path = $file_path OR '
'd.consonant_file_path = $file_path '
'RETURN d.name as name'.format(
corpus_name=self.cypher_safe_name), file_path=file_path)
for r in res:
discourse = r['name']
phone_type = getattr(self, self.phone_name)
min_time = min(track.keys())
max_time = max(track.keys())
if seg['annotation_type'] == 'phone':
set_label = seg['label']
else:
set_label = None
q = self.query_graph(phone_type).filter(phone_type.discourse.name == discourse)
q = q.filter(phone_type.utterance.id == utterance_id)
q = q.filter(phone_type.end >= min_time).filter(phone_type.begin <= max_time)
q = q.columns(phone_type.label.column_name('label'),
phone_type.begin.column_name('begin'),
phone_type.end.column_name('end')).order_by(phone_type.begin)
phones = [(x['label'], x['begin'], x['end']) for x in q.all()]
for time_point, value in track.items():
fields = {}
for name, type in measures:
v = sanitize_value(value[name], type)
if v is not None:
fields[name] = v
elif type in [int, float]:
fields[name] = type(-1)
if not fields:
continue
if set_label is None:
label = None
for i, p in enumerate(phones):
if p[1] > time_point:
break
label = p[0]
if i == len(phones) - 1:
break
else:
label = None
else:
label = set_label
if label is None:
continue
t_dict = {'speaker': speaker, 'discourse': discourse, 'channel': channel}
fields['phone'] = label
fields['utterance_id'] = utterance_id
d = {'measurement': acoustic_name,
'tags': t_dict,
'time': s_to_ms(time_point),
'fields': fields
}
data.append(d)
self.acoustic_client().write_points(data, batch_size=1000, time_precision='ms')
def _save_measurement(self, sound_file, track, acoustic_name, **kwargs):
if not len(track.keys()):
return
if isinstance(sound_file, str):
sound_file = self.discourse_sound_file(sound_file)
if sound_file is None:
return
measures = self.hierarchy.acoustic_properties[acoustic_name]
if kwargs.get('channel', None) is None:
kwargs['channel'] = 0
data = []
tag_dict = {}
if isinstance(sound_file, str):
kwargs['discourse'] = sound_file
else:
kwargs['discourse'] = sound_file['name']
utterance_id = kwargs.pop('utterance_id', None)
tag_dict.update(kwargs)
phone_type = getattr(self, self.phone_name)
min_time = min(track.keys())
max_time = max(track.keys())
q = self.query_graph(phone_type).filter(phone_type.discourse.name == kwargs['discourse'])
q = q.filter(phone_type.end >= min_time).filter(phone_type.begin <= max_time)
q = q.columns(phone_type.label.column_name('label'),
phone_type.begin.column_name('begin'),
phone_type.end.column_name('end'),
phone_type.speaker.name.column_name('speaker')).order_by(phone_type.begin)
phones = [(x['label'], x['begin'], x['end'], x['speaker']) for x in q.all()]
for time_point, value in track.items():
fields = {}
for name, type in measures:
v = sanitize_value(value[name], type)
if v is not None:
fields[name] = v
if not fields:
continue
label = None
speaker = None
for i, p in enumerate(phones):
if p[1] > time_point:
break
label = p[0]
speaker = p[-1]
if i == len(phones) - 1:
break
else:
label = None
speaker = None
if speaker is None:
continue
t_dict = {'speaker': speaker}
t_dict.update(tag_dict)
if utterance_id is not None:
fields['utterance_id'] = utterance_id
fields['phone'] = label
d = {'measurement': acoustic_name,
'tags': t_dict,
'time': s_to_nano(time_point),
'fields': fields
}
data.append(d)
self.acoustic_client().write_points(data, batch_size=1000)
def save_acoustic_track(self, acoustic_name, discourse, track, **kwargs):
"""
Save an acoustic track for a sound file
Parameters
----------
acoustic_name : str
Name of the acoustic type
discourse : str
Name of the discourse
track : :class:`~polyglotdb.acoustics.classes.Track`
Track to save
kwargs: kwargs
Tags to save for acoustic measurements
"""
self._save_measurement(discourse, track, acoustic_name, **kwargs)
def save_acoustic_tracks(self, acoustic_name, tracks, speaker):
"""
Save multiple acoustic tracks for a collection of analyzed segments
Parameters
----------
acoustic_name : str
Name of the acoustic type
tracks : iterable
Iterable of :class:`~polyglotdb.acoustics.classes.Track` objects to save
speaker : str
Name of the speaker of the tracks
"""
self._save_measurement_tracks(acoustic_name, tracks, speaker)
def discourse_has_acoustics(self, acoustic_name, discourse):
"""
Return whether a discourse has any specific acoustic values associated with it
Parameters
----------
acoustic_name : str
Name of the acoustic type
discourse : str
Name of the discourse
Returns
-------
bool
"""
if acoustic_name not in self.hierarchy.acoustics:
return False
discourse = discourse.replace("'", r"\'")
query = '''select * from "{}" WHERE "discourse" = '{}' LIMIT 1;'''.format(acoustic_name, discourse)
result = self.execute_influxdb(query)
if len(result) == 0:
return False
return True
def encode_acoustic_statistic(self, acoustic_name, statistic, by_phone=True, by_speaker=False):
"""
        Computes summary statistics for a given acoustic measure, on a by-speaker or by-phone basis (or both),
        and saves them as properties in the Neo4j database.
Parameters
----------
acoustic_name : str
Name of the acoustic type
statistic : str
One of `mean`, `median`, `stddev`, `sum`, `mode`, `count`
        by_phone : bool, defaults to True
            Flag for calculating summary statistic by phone
        by_speaker : bool, defaults to False
            Flag for calculating summary statistic by speaker
"""
if not by_speaker and not by_phone:
raise (Exception('Please specify either by_phone, by_speaker or both.'))
if acoustic_name not in self.hierarchy.acoustics:
raise (ValueError('Acoustic measure must be one of: {}.'.format(', '.join(self.hierarchy.acoustics))))
available_statistics = ['mean', 'median', 'stddev', 'sum', 'mode', 'count']
if statistic not in available_statistics:
raise ValueError('Statistic name should be one of: {}.'.format(', '.join(available_statistics)))
acoustic_name = acoustic_name.lower()
template = statistic + '("{0}") as "{0}"'
statistic_template = 'n.{statistic}_{measure} = d.{measure}'
measures = {x[0]: template.format(x[0]) for x in self.hierarchy.acoustic_properties[acoustic_name] if
x[1] in [int, float]}
if by_speaker and by_phone:
results = []
for p in self.phones:
query = '''select {} from "{}"
where "phone" = '{}' group by "speaker";'''.format(
', '.join(measures), acoustic_name, p)
influx_result = self.execute_influxdb(query)
for k, v in influx_result.items():
result = {'speaker': k[1]['speaker'], 'phone': p}
for measure in measures.keys():
result[measure] = list(v)[0][measure]
results.append(result)
set_statements = []
for measure in measures.keys():
set_statements.append(statistic_template.format(statistic=statistic, measure=measure))
statement = '''WITH $data as data
UNWIND data as d
MATCH (s:Speaker:{corpus_name}), (p:phone_type:{corpus_name})
WHERE p.label = d.phone AND s.name = d.speaker
WITH p, s, d
MERGE (s)<-[n:spoken_by]-(p)
WITH n, d
SET {set_statements}'''.format(corpus_name=self.cypher_safe_name,
                                           set_statements=',\n'.join(set_statements))
elif by_phone:
results = []
for p in self.phones:
query = '''select {} from "{}"
where "phone" = '{}';'''.format(', '.join(measures.values()),
acoustic_name, p)
influx_result = self.execute_influxdb(query)
result = {'phone': p}
for k, v in influx_result.items():
for measure in measures.keys():
result[measure] = list(v)[0][measure]
results.append(result)
set_statements = []
for measure in measures.keys():
set_statements.append(statistic_template.format(statistic=statistic, measure=measure))
statement = '''WITH $data as data
UNWIND data as d
MATCH (n:phone_type:{corpus_name})
WHERE n.label = d.phone
SET {set_statements}'''.format(corpus_name=self.cypher_safe_name,
                                           set_statements=',\n'.join(set_statements))
self.hierarchy.add_type_properties(self, 'phone',
[('{}_{}'.format(statistic, x), float) for x in measures.keys()])
elif by_speaker:
query = '''select {} from "{}" group by "speaker";'''.format(', '.join(measures), acoustic_name)
influx_result = self.execute_influxdb(query)
results = []
for k, v in influx_result.items():
result = {'speaker': k[1]['speaker']}
for measure in measures.keys():
result[measure] = list(v)[0][measure]
results.append(result)
set_statements = []
for measure in measures.keys():
set_statements.append(statistic_template.format(statistic=statistic, measure=measure))
statement = '''WITH $data as data
UNWIND data as d
MATCH (n:Speaker:{corpus_name})
WHERE n.name = d.speaker
SET {set_statements}'''.format(corpus_name=self.cypher_safe_name,
                                           set_statements=',\n'.join(set_statements))
self.hierarchy.add_speaker_properties(self,
[('{}_{}'.format(statistic, x), float) for x in measures.keys()])
self.execute_cypher(statement, data=results)
self.encode_hierarchy()
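    # Hedged usage sketch: with a pitch track encoded under 'pitch' whose
    # numeric property is assumed to be 'F0', setting both flags stores the
    # statistic (here 'mean_F0') on a spoken_by relationship between each
    # phone type and speaker, as built by the Cypher statement above:
    #
    #     c.encode_acoustic_statistic('pitch', 'mean',
    #                                 by_phone=True, by_speaker=True)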
def get_acoustic_statistic(self, acoustic_name, statistic, by_phone=True, by_speaker=False):
"""
Computes summary statistics on a by speaker or by phone basis (or both) for a given acoustic measure.
Parameters
----------
acoustic_name : str
Name of the acoustic type
statistic : str
One of `mean`, `median`, `stddev`, `sum`, `mode`, `count`
        by_phone : bool, defaults to True
            Flag for calculating summary statistic by phone
        by_speaker : bool, defaults to False
            Flag for calculating summary statistic by speaker
Returns
-------
dict
Dictionary where keys are phone/speaker/phone-speaker pairs and values are the summary statistic
of the acoustic measure
"""
if acoustic_name not in self.hierarchy.acoustics:
raise (ValueError('Acoustic measure must be one of: {}.'.format(', '.join(self.hierarchy.acoustics))))
if not by_speaker and not by_phone:
raise (Exception('Please specify either by_phone, by_speaker or both.'))
available_statistics = ['mean', 'median', 'stddev', 'sum', 'mode', 'count']
if statistic not in available_statistics:
raise ValueError('Statistic name should be one of: {}.'.format(', '.join(available_statistics)))
prop_template = 'n.{0} as {0}'
measures = ['{}_{}'.format(statistic, x[0]) for x in self.hierarchy.acoustic_properties[acoustic_name] if
x[1] in [int, float]]
returns = [prop_template.format(x) for x in measures]
if by_phone and by_speaker:
statement = '''MATCH (p:phone_type:{corpus_name})-[n:spoken_by]->(s:Speaker:{corpus_name})
return {return_list} LIMIT 1'''.format(corpus_name=self.cypher_safe_name, return_list=', '.join(returns))
results = self.execute_cypher(statement)
try:
first = results[0]
except IndexError:
first = None
if first is None:
self.encode_acoustic_statistic(acoustic_name, statistic, by_phone, by_speaker)
statement = '''MATCH (p:phone_type:{corpus_name})-[n:spoken_by]->(s:Speaker:{corpus_name})
return p.label as phone, s.name as speaker, {return_list}'''.format(
corpus_name=self.cypher_safe_name, return_list=', '.join(returns))
results = self.execute_cypher(statement)
results = {(x['speaker'], x['phone']): [x[n] for n in measures] for x in results}
elif by_phone:
if not self.hierarchy.has_type_property('phone', measures[0]):
self.encode_acoustic_statistic(acoustic_name, statistic, by_phone, by_speaker)
statement = '''MATCH (n:phone_type:{corpus_name})
return n.label as phone, {return_list}'''.format(
corpus_name=self.cypher_safe_name, return_list=', '.join(returns))
results = self.execute_cypher(statement)
results = {x['phone']: [x[n] for n in measures] for x in results}
elif by_speaker:
if not self.hierarchy.has_speaker_property(measures[0]):
self.encode_acoustic_statistic(acoustic_name, statistic, by_phone, by_speaker)
statement = '''MATCH (n:Speaker:{corpus_name})
return n.name as speaker, {return_list}'''.format(
corpus_name=self.cypher_safe_name, return_list=', '.join(returns))
results = self.execute_cypher(statement)
results = {x['speaker']: [x[n] for n in measures] for x in results}
return results
def reset_relativized_acoustic_measure(self, acoustic_name):
"""
Reset any relativized measures that have been encoded for a specified type of acoustics
Parameters
----------
acoustic_name : str
Name of the acoustic type
"""
if acoustic_name not in self.hierarchy.acoustics:
raise (ValueError('Acoustic measure must be one of: {}.'.format(', '.join(self.hierarchy.acoustics))))
measures = ', '.join(
['"{}"'.format(x[0]) for x in self.hierarchy.acoustic_properties[acoustic_name] if x[1] in [int, float]
and not x[0].endswith('relativized')])
to_remove = [x[0] for x in self.hierarchy.acoustic_properties[acoustic_name] if x[0].endswith('relativized')]
client = self.acoustic_client()
query = """SELECT "phone", {measures}, "utterance_id"
INTO "{name}_copy" FROM "{name}" GROUP BY *;""".format(name=acoustic_name, measures=measures)
client.query(query)
client.query('DROP MEASUREMENT "{}"'.format(acoustic_name))
client.query('SELECT * INTO "{0}" FROM "{0}_copy" GROUP BY *'.format(acoustic_name))
client.query('DROP MEASUREMENT "{}_copy"'.format(acoustic_name))
self.hierarchy.remove_acoustic_properties(self, acoustic_name, to_remove)
self.encode_hierarchy()
def relativize_acoustic_measure(self, acoustic_name, by_speaker=True, by_phone=False):
"""
        Relativize acoustic tracks by z-scoring the points (using by-speaker or by-phone means and standard
        deviations, or both) and save them as separate measures, e.g., F0_relativized from F0.
Parameters
----------
acoustic_name : str
Name of the acoustic measure
by_speaker : bool, defaults to True
Flag for relativizing by speaker
by_phone : bool, defaults to False
Flag for relativizing by phone
"""
if acoustic_name not in self.hierarchy.acoustics:
raise (ValueError('Acoustic measure must be one of: {}.'.format(', '.join(self.hierarchy.acoustics))))
if not by_speaker and not by_phone:
raise Exception('Relativization must be by phone, speaker, or both.')
client = self.acoustic_client()
phone_type = getattr(self, self.phone_name)
template = 'mean("{0}") as mean_{0}, stddev("{0}") as sd_{0}'
summary_data = {}
props = [x for x in self.hierarchy.acoustic_properties[acoustic_name] if
x[1] in [int, float] and not x[0].endswith('relativized')]
statistics = {x[0]: template.format(x[0]) for x in props}
aliases = {x[0]: ('mean_' + x[0], 'sd_' + x[0]) for x in props}
if by_phone:
for p in self.phones:
if by_speaker:
query = '''select {statistics} from "{acoustic_type}"
where "phone" = '{phone}' group by "speaker";'''.format(acoustic_type=acoustic_name,
statistics=', '.join(statistics.values()),
phone=p)
result = client.query(query)
for k, v in result.items():
v = list(v)
for measure, (mean_name, sd_name) in aliases.items():
summary_data[(k[1]['speaker'], p, measure)] = v[0][mean_name], v[0][sd_name]
else:
query = '''select {statistics} from "{acoustic_type}"
where "phone" = '{phone}';'''.format(acoustic_type=acoustic_name,
statistics=', '.join(statistics.values()), phone=p)
result = client.query(query)
for k, v in result.items():
v = list(v)
for measure, (mean_name, sd_name) in aliases.items():
summary_data[(p, measure)] = v[0][mean_name], v[0][sd_name]
else:
query = '''select {statistics} from "{acoustic_type}"
where "phone" != '' group by "speaker";'''.format(acoustic_type=acoustic_name,
statistics=', '.join(statistics.values()))
result = client.query(query)
for k, v in result.items():
v = list(v)
for measure, (mean_name, sd_name) in aliases.items():
summary_data[(k[1]['speaker'], measure)] = v[0][mean_name], v[0][sd_name]
for s in self.speakers:
s = s.replace("'", r"\'")
all_query = '''select * from "{acoustic_type}"
where "phone" != '' and "speaker" = '{speaker}';'''.format(acoustic_type=acoustic_name, speaker=s)
all_results = client.query(all_query)
data = []
for _, r in all_results.items():
for t_dict in r:
phone = t_dict.pop('phone')
utterance_id = t_dict.pop('utterance_id', '')
time_point = t_dict.pop('time')
fields = {}
for measure, (mean_name, sd_name) in aliases.items():
if by_speaker and by_phone:
mean_value, sd_value = summary_data[(t_dict['speaker'], phone, measure)]
elif by_phone and not by_speaker:
mean_value, sd_value = summary_data[(phone, measure)]
elif by_speaker:
mean_value, sd_value = summary_data[(t_dict['speaker'], measure)]
if sd_value is None:
continue
value = t_dict.pop(measure)
if value is None:
continue
                        t_dict.pop('{}_relativized'.format(measure), None)  # drop any stale relativized tag value
                        new_value = (value - mean_value) / sd_value
fields['{}_relativized'.format(measure)] = new_value
if not fields:
continue
time_point = s_to_ms(to_seconds(time_point))
d = {'measurement': acoustic_name,
'tags': t_dict,
"time": time_point,
"fields": fields
}
data.append(d)
client.write_points(data, batch_size=1000, time_precision='ms')
self.hierarchy.add_acoustic_properties(self, acoustic_name, [(x[0] +'_relativized', float) for x in props])
self.encode_hierarchy()
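    # Hedged usage sketch: for a pitch track whose numeric property is
    # assumed to be 'F0', this writes a z-scored 'F0_relativized' field
    # alongside 'F0', using per-speaker means and standard deviations:
    #
    #     c.relativize_acoustic_measure('pitch', by_speaker=True)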
def reassess_utterances(self, acoustic_name):
"""
Update utterance IDs in InfluxDB for more efficient querying if utterances have been re-encoded after acoustic
measures were encoded
Parameters
----------
acoustic_name : str
Name of the measure for which to update utterance IDs
"""
if acoustic_name not in self.hierarchy.acoustics:
raise (ValueError('Acoustic measure must be one of: {}.'.format(', '.join(self.hierarchy.acoustics))))
client = self.acoustic_client()
q = self.query_discourses()
q = q.columns(self.discourse.name.column_name('name'),
self.discourse.speakers.name.column_name('speakers'))
discourses = q.all()
props = [x[0] for x in self.hierarchy.acoustic_properties[acoustic_name]]
for d in discourses:
discourse_name = d['name']
data = []
for s in d['speakers']:
q = self.query_graph(self.utterance)
q = q.filter(self.utterance.discourse.name == discourse_name, self.utterance.speaker.name == s)
q = q.order_by(self.utterance.begin)
q = q.columns(self.utterance.id.column_name('utterance_id'),
self.utterance.begin.column_name('begin'),
self.utterance.end.column_name('end'))
utterances = q.all()
s = s.replace("'", r"\'")
discourse_name = discourse_name.replace("'", r"\'")
all_query = '''select * from "{}"
where "phone" != '' and
"discourse" = '{}' and
"speaker" = '{}';'''.format(acoustic_name, discourse_name, s)
all_results = client.query(all_query)
cur_index = 0
for _, r in all_results.items():
for t_dict in r:
phone = t_dict.pop('phone')
utterance_id = t_dict.pop('utterance_id', '')
for m in props:
value = t_dict.pop(m, None)
time_point = to_seconds(t_dict.pop('time'))
for i in range(cur_index, len(utterances)):
if utterances[i]['begin'] <= time_point <= utterances[i]['end']:
cur_index = i
break
time_point = s_to_ms(time_point)
d = {'measurement': acoustic_name,
'tags': t_dict,
"time": time_point,
"fields": {'utterance_id': utterances[cur_index]['utterance_id']}
}
data.append(d)
client.write_points(data, batch_size=1000, time_precision='ms')
|
{
"content_hash": "db3ef1392dcc84b089bb64ac9edb39c6",
"timestamp": "",
"source": "github",
"line_count": 1354,
"max_line_length": 172,
"avg_line_length": 41.540620384047266,
"alnum_prop": 0.5434697578494471,
"repo_name": "PhonologicalCorpusTools/PyAnnotationGraph",
"id": "e08a4258b122cd0ed56ee391a85a531ba399206a",
"size": "56246",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "polyglotdb/corpus/audio.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "308936"
},
{
"name": "Shell",
"bytes": "1157"
}
],
"symlink_target": ""
}
|
from __future__ import print_function # Python 2
import os
import time
import hashlib
import random
import socket
import logging
try:
import Queue as queue # noqa: N813
#except ModuleNotFoundError: # Python 3
except Exception:
import queue # Python 3
from json import dumps #, loads
from re import findall
from glob import glob
from pilot.common.errorcodes import ErrorCodes
from pilot.common.exception import ExcThread, PilotException #, JobAlreadyRunning
from pilot.info import infosys, JobData, InfoService, JobInfoProvider
from pilot.util import https
from pilot.util.auxiliary import get_batchsystem_jobid, get_job_scheduler_id, get_pilot_id, \
set_pilot_state, get_pilot_state, check_for_final_server_update, pilot_version_banner, is_virtual_machine, \
is_python3, show_memory_usage, has_instruction_sets, locate_core_file, get_display_info
from pilot.util.config import config
from pilot.util.common import should_abort, was_pilot_killed
from pilot.util.constants import PILOT_MULTIJOB_START_TIME, PILOT_PRE_GETJOB, PILOT_POST_GETJOB, PILOT_KILL_SIGNAL, LOG_TRANSFER_NOT_DONE, \
LOG_TRANSFER_IN_PROGRESS, LOG_TRANSFER_DONE, LOG_TRANSFER_FAILED, SERVER_UPDATE_TROUBLE, SERVER_UPDATE_FINAL, \
SERVER_UPDATE_UPDATING, SERVER_UPDATE_NOT_DONE
from pilot.util.container import execute
from pilot.util.filehandling import find_text_files, tail, is_json, copy, remove, write_json, establish_logging, write_file, \
create_symlink
from pilot.util.harvester import request_new_jobs, remove_job_request_file, parse_job_definition_file, \
is_harvester_mode, get_worker_attributes_file, publish_job_report, publish_work_report, get_event_status_file, \
publish_stageout_files
from pilot.util.jobmetrics import get_job_metrics
from pilot.util.math import mean
from pilot.util.middleware import containerise_general_command
from pilot.util.monitoring import job_monitor_tasks, check_local_space
from pilot.util.monitoringtime import MonitoringTime
from pilot.util.processes import cleanup, threads_aborted, kill_process, kill_processes
from pilot.util.proxy import get_distinguished_name
from pilot.util.queuehandling import scan_for_jobs, put_in_queue, queue_report, purge_queue
from pilot.util.timing import add_to_pilot_timing, timing_report, get_postgetjob_time, get_time_since, time_stamp
from pilot.util.workernode import get_disk_space, collect_workernode_info, get_node_name, get_cpu_model
logger = logging.getLogger(__name__)
errors = ErrorCodes()
def control(queues, traces, args):
"""
Main function of job control.
:param queues: internal queues for job handling.
:param traces: tuple containing internal pilot states.
:param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
:return:
"""
targets = {'validate': validate, 'retrieve': retrieve, 'create_data_payload': create_data_payload,
'queue_monitor': queue_monitor, 'job_monitor': job_monitor, 'fast_job_monitor': fast_job_monitor}
threads = [ExcThread(bucket=queue.Queue(), target=target, kwargs={'queues': queues, 'traces': traces, 'args': args},
name=name) for name, target in list(targets.items())] # Python 2/3
[thread.start() for thread in threads]
# if an exception is thrown, the graceful_stop will be set by the ExcThread class run() function
while not args.graceful_stop.is_set():
for thread in threads:
bucket = thread.get_bucket()
try:
exc = bucket.get(block=False)
except queue.Empty:
pass
else:
exc_type, exc_obj, exc_trace = exc
logger.warning("thread \'%s\' received an exception from bucket: %s", thread.name, exc_obj)
# deal with the exception
# ..
thread.join(0.1)
time.sleep(0.1)
time.sleep(0.5)
logger.debug('job control ending since graceful_stop has been set')
if args.abort_job.is_set():
if traces.pilot['command'] == 'aborting':
logger.warning('jobs are aborting')
elif traces.pilot['command'] == 'abort':
logger.warning('job control detected a set abort_job (due to a kill signal)')
traces.pilot['command'] = 'aborting'
# find all running jobs and stop them, find all jobs in queues relevant to this module
#abort_jobs_in_queues(queues, args.signal)
# proceed to set the job_aborted flag?
if threads_aborted():
logger.debug('will proceed to set job_aborted')
args.job_aborted.set()
else:
logger.debug('will not set job_aborted yet')
logger.debug('[job] control thread has finished')
# test kill signal during end of generic workflow
#import signal
#os.kill(os.getpid(), signal.SIGBUS)
def _validate_job(job):
"""
Verify job parameters for specific problems.
:param job: job object.
:return: Boolean.
"""
pilot_user = os.environ.get('PILOT_USER', 'generic').lower()
user = __import__('pilot.user.%s.common' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3
container = __import__('pilot.user.%s.container' % pilot_user, globals(), locals(), [user], 0) # Python 2/3
# should a container be used for the payload?
try:
kwargs = {'job': job}
job.usecontainer = container.do_use_container(**kwargs)
except Exception as error:
logger.warning('exception caught: %s', error)
    return bool(user.verify_job(job))
def verify_error_code(job):
"""
Make sure an error code is properly set.
    This function makes sure that job.piloterrorcode is always set for a failed/holding job, i.e. that it is not left
    at zero when only the job.piloterrorcodes list has been populated. It also negates the sign of the error code if
    the error is found to be recoverable by a later job (user jobs only).
:param job: job object.
:return:
"""
if job.piloterrorcode == 0 and len(job.piloterrorcodes) > 0:
logger.warning('piloterrorcode set to first piloterrorcodes list entry: %s', str(job.piloterrorcodes))
job.piloterrorcode = job.piloterrorcodes[0]
if job.piloterrorcode != 0 and job.is_analysis():
if errors.is_recoverable(code=job.piloterrorcode):
job.piloterrorcode = -abs(job.piloterrorcode)
job.state = 'failed'
logger.info('failed user job is recoverable (error code=%s)', job.piloterrorcode)
else:
logger.info('failed user job is not recoverable')
else:
logger.info('verified error code')
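# Illustrative (the error codes below are placeholders, not real pilot codes):
# with job.piloterrorcode == 0 and job.piloterrorcodes == [1234, 5678], the
# primary code 1234 is promoted to job.piloterrorcode; if that code were then
# deemed recoverable for an analysis job, it would be negated to -1234.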
def get_proper_state(job, state):
"""
Return a proper job state to send to server.
This function should only return 'starting', 'running', 'finished', 'holding' or 'failed'.
    If the internal job.serverstate is not yet set, it means this is the first server update, i.e. 'starting' should
    be sent.
:param job: job object.
:param state: internal pilot state (string).
:return: valid server state (string).
"""
    if job.serverstate in ('finished', 'failed'):
        pass
    elif job.serverstate == "" and state not in ('finished', 'failed'):
        job.serverstate = 'starting'
    elif state in ('finished', 'failed', 'holding'):
        job.serverstate = state
    else:
        job.serverstate = 'running'
return job.serverstate
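# Illustrative state mapping for a fresh job, assuming job.serverstate starts out empty:
#   get_proper_state(job, 'stagein')  -> 'starting'  (first heartbeat)
#   get_proper_state(job, 'running')  -> 'running'
#   get_proper_state(job, 'holding')  -> 'holding'
#   get_proper_state(job, 'finished') -> 'finished'  (frozen from then on)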
def publish_harvester_reports(state, args, data, job, final):
"""
Publish all reports needed by Harvester.
:param state: job state (string).
:param args: pilot args object.
:param data: data structure for server update (dictionary).
:param job: job object.
:param final: is this the final update? (Boolean).
:return: True if successful, False otherwise (Boolean).
"""
# write part of the heartbeat message to worker attributes files needed by Harvester
path = get_worker_attributes_file(args)
# add jobStatus (state) for Harvester
data['jobStatus'] = state
# publish work report
if not publish_work_report(data, path):
logger.debug('failed to write to workerAttributesFile %s', path)
return False
# check if we are in final state then write out information for output files
if final:
# Use the job information to write Harvester event_status.dump file
event_status_file = get_event_status_file(args)
if publish_stageout_files(job, event_status_file):
logger.debug('wrote log and output files to file %s', event_status_file)
else:
logger.warning('could not write log and output files to file %s', event_status_file)
return False
# publish job report
_path = os.path.join(job.workdir, config.Payload.jobreport)
if os.path.exists(_path):
if publish_job_report(job, args, config.Payload.jobreport):
logger.debug('wrote job report file')
return True
else:
logger.warning('failed to write job report file')
return False
else:
logger.info('finished writing various report files in Harvester mode')
return True
def write_heartbeat_to_file(data):
"""
Write heartbeat dictionary to file.
This is only done when server updates are not wanted.
:param data: server data (dictionary).
:return: True if successful, False otherwise (Boolean).
"""
path = os.path.join(os.environ.get('PILOT_HOME'), config.Pilot.heartbeat_message)
if write_json(path, data):
logger.debug('heartbeat dictionary: %s', data)
logger.debug('wrote heartbeat to file %s', path)
return True
else:
return False
def send_state(job, args, state, xml=None, metadata=None, test_tobekilled=False):
"""
Update the server (send heartbeat message).
Interpret and handle any server instructions arriving with the updateJob back channel.
:param job: job object.
:param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
:param state: job state (string).
:param xml: optional metadata xml (string).
:param metadata: job report metadata read as a string.
:param test_tobekilled: emulate a tobekilled command (boolean).
:return: boolean (True if successful, False otherwise).
"""
state = get_proper_state(job, state)
# should the pilot make any server updates?
if not args.update_server:
logger.info('pilot will not update the server (heartbeat message will be written to file)')
tag = 'sending' if args.update_server else 'writing'
    if state in ('finished', 'failed', 'holding'):
final = True
os.environ['SERVER_UPDATE'] = SERVER_UPDATE_UPDATING
logger.info('job %s has %s - %s final server update', job.jobid, state, tag)
# make sure that job.state is 'failed' if there's a set error code
if job.piloterrorcode or job.piloterrorcodes:
logger.warning('making sure that job.state is set to failed since a pilot error code is set')
state = 'failed'
job.state = state
# make sure an error code is properly set
elif state != 'finished':
verify_error_code(job)
else:
final = False
logger.info('job %s has state \'%s\' - %s heartbeat', job.jobid, state, tag)
# build the data structure needed for getJob, updateJob
data = get_data_structure(job, state, args, xml=xml, metadata=metadata)
# write the heartbeat message to file if the server is not to be updated by the pilot (Nordugrid mode)
if not args.update_server:
        logger.debug('is_harvester_mode(args): %s', is_harvester_mode(args))
# if in harvester mode write to files required by harvester
if is_harvester_mode(args):
return publish_harvester_reports(state, args, data, job, final)
else:
# store the file in the main workdir
return write_heartbeat_to_file(data)
try:
if config.Pilot.pandajob == 'real':
time_before = int(time.time())
max_attempts = 10
attempt = 0
done = False
while attempt < max_attempts and not done:
logger.info('job update attempt %d/%d', attempt + 1, max_attempts)
# get the URL for the PanDA server from pilot options or from config
pandaserver = get_panda_server(args.url, args.port)
res = https.request('{pandaserver}/server/panda/updateJob'.format(pandaserver=pandaserver), data=data)
if res is not None:
done = True
attempt += 1
time_after = int(time.time())
logger.info('server updateJob request completed in %ds for job %s', time_after - time_before, job.jobid)
logger.info("server responded with: res = %s", str(res))
show_memory_usage()
if res is not None:
# does the server update contain any backchannel information? if so, update the job object
handle_backchannel_command(res, job, args, test_tobekilled=test_tobekilled)
if final:
os.environ['SERVER_UPDATE'] = SERVER_UPDATE_FINAL
logger.debug('set SERVER_UPDATE=SERVER_UPDATE_FINAL')
return True
else:
logger.info('skipping job update for fake test job')
return True
except Exception as error:
logger.warning('exception caught while sending https request: %s', error)
logger.warning('possibly offending data: %s', data)
if final:
os.environ['SERVER_UPDATE'] = SERVER_UPDATE_TROUBLE
logger.debug('set SERVER_UPDATE=SERVER_UPDATE_TROUBLE')
return False
def get_job_status_from_server(job_id, url, port):
"""
Return the current status of job <jobId> from the dispatcher.
    Typical dispatcher response: 'status=finished&StatusCode=0'
    StatusCode  0: succeeded
               10: time-out
               20: general error
               30: failed
    In the case of time-out, the dispatcher will be asked one more time after 10 s.
:param job_id: PanDA job id (int).
:param url: PanDA server URL (string).
:param port: PanDA server port (int).
:return: status (string; e.g. holding), attempt_nr (int), status_code (int)
"""
status = 'unknown'
attempt_nr = 0
status_code = 0
if config.Pilot.pandajob == 'fake':
return status, attempt_nr, status_code
data = {}
data['ids'] = job_id
# get the URL for the PanDA server from pilot options or from config
pandaserver = get_panda_server(url, port)
# ask dispatcher about lost job status
trial = 1
max_trials = 2
while trial <= max_trials:
try:
# open connection
ret = https.request('{pandaserver}/server/panda/getStatus'.format(pandaserver=pandaserver), data=data)
response = ret[1]
logger.info("response: %s", str(response))
if response:
try:
                    # decode the response, e.g.
                    # response = {'status': 'notfound', 'attemptNr': '0', 'StatusCode': '0'}
status = response['status'] # e.g. 'holding'
attempt_nr = int(response['attemptNr']) # e.g. '0'
status_code = int(response['StatusCode']) # e.g. '0'
except Exception as error:
logger.warning(
"exception: dispatcher did not return allowed values: %s, %s", str(ret), error)
status = "unknown"
attempt_nr = -1
status_code = 20
else:
logger.debug('server job status=%s, attempt_nr=%d, status_code=%d', status, attempt_nr, status_code)
else:
logger.warning("dispatcher did not return allowed values: %s", str(ret))
status = "unknown"
attempt_nr = -1
status_code = 20
except Exception as error:
logger.warning("could not interpret job status from dispatcher: %s", error)
status = 'unknown'
attempt_nr = -1
status_code = -1
break
else:
if status_code == 0: # success
break
elif status_code == 10: # time-out
trial += 1
time.sleep(10)
continue
elif status_code == 20: # other error
if ret[0] == 13056 or ret[0] == '13056':
logger.warning("wrong certificate used with curl operation? (encountered error 13056)")
break
else: # general error
break
return status, attempt_nr, status_code
def get_panda_server(url, port):
"""
Get the URL for the PanDA server.
:param url: URL string, if set in pilot option (port not included).
:param port: port number, if set in pilot option (int).
:return: full URL (either from pilot options or from config file)
"""
if url.startswith('https://'):
url = url.replace('https://', '')
if url != '' and port != 0:
pandaserver = '%s:%s' % (url, port) if ":" not in url else url
else:
pandaserver = config.Pilot.pandaserver
if not pandaserver.startswith('http'):
pandaserver = 'https://' + pandaserver
# add randomization for PanDA server
default = 'pandaserver.cern.ch'
if default in pandaserver:
rnd = random.choice([socket.getfqdn(vv) for vv in set([v[-1][0] for v in socket.getaddrinfo(default, 25443, socket.AF_INET)])])
pandaserver = pandaserver.replace(default, rnd)
logger.debug('updated %s to %s', default, pandaserver)
return pandaserver
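# Illustrative resolution (host names and ports below are examples only):
#   get_panda_server('https://pandaserver.cern.ch', 25443)
#       -> 'https://<random replica FQDN>:25443' (default host randomized over its DNS A records)
#   get_panda_server('', 0)
#       -> config.Pilot.pandaserver (with 'https://' prepended if the config value lacks a protocol)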
def get_debug_command(cmd):
"""
Identify and filter the given debug command.
Note: only a single command will be allowed from a predefined list: tail, ls, gdb, ps, du.
:param cmd: raw debug command from job definition (string).
:return: debug_mode (Boolean, True if command is deemed ok), debug_command (string).
"""
debug_mode = False
debug_command = ""
allowed_commands = ['tail', 'ls', 'ps', 'gdb', 'du']
forbidden_commands = ['rm']
# remove any 'debug,' command that the server might send redundantly
if ',' in cmd and 'debug' in cmd:
cmd = cmd.replace('debug,', '').replace(',debug', '')
try:
tmp = cmd.split(' ')
com = tmp[0]
except Exception as error:
logger.warning('failed to identify debug command: %s', error)
else:
if com not in allowed_commands:
logger.warning('command=%s is not in the list of allowed commands: %s', com, str(allowed_commands))
        elif ';' in cmd:
logger.warning('debug command cannot contain \';\': \'%s\'', cmd)
elif com in forbidden_commands:
logger.warning('command=%s is not allowed', com)
else:
debug_mode = True
debug_command = cmd
return debug_mode, debug_command
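# Illustrative outcomes:
#   get_debug_command('tail -20 payload.stdout')  -> (True, 'tail -20 payload.stdout')
#   get_debug_command('debug,ls -ltr')            -> (True, 'ls -ltr')  ('debug,' prefix stripped)
#   get_debug_command('rm -rf workdir')           -> (False, '')        ('rm' is not an allowed command)
#   get_debug_command('ls ; rm -rf workdir')      -> (False, '')        (';' is rejected)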
def handle_backchannel_command(res, job, args, test_tobekilled=False):
"""
    Check whether the server update contains any backchannel information; if so, update the job object.
:param res: server response (dictionary).
:param job: job object.
:param args: pilot args object.
:param test_tobekilled: emulate a tobekilled command (boolean).
:return:
"""
if test_tobekilled:
logger.info('faking a \'tobekilled\' command')
res['command'] = 'tobekilled'
if 'command' in res and res.get('command') != 'NULL':
# warning: server might return comma-separated string, 'debug,tobekilled'
cmd = res.get('command')
# is it a 'command options'-type? debug_command=tail .., ls .., gdb .., ps .., du ..
if ' ' in cmd and 'tobekilled' not in cmd:
try:
job.debug, job.debug_command = get_debug_command(cmd)
except Exception as error:
logger.debug('exception caught in get_debug_command(): %s', error)
elif 'tobekilled' in cmd:
logger.info('pilot received a panda server signal to kill job %s at %s', job.jobid, time_stamp())
set_pilot_state(job=job, state="failed")
job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(errors.PANDAKILL)
if job.pid:
logger.debug('killing payload process')
kill_process(job.pid)
else:
logger.debug('no pid to kill')
args.abort_job.set()
elif 'softkill' in cmd:
logger.info('pilot received a panda server signal to softkill job %s at %s', job.jobid, time_stamp())
# event service kill instruction
job.debug_command = 'softkill'
elif 'debug' in cmd:
logger.info('pilot received a command to turn on standard debug mode from the server')
job.debug = True
job.debug_command = 'debug'
elif 'debugoff' in cmd:
logger.info('pilot received a command to turn off debug mode from the server')
job.debug = False
job.debug_command = 'debugoff'
else:
logger.warning('received unknown server command via backchannel: %s', cmd)
# for testing debug mode
# job.debug = True
# job.debug_command = 'du -sk'
# job.debug_command = 'tail -30 payload.stdout'
# job.debug_command = 'ls -ltr workDir' # not really tested
# job.debug_command = 'ls -ltr %s' % job.workdir
# job.debug_command = 'ps -ef'
# job.debug_command = 'ps axo pid,ppid,pgid,args'
# job.debug_command = 'gdb --pid % -ex \'generate-core-file\''
def add_data_structure_ids(data, version_tag):
"""
Add pilot, batch and scheduler ids to the data structure for getJob, updateJob.
    :param data: data structure (dict).
    :param version_tag: pilot version tag (string).
    :return: updated data structure (dict).
"""
schedulerid = get_job_scheduler_id()
if schedulerid:
data['schedulerID'] = schedulerid
pilotid = get_pilot_id()
if pilotid:
pilotversion = os.environ.get('PILOT_VERSION')
# report the batch system job id, if available
batchsystem_type, batchsystem_id = get_batchsystem_jobid()
if batchsystem_type:
data['pilotID'] = "%s|%s|%s|%s" % (pilotid, batchsystem_type, version_tag, pilotversion)
data['batchID'] = batchsystem_id
else:
data['pilotID'] = "%s|%s|%s" % (pilotid, version_tag, pilotversion)
return data
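# Illustrative pilotID composition (all values below are placeholders): with pilot id 'abc123',
# batch system type 'SLURM', version tag 'PR' and pilot version '2.9.2', the function sets
#   data['pilotID'] = 'abc123|SLURM|PR|2.9.2' and data['batchID'] = <batch system job id>;
# if no batch system is detected, data['pilotID'] = 'abc123|PR|2.9.2' and batchID is left unset.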
def get_data_structure(job, state, args, xml=None, metadata=None):
"""
Build the data structure needed for getJob, updateJob.
:param job: job object.
:param state: state of the job (string).
    :param args: pilot args object.
:param xml: optional XML string.
:param metadata: job report metadata read as a string.
:return: data structure (dictionary).
"""
data = {'jobId': job.jobid,
'state': state,
'timestamp': time_stamp(),
'siteName': os.environ.get('PILOT_SITENAME'), # args.site,
'node': get_node_name(),
'attemptNr': job.attemptnr}
# add pilot, batch and scheduler ids to the data structure
data = add_data_structure_ids(data, args.version_tag)
starttime = get_postgetjob_time(job.jobid, args)
if starttime:
data['startTime'] = starttime
job_metrics = get_job_metrics(job)
if job_metrics:
data['jobMetrics'] = job_metrics
if xml is not None:
data['xml'] = xml
if metadata is not None:
data['metaData'] = metadata
# in debug mode, also send a tail of the latest log file touched by the payload
if job.debug:
data['stdout'] = process_debug_mode(job)
# add the core count
if job.corecount and job.corecount != 'null' and job.corecount != 'NULL':
data['coreCount'] = job.corecount
#data['coreCount'] = mean(job.corecounts) if job.corecounts else job.corecount
if job.corecounts:
_mean = mean(job.corecounts)
logger.info('mean actualcorecount: %f', _mean)
data['meanCoreCount'] = _mean
# get the number of events, should report in heartbeat in case of preempted.
if job.nevents != 0:
data['nEvents'] = job.nevents
logger.info("total number of processed events: %d (read)", job.nevents)
else:
logger.info("payload/TRF did not report the number of read events")
    # get the CPU consumption time
constime = get_cpu_consumption_time(job.cpuconsumptiontime)
if constime and constime != -1:
data['cpuConsumptionTime'] = constime
data['cpuConversionFactor'] = job.cpuconversionfactor
data['cpuConsumptionUnit'] = job.cpuconsumptionunit + "+" + get_cpu_model()
instruction_sets = has_instruction_sets(['AVX2'])
product, vendor = get_display_info()
if instruction_sets:
if 'cpuConsumptionUnit' in data:
data['cpuConsumptionUnit'] += '+' + instruction_sets
else:
data['cpuConsumptionUnit'] = instruction_sets
if product and vendor:
logger.debug('cpuConsumptionUnit: could have added: product=%s, vendor=%s', product, vendor)
# add memory information if available
add_memory_info(data, job.workdir, name=job.memorymonitor)
    if state in ('finished', 'failed'):
add_timing_and_extracts(data, job, state, args)
add_error_codes(data, job)
return data
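# Illustrative minimal heartbeat payload produced by get_data_structure() (all values are examples):
#   {'jobId': '1234567', 'state': 'running', 'timestamp': <time stamp>, 'siteName': 'MYSITE',
#    'node': 'worker01', 'attemptNr': 0, 'pilotID': ..., 'startTime': ...}
# plus optional fields (jobMetrics, stdout, coreCount, nEvents, CPU/memory info) and, for
# finished/failed jobs, timing information, log extracts and error codes.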
def process_debug_mode(job):
"""
Handle debug mode - preprocess debug command, get the output and kill the payload in case of gdb.
:param job: job object.
:return: stdout from debug command (string).
"""
# for gdb commands, use the proper gdb version (the system one may be too old)
if job.debug_command.startswith('gdb '):
pilot_user = os.environ.get('PILOT_USER', 'generic').lower()
user = __import__('pilot.user.%s.common' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3
user.preprocess_debug_command(job)
stdout = get_debug_stdout(job)
if stdout:
# in case gdb was successfully used, the payload can now be killed
if job.debug_command.startswith('gdb ') and job.pid:
job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(errors.PANDAKILL,
msg='payload was killed after gdb produced requested core file')
logger.debug('will proceed to kill payload processes')
kill_processes(job.pid)
return stdout
def get_debug_stdout(job):
"""
Return the requested output from a given debug command.
:param job: job object.
:return: output (string).
"""
if job.debug_command == 'debug':
return get_payload_log_tail(job.workdir)
elif 'tail ' in job.debug_command:
return get_requested_log_tail(job.debug_command, job.workdir)
elif 'ls ' in job.debug_command:
return get_ls(job.debug_command, job.workdir)
elif 'ps ' in job.debug_command or 'gdb ' in job.debug_command:
return get_general_command_stdout(job)
else:
# general command, execute and return output
_, stdout, _ = execute(job.debug_command)
logger.info('debug_command: %s:\n\n%s\n', job.debug_command, stdout)
return stdout
def get_general_command_stdout(job):
"""
Return the output from the requested debug command.
:param job: job object.
:return: output (string).
"""
stdout = ''
# for gdb, we might have to process the debug command (e.g. to identify the proper pid to debug)
if 'gdb ' in job.debug_command and '--pid %' in job.debug_command:
pilot_user = os.environ.get('PILOT_USER', 'generic').lower()
user = __import__('pilot.user.%s.common' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3
job.debug_command = user.process_debug_command(job.debug_command, job.jobid)
if job.debug_command:
_containerisation = False # set this with some logic instead - not used for now
if _containerisation:
try:
containerise_general_command(job, job.infosys.queuedata.container_options,
label='general',
container_type='container')
except PilotException as error:
logger.warning('general containerisation threw a pilot exception: %s', error)
except Exception as error:
logger.warning('general containerisation threw an exception: %s', error)
else:
_, stdout, stderr = execute(job.debug_command)
logger.debug("%s (stdout):\n\n%s\n\n", job.debug_command, stdout)
logger.debug("%s (stderr):\n\n%s\n\n", job.debug_command, stderr)
# in case a core file was produced, locate it
path = locate_core_file(cmd=job.debug_command) if 'gdb ' in job.debug_command else ''
if path:
# copy it to the working directory (so it will be saved in the log)
try:
copy(path, job.workdir)
except Exception:
pass
return stdout
def get_ls(debug_command, workdir):
"""
Return the requested ls debug command.
:param debug_command: full debug command (string).
:param workdir: job work directory (string).
:return: output (string).
"""
items = debug_command.split(' ')
# cmd = items[0]
options = ' '.join(items[1:])
path = options.split(' ')[-1] if ' ' in options else options
if path.startswith('-'):
path = '.'
finalpath = os.path.join(workdir, path)
debug_command = debug_command.replace(path, finalpath)
_, stdout, _ = execute(debug_command)
logger.debug("%s:\n\n%s\n\n", debug_command, stdout)
return stdout
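# Illustrative: with workdir '/scratch/job', a backchannel command 'ls -ltr workDir' is rewritten
# to 'ls -ltr /scratch/job/workDir' before execution, so the relative path resolves inside the
# job directory.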
def get_requested_log_tail(debug_command, workdir):
"""
Return the tail of the requested debug log.
    Examples:
        tail workdir/tmp.stdout*  <- pilot finds the requested log file in the specified relative path
        tail log.RAWtoALL         <- pilot finds the requested log file
:param debug_command: full debug command (string).
:param workdir: job work directory (string).
:return: output (string).
"""
_tail = ""
items = debug_command.split(' ')
cmd = items[0]
options = ' '.join(items[1:])
logger.debug('debug command: %s', cmd)
logger.debug('debug options: %s', options)
# assume that the path is the last of the options; <some option> <some path>
path = options.split(' ')[-1] if ' ' in options else options
fullpath = os.path.join(workdir, path)
# find all files with the given pattern and pick the latest updated file (if several)
files = glob(fullpath)
if files:
logger.info('files found: %s', str(files))
_tail = get_latest_log_tail(files)
else:
logger.warning('did not find \'%s\' in path %s', path, fullpath)
if _tail:
logger.debug('tail =\n\n%s\n\n', _tail)
return _tail
def add_error_codes(data, job):
"""
Add error codes to data structure.
:param data: data dictionary.
:param job: job object.
:return:
"""
# error codes
pilot_error_code = job.piloterrorcode
pilot_error_codes = job.piloterrorcodes
    if pilot_error_codes:
logger.warning('pilotErrorCodes = %s (will report primary/first error code)', str(pilot_error_codes))
data['pilotErrorCode'] = pilot_error_codes[0]
else:
data['pilotErrorCode'] = pilot_error_code
# add error info
pilot_error_diag = job.piloterrordiag
pilot_error_diags = job.piloterrordiags
    if pilot_error_diags:
logger.warning('pilotErrorDiags = %s (will report primary/first error diag)', str(pilot_error_diags))
data['pilotErrorDiag'] = pilot_error_diags[0]
else:
data['pilotErrorDiag'] = pilot_error_diag
data['transExitCode'] = job.transexitcode
data['exeErrorCode'] = job.exeerrorcode
data['exeErrorDiag'] = job.exeerrordiag
def get_cpu_consumption_time(cpuconsumptiontime):
"""
Get the CPU consumption time.
The function makes sure that the value exists and is within allowed limits (< 10^9).
:param cpuconsumptiontime: CPU consumption time (int/None).
:return: properly set CPU consumption time (int/None).
"""
constime = None
try:
constime = int(cpuconsumptiontime)
except Exception:
constime = None
if constime and constime > 10 ** 9:
logger.warning("unrealistic cpuconsumptiontime: %d (reset to -1)", constime)
constime = -1
return constime
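# Illustrative behaviour:
#   get_cpu_consumption_time('12345')  -> 12345
#   get_cpu_consumption_time(None)     -> None  (unparseable value)
#   get_cpu_consumption_time(10 ** 10) -> -1    (unrealistically large value)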
def add_timing_and_extracts(data, job, state, args):
"""
Add timing info and log extracts to data structure for a completed job (finished or failed) to be sent to server.
Note: this function updates the data dictionary.
:param data: data structure (dictionary).
:param job: job object.
:param state: state of the job (string).
:param args: pilot args.
:return:
"""
time_getjob, time_stagein, time_payload, time_stageout, time_total_setup = timing_report(job.jobid, args)
data['pilotTiming'] = "%s|%s|%s|%s|%s" % \
(time_getjob, time_stagein, time_payload, time_stageout, time_total_setup)
# add log extracts (for failed/holding jobs or for jobs with outbound connections)
extracts = ""
if state == 'failed' or state == 'holding':
pilot_user = os.environ.get('PILOT_USER', 'generic').lower()
user = __import__('pilot.user.%s.diagnose' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3
extracts = user.get_log_extracts(job, state)
if extracts != "":
logger.warning('\nXXXXXXXXXXXXXXXXXXXXX[begin log extracts]\n%s\nXXXXXXXXXXXXXXXXXXXXX[end log extracts]', extracts)
data['pilotLog'] = extracts[:1024]
data['endTime'] = time.time()
def add_memory_info(data, workdir, name=""):
"""
Add memory information (if available) to the data structure that will be sent to the server with job updates
Note: this function updates the data dictionary.
:param data: data structure (dictionary).
:param workdir: working directory of the job (string).
:param name: name of memory monitor (string).
:return:
"""
pilot_user = os.environ.get('PILOT_USER', 'generic').lower()
utilities = __import__('pilot.user.%s.utilities' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3
try:
utility_node = utilities.get_memory_monitor_info(workdir, name=name)
data.update(utility_node)
except Exception as error:
logger.info('memory information not available: %s', error)
def remove_pilot_logs_from_list(list_of_files):
"""
Remove any pilot logs from the list of last updated files.
:param list_of_files: list of last updated files (list).
:return: list of files (list).
"""
# note: better to move experiment specific files to user area
# ignore the pilot log files
try:
to_be_removed = [config.Pilot.pilotlog, config.Pilot.stageinlog, config.Pilot.stageoutlog,
config.Pilot.timing_file, config.Pilot.remotefileverification_dictionary,
config.Pilot.remotefileverification_log, config.Pilot.base_trace_report,
config.Container.container_script, config.Container.release_setup,
config.Container.stagein_status_dictionary, config.Container.stagein_replica_dictionary,
'eventLoopHeartBeat.txt', 'memory_monitor_output.txt', 'memory_monitor_summary.json_snapshot']
except Exception as error:
logger.warning('exception caught: %s', error)
to_be_removed = []
new_list_of_files = []
for filename in list_of_files:
if os.path.basename(filename) not in to_be_removed and '/pilot/' not in filename and 'prmon' not in filename:
new_list_of_files.append(filename)
return new_list_of_files
def get_payload_log_tail(workdir):
"""
Return the tail of the payload stdout or its latest updated log file.
:param workdir: job work directory (string).
:return: tail of stdout (string).
"""
# find the latest updated log file
# list_of_files = get_list_of_log_files()
# find the latest updated text file
list_of_files = find_text_files()
list_of_files = remove_pilot_logs_from_list(list_of_files)
if not list_of_files:
logger.info('no log files were found (will use default %s)', config.Payload.payloadstdout)
list_of_files = [os.path.join(workdir, config.Payload.payloadstdout)]
return get_latest_log_tail(list_of_files)
def get_latest_log_tail(files):
"""
Get the tail of the latest updated file from the given file list.
    :param files: files (list).
    :return: tail of the latest updated file (string).
"""
stdout_tail = ""
try:
latest_file = max(files, key=os.path.getmtime)
logger.info('tail of file %s will be added to heartbeat', latest_file)
# now get the tail of the found log file and protect against potentially large tails
stdout_tail = latest_file + "\n" + tail(latest_file)
stdout_tail = stdout_tail[-2048:]
except OSError as exc:
logger.warning('failed to get payload stdout tail: %s', exc)
return stdout_tail
def validate(queues, traces, args):
"""
Perform validation of job.
:param queues: queues object.
:param traces: traces object.
:param args: args object.
:return:
"""
while not args.graceful_stop.is_set():
time.sleep(0.5)
try:
job = queues.jobs.get(block=True, timeout=1)
except queue.Empty:
continue
traces.pilot['nr_jobs'] += 1
        # set the environment variable for the task id
os.environ['PanDA_TaskID'] = str(job.taskid)
logger.info('processing PanDA job %s from task %s', job.jobid, job.taskid)
if _validate_job(job):
# Define a new parent group
os.setpgrp()
job_dir = os.path.join(args.mainworkdir, 'PanDA_Pilot-%s' % job.jobid)
logger.debug('creating job working directory: %s', job_dir)
try:
os.mkdir(job_dir)
os.chmod(job_dir, 0o770)
job.workdir = job_dir
except Exception as error:
logger.debug('cannot create working directory: %s', error)
traces.pilot['error_code'] = errors.MKDIR
job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(traces.pilot['error_code'])
job.piloterrordiag = error
put_in_queue(job, queues.failed_jobs)
break
else:
create_k8_link(job_dir)
# try:
# # stream the job object to file
# job_dict = job.to_json()
# write_json(os.path.join(job.workdir, 'job.json'), job_dict)
# except Exception as error:
# logger.debug('exception caught: %s', error)
# else:
# try:
# _job_dict = read_json(os.path.join(job.workdir, 'job.json'))
# job_dict = loads(_job_dict)
# _job = JobData(job_dict, use_kmap=False)
# except Exception as error:
# logger.warning('exception caught: %s', error)
create_symlink(from_path='../%s' % config.Pilot.pilotlog, to_path=os.path.join(job_dir, config.Pilot.pilotlog))
# pre-cleanup
pilot_user = os.environ.get('PILOT_USER', 'generic').lower()
utilities = __import__('pilot.user.%s.utilities' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3
try:
utilities.precleanup()
except Exception as error:
logger.warning('exception caught: %s', error)
# store the PanDA job id for the wrapper to pick up
store_jobid(job.jobid, args.sourcedir)
# run the delayed space check now
delayed_space_check(queues, traces, args, job)
# make sure that ctypes is available (needed at the end by orphan killer)
verify_ctypes(queues, job)
else:
logger.debug('Failed to validate job=%s', job.jobid)
put_in_queue(job, queues.failed_jobs)
# proceed to set the job_aborted flag?
if threads_aborted():
logger.debug('will proceed to set job_aborted')
args.job_aborted.set()
else:
logger.debug('will not set job_aborted yet')
logger.debug('[job] validate thread has finished')
def verify_ctypes(queues, job):
"""
Verify ctypes and make sure all subprocess are parented.
:param queues: queues object.
:param job: job object.
:return:
"""
try:
import ctypes
# except ModuleNotFoundError as error: # Python 3
except Exception as error:
diagnostics = 'ctypes python module could not be imported: %s' % error
logger.warning(diagnostics)
#job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(errors.NOCTYPES, msg=diagnostics)
#logger.debug('Failed to validate job=%s', job.jobid)
#put_in_queue(job, queues.failed_jobs)
else:
logger.debug('ctypes python module imported')
# make sure all children are parented by the pilot
# specifically, this will include any 'orphans', i.e. if the pilot kills all subprocesses at the end,
# 'orphans' will be included (orphans seem like the wrong name)
libc = ctypes.CDLL('libc.so.6')
pr_set_child_subreaper = 36
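        # PR_SET_CHILD_SUBREAPER (value 36 in linux/prctl.h): any orphaned descendant process
        # will be re-parented to the pilot instead of init, so it can be found and killed during cleanup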
libc.prctl(pr_set_child_subreaper, 1)
logger.debug('all child subprocesses will be parented')
def delayed_space_check(queues, traces, args, job):
"""
Run the delayed space check if necessary.
:param queues: queues object.
:param traces: traces object.
:param args: args object.
:param job: job object.
:return:
"""
    proceed_with_local_space_check = args.harvester_submitmode.lower() == 'push' and args.update_server
if proceed_with_local_space_check:
logger.debug('pilot will now perform delayed space check')
exit_code, diagnostics = check_local_space()
if exit_code != 0:
traces.pilot['error_code'] = errors.NOLOCALSPACE
# set the corresponding error code
job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(errors.NOLOCALSPACE, msg=diagnostics)
logger.debug('Failed to validate job=%s', job.jobid)
put_in_queue(job, queues.failed_jobs)
else:
put_in_queue(job, queues.validated_jobs)
else:
put_in_queue(job, queues.validated_jobs)
def create_k8_link(job_dir):
"""
Create a soft link to the payload workdir on Kubernetes if SHARED_DIR exists.
:param job_dir: payload workdir (string).
"""
shared_dir = os.environ.get('SHARED_DIR', None)
if shared_dir:
#create_symlink(from_path=os.path.join(shared_dir, 'payload_workdir'), to_path=job_dir)
create_symlink(from_path=job_dir, to_path=os.path.join(shared_dir, 'payload_workdir'))
else:
logger.debug('will not create symlink in SHARED_DIR')
def store_jobid(jobid, init_dir):
"""
Store the PanDA job id in a file that can be picked up by the wrapper for other reporting.
:param jobid: job id (int).
:param init_dir: pilot init dir (string).
:return:
"""
try:
path = os.path.join(os.path.join(init_dir, 'pilot2'), config.Pilot.jobid_file)
path = path.replace('pilot2/pilot2', 'pilot2') # dirty fix for bad paths
mode = 'a' if os.path.exists(path) else 'w'
logger.debug('path=%s mode=%s', path, mode)
write_file(path, "%s\n" % str(jobid), mode=mode, mute=False)
except Exception as error:
logger.warning('exception caught while trying to store job id: %s', error)
def create_data_payload(queues, traces, args):
"""
Get a Job object from the "validated_jobs" queue.
If the job has defined input files, move the Job object to the "data_in" queue and put the internal pilot state to
"stagein". In case there are no input files, place the Job object in the "finished_data_in" queue. For either case,
the thread also places the Job object in the "payloads" queue (another thread will retrieve it and wait for any
stage-in to finish).
:param queues: internal queues for job handling.
:param traces: tuple containing internal pilot states.
:param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
:return:
"""
while not args.graceful_stop.is_set():
time.sleep(0.5)
try:
job = queues.validated_jobs.get(block=True, timeout=1)
except queue.Empty:
continue
if job.indata:
# if the job has input data, put the job object in the data_in queue which will trigger stage-in
set_pilot_state(job=job, state='stagein')
put_in_queue(job, queues.data_in)
else:
# if the job does not have any input data, then pretend that stage-in has finished and put the job
# in the finished_data_in queue
put_in_queue(job, queues.finished_data_in)
put_in_queue(job, queues.payloads)
# proceed to set the job_aborted flag?
if threads_aborted():
logger.debug('will proceed to set job_aborted')
args.job_aborted.set()
else:
logger.debug('will not set job_aborted yet')
logger.debug('[job] create_data_payload thread has finished')
def get_task_id():
"""
Return the task id for the current job.
    Note: currently the implementation uses an environment variable to store this number (PanDA_TaskID).
:return: task id (string). Returns empty string in case of error.
"""
if "PanDA_TaskID" in os.environ:
taskid = os.environ["PanDA_TaskID"]
else:
logger.warning('PanDA_TaskID not set in environment')
taskid = ""
return taskid
def get_job_label(args):
"""
Return a proper job label.
    The function returns a job label that corresponds to the actual pilot version, i.e. whether the pilot is a
    development version (ptest or rc_test2) or a production version (managed or user).
Example: -i RC -> job_label = rc_test2.
NOTE: it should be enough to only use the job label, -j rc_test2 (and not specify -i RC at all).
:param args: pilot args object.
:return: job_label (string).
"""
# PQ status
status = infosys.queuedata.status
if args.version_tag == 'RC' and args.job_label == 'rc_test2':
job_label = 'rc_test2'
elif args.version_tag == 'RC' and args.job_label == 'ptest':
job_label = args.job_label
elif args.version_tag == 'RCM' and args.job_label == 'ptest':
job_label = 'rcm_test2'
elif args.version_tag == 'ALRB':
job_label = 'rc_alrb'
elif status == 'test' and args.job_label != 'ptest':
logger.warning('PQ status set to test - will use job label / prodSourceLabel test')
job_label = 'test'
else:
job_label = args.job_label
return job_label
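# Illustrative mapping (assuming the PQ status is not 'test' unless stated):
#   -i RC   -j rc_test2  -> 'rc_test2'
#   -i RC   -j ptest     -> 'ptest'
#   -i RCM  -j ptest     -> 'rcm_test2'
#   -i ALRB              -> 'rc_alrb'
#   PQ status 'test' with -j managed -> 'test'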
def get_dispatcher_dictionary(args):
"""
Return a dictionary with required fields for the dispatcher getJob operation.
The dictionary should contain the following fields: siteName, computingElement (queue name),
prodSourceLabel (e.g. user, test, ptest), diskSpace (available disk space for a job in MB),
workingGroup, countryGroup, cpu (float), mem (float) and node (worker node name).
    Note on workingGroup, countryGroup and allowOtherCountry: the pilot setting allowOtherCountry=True is used in
    conjunction with countryGroup=us for US pilots. With these settings, the PanDA server will produce the desired
    behaviour of dedicating X% of the resource exclusively (so long as jobs are available) to countryGroup=us jobs.
    When allowOtherCountry=False, the behavior relied on by current users of the countryGroup mechanism is
    maintained, i.e. the resource is NOT allowed to be used outside the privileged group under any circumstances.
:param args: arguments (e.g. containing queue name, queuedata dictionary, etc).
:returns: dictionary prepared for the dispatcher getJob operation.
"""
_diskspace = get_disk_space(infosys.queuedata)
_mem, _cpu, _disk = collect_workernode_info()
_nodename = get_node_name()
# override for RC dev pilots
job_label = get_job_label(args)
data = {
'siteName': infosys.queuedata.resource, # next: remove redundant '-r' option of pilot.py
'computingElement': args.queue,
'prodSourceLabel': job_label,
'diskSpace': _diskspace,
'workingGroup': args.working_group,
'cpu': _cpu,
'mem': _mem,
'node': _nodename
}
if args.jobtype != "":
data['jobType'] = args.jobtype
if args.allow_other_country != "":
data['allowOtherCountry'] = args.allow_other_country
if args.country_group != "":
data['countryGroup'] = args.country_group
if args.job_label == 'self':
dn = get_distinguished_name()
data['prodUserID'] = dn
taskid = get_task_id()
if taskid != "" and args.allow_same_user:
data['taskID'] = taskid
logger.info("will download a new job belonging to task id: %s", data['taskID'])
if args.resource_type != "":
data['resourceType'] = args.resource_type
# add harvester fields
if 'HARVESTER_ID' in os.environ:
data['harvester_id'] = os.environ.get('HARVESTER_ID')
if 'HARVESTER_WORKER_ID' in os.environ:
data['worker_id'] = os.environ.get('HARVESTER_WORKER_ID')
# instruction_sets = has_instruction_sets(['AVX', 'AVX2'])
# if instruction_sets:
# data['cpuConsumptionUnit'] = instruction_sets
return data
def proceed_with_getjob(timefloor, starttime, jobnumber, getjob_requests, max_getjob_requests, update_server, submitmode, harvester, verify_proxy, traces):
"""
Can we proceed with getJob?
    We may not proceed if we have run out of time (timefloor limit), if the proxy is too short, if disk space is too
    small or if we have already processed enough jobs.
    :param timefloor: timefloor limit (s) (int).
    :param starttime: start time of retrieve() (s) (int).
    :param jobnumber: number of downloaded jobs (int).
    :param getjob_requests: number of getjob requests (int).
    :param max_getjob_requests: maximum allowed number of getjob requests (int).
:param update_server: should pilot update server? (Boolean).
:param submitmode: Harvester submit mode, PULL or PUSH (string).
:param harvester: True if Harvester is used, False otherwise. Affects the max number of getjob reads (from file) (Boolean).
:param verify_proxy: True if the proxy should be verified. False otherwise (Boolean).
:param traces: traces object (to be able to propagate a proxy error all the way back to the wrapper).
:return: True if pilot should proceed with getJob (Boolean).
"""
# use for testing thread exceptions. the exception will be picked up by ExcThread run() and caught in job.control()
# raise NoLocalSpace('testing exception from proceed_with_getjob')
#timefloor = 600
currenttime = time.time()
pilot_user = os.environ.get('PILOT_USER', 'generic').lower()
common = __import__('pilot.user.%s.common' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3
if not common.allow_timefloor(submitmode):
timefloor = 0
# should the proxy be verified?
if verify_proxy:
userproxy = __import__('pilot.user.%s.proxy' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3
# is the proxy still valid?
exit_code, diagnostics = userproxy.verify_proxy()
if traces.pilot['error_code'] == 0: # careful so we don't overwrite another error code
traces.pilot['error_code'] = exit_code
if exit_code == errors.NOPROXY or exit_code == errors.NOVOMSPROXY:
logger.warning(diagnostics)
return False
# is there enough local space to run a job?
# note: do not run this test at this point if submit mode=PUSH and we are in truePilot mode on ARC
# (available local space will in this case be checked after the job definition has been read from file, so the
# pilot can report the error with a server update)
    proceed_with_local_space_check = not (submitmode.lower() == 'push' and update_server)
if proceed_with_local_space_check:
exit_code, diagnostics = check_local_space()
if exit_code != 0:
traces.pilot['error_code'] = errors.NOLOCALSPACE
return False
else:
logger.debug('pilot will delay local space check until after job definition has been read from file')
maximum_getjob_requests = 60 if harvester else max_getjob_requests # 1 s apart (if harvester)
if getjob_requests > int(maximum_getjob_requests):
logger.warning('reached maximum number of getjob requests (%s) -- will abort pilot', maximum_getjob_requests)
# use singleton:
# instruct the pilot to wrap up quickly
os.environ['PILOT_WRAP_UP'] = 'QUICKLY'
return False
if timefloor == 0 and jobnumber > 0:
logger.warning("since timefloor is set to 0, pilot was only allowed to run one job")
# use singleton:
# instruct the pilot to wrap up quickly
os.environ['PILOT_WRAP_UP'] = 'QUICKLY'
return False
if (currenttime - starttime > timefloor) and jobnumber > 0:
logger.warning("the pilot has run out of time (timefloor=%d has been passed)", timefloor)
# use singleton:
# instruct the pilot to wrap up quickly
os.environ['PILOT_WRAP_UP'] = 'QUICKLY'
return False
# timefloor not relevant for the first job
if jobnumber > 0:
logger.info('since timefloor=%d s and only %d s has passed since launch, pilot can run another job', timefloor, currenttime - starttime)
if harvester and jobnumber > 0:
# unless it's the first job (which is preplaced in the init dir), instruct Harvester to place another job
# in the init dir
logger.info('asking Harvester for another job')
request_new_jobs()
if os.environ.get('SERVER_UPDATE', '') == SERVER_UPDATE_UPDATING:
logger.info('still updating previous job, will not ask for a new job yet')
return False
os.environ['SERVER_UPDATE'] = SERVER_UPDATE_NOT_DONE
return True
def getjob_server_command(url, port):
"""
Prepare the getJob server command.
:param url: PanDA server URL (string)
:param port: PanDA server port
:return: full server command (URL string)
"""
if url != "":
port_pattern = '.:([0-9]+)'
if not findall(port_pattern, url):
url = url + ':%s' % port
else:
logger.debug('URL already contains port: %s', url)
else:
url = config.Pilot.pandaserver
if url == "":
logger.fatal('PanDA server url not set (either as pilot option or in config file)')
elif not url.startswith("http"):
url = 'https://' + url
logger.warning('detected missing protocol in server url (added)')
return '{pandaserver}/server/panda/getJob'.format(pandaserver=url)
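# Illustrative results (host and port are examples only):
#   getjob_server_command('https://pandaserver.example.org', 25443)
#       -> 'https://pandaserver.example.org:25443/server/panda/getJob'
#   getjob_server_command('', 0)
#       -> '<config.Pilot.pandaserver>/server/panda/getJob'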
def get_job_definition_from_file(path, harvester):
"""
Get a job definition from a pre-placed file.
In Harvester mode, also remove any existing job request files since it is no longer needed/wanted.
:param path: path to job definition file.
:param harvester: True if Harvester is being used (determined from args.harvester), otherwise False
:return: job definition dictionary.
"""
# remove any existing Harvester job request files (silent in non-Harvester mode) and read the JSON
if harvester:
remove_job_request_file()
if is_json(path):
job_definition_list = parse_job_definition_file(path)
if not job_definition_list:
logger.warning('no jobs were found in Harvester job definitions file: %s', path)
return {}
else:
# remove the job definition file from the original location, place a renamed copy in the pilot dir
new_path = os.path.join(os.environ.get('PILOT_HOME'), 'job_definition.json')
copy(path, new_path)
remove(path)
# note: the pilot can only handle one job at the time from Harvester
return job_definition_list[0]
# old style
res = {}
with open(path, 'r') as jobdatafile:
response = jobdatafile.read()
if len(response) == 0:
logger.fatal('encountered empty job definition file: %s', path)
res = None # this is a fatal error, no point in continuing as the file will not be replaced
else:
# parse response message
# logger.debug('%s:\n\n%s\n\n', path, response)
try:
from urlparse import parse_qsl # Python 2
# except ModuleNotFoundError: # Python 3
except Exception:
from urllib.parse import parse_qsl # Python 3
datalist = parse_qsl(response, keep_blank_values=True)
# convert to dictionary
for data in datalist:
res[data[0]] = data[1]
if os.path.exists(path):
remove(path)
return res
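# Illustrative old-style (non-JSON) job definition file content and its parsed form:
#   'PandaID=1234&jobName=abc&StatusCode=0'
#   -> {'PandaID': '1234', 'jobName': 'abc', 'StatusCode': '0'}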
def get_job_definition_from_server(args):
"""
Get a job definition from a server.
:param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
:return: job definition dictionary.
"""
res = {}
# get the job dispatcher dictionary
data = get_dispatcher_dictionary(args)
cmd = getjob_server_command(args.url, args.port)
if cmd != "":
logger.info('executing server command: %s', cmd)
res = https.request(cmd, data=data)
return res
def locate_job_definition(args):
"""
Locate the job definition file among standard locations.
:param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
:return: path (string).
"""
if args.harvester_datadir:
paths = [os.path.join(args.harvester_datadir, config.Pilot.pandajobdata)]
else:
paths = [os.path.join("%s/.." % args.sourcedir, config.Pilot.pandajobdata),
os.path.join(args.sourcedir, config.Pilot.pandajobdata),
os.path.join(os.environ['PILOT_WORK_DIR'], config.Pilot.pandajobdata)]
if args.harvester_workdir:
paths.append(os.path.join(args.harvester_workdir, config.Harvester.pandajob_file))
if 'HARVESTER_WORKDIR' in os.environ:
paths.append(os.path.join(os.environ['HARVESTER_WORKDIR'], config.Harvester.pandajob_file))
path = ""
for _path in paths:
if os.path.exists(_path):
path = _path
break
if path == "":
logger.info('did not find any local job definition file')
return path
def get_job_definition(args):
"""
Get a job definition from a source (server or pre-placed local file).
:param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
:return: job definition dictionary.
"""
res = {}
path = locate_job_definition(args)
# should we run a normal 'real' job or a 'fake' job?
if config.Pilot.pandajob == 'fake':
logger.info('will use a fake PanDA job')
res = get_fake_job()
elif os.path.exists(path):
logger.info('will read job definition from file %s', path)
res = get_job_definition_from_file(path, args.harvester)
else:
if args.harvester and args.harvester_submitmode.lower() == 'push':
pass # local job definition file not found (go to sleep)
else:
logger.info('will download job definition from server')
res = get_job_definition_from_server(args)
return res
def now():
"""
    Return the current epoch time as a UTF-8 encoded string.
:return: current time as encoded string
"""
return str(time.time()).encode('utf-8')
def get_fake_job(input=True):
"""
Return a job definition for internal pilot testing.
Note: this function is only used for testing purposes. The job definitions below are ATLAS specific.
:param input: Boolean, set to False if no input files are wanted
:return: job definition (dictionary).
"""
res = None
# create hashes
hash = hashlib.md5()
hash.update(now())
log_guid = hash.hexdigest()
hash.update(now())
guid = hash.hexdigest()
hash.update(now())
job_name = hash.hexdigest()
if config.Pilot.testjobtype == 'production':
logger.info('creating fake test production job definition')
res = {'jobsetID': 'NULL',
'logGUID': log_guid,
'cmtConfig': 'x86_64-slc6-gcc48-opt',
'prodDBlocks': 'user.mlassnig:user.mlassnig.pilot.test.single.hits',
'dispatchDBlockTokenForOut': 'NULL,NULL',
'destinationDBlockToken': 'NULL,NULL',
'destinationSE': 'AGLT2_TEST',
'realDatasets': job_name,
'prodUserID': 'no_one',
'GUID': guid,
'realDatasetsIn': 'user.mlassnig:user.mlassnig.pilot.test.single.hits',
'nSent': 0,
'cloud': 'US',
'StatusCode': 0,
'homepackage': 'AtlasProduction/20.1.4.14',
'inFiles': 'HITS.06828093._000096.pool.root.1',
'processingType': 'pilot-ptest',
'ddmEndPointOut': 'UTA_SWT2_DATADISK,UTA_SWT2_DATADISK',
'fsize': '94834717',
'fileDestinationSE': 'AGLT2_TEST,AGLT2_TEST',
'scopeOut': 'panda',
'minRamCount': 0,
'jobDefinitionID': 7932,
'maxWalltime': 'NULL',
'scopeLog': 'panda',
'transformation': 'Reco_tf.py',
'maxDiskCount': 0,
'coreCount': 1,
'prodDBlockToken': 'NULL',
'transferType': 'NULL',
'destinationDblock': job_name,
'dispatchDBlockToken': 'NULL',
'jobPars': '--maxEvents=1 --inputHITSFile HITS.06828093._000096.pool.root.1 --outputRDOFile RDO_%s.root' % job_name,
'attemptNr': 0,
'swRelease': 'Atlas-20.1.4',
'nucleus': 'NULL',
'maxCpuCount': 0,
'outFiles': 'RDO_%s.root,%s.job.log.tgz' % (job_name, job_name),
'currentPriority': 1000,
'scopeIn': 'mc15_13TeV',
'PandaID': '0',
'sourceSite': 'NULL',
'dispatchDblock': 'NULL',
'prodSourceLabel': 'ptest',
'checksum': 'ad:5d000974',
'jobName': job_name,
'ddmEndPointIn': 'UTA_SWT2_DATADISK',
'taskID': 'NULL',
'logFile': '%s.job.log.tgz' % job_name}
elif config.Pilot.testjobtype == 'user':
logger.info('creating fake test user job definition')
res = {'jobsetID': 'NULL',
'logGUID': log_guid,
'cmtConfig': 'x86_64-slc6-gcc49-opt',
'prodDBlocks': 'data15_13TeV:data15_13TeV.00276336.physics_Main.merge.AOD.r7562_p2521_tid07709524_00',
'dispatchDBlockTokenForOut': 'NULL,NULL',
'destinationDBlockToken': 'NULL,NULL',
'destinationSE': 'ANALY_SWT2_CPB',
'realDatasets': job_name,
'prodUserID': 'None',
'GUID': guid,
'realDatasetsIn': 'data15_13TeV:data15_13TeV.00276336.physics_Main.merge.AOD.r7562_p2521_tid07709524_00',
'nSent': '0',
'cloud': 'US',
'StatusCode': 0,
'homepackage': 'AnalysisTransforms-AtlasDerivation_20.7.6.4',
'inFiles': 'AOD.07709524._000050.pool.root.1',
'processingType': 'pilot-ptest',
'ddmEndPointOut': 'SWT2_CPB_SCRATCHDISK,SWT2_CPB_SCRATCHDISK',
'fsize': '1564780952',
'fileDestinationSE': 'ANALY_SWT2_CPB,ANALY_SWT2_CPB',
'scopeOut': 'user.gangarbt',
'minRamCount': '0',
'jobDefinitionID': '9445',
'maxWalltime': 'NULL',
'scopeLog': 'user.gangarbt',
'transformation': 'http://pandaserver.cern.ch:25080/trf/user/runAthena-00-00-11',
'maxDiskCount': '0',
'coreCount': '1',
'prodDBlockToken': 'NULL',
'transferType': 'NULL',
'destinationDblock': job_name,
'dispatchDBlockToken': 'NULL',
'jobPars': '-a sources.20115461.derivation.tgz -r ./ -j "Reco_tf.py '
'--inputAODFile AOD.07709524._000050.pool.root.1 --outputDAODFile test.pool.root '
'--reductionConf HIGG3D1" -i "[\'AOD.07709524._000050.pool.root.1\']" -m "[]" -n "[]" --trf'
' --useLocalIO --accessmode=copy -o '
'"{\'IROOT\': [(\'DAOD_HIGG3D1.test.pool.root\', \'%s.root\')]}" '
'--sourceURL https://aipanda012.cern.ch:25443' % (job_name),
'attemptNr': '0',
'swRelease': 'Atlas-20.7.6',
'nucleus': 'NULL',
'maxCpuCount': '0',
'outFiles': '%s.root,%s.job.log.tgz' % (job_name, job_name),
'currentPriority': '1000',
'scopeIn': 'data15_13TeV',
'PandaID': '0',
'sourceSite': 'NULL',
'dispatchDblock': 'data15_13TeV:data15_13TeV.00276336.physics_Main.merge.AOD.r7562_p2521_tid07709524_00',
'prodSourceLabel': 'ptest',
'checksum': 'ad:b11f45a7',
'jobName': job_name,
'ddmEndPointIn': 'SWT2_CPB_SCRATCHDISK',
'taskID': 'NULL',
'logFile': '%s.job.log.tgz' % job_name}
else:
logger.warning('unknown test job type: %s', config.Pilot.testjobtype)
if res:
if not input:
res['inFiles'] = 'NULL'
res['GUID'] = 'NULL'
res['scopeIn'] = 'NULL'
res['fsize'] = 'NULL'
res['realDatasetsIn'] = 'NULL'
res['checksum'] = 'NULL'
if config.Pilot.testtransfertype == "NULL" or config.Pilot.testtransfertype == 'direct':
res['transferType'] = config.Pilot.testtransfertype
else:
logger.warning('unknown test transfer type: %s (ignored)', config.Pilot.testtransfertype)
if config.Pilot.testjobcommand == 'sleep':
res['transformation'] = 'sleep'
res['jobPars'] = '1'
res['inFiles'] = ''
res['outFiles'] = ''
# convert to unicode for Python 2
try: # in case some later version of Python 3 has problems using u'' (seems ok with 3.7 at least)
if not is_python3():
_res = {}
for entry in res:
if type(res[entry]) is str:
_res[u'%s' % entry] = u'%s' % res[entry]
else:
_res[u'%s' % entry] = res[entry]
res = _res
except Exception:
pass
return res
def get_job_retrieval_delay(harvester):
"""
Return the proper delay between job retrieval attempts.
In Harvester mode, the pilot will look once per second for a job definition file.
:param harvester: True if Harvester is being used (determined from args.harvester), otherwise False
:return: sleep (s)
"""
return 1 if harvester else 60
def retrieve(queues, traces, args): # noqa: C901
"""
Retrieve all jobs from a source.
The job definition is a json dictionary that is either present in the launch
directory (preplaced) or downloaded from a server specified by `args.url`.
The function retrieves the job definition from the proper source and places
it in the `queues.jobs` queue.
WARNING: this function is nearly too complex. Be careful with adding more lines as flake8 will fail it.
:param queues: internal queues for job handling.
:param traces: tuple containing internal pilot states.
:param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
:raises PilotException: if create_job fails (e.g. because queuedata could not be downloaded).
:return:
"""
timefloor = infosys.queuedata.timefloor
starttime = time.time()
jobnumber = 0 # number of downloaded jobs
getjob_requests = 0
getjob_failures = 0
print_node_info()
while not args.graceful_stop.is_set():
time.sleep(0.5)
getjob_requests += 1
if not proceed_with_getjob(timefloor, starttime, jobnumber, getjob_requests, args.getjob_requests,
args.update_server, args.harvester_submitmode, args.harvester, args.verify_proxy, traces):
# do not set graceful stop if pilot has not finished sending the final job update
# i.e. wait until SERVER_UPDATE is DONE_FINAL
check_for_final_server_update(args.update_server)
args.graceful_stop.set()
break
# store time stamp
time_pre_getjob = time.time()
# get a job definition from a source (file or server)
res = get_job_definition(args)
logger.info('job definition = %s', str(res))
if res is None:
logger.fatal('fatal error in job download loop - cannot continue')
# do not set graceful stop if pilot has not finished sending the final job update
# i.e. wait until SERVER_UPDATE is DONE_FINAL
check_for_final_server_update(args.update_server)
args.graceful_stop.set()
break
if not res:
getjob_failures += 1
if getjob_failures >= args.getjob_failures:
logger.warning('did not get a job -- max number of job request failures reached: %d', getjob_failures)
args.graceful_stop.set()
break
delay = get_job_retrieval_delay(args.harvester)
if not args.harvester:
logger.warning('did not get a job -- sleep %d s and repeat', delay)
for _ in range(delay):
if args.graceful_stop.is_set():
break
time.sleep(1)
else:
# it seems the PanDA server returns StatusCode as an int, but the aCT returns it as a string
# note: StatusCode keyword is not available in job definition files from Harvester (not needed)
if 'StatusCode' in res and res['StatusCode'] != '0' and res['StatusCode'] != 0:
getjob_failures += 1
if getjob_failures >= args.getjob_failures:
logger.warning('did not get a job -- max number of job request failures reached: %d',
getjob_failures)
args.graceful_stop.set()
break
logger.warning('did not get a job -- sleep 60s and repeat -- status: %s', res['StatusCode'])
for i in range(60):
if args.graceful_stop.is_set():
break
time.sleep(1)
else:
# create the job object out of the raw dispatcher job dictionary
try:
job = create_job(res, args.queue)
except PilotException as error:
raise error
#else:
# verify the job status on the server
#try:
# job_status, job_attempt_nr, job_status_code = get_job_status_from_server(job.jobid, args.url, args.port)
# if job_status == "running":
# pilot_error_diag = "job %s is already running elsewhere - aborting" % job.jobid
# logger.warning(pilot_error_diag)
# raise JobAlreadyRunning(pilot_error_diag)
#except Exception as error:
# logger.warning("%s", error)
# write time stamps to pilot timing file
# note: PILOT_POST_GETJOB corresponds to START_TIME in Pilot 1
add_to_pilot_timing(job.jobid, PILOT_PRE_GETJOB, time_pre_getjob, args)
add_to_pilot_timing(job.jobid, PILOT_POST_GETJOB, time.time(), args)
# add the job definition to the jobs queue and increase the job counter,
# and wait until the job has finished
put_in_queue(job, queues.jobs)
jobnumber += 1
while not args.graceful_stop.is_set():
if has_job_completed(queues, args):
# purge queue(s) that retains job object
purge_queue(queues.finished_data_in)
args.job_aborted.clear()
args.abort_job.clear()
logger.info('ready for new job')
# re-establish logging
logging.info('pilot has finished for previous job - re-establishing logging')
logging.handlers = []
logging.shutdown()
establish_logging(debug=args.debug, nopilotlog=args.nopilotlog)
pilot_version_banner()
getjob_requests = 0
add_to_pilot_timing('1', PILOT_MULTIJOB_START_TIME, time.time(), args)
break
time.sleep(0.5)
# proceed to set the job_aborted flag?
if threads_aborted():
logger.debug('will proceed to set job_aborted')
args.job_aborted.set()
else:
logger.debug('will not set job_aborted yet')
logger.debug('[job] retrieve thread has finished')
def print_node_info():
"""
Print information about the local node to the log.
:return:
"""
if is_virtual_machine():
logger.info("pilot is running in a virtual machine")
else:
logger.info("pilot is not running in a virtual machine")
def create_job(dispatcher_response, queue):
"""
Create a job object out of the dispatcher response.
:param dispatcher_response: raw job dictionary from the dispatcher.
:param queue: queue name (string).
:return: job object
"""
# initialize (job specific) InfoService instance
job = JobData(dispatcher_response)
jobinfosys = InfoService()
jobinfosys.init(queue, infosys.confinfo, infosys.extinfo, JobInfoProvider(job))
job.init(infosys)
#job.workdir = os.getcwd()
logger.info('received job: %s (sleep until the job has finished)', job.jobid)
logger.info('job details: \n%s', job)
# payload environment wants the PANDAID to be set, also used below
os.environ['PANDAID'] = job.jobid
return job
def has_job_completed(queues, args):
"""
Has the current job completed (finished or failed)?
Note: the job object was extracted from monitored_payloads queue before this function was called.
    :param queues: Pilot queues object.
    :param args: Pilot arguments object.
    :return: True if the payload has finished or failed, False otherwise.
    """
# check if the job has finished
try:
job = queues.completed_jobs.get(block=True, timeout=1)
except queue.Empty:
# logger.info("(job still running)")
pass
else:
make_job_report(job)
cmd = 'ls -lF %s' % os.environ.get('PILOT_HOME')
logger.debug('%s:\n', cmd)
_, stdout, _ = execute(cmd)
logger.debug(stdout)
queue_report(queues)
job.reset_errors()
logger.info("job %s has completed (purged errors)", job.jobid)
# cleanup of any remaining processes
if job.pid:
job.zombies.append(job.pid)
cleanup(job, args)
return True
# is there anything in the finished_jobs queue?
#finished_queue_snapshot = list(queues.finished_jobs.queue)
#peek = [obj for obj in finished_queue_snapshot if jobid == obj.jobid]
#if peek:
# logger.info("job %s has completed (finished)", jobid)
# return True
# is there anything in the failed_jobs queue?
#failed_queue_snapshot = list(queues.failed_jobs.queue)
#peek = [obj for obj in failed_queue_snapshot if jobid == obj.jobid]
#if peek:
# logger.info("job %s has completed (failed)", jobid)
# return True
return False
def get_job_from_queue(queues, state):
"""
Check if the job has finished or failed and if so return it.
:param queues: pilot queues.
:param state: job state (e.g. finished/failed) (string).
:return: job object.
"""
try:
if state == "finished":
job = queues.finished_jobs.get(block=True, timeout=1)
elif state == "failed":
job = queues.failed_jobs.get(block=True, timeout=1)
else:
job = None
except queue.Empty:
# logger.info("(job still running)")
job = None
else:
        # make sure the job state matches the requested state
set_pilot_state(job=job, state=state)
logger.info("job %s has state=%s", job.jobid, job.state)
return job
def is_queue_empty(queues, queue):
"""
Check if the given queue is empty (without pulling).
:param queues: pilot queues object.
:param queue: queue name (string).
:return: True if queue is empty, False otherwise
"""
status = False
if queue in queues._fields:
_queue = getattr(queues, queue)
jobs = list(_queue.queue)
if len(jobs) > 0:
logger.info('queue %s not empty: found %d job(s)', queue, len(jobs))
else:
logger.info('queue %s is empty', queue)
status = True
else:
logger.warning('queue %s not present in %s', queue, queues._fields)
return status
def order_log_transfer(queues, job):
"""
Order a log transfer for a failed job.
:param queues: pilot queues object.
:param job: job object.
:return:
"""
# add the job object to the data_out queue to have it staged out
job.stageout = 'log' # only stage-out log file
#set_pilot_state(job=job, state='stageout')
put_in_queue(job, queues.data_out)
logger.debug('job added to data_out queue')
# wait for the log transfer to finish
n = 0
nmax = 60
while n < nmax:
# refresh the log_transfer since it might have changed
log_transfer = job.get_status('LOG_TRANSFER')
logger.info('waiting for log transfer to finish (#%d/#%d): %s', n + 1, nmax, log_transfer)
if is_queue_empty(queues, 'data_out') and \
(log_transfer == LOG_TRANSFER_DONE or log_transfer == LOG_TRANSFER_FAILED): # set in data component
logger.info('stage-out of log has completed')
break
else:
if log_transfer == LOG_TRANSFER_IN_PROGRESS: # set in data component, job object is singleton
logger.info('log transfer is in progress')
time.sleep(2)
n += 1
logger.info('proceeding with server update (n=%d)', n)
def wait_for_aborted_job_stageout(args, queues, job):
"""
Wait for stage-out to finish for aborted job.
:param args: pilot args object.
:param queues: pilot queues object.
:param job: job object.
:return:
"""
# if the pilot received a kill signal, how much time has passed since the signal was intercepted?
try:
time_since_kill = get_time_since('1', PILOT_KILL_SIGNAL, args)
was_killed = was_pilot_killed(args.timing)
if was_killed:
logger.info('%d s passed since kill signal was intercepted - make sure that stage-out has finished', time_since_kill)
except Exception as error:
logger.warning('exception caught: %s', error)
time_since_kill = 60
else:
if time_since_kill > 60 or time_since_kill < 0: # fail-safe
logger.warning('reset time_since_kill to 60 since value is out of allowed limits')
time_since_kill = 60
# if stage-out has not finished, we need to wait (less than two minutes or the batch system will issue
# a hard SIGKILL)
max_wait_time = 2 * 60 - time_since_kill - 5
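    # e.g. if the kill signal was intercepted 60 s ago, at most 2*60 - 60 - 5 = 55 s remain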
logger.debug('using max_wait_time = %d s', max_wait_time)
t0 = time.time()
while time.time() - t0 < max_wait_time:
if job in queues.finished_data_out.queue or job in queues.failed_data_out.queue:
logger.info('stage-out has finished, proceed with final server update')
break
else:
time.sleep(0.5)
logger.info('proceeding with final server update')
def get_job_status(job, key):
"""
Wrapper function around job.get_status().
If key = 'LOG_TRANSFER' but job object is not defined, the function will return value = LOG_TRANSFER_NOT_DONE.
:param job: job object.
:param key: key name (string).
:return: value (string).
"""
value = ""
if job:
value = job.get_status(key)
else:
if key == 'LOG_TRANSFER':
value = LOG_TRANSFER_NOT_DONE
return value
def queue_monitor(queues, traces, args): # noqa: C901
"""
Monitoring of queues.
This function monitors queue activity, specifically if a job has finished or failed and then reports to the server.
:param queues: internal queues for job handling.
:param traces: tuple containing internal pilot states.
:param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
:return:
"""
    # scan queues until at least one queue has a job object. abort if it takes too long
if not scan_for_jobs(queues):
logger.warning('queues are still empty of jobs - will begin queue monitoring anyway')
job = None
while True: # will abort when graceful_stop has been set or if enough time has passed after kill signal
time.sleep(1)
if traces.pilot['command'] == 'abort':
logger.warning('job queue monitor received an abort instruction')
args.graceful_stop.set()
# abort in case graceful_stop has been set, and less than 30 s has passed since MAXTIME was reached (if set)
# (abort at the end of the loop)
abort_thread = should_abort(args, label='job:queue_monitor')
if abort_thread and os.environ.get('PILOT_WRAP_UP', '') == 'NORMAL':
pause_queue_monitor(20)
# check if the job has finished
imax = 20
i = 0
while i < imax and os.environ.get('PILOT_WRAP_UP', '') == 'NORMAL':
job = get_finished_or_failed_job(args, queues)
if job:
logger.debug('returned job has state=%s', job.state)
#if job.state == 'failed':
# logger.warning('will abort failed job (should prepare for final server update)')
break
i += 1
state = get_pilot_state() # the job object is not available, but the state is also kept in PILOT_JOB_STATE
if state != 'stage-out':
# logger.info("no need to wait since job state=\'%s\'", state)
break
            pause_queue_monitor(1 if not abort_thread else 10)
# job has not been defined if it's still running
if not job and not abort_thread:
continue
completed_jobids = queues.completed_jobids.queue if queues.completed_jobids else []
if job and job.jobid not in completed_jobids:
logger.info("preparing for final server update for job %s in state=\'%s\'", job.jobid, job.state)
if args.job_aborted.is_set():
# wait for stage-out to finish for aborted job
wait_for_aborted_job_stageout(args, queues, job)
# send final server update
update_server(job, args)
# we can now stop monitoring this job, so remove it from the monitored_payloads queue and add it to the
# completed_jobs queue which will tell retrieve() that it can download another job
try:
_job = queues.monitored_payloads.get(block=True, timeout=1)
except queue.Empty:
logger.warning('failed to dequeue job: queue is empty (did job fail before job monitor started?)')
make_job_report(job)
else:
logger.debug('job %s was dequeued from the monitored payloads queue', _job.jobid)
# now ready for the next job (or quit)
put_in_queue(job.jobid, queues.completed_jobids)
put_in_queue(job, queues.completed_jobs)
del _job
logger.debug('tmp job object deleted')
if abort_thread:
break
# proceed to set the job_aborted flag?
if threads_aborted():
logger.debug('will proceed to set job_aborted')
args.job_aborted.set()
else:
logger.debug('will not set job_aborted yet')
logger.debug('[job] queue monitor thread has finished')
def update_server(job, args):
"""
Update the server (wrapper for send_state() that also prepares the metadata).
:param job: job object.
:param args: pilot args object.
:return:
"""
# user specific actions
pilot_user = os.environ.get('PILOT_USER', 'generic').lower()
user = __import__('pilot.user.%s.common' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3
metadata = user.get_metadata(job.workdir)
try:
user.update_server(job)
except Exception as error:
logger.warning('exception caught in update_server(): %s', error)
if job.fileinfo:
send_state(job, args, job.state, xml=dumps(job.fileinfo), metadata=metadata)
else:
send_state(job, args, job.state, metadata=metadata)
def pause_queue_monitor(delay):
"""
Pause the queue monitor to let log transfer complete.
    Note: this function should use a globally available object. Using sleep for now.
:param delay: sleep time in seconds (int).
:return:
"""
logger.warning('since job:queue_monitor is responsible for sending job updates, we sleep for %d s', delay)
time.sleep(delay)
def get_finished_or_failed_job(args, queues):
"""
Check if the job has either finished or failed and if so return it.
If failed, order a log transfer. If the job is in state 'failed' and abort_job is set, set job_aborted.
:param args: pilot args object.
:param queues: pilot queues object.
:return: job object.
"""
job = get_job_from_queue(queues, "finished")
if job:
# logger.debug('get_finished_or_failed_job: job has finished')
pass
else:
# logger.debug('check_job: job has not finished')
job = get_job_from_queue(queues, "failed")
if job:
logger.debug('get_finished_or_failed_job: job has failed')
job.state = 'failed'
args.job_aborted.set()
# get the current log transfer status
log_transfer = get_job_status(job, 'LOG_TRANSFER')
if log_transfer == LOG_TRANSFER_NOT_DONE:
# order a log transfer for a failed job
order_log_transfer(queues, job)
# check if the job has failed
if job and job.state == 'failed':
# set job_aborted in case of kill signals
if args.abort_job.is_set():
logger.warning('queue monitor detected a set abort_job (due to a kill signal)')
# do not set graceful stop if pilot has not finished sending the final job update
# i.e. wait until SERVER_UPDATE is DONE_FINAL
#check_for_final_server_update(args.update_server)
#args.job_aborted.set()
return job
def get_heartbeat_period(debug=False):
"""
Return the proper heartbeat period, as determined by normal or debug mode.
In normal mode, the heartbeat period is 30*60 s, while in debug mode it is 5*60 s. Both values are defined in the
config file.
:param debug: Boolean, True for debug mode. False otherwise.
:return: heartbeat period (int).
"""
try:
return int(config.Pilot.heartbeat if not debug else config.Pilot.debug_heartbeat)
except Exception as error:
logger.warning('bad config data for heartbeat period: %s (will use default 1800 s)', error)
return 1800
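# For reference, a minimal sketch of the config entries read above; the actual
# key names and values live in the pilot config module and may differ:
#
#   [Pilot]
#   heartbeat: 1800          # normal mode, 30*60 s
#   debug_heartbeat: 300     # debug mode, 5*60 s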
def check_for_abort_job(args, caller=''):
"""
Check if args.abort_job.is_set().
:param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
:param caller: function name of caller (string).
    :return: Boolean, True if args.abort_job.is_set()
"""
abort_job = False
if args.abort_job.is_set():
logger.warning('%s detected an abort_job request (signal=%s)', caller, args.signal)
logger.warning('in case pilot is running more than one job, all jobs will be aborted')
abort_job = True
return abort_job
def interceptor(queues, traces, args):
"""
MOVE THIS TO INTERCEPTOR.PY; TEMPLATE FOR THREADS
:param queues: internal queues for job handling.
:param traces: tuple containing internal pilot states.
:param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
:return:
"""
# overall loop counter (ignoring the fact that more than one job may be running)
n = 0
while not args.graceful_stop.is_set():
time.sleep(0.1)
# abort in case graceful_stop has been set, and less than 30 s has passed since MAXTIME was reached (if set)
# (abort at the end of the loop)
abort = should_abort(args, label='job:interceptor')
# check for any abort_job requests
abort_job = check_for_abort_job(args, caller='interceptor')
if not abort_job:
# peek at the jobs in the validated_jobs queue and send the running ones to the heartbeat function
jobs = queues.monitored_payloads.queue
if jobs:
for _ in range(len(jobs)):
logger.info('interceptor loop %d: looking for communication file', n)
time.sleep(30)
n += 1
if abort or abort_job:
break
# proceed to set the job_aborted flag?
if threads_aborted():
logger.debug('will proceed to set job_aborted')
args.job_aborted.set()
else:
logger.debug('will not set job_aborted yet')
logger.debug('[job] interceptor thread has finished')
def fast_monitor_tasks(job):
"""
Perform user specific fast monitoring tasks.
:param job: job object.
:return: exit code (int).
"""
exit_code = 0
pilot_user = os.environ.get('PILOT_USER', 'generic').lower()
user = __import__('pilot.user.%s.monitoring' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3
try:
exit_code = user.fast_monitor_tasks(job)
except Exception as exc:
logger.warning('caught exception: %s', exc)
return exit_code
def fast_job_monitor(queues, traces, args):
"""
Fast monitoring of job parameters.
This function can be used for monitoring processes below the one minute threshold of the normal job_monitor thread.
:param queues: internal queues for job handling.
:param traces: tuple containing internal pilot states.
:param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
:return:
"""
# peeking and current time; peeking_time gets updated if and when jobs are being monitored, update_time is only
# used for sending the heartbeat and is updated after a server update
#peeking_time = int(time.time())
#update_time = peeking_time
# end thread immediately, unless fast monitoring is required
if not args.use_realtime_logging:
logger.warning('fast monitoring not required - ending thread')
return
while not args.graceful_stop.is_set():
time.sleep(10)
# abort in case graceful_stop has been set, and less than 30 s has passed since MAXTIME was reached (if set)
# (abort at the end of the loop)
abort = should_abort(args, label='job:fast_job_monitor')
if abort:
break
if traces.pilot.get('command') == 'abort':
logger.warning('fast job monitor received an abort command')
break
# check for any abort_job requests
abort_job = check_for_abort_job(args, caller='fast job monitor')
if abort_job:
break
else:
# peek at the jobs in the validated_jobs queue and send the running ones to the heartbeat function
jobs = queues.monitored_payloads.queue
if jobs:
for i in range(len(jobs)):
#current_id = jobs[i].jobid
if jobs[i].state == 'finished' or jobs[i].state == 'failed':
logger.info('will abort job monitoring soon since job state=%s (job is still in queue)', jobs[i].state)
break
# perform the monitoring tasks
exit_code = fast_monitor_tasks(jobs[i])
if exit_code:
logger.debug('fast monitoring reported an error: %d', exit_code)
# proceed to set the job_aborted flag?
if threads_aborted():
logger.debug('will proceed to set job_aborted')
args.job_aborted.set()
else:
logger.debug('will not set job_aborted yet')
logger.debug('[job] fast job monitor thread has finished')
def job_monitor(queues, traces, args): # noqa: C901
"""
Monitoring of job parameters.
This function monitors certain job parameters, such as job looping, at various time intervals. The main loop
is executed once a minute, while individual verifications may be executed at any time interval (>= 1 minute). E.g.
    looping jobs are checked once per ten minutes (default) and the heartbeat is sent once per 30 minutes. Memory
usage is checked once a minute.
:param queues: internal queues for job handling.
:param traces: tuple containing internal pilot states.
:param args: Pilot arguments (e.g. containing queue name, queuedata dictionary, etc).
:return:
"""
# initialize the monitoring time object
mt = MonitoringTime()
# peeking and current time; peeking_time gets updated if and when jobs are being monitored, update_time is only
# used for sending the heartbeat and is updated after a server update
peeking_time = int(time.time())
update_time = peeking_time
# overall loop counter (ignoring the fact that more than one job may be running)
n = 0
while not args.graceful_stop.is_set():
time.sleep(0.5)
# abort in case graceful_stop has been set, and less than 30 s has passed since MAXTIME was reached (if set)
# (abort at the end of the loop)
abort = should_abort(args, label='job:job_monitor')
if traces.pilot.get('command') == 'abort':
logger.warning('job monitor received an abort command')
# check for any abort_job requests
abort_job = check_for_abort_job(args, caller='job monitor')
if not abort_job:
if not queues.current_data_in.empty():
# make sure to send heartbeat regularly if stage-in takes a long time
jobs = queues.current_data_in.queue
if jobs:
for i in range(len(jobs)):
# send heartbeat if it is time (note that the heartbeat function might update the job object, e.g.
                        # by turning on debug mode, i.e. we need to get the heartbeat period in case it has changed)
update_time = send_heartbeat_if_time(jobs[i], args, update_time)
# note: when sending a state change to the server, the server might respond with 'tobekilled'
try:
jobs[i]
except Exception as error:
logger.warning('detected stale jobs[i] object in job_monitor: %s', error)
else:
if jobs[i].state == 'failed':
logger.warning('job state is \'failed\' - order log transfer and abort job_monitor() (1)')
jobs[i].stageout = 'log' # only stage-out log file
put_in_queue(jobs[i], queues.data_out)
# sleep for a while if stage-in has not completed
time.sleep(1)
continue
elif queues.finished_data_in.empty():
# sleep for a while if stage-in has not completed
time.sleep(1)
continue
time.sleep(60)
# peek at the jobs in the validated_jobs queue and send the running ones to the heartbeat function
jobs = queues.monitored_payloads.queue
if jobs:
# update the peeking time
peeking_time = int(time.time())
for i in range(len(jobs)):
current_id = jobs[i].jobid
logger.info('monitor loop #%d: job %d:%s is in state \'%s\'', n, i, current_id, jobs[i].state)
if jobs[i].state == 'finished' or jobs[i].state == 'failed':
logger.info('will abort job monitoring soon since job state=%s (job is still in queue)', jobs[i].state)
break
# perform the monitoring tasks
exit_code, diagnostics = job_monitor_tasks(jobs[i], mt, args)
if exit_code != 0:
if exit_code == errors.NOVOMSPROXY:
logger.warning('VOMS proxy has expired - keep monitoring job')
elif exit_code == errors.KILLPAYLOAD:
jobs[i].piloterrorcodes, jobs[i].piloterrordiags = errors.add_error_code(exit_code)
logger.debug('killing payload process')
kill_process(jobs[i].pid)
break
else:
try:
fail_monitored_job(jobs[i], exit_code, diagnostics, queues, traces)
except Exception as error:
logger.warning('(1) exception caught: %s (job id=%s)', error, current_id)
break
# run this check again in case job_monitor_tasks() takes a long time to finish (and the job object
                # has expired in the meantime)
try:
_job = jobs[i]
except Exception:
logger.info('aborting job monitoring since job object (job id=%s) has expired', current_id)
break
# send heartbeat if it is time (note that the heartbeat function might update the job object, e.g.
                # by turning on debug mode, i.e. we need to get the heartbeat period in case it has changed)
try:
update_time = send_heartbeat_if_time(_job, args, update_time)
except Exception as error:
logger.warning('(2) exception caught: %s (job id=%s)', error, current_id)
break
else:
# note: when sending a state change to the server, the server might respond with 'tobekilled'
if _job.state == 'failed':
logger.warning('job state is \'failed\' - order log transfer and abort job_monitor() (2)')
_job.stageout = 'log' # only stage-out log file
put_in_queue(_job, queues.data_out)
abort = True
break
elif os.environ.get('PILOT_JOB_STATE') == 'stagein':
logger.info('job monitoring is waiting for stage-in to finish')
else:
# check the waiting time in the job monitor. set global graceful_stop if necessary
check_job_monitor_waiting_time(args, peeking_time, abort_override=abort_job)
n += 1
if abort or abort_job:
break
# proceed to set the job_aborted flag?
if threads_aborted():
logger.debug('will proceed to set job_aborted')
args.job_aborted.set()
else:
logger.debug('will not set job_aborted yet')
logger.debug('[job] job monitor thread has finished')
def send_heartbeat_if_time(job, args, update_time):
"""
Send a heartbeat to the server if it is time to do so.
:param job: job object.
:param args: args object.
:param update_time: last update time (from time.time()).
:return: possibly updated update_time (from time.time()).
"""
if int(time.time()) - update_time >= get_heartbeat_period(job.debug):
if job.serverstate != 'finished' and job.serverstate != 'failed':
send_state(job, args, 'running')
update_time = int(time.time())
return update_time
def check_job_monitor_waiting_time(args, peeking_time, abort_override=False):
"""
Check the waiting time in the job monitor.
Set global graceful_stop if necessary.
:param args: args object.
    :param peeking_time: time when monitored_payloads queue was peeked into (int).
    :param abort_override: True if an abort_job request has already been detected (Boolean).
    :return:
"""
waiting_time = int(time.time()) - peeking_time
msg = 'no jobs in monitored_payloads queue (waited for %d s)' % waiting_time
if waiting_time > 60 * 60:
abort = True
msg += ' - aborting'
else:
abort = False
if logger:
logger.warning(msg)
else:
print(msg)
if abort or abort_override:
# do not set graceful stop if pilot has not finished sending the final job update
# i.e. wait until SERVER_UPDATE is DONE_FINAL
check_for_final_server_update(args.update_server)
args.graceful_stop.set()
def fail_monitored_job(job, exit_code, diagnostics, queues, traces):
"""
Fail a monitored job.
:param job: job object
:param exit_code: exit code from job_monitor_tasks (int).
:param diagnostics: pilot error diagnostics (string).
:param queues: queues object.
:param traces: traces object.
:return:
"""
set_pilot_state(job=job, state="failed")
job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(exit_code, msg=diagnostics)
job.piloterrordiag = diagnostics
traces.pilot['error_code'] = exit_code
put_in_queue(job, queues.failed_payloads)
logger.info('aborting job monitoring since job state=%s', job.state)
def make_job_report(job):
"""
Make a summary report for the given job.
This function is called when the job has completed.
:param job: job object.
:return:
"""
logger.info('')
logger.info('job summary report')
logger.info('--------------------------------------------------')
logger.info('PanDA job id: %s', job.jobid)
logger.info('task id: %s', job.taskid)
n = len(job.piloterrorcodes)
if n > 0:
for i in range(n):
logger.info('error %d/%d: %s: %s', i + 1, n, job.piloterrorcodes[i], job.piloterrordiags[i])
else:
logger.info('errors: (none)')
if job.piloterrorcode != 0:
logger.info('pilot error code: %d', job.piloterrorcode)
logger.info('pilot error diag: %s', job.piloterrordiag)
info = ""
for key in job.status:
info += key + " = " + job.status[key] + " "
logger.info('status: %s', info)
s = ""
if job.is_analysis() and job.state != 'finished':
s = '(user job is recoverable)' if errors.is_recoverable(code=job.piloterrorcode) else '(user job is not recoverable)'
logger.info('pilot state: %s %s', job.state, s)
logger.info('transexitcode: %d', job.transexitcode)
logger.info('exeerrorcode: %d', job.exeerrorcode)
logger.info('exeerrordiag: %s', job.exeerrordiag)
logger.info('exitcode: %d', job.exitcode)
logger.info('exitmsg: %s', job.exitmsg)
logger.info('cpuconsumptiontime: %d %s', job.cpuconsumptiontime, job.cpuconsumptionunit)
logger.info('nevents: %d', job.nevents)
logger.info('neventsw: %d', job.neventsw)
logger.info('pid: %s', job.pid)
logger.info('pgrp: %s', str(job.pgrp))
logger.info('corecount: %d', job.corecount)
logger.info('event service: %s', str(job.is_eventservice))
logger.info('sizes: %s', str(job.sizes))
logger.info('--------------------------------------------------')
logger.info('')
|
{
"content_hash": "3f018f73ecda60b97769c0f128022080",
"timestamp": "",
"source": "github",
"line_count": 2742,
"max_line_length": 155,
"avg_line_length": 38.62253829321663,
"alnum_prop": 0.6106437022558379,
"repo_name": "mlassnig/pilot2",
"id": "cbd5487b2473ff4624c3c4e8bdffa20330e269ed",
"size": "106344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pilot/control/job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1435182"
},
{
"name": "Shell",
"bytes": "624"
}
],
"symlink_target": ""
}
|
import logging
import os
from scrapy import cmdline
if not os.path.exists('log'):
os.makedirs('log')
logging.basicConfig(
    filename='log/main.log',
    format='%(levelname)s %(asctime)s: %(message)s',
    level=logging.DEBUG
)
cmdline.execute("scrapy crawl assetstore".split())
|
{
"content_hash": "23622aa29aebe5d0e9647efb0978a11f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 58,
"avg_line_length": 20.466666666666665,
"alnum_prop": 0.6579804560260586,
"repo_name": "awolfly9/Python-Crawl-UnityAssetStore",
"id": "bdd36f7ca0c41b2f2963d715bf1e93c1d1186627",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32738"
}
],
"symlink_target": ""
}
|
def add_native_methods(clazz):
def getNumGcExtAttributes__java_lang_management_GarbageCollectorMXBean__(a0, a1):
raise NotImplementedError()
def fillGcAttributeInfo__java_lang_management_GarbageCollectorMXBean__int__java_lang_String____char____java_lang_String____(a0, a1, a2, a3, a4, a5):
raise NotImplementedError()
clazz.getNumGcExtAttributes__java_lang_management_GarbageCollectorMXBean__ = getNumGcExtAttributes__java_lang_management_GarbageCollectorMXBean__
clazz.fillGcAttributeInfo__java_lang_management_GarbageCollectorMXBean__int__java_lang_String____char____java_lang_String____ = fillGcAttributeInfo__java_lang_management_GarbageCollectorMXBean__int__java_lang_String____char____java_lang_String____
|
{
"content_hash": "9d6795e41cee07d4523d00bbc53ac0b5",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 251,
"avg_line_length": 74.7,
"alnum_prop": 0.7617135207496654,
"repo_name": "laffra/pava",
"id": "dc3e532b6a8e99e365a2a6a36611cadd91b92686",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pava/implementation/natives/sun/management/GcInfoBuilder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "144"
},
{
"name": "Python",
"bytes": "369288"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
from imp import load_source
burgaur = load_source("burgaur", "burgaur")
def read(fname):
filename = os.path.join(os.path.dirname(__file__), fname)
return open(filename).read().replace('#', '')
setup(
name="burgaur",
version=burgaur.__version__,
author=burgaur.__author__,
author_email=burgaur.__email__,
maintainer=burgaur.__maintainer__,
maintainer_email=burgaur.__email__,
description=("A delicious AUR helper. Made from cower."),
license=burgaur.__license__,
url="https://github.com/m45t3r/burgaur",
scripts=["burgaur"],
platforms=["Linux"],
long_description=read("README.rst"),
)
|
{
"content_hash": "e684dc64d523e2df199356db0d0b6991",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 61,
"avg_line_length": 27.32,
"alnum_prop": 0.6603221083455344,
"repo_name": "vga-/burgaur",
"id": "25c40b20301e8d6944a18b85bf20d488bae99e1d",
"size": "683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16080"
},
{
"name": "Shell",
"bytes": "701"
}
],
"symlink_target": ""
}
|
"""
This node does the transform tree setup for my robot 'minibot'.
It also publishes these transforms into ROS.
Author: Markus Knapp, 2017
Website: https://direcs.de
"""
import logging
import rospy
import tf
if __name__ == '__main__':
# init node
rospy.init_node('robot_tf_broadcaster', anonymous=False)
rospy.loginfo("robot_tf_broadcaster started.")
    # the broadcaster (publisher)
#
# see also http://wiki.ros.org/tf/Tutorials/Adding%20a%20frame%20%28Python%29
br = tf.TransformBroadcaster()
rate = rospy.Rate(10.0)
while not rospy.is_shutdown():
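        # note: all rotations below are quaternions (x, y, z, w) = (0, 0, 0, 1),
        # i.e. the identity quaternion: no rotation relative to the parent frame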
""" base_link transform (base_link connected 1:1 to base_footprint. NEEDED?) """
br.sendTransform((0.0, 0.0, 0.0), # translation (x,y,z) in meters
(0.0, 0.0, 0.0, 1.0), # rotation
rospy.Time.now(), # time
"base_footprint", # parent node
"base_link") # child node
""" base_laser transform (the laser rangefinder) """
br.sendTransform((0.06, 0.0, 0.0615), # translation (x,y,z) in meters
(0.0, 0.0, 0.0, 1.0), # rotation
rospy.Time.now(), # time
"base_link", # parent node
"base_laser") # child node
""" imu_link transform (Odometrie?!?) """
br.sendTransform((0.0, 0.0, 0.0395), # translation
(0.0, 0.0, 0.0, 1.0), # rotation
rospy.Time.now(), # time
"base_link", # parent node
"imu_link") # child node
""" camera_link
br.sendTransform((0.0, 0.0, 0.0), # translation
(0.0, 0.0, 0.0, 1.0), # rotation
rospy.Time.now(), # time
"base_link", # parent node
"camera_link") # child node """
# "base_link" is a more common name for a chassis in ROS.
#
# parent > child relation. earth > map > odom > base_link > base_laser
# see also http://wiki.ros.org/navigation/Tutorials/RobotSetup/TF
# and also http://www.ros.org/reps/rep-0105.html#relationship-between-frames
rate.sleep()
|
{
"content_hash": "47a7472144551f5f3e4e5238c30c3698",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 88,
"avg_line_length": 39.833333333333336,
"alnum_prop": 0.4920502092050209,
"repo_name": "markusk/minibot",
"id": "1899541e9710a0bd27452af199abf63a18a65e66",
"size": "2428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ROS/catkin_workspace/src/minibot/nodes/tf_broadcaster.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "804657"
},
{
"name": "C++",
"bytes": "8897"
},
{
"name": "CMake",
"bytes": "13960"
},
{
"name": "Gnuplot",
"bytes": "108022"
},
{
"name": "Python",
"bytes": "106383"
},
{
"name": "Shell",
"bytes": "7815"
}
],
"symlink_target": ""
}
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.client_message_type import *
REQUEST_TYPE = CLIENT_AUTHENTICATIONCUSTOM
RESPONSE_TYPE = 107
RETRYABLE = True
def calculate_size(credentials, uuid, owner_uuid, is_owner_connection, client_type, serialization_version):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_data(credentials)
data_size += BOOLEAN_SIZE_IN_BYTES
if uuid is not None:
data_size += calculate_size_str(uuid)
data_size += BOOLEAN_SIZE_IN_BYTES
if owner_uuid is not None:
data_size += calculate_size_str(owner_uuid)
data_size += BOOLEAN_SIZE_IN_BYTES
data_size += calculate_size_str(client_type)
data_size += BYTE_SIZE_IN_BYTES
return data_size
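# Note on the wire format: each optional field (uuid, owner_uuid) is encoded as
# an "is None" boolean flag followed by the value only when present; the sizes
# computed above and the decode logic below mirror this layout.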
def encode_request(credentials, uuid, owner_uuid, is_owner_connection, client_type, serialization_version):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(credentials, uuid, owner_uuid, is_owner_connection, client_type, serialization_version))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_data(credentials)
client_message.append_bool(uuid is None)
if uuid is not None:
client_message.append_str(uuid)
client_message.append_bool(owner_uuid is None)
if owner_uuid is not None:
client_message.append_str(owner_uuid)
client_message.append_bool(is_owner_connection)
client_message.append_str(client_type)
client_message.append_byte(serialization_version)
client_message.update_frame_length()
return client_message
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(status=None, address=None, uuid=None, owner_uuid=None, serialization_version=None)
parameters['status'] = client_message.read_byte()
if not client_message.read_bool():
parameters['address'] = AddressCodec.decode(client_message, to_object)
if not client_message.read_bool():
parameters['uuid'] = client_message.read_str()
if not client_message.read_bool():
parameters['owner_uuid'] = client_message.read_str()
parameters['serialization_version'] = client_message.read_byte()
return parameters
|
{
"content_hash": "794932bacbb1eb521af169a87c0eb27d",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 151,
"avg_line_length": 39.96875,
"alnum_prop": 0.727130570758405,
"repo_name": "LifeDJIK/S.H.I.V.A.",
"id": "9d3c8b4f5aaa5ce95fd373c7f88dbffe4886ffd0",
"size": "2558",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "containers/shiva/hazelcast/protocol/codec/client_authentication_custom_codec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4401"
},
{
"name": "HTML",
"bytes": "7268"
},
{
"name": "Python",
"bytes": "19571"
}
],
"symlink_target": ""
}
|
"""
Backend file for interfacing with video hosts
This backend is for movshare.net
"""
import re
import urllib.parse
import urllib.request
#http://www.movshare.net/api/player.api.php?file=481200e9d928e&key=76%2E10%2E136%2E30-39ce698e365bd646007293ddce454e25
id = "movshare"
domain = "movshare.net"
vidType = "flash"
FKEY_RE = re.compile(r"flashvars\.filekey=\"(.+?)\";")
def getVid(videoID):
videoID = videoID[6:] # Strip the video/ in the front
firstReq = urllib.request.urlopen("http://www.movshare.net/video/{}".format(videoID))
data = firstReq.read().decode("utf-8")
#return data
match = FKEY_RE.search(data)
    if match is None:
return "Could not pull video from link"
fkey = match.group(1)
#return(urllib.parse.quote(fkey))
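    # these parameters mimic the request the site's flash player sends to
    # player.api.php (compare the sample URL at the top of this file)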
gData = {
"cid": 1,
"cid2": "undefined",
"ci3": "undefined",
"user": "undefined",
"pass": "undefined",
"file": videoID,
"key": fkey
}
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36",
}
p = urllib.parse.urlencode(gData)
reqObj = urllib.request.Request("http://www.movshare.net/api/player.api.php?" + p, headers = headers)
secReq = urllib.request.urlopen(reqObj)
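    # the API reply is a string of '&'-separated key=value pairs; the first
    # four characters are assumed to be a key prefix (e.g. 'url='), so strip
    # them and return the first field, i.e. the direct video URL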
data = secReq.read().decode("utf-8")[4:]
data = data.split("&")
return data[0]
|
{
"content_hash": "3be482288541693bf007d32ceb6f92dd",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 137,
"avg_line_length": 23.157894736842106,
"alnum_prop": 0.6833333333333333,
"repo_name": "anthonynguyen/watch-tv",
"id": "ff9dffe9d43fbb9d91df5dae03f1944a7c164081",
"size": "1320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "video_hosts/movshare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17985"
}
],
"symlink_target": ""
}
|
from django import forms
from django.contrib.auth.models import Permission
from django.core import validators
from django.db.models import get_model
from django.utils.translation import ugettext_lazy as _
from oscar.apps.customer.forms import EmailUserCreationForm, CommonPasswordValidator
from oscar.core.compat import get_user_model
User = get_user_model()
Partner = get_model('partner', 'Partner')
PartnerAddress = get_model('partner', 'PartnerAddress')
class PartnerSearchForm(forms.Form):
name = forms.CharField(required=False, label=_("Partner name"))
class PartnerCreateForm(forms.ModelForm):
class Meta:
model = Partner
fields = ('name',)
ROLE_CHOICES = (
('staff', _('Full dashboard access')),
('limited', _('Limited dashboard access')),
)
class NewUserForm(EmailUserCreationForm):
role = forms.ChoiceField(choices=ROLE_CHOICES, widget=forms.RadioSelect,
label=_('User role'), initial='limited')
def __init__(self, partner, *args, **kwargs):
self.partner = partner
super(NewUserForm, self).__init__(host=None, *args, **kwargs)
def save(self):
role = self.cleaned_data.get('role', 'limited')
user = super(NewUserForm, self).save(commit=False)
user.is_staff = role == 'staff'
user.save()
self.partner.users.add(user)
if role == 'limited':
dashboard_access_perm = Permission.objects.get(
codename='dashboard_access', content_type__app_label='partner')
user.user_permissions.add(dashboard_access_perm)
return user
class Meta:
model = User
fields = ('first_name', 'last_name', 'email', 'password1', 'password2')
class ExistingUserForm(forms.ModelForm):
"""
    Slightly different form that
    * makes saving the password optional
    * doesn't regenerate the username
    * doesn't allow changing the email till #668 is resolved
"""
role = forms.ChoiceField(choices=ROLE_CHOICES, widget=forms.RadioSelect,
label=_('User role'))
password1 = forms.CharField(
label=_('Password'),
widget=forms.PasswordInput,
required=False,
validators=[validators.MinLengthValidator(6),
CommonPasswordValidator()])
password2 = forms.CharField(
required=False,
label=_('Confirm Password'),
widget=forms.PasswordInput)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data.get('password2', '')
if password1 != password2:
raise forms.ValidationError(
_("The two password fields didn't match."))
return password2
def __init__(self, *args, **kwargs):
user = kwargs['instance']
role = 'staff' if user.is_staff else 'limited'
kwargs.get('initial', {}).setdefault('role', role)
super(ExistingUserForm, self).__init__(*args, **kwargs)
def save(self):
role = self.cleaned_data.get('role', 'none')
user = super(ExistingUserForm, self).save(commit=False)
user.is_staff = role == 'staff'
if self.cleaned_data['password1']:
user.set_password(self.cleaned_data['password1'])
user.save()
dashboard_perm = Permission.objects.get(
codename='dashboard_access', content_type__app_label='partner')
user_has_perm = user.user_permissions.filter(
pk=dashboard_perm.pk).exists()
if role == 'limited' and not user_has_perm:
user.user_permissions.add(dashboard_perm)
elif role == 'staff' and user_has_perm:
user.user_permissions.remove(dashboard_perm)
return user
class Meta:
model = User
fields = ('first_name', 'last_name', 'password1', 'password2')
class UserEmailForm(forms.Form):
# We use a CharField so that a partial email address can be entered
email = forms.CharField(
label=_("Email address"), max_length=100)
class PartnerAddressForm(forms.ModelForm):
class Meta:
fields = ('line1', 'line2', 'line3', 'line4',
'state', 'postcode', 'country')
model = PartnerAddress
|
{
"content_hash": "1f806ce81f9350853a8d805987268240",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 84,
"avg_line_length": 34.32258064516129,
"alnum_prop": 0.6282894736842105,
"repo_name": "saadbinakhlaq/django-oscar",
"id": "c19323ca9f4a4288f996451d32a2ce04ed161a71",
"size": "4256",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oscar/apps/dashboard/partners/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import with_statement
import collections
import getopt
import os
import re
import sys
import tables
class Dtrace(object):
class DtraceLine(object):
prefix_maps = {
"/usr/share/caldavd/lib/python/": "{caldavd}/",
"/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.6": "{Python}",
"/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6": "{Python}",
"/System/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5": "{Python}",
"/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python": "{Extras}",
"/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python": "{Extras}",
"/System/Library/Frameworks/Python.framework/Versions/2.5/Extras/lib/python": "{Extras}",
}
contains_maps = {
"/CalendarServer": "{caldavd}",
"/Twisted": "{Twisted}",
"/pycalendar": "{pycalendar}",
}
def __init__(self, line, lineno):
self.entering = True
self.function_name = ""
self.file_location = ""
self.parent = None
self.children = []
self.lineno = lineno
re_matched = re.match("(..) ([^ ]+) \(([^\)]+)\)", line)
if re_matched is None:
print(line)
results = re_matched.groups()
if results[0] == "<-":
self.entering = False
elif results[0] == "->":
self.entering = True
else:
raise ValueError("Invalid start of line at %d" % (lineno,))
self.function_name = results[1]
self.file_location = results[2]
for key, value in Dtrace.DtraceLine.prefix_maps.iteritems():
if self.file_location.startswith(key):
self.file_location = value + self.file_location[len(key):]
break
else:
for key, value in Dtrace.DtraceLine.contains_maps.iteritems():
found1 = self.file_location.find(key)
if found1 != -1:
found2 = self.file_location[found1 + 1:].find('/')
if found2 != -1:
self.file_location = value + self.file_location[found1 + found2 + 1:]
else:
self.file_location = value
break
def __repr__(self):
return "%s (%s)" % self.getKey()
def getKey(self):
return (self.file_location, self.function_name,)
def getPartialKey(self):
return (self.filePath(), self.function_name,)
def addChild(self, child):
child.parent = self
self.children.append(child)
def checkForCollapse(self, other):
if self.entering and not other.entering:
if self.function_name == other.function_name and self.function_name != "mainLoop":
if self.filePath() == other.filePath():
return True
return False
def filePath(self):
return self.file_location[0:self.file_location.rfind(':')]
def prettyPrint(self, indent, indents, sout):
indenter = ""
for level in indents:
if level > 0:
indenter += "⎢ "
elif level < 0:
indenter += "⎿ "
else:
indenter += " "
sout.write("%s%s (%s)\n" % (indenter, self.function_name, self.file_location,))
def stackName(self):
return self.function_name, self.filePath()
class DtraceStack(object):
def __init__(self, lines, no_collapse):
self.start_indent = 0
self.stack = []
self.called_by = {}
self.call_into = {}
self.processLines(lines, no_collapse)
def processLines(self, lines, no_collapse):
new_lines = []
last_line = None
for line in lines:
if last_line:
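                    # a '<- foo' exit immediately followed by a '-> foo' re-entry
                    # of the same function is collapsed: drop the exit and skip
                    # the re-entry so consecutive invocations merge into one call
                    # (see DtraceLine.checkForCollapse)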
if not no_collapse and line.checkForCollapse(last_line):
new_lines.pop()
last_line = None
continue
new_lines.append(line)
last_line = line
indent = 0
min_indent = 0
current_line = None
blocks = [[]]
backstack = []
for line in new_lines:
stackName = line.stackName()
if line.entering:
if line.function_name == "mainLoop":
if min_indent < 0:
newstack = []
for oldindent, oldline in blocks[-1]:
newstack.append((oldindent - min_indent, oldline,))
blocks[-1] = newstack
min_indent = 0
indent = 0
blocks.append([])
backstack = []
else:
indent += 1
backstack.append(stackName)
blocks[-1].append((indent, line,))
if current_line:
current_line.addChild(line)
current_line = line
else:
if len(blocks) == 1 or line.function_name != "mainLoop" and indent:
indent -= 1
while backstack and indent and stackName != backstack[-1]:
indent -= 1
backstack.pop()
if backstack:
backstack.pop()
if indent < 0:
print("help")
current_line = current_line.parent if current_line else None
min_indent = min(min_indent, indent)
for block in blocks:
self.stack.extend(block)
if min_indent < 0:
self.start_indent = -min_indent
else:
self.start_indent = 0
self.generateCallInfo()
def generateCallInfo(self):
for _ignore, line in self.stack:
key = line.getKey()
if line.parent:
parent_key = line.parent.getKey()
parent_calls = self.called_by.setdefault(key, {}).get(parent_key, 0)
self.called_by[key][parent_key] = parent_calls + 1
for child in line.children:
child_key = child.getKey()
child_calls = self.call_into.setdefault(key, {}).get(child_key, 0)
self.call_into[key][child_key] = child_calls + 1
def prettyPrint(self, sout):
indents = [1] * self.start_indent
ctr = 0
maxctr = len(self.stack) - 1
for indent, line in self.stack:
current_indent = self.start_indent + indent
next_indent = (self.start_indent + self.stack[ctr + 1][0]) if ctr < maxctr else 10000
if len(indents) == current_indent:
pass
elif len(indents) < current_indent:
indents.append(current_indent)
else:
indents = indents[0:current_indent]
if next_indent < current_indent:
indents = indents[0:next_indent] + [-1] * (current_indent - next_indent)
line.prettyPrint(self.start_indent + indent, indents, sout)
ctr += 1
def __init__(self, filepath):
self.filepath = filepath
self.calltimes = collections.defaultdict(lambda: [0, 0, 0])
self.exclusiveTotal = 0
def analyze(self, do_stack, no_collapse):
print("Parsing dtrace output.")
# Parse the trace lines first and look for the start of the call times
lines = []
traces = True
index = -1
        with file(self.filepath) as f:
for lineno, line in enumerate(f):
if traces:
if line.strip() and line[0:3] in ("-> ", "<- "):
lines.append(Dtrace.DtraceLine(line, lineno + 1))
elif line.startswith("Count,"):
traces = False
else:
if line[0] != ' ':
continue
line = line.strip()
if line.startswith("FILE"):
index += 1
if index >= 0:
self.parseCallTimeLine(line, index)
self.printTraceDetails(lines, do_stack, no_collapse)
for ctr, title in enumerate(("Sorted by Count", "Sorted by Exclusive", "Sorted by Inclusive",)):
print(title)
self.printCallTimeTotals(ctr)
def printTraceDetails(self, lines, do_stack, no_collapse):
print("Found %d lines" % (len(lines),))
print("============================")
print("")
self.stack = Dtrace.DtraceStack(lines, no_collapse)
if do_stack:
with file("stacked.txt", "w") as f:
self.stack.prettyPrint(f)
print("Wrote stack calls to 'stacked.txt'")
print("============================")
print("")
# Get stats for each call
stats = {}
last_exit = None
for line in lines:
key = line.getKey()
if line.entering:
counts = stats.get(key, (0, 0))
counts = (counts[0] + (1 if no_collapse else 0), counts[1] + (0 if no_collapse else 1))
if line.getPartialKey() != last_exit:
counts = (counts[0] + (0 if no_collapse else 1), counts[1] + (1 if no_collapse else 0))
stats[key] = counts
else:
last_exit = line.getPartialKey()
print("Function Call Counts")
print("")
table = tables.Table()
table.addHeader(("Count", "Function", "File",))
for key, value in sorted(stats.iteritems(), key=lambda x: x[1][0], reverse=True):
table.addRow(("%d (%d)" % value, key[1], key[0],))
table.printTable()
print("")
print("Called By Counts")
print("")
table = tables.Table()
table.addHeader(("Function", "Caller", "Count",))
for main_key in sorted(self.stack.called_by.keys(), key=lambda x: x[1] + x[0]):
first = True
for key, value in sorted(self.stack.called_by[main_key].iteritems(), key=lambda x: x[1], reverse=True):
table.addRow((
("%s (%s)" % (main_key[1], main_key[0],)) if first else "",
"%s (%s)" % (key[1], key[0],),
str(value),
))
first = False
table.printTable()
print("")
print("Call Into Counts")
print("")
table = tables.Table()
table.addHeader(("Function", "Calls", "Count",))
for main_key in sorted(self.stack.call_into.keys(), key=lambda x: x[1] + x[0]):
first = True
for key, value in sorted(self.stack.call_into[main_key].iteritems(), key=lambda x: x[1], reverse=True):
table.addRow((
("%s (%s)" % (main_key[1], main_key[0],)) if first else "",
"%s (%s)" % (key[1], key[0],),
str(value),
))
first = False
table.printTable()
print("")
def parseCallTimeLine(self, line, index):
file, type, name, value = line.split()
if file in ("-", "FILE"):
return
else:
self.calltimes[(file, name)][index] = int(value)
if index == 1:
self.exclusiveTotal += int(value)
def printCallTimeTotals(self, sortIndex):
table = tables.Table()
table.setDefaultColumnFormats((
tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.LEFT_JUSTIFY),
tables.Table.ColumnFormat("%s", tables.Table.ColumnFormat.LEFT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY),
))
table.addHeader(("File", "Name", "Count", "Inclusive", "Exclusive", "Children",))
for key, value in sorted(self.calltimes.items(), key=lambda x: x[1][sortIndex], reverse=True):
table.addRow((
key[0],
key[1],
value[0],
value[2],
"%s (%6.3f%%)" % (value[1], (100.0 * value[1]) / self.exclusiveTotal),
value[2] - value[1],
))
table.addRow()
table.addRow((
"Total:",
"",
"",
"",
self.exclusiveTotal,
"",
))
table.printTable()
print("")
def usage(error_msg=None):
if error_msg:
print(error_msg)
print("""Usage: dtraceanalyze [options] FILE
Options:
-h Print this help and exit
--stack Save indented stack to file
    --no-collapse  Display call counts based on the full trace,
                   instead of counts based on collapsed values.
Arguments:
FILE File name containing dtrace output to analyze
Description:
This utility will analyze the output of the trace.d dtrace script to produce
useful statistics and other performance-related data.
To use this, do the following (where PID is the pid of the
Python process to monitor):
> sudo ./trace.d PID > results.txt
...
> ./dtraceanalyze.py results.txt
""")
if error_msg:
raise ValueError(error_msg)
else:
sys.exit(0)
if __name__ == "__main__":
sys.setrecursionlimit(10000)
do_stack = False
no_collapse = False
try:
options, args = getopt.getopt(sys.argv[1:], "h", ["stack", "no-collapse"])
for option, value in options:
if option == "-h":
usage()
elif option == "--stack":
do_stack = True
elif option == "--no-collapse":
no_collapse = True
else:
usage("Unrecognized option: %s" % (option,))
if len(args) == 0:
fname = "results.txt"
elif len(args) != 1:
usage("Must have one argument")
else:
fname = args[0]
filepath = os.path.expanduser(fname)
if not os.path.exists(filepath):
usage("File '%s' does not exist" % (filepath,))
print("CalendarServer dtrace analysis tool tool")
print("=====================================")
print("")
if do_stack:
print("Generating nested stack call file.")
if no_collapse:
print("Consecutive function calls will not be removed.")
else:
print("Consecutive function calls will be removed.")
print("============================")
print("")
Dtrace(filepath).analyze(do_stack, no_collapse)
    except Exception as e:
        sys.exit(str(e))
|
{
"content_hash": "e8185950c6412aa75661fd6720393b5b",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 115,
"avg_line_length": 35.598194130925506,
"alnum_prop": 0.48788839568801523,
"repo_name": "trevor/calendarserver",
"id": "f68b9bcb51a692309c8c5c45d729b6344f875289",
"size": "16418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/tools/dtraceanalyze.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4214"
},
{
"name": "D",
"bytes": "13143"
},
{
"name": "JavaScript",
"bytes": "76566"
},
{
"name": "Python",
"bytes": "9260291"
},
{
"name": "Shell",
"bytes": "78964"
}
],
"symlink_target": ""
}
|
import argparse
import os
import sys
from collections import defaultdict
from pprint import pprint
import config
from postprocess import processdata
from utility import utility
parser = argparse.ArgumentParser(
description="Prints out general statistics about FIRST/COPY")
parser.add_argument(
"--monthsFolder",
"-m",
default=config.monthsFolder,
type=str,
help="the folder in which the months directory " + "are residing")
parser.add_argument(
"--ignoreLock",
"-i",
help="Ignore locked file and execute" + " anyways",
action="store_true")
parser.add_argument(
"--position",
"-p",
default="default position",
type=str,
help="The position to be displayed before the data.")
parser.add_argument(
"month", type=str, help="the month which we're interested in")
if (len(sys.argv[1:]) == 0):
parser.print_help()
parser.exit()
args = parser.parse_args()
if os.path.isfile(utility.addMissingSlash(args.monthsFolder)
+ utility.addMissingSlash(args.month) + "locked") \
and not args.ignoreLock:
print("ERROR: The month " + str(args.month) +
" is being edited at the moment." +
" Use -i if you want to force the execution of this script.")
sys.exit()
class GeneralStatisticsHandler:
statistic = defaultdict(int)
totalCount = 0
def handle(self, sparqlQuery, processed):
if (processed['#Valid'] == 'VALID'):
self.totalCount += 1
self.statistic[processed['#First']] += 1
self.statistic[processed['#QueryComplexity']] += 1
if processed['#ExampleQueryStringComparison'] != "NONE":
self.statistic['EXAMPLE_STRING'] += 1
if processed['#ExampleQueryParsedComparison'] != "NONE":
self.statistic['EXAMPLE_PARSED'] += 1
def printStat(self):
#pprint(self.statistic)
print(
"Month\tFirst\tCopy\tSIMPLE\tCOMPLEX\tEXAMPLE_STRING\tEXAMPLE_PARSED"
)
print(
args.month + "\t" + str(self.statistic["FIRST"]) + "\t" +
str(self.statistic["COPY"]) + "\t" + str(self.statistic["SIMPLE"])
+ "\t" + str(self.statistic["COMPLEX"]) + "\t" +
str(self.statistic["EXAMPLE_STRING"]) + "\t" +
str(self.statistic["EXAMPLE_PARSED"]))
handler = GeneralStatisticsHandler()
processdata.processMonth(
handler, args.month, args.monthsFolder, notifications=False)
print(args.position)
print("")
handler.printStat()
|
{
"content_hash": "7cd8641e79156183268dcbe2174ee3b1",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 81,
"avg_line_length": 30.214285714285715,
"alnum_prop": 0.6347517730496454,
"repo_name": "Wikidata/QueryAnalysis",
"id": "ec698c3ad2916025f717c17047e258f291d44d1e",
"size": "2538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/generalStat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "659"
},
{
"name": "HTML",
"bytes": "42087"
},
{
"name": "Java",
"bytes": "242056"
},
{
"name": "JavaScript",
"bytes": "193"
},
{
"name": "Python",
"bytes": "132061"
},
{
"name": "Shell",
"bytes": "1889"
}
],
"symlink_target": ""
}
|
"""Incremental Principal Components Analysis."""
# Author: Kyle Kastner <kastnerkyle@gmail.com>
# Giorgio Patrini
# License: BSD 3 clause
import numpy as np
from scipy import linalg, sparse
from ._base import _BasePCA
from ..utils import gen_batches
from ..utils.extmath import svd_flip, _incremental_mean_and_var
class IncrementalPCA(_BasePCA):
"""Incremental principal components analysis (IPCA).
Linear dimensionality reduction using Singular Value Decomposition of
the data, keeping only the most significant singular vectors to
project the data to a lower dimensional space. The input data is centered
but not scaled for each feature before applying the SVD.
Depending on the size of the input data, this algorithm can be much more
memory efficient than a PCA, and allows sparse input.
This algorithm has constant memory complexity, on the order
of ``batch_size * n_features``, enabling use of np.memmap files without
loading the entire file into memory. For sparse matrices, the input
is converted to dense in batches (in order to be able to subtract the
mean) which avoids storing the entire dense matrix at any one time.
The computational overhead of each SVD is
``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
remain in memory at a time. There will be ``n_samples / batch_size`` SVD
computations to get the principal components, versus 1 large SVD of
complexity ``O(n_samples * n_features ** 2)`` for PCA.
Read more in the :ref:`User Guide <IncrementalPCA>`.
.. versionadded:: 0.16
Parameters
----------
n_components : int, default=None
Number of components to keep. If ``n_components`` is ``None``,
then ``n_components`` is set to ``min(n_samples, n_features)``.
whiten : bool, default=False
When True (False by default) the ``components_`` vectors are divided
by ``n_samples`` times ``components_`` to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making data respect some hard-wired assumptions.
copy : bool, default=True
If False, X will be overwritten. ``copy=False`` can be used to
save memory but is unsafe for general use.
batch_size : int, default=None
The number of samples to use for each batch. Only used when calling
``fit``. If ``batch_size`` is ``None``, then ``batch_size``
is inferred from the data and set to ``5 * n_features``, to provide a
balance between approximation accuracy and memory consumption.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Principal axes in feature space, representing the directions of
maximum variance in the data. Equivalently, the right singular
vectors of the centered input data, parallel to its eigenvectors.
The components are sorted by ``explained_variance_``.
explained_variance_ : ndarray of shape (n_components,)
Variance explained by each of the selected components.
explained_variance_ratio_ : ndarray of shape (n_components,)
Percentage of variance explained by each of the selected components.
If all components are stored, the sum of explained variances is equal
to 1.0.
singular_values_ : ndarray of shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
mean_ : ndarray of shape (n_features,)
Per-feature empirical mean, aggregate over calls to ``partial_fit``.
var_ : ndarray of shape (n_features,)
Per-feature empirical variance, aggregate over calls to
``partial_fit``.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf.
n_components_ : int
The estimated number of components. Relevant when
``n_components=None``.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
batch_size_ : int
Inferred batch size from ``batch_size``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.decomposition import IncrementalPCA
>>> from scipy import sparse
>>> X, _ = load_digits(return_X_y=True)
>>> transformer = IncrementalPCA(n_components=7, batch_size=200)
>>> # either partially fit on smaller batches of data
>>> transformer.partial_fit(X[:100, :])
IncrementalPCA(batch_size=200, n_components=7)
>>> # or let the fit function itself divide the data into batches
>>> X_sparse = sparse.csr_matrix(X)
>>> X_transformed = transformer.fit_transform(X_sparse)
>>> X_transformed.shape
(1797, 7)
Notes
-----
Implements the incremental PCA model from:
*D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual
Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3,
pp. 125-141, May 2008.*
See https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf
This model is an extension of the Sequential Karhunen-Loeve Transform from:
*A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and
its Application to Images, IEEE Transactions on Image Processing, Volume 9,
Number 8, pp. 1371-1374, August 2000.*
See https://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf
We have specifically abstained from an optimization used by authors of both
papers, a QR decomposition used in specific situations to reduce the
algorithmic complexity of the SVD. The source for this technique is
*Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5,
section 5.4.4, pp 252-253.*. This technique has been omitted because it is
advantageous only when decomposing a matrix with ``n_samples`` (rows)
>= 5/3 * ``n_features`` (columns), and hurts the readability of the
implemented algorithm. This would be a good opportunity for future
optimization, if it is deemed necessary.
References
----------
D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual
Tracking, International Journal of Computer Vision, Volume 77,
Issue 1-3, pp. 125-141, May 2008.
G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
Section 5.4.4, pp. 252-253.
See Also
--------
PCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, *, whiten=False, copy=True, batch_size=None):
self.n_components = n_components
self.whiten = whiten
self.copy = copy
self.batch_size = batch_size
def fit(self, X, y=None):
"""Fit the model with X, using minibatches of size batch_size.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
self.components_ = None
self.n_samples_seen_ = 0
self.mean_ = 0.0
self.var_ = 0.0
self.singular_values_ = None
self.explained_variance_ = None
self.explained_variance_ratio_ = None
self.noise_variance_ = None
X = self._validate_data(
X,
accept_sparse=["csr", "csc", "lil"],
copy=self.copy,
dtype=[np.float64, np.float32],
)
n_samples, n_features = X.shape
if self.batch_size is None:
self.batch_size_ = 5 * n_features
else:
self.batch_size_ = self.batch_size
for batch in gen_batches(
n_samples, self.batch_size_, min_batch_size=self.n_components or 0
):
X_batch = X[batch]
if sparse.issparse(X_batch):
X_batch = X_batch.toarray()
self.partial_fit(X_batch, check_input=False)
return self
def partial_fit(self, X, y=None, check_input=True):
"""Incremental fit with X. All of X is processed as a single batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
check_input : bool, default=True
Run check_array on X.
y : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
first_pass = not hasattr(self, "components_")
if check_input:
if sparse.issparse(X):
raise TypeError(
"IncrementalPCA.partial_fit does not support "
"sparse input. Either convert data to dense "
"or use IncrementalPCA.fit to do so in batches."
)
X = self._validate_data(
X, copy=self.copy, dtype=[np.float64, np.float32], reset=first_pass
)
n_samples, n_features = X.shape
if first_pass:
self.components_ = None
if self.n_components is None:
if self.components_ is None:
self.n_components_ = min(n_samples, n_features)
else:
self.n_components_ = self.components_.shape[0]
elif not 1 <= self.n_components <= n_features:
raise ValueError(
"n_components=%r invalid for n_features=%d, need "
"more rows than columns for IncrementalPCA "
"processing" % (self.n_components, n_features)
)
elif not self.n_components <= n_samples:
raise ValueError(
"n_components=%r must be less or equal to "
"the batch number of samples "
"%d." % (self.n_components, n_samples)
)
else:
self.n_components_ = self.n_components
if (self.components_ is not None) and (
self.components_.shape[0] != self.n_components_
):
raise ValueError(
"Number of input features has changed from %i "
"to %i between calls to partial_fit! Try "
"setting n_components to a fixed value."
% (self.components_.shape[0], self.n_components_)
)
# This is the first partial_fit
if not hasattr(self, "n_samples_seen_"):
self.n_samples_seen_ = 0
self.mean_ = 0.0
self.var_ = 0.0
# Update stats - they are 0 if this is the first step
col_mean, col_var, n_total_samples = _incremental_mean_and_var(
X,
last_mean=self.mean_,
last_variance=self.var_,
last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]),
)
n_total_samples = n_total_samples[0]
        # Mean-centering
        if self.n_samples_seen_ == 0:
            # If it is the first step, simply center X
X -= col_mean
else:
col_batch_mean = np.mean(X, axis=0)
X -= col_batch_mean
# Build matrix of combined previous basis and new data
mean_correction = np.sqrt(
(self.n_samples_seen_ / n_total_samples) * n_samples
) * (self.mean_ - col_batch_mean)
X = np.vstack(
(
self.singular_values_.reshape((-1, 1)) * self.components_,
X,
mean_correction,
)
)
U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
U, Vt = svd_flip(U, Vt, u_based_decision=False)
explained_variance = S ** 2 / (n_total_samples - 1)
explained_variance_ratio = S ** 2 / np.sum(col_var * n_total_samples)
self.n_samples_seen_ = n_total_samples
self.components_ = Vt[: self.n_components_]
self.singular_values_ = S[: self.n_components_]
self.mean_ = col_mean
self.var_ = col_var
self.explained_variance_ = explained_variance[: self.n_components_]
self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_]
if self.n_components_ < n_features:
self.noise_variance_ = explained_variance[self.n_components_ :].mean()
else:
self.noise_variance_ = 0.0
return self
def transform(self, X):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set, using minibatches of size batch_size if X is
sparse.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2],
... [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, n_components=2)
>>> ipca.transform(X) # doctest: +SKIP
"""
if sparse.issparse(X):
n_samples = X.shape[0]
output = []
for batch in gen_batches(
n_samples, self.batch_size_, min_batch_size=self.n_components or 0
):
output.append(super().transform(X[batch].toarray()))
return np.vstack(output)
else:
return super().transform(X)
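# A minimal self-check sketch (not part of scikit-learn): since ``fit`` simply
# loops ``partial_fit`` over ``gen_batches``, fitting with explicit
# ``partial_fit`` batches of the same size should reproduce the components
# (compared in absolute value, as component signs are not meaningful).
if __name__ == "__main__":  # illustrative only; this module is normally imported
    rng = np.random.RandomState(0)
    X_demo = rng.normal(size=(200, 10))
    ipca_fit = IncrementalPCA(n_components=3, batch_size=50).fit(X_demo)
    ipca_partial = IncrementalPCA(n_components=3)
    for demo_batch in gen_batches(200, 50):
        ipca_partial.partial_fit(X_demo[demo_batch])
    assert np.allclose(np.abs(ipca_fit.components_),
                       np.abs(ipca_partial.components_))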
|
{
"content_hash": "033ab60e40e56ba5a002a4a60f3d7bb7",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 87,
"avg_line_length": 38.281984334203656,
"alnum_prop": 0.6044877915700451,
"repo_name": "shyamalschandra/scikit-learn",
"id": "634ed779f63f3195fa5133f542aacd90aabd8eb3",
"size": "14662",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/decomposition/_incremental_pca.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394788"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6271288"
},
{
"name": "Shell",
"bytes": "6747"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0030_realm_org_type'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='avatar_source',
field=models.CharField(choices=[('G', 'Hosted by Gravatar'), ('U', 'Uploaded by user')], max_length=1, default='G'),
),
]
|
{
"content_hash": "f805ce311df7847ee38d822dafd1a670",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 128,
"avg_line_length": 26.375,
"alnum_prop": 0.5781990521327014,
"repo_name": "brainwane/zulip",
"id": "7e57a2c01a971a1651cc810343c6ffaea93105fe",
"size": "422",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "zerver/migrations/0031_remove_system_avatar_source.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "423578"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "647926"
},
{
"name": "JavaScript",
"bytes": "2886792"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "90558"
},
{
"name": "Python",
"bytes": "6000548"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "110849"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
}
|
"""
Given a string, sort it in decreasing order based on the frequency of characters.
"""
from collections import defaultdict
class Solution(object):
def frequencySort(self, s):
"""
Brute force: counter, sort O(n log n)
There is a uppper limit of the counter, thus bucket sort possible
:type s: str
:rtype: str
"""
counter = defaultdict(int)
for c in s:
counter[c] += 1
bucket = {count: [] for count in range(1, len(s)+1)}
for k, v in counter.items():
bucket[v].append(k)
ret = []
for count in reversed(range(1, len(s) + 1)):
if bucket[count]:
for c in bucket[count]:
ret.append(c * count)
return "".join(ret)
if __name__ == "__main__":
assert Solution().frequencySort("tree") == "eetr"
|
{
"content_hash": "2120df043e217cf76db09d58653f639a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 81,
"avg_line_length": 25.852941176470587,
"alnum_prop": 0.5358361774744027,
"repo_name": "algorhythms/LeetCode",
"id": "8f713188786f442d804707948ba31dd81199e508",
"size": "898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "451 Sort Characters By Frequency.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1444167"
}
],
"symlink_target": ""
}
|
"""
Functions for working with constant values.
"""
from typing import Union
from enum import Enum
import json
from penman.types import Constant
from penman.exceptions import ConstantError
pytype = type # store because type() is redefined below
class Type(Enum):
"""
An enumeration of constant value types.
"""
SYMBOL = 'Symbol'
STRING = 'String'
INTEGER = 'Integer'
FLOAT = 'Float'
NULL = 'Null'
# Make them available at the module level
SYMBOL = Type.SYMBOL #: Symbol constants (e.g., :code:`(... :polarity -)`)
STRING = Type.STRING #: String constants (e.g., :code:`(... :op1 "Kim")`)
INTEGER = Type.INTEGER #: Integer constants (e.g., :code:`(... :value 12)`)
FLOAT = Type.FLOAT #: Float constants (e.g., :code:`(... :value 1.2)`)
NULL = Type.NULL #: Empty values (e.g., :code:`(... :ARG1 )`)
_typemap = {
str: SYMBOL, # needs further checking
int: INTEGER,
float: FLOAT,
type(None): NULL,
}
def type(constant_string: Union[str, None]) -> Type:
"""
Return the type of constant encoded by *constant_string*.
Examples:
>>> from penman import constant
>>> constant.type('-')
<Type.SYMBOL: 'Symbol'>
>>> constant.type('"foo"')
<Type.STRING: 'String'>
>>> constant.type('1')
<Type.INTEGER: 'Integer'>
>>> constant.type('1.2')
<Type.FLOAT: 'Float'>
>>> constant.type('')
<Type.NULL: 'Null'>
"""
if constant_string is None:
typ = NULL
else:
assert isinstance(constant_string, str)
value = evaluate(constant_string)
typ = _typemap[pytype(value)]
if (typ == Type.SYMBOL
and constant_string.startswith('"')
and constant_string.endswith('"')):
typ = Type.STRING
return typ
def evaluate(constant_string: Union[str, None]) -> Constant:
"""
Evaluate and return *constant_string*.
If *constant_string* is ``None`` or an empty symbol (``''``), this
function returns ``None``, while an empty string constant
(``'""'``) returns an empty :py:class:`str` object
(``''``). Otherwise, symbols are returned unchanged while strings
get quotes removed and escape sequences are unescaped. Note that
this means it is impossible to recover the original type of
strings and symbols once they have been evaluated. For integer and
float constants, this function returns the equivalent Python
:py:class:`int` or :py:class:`float` objects.
Examples:
>>> from penman import constant
>>> constant.evaluate('-')
'-'
>>> constant.evaluate('"foo"')
'foo'
>>> constant.evaluate('1')
1
>>> constant.evaluate('1.2')
1.2
>>> constant.evaluate('') is None
True
"""
value: Constant = constant_string
if value is None or value == '':
value = None
else:
assert isinstance(constant_string, str)
if constant_string.startswith('"') ^ constant_string.endswith('"'):
raise ConstantError(f'unbalanced quotes: {constant_string}')
if constant_string not in ('true', 'false', 'null'):
try:
value = json.loads(constant_string, parse_constant=str)
except json.JSONDecodeError:
value = constant_string
if not (value is None or isinstance(value, (str, int, float))):
raise ConstantError(f'invalid constant: {value!r}')
return value
def quote(constant: Constant) -> str:
"""
Return *constant* as a quoted string.
If *constant* is ``None``, this function returns an empty string
constant (``'""'``). All other types are cast to a string and
quoted.
Examples:
>>> from penman import constant
>>> constant.quote(None)
'""'
>>> constant.quote('')
'""'
>>> constant.quote('foo')
'"foo"'
>>> constant.quote('"foo"')
'"\\\\"foo\\\\""'
>>> constant.quote(1)
'"1"'
>>> constant.quote(1.5)
'"1.5"'
"""
if constant is None:
return '""'
else:
return json.dumps(str(constant))
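# A minimal round-trip sketch (not part of the module): quoting any value
# yields a string constant, and evaluating it recovers ``str(value)``
# (or '' for None, since null quotes to the empty string constant).
if __name__ == '__main__':
    for _value in (None, '', 'foo', '"foo"', 1, 1.5):
        _quoted = quote(_value)
        assert type(_quoted) == STRING
        assert evaluate(_quoted) == ('' if _value is None else str(_value))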
|
{
"content_hash": "2b470d5dc4776af831ba5fe8f8834b59",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 77,
"avg_line_length": 29.131034482758622,
"alnum_prop": 0.5745738636363636,
"repo_name": "goodmami/penman",
"id": "9119b25ad10568ba0913c70189f4fce97da19adf",
"size": "4225",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "penman/constant.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186533"
}
],
"symlink_target": ""
}
|
import pytest
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.storage import default_storage
from shop.conf import app_settings
from shop.models.cart import CartModel
from shop.models.defaults.customer import Customer
from shop.modifiers.pool import CartModifiersPool
from shop.views.cart import CartViewSet, WatchViewSet
from shop.modifiers.pool import cart_modifiers_pool
from rest_framework.reverse import reverse
CartModifiersPool.USE_CACHE = False
@pytest.fixture(name='filled_cart')
@pytest.mark.django_db
def test_add_to_cart(commodity_factory, api_client, rf):
# add a product to the cart
product = commodity_factory()
data = {'quantity': 2, 'product': product.id}
response = api_client.post(reverse('shop:cart-list'), data)
assert response.status_code == 201
assert response.data['quantity'] == 2
assert response.data['unit_price'] == str(product.unit_price)
assert response.data['line_total'] == str(2 * product.unit_price)
# verify that the product is in the cart
request = rf.get('/my-cart')
request.session = api_client.session
request.user = AnonymousUser()
request.customer = Customer.objects.get_from_request(request)
filled_cart = CartModel.objects.get_from_request(request)
filled_cart.update(request)
assert filled_cart.num_items == 1
items = filled_cart.items.all()
assert items[0].product == product
assert items[0].quantity == 2
assert filled_cart.subtotal == 2 * product.unit_price
return filled_cart
@pytest.mark.django_db
def test_list_cart(api_rf, filled_cart):
request = api_rf.get('/shop/api/cart')
request.customer = filled_cart.customer
response = CartViewSet.as_view({'get': 'list'})(request)
assert response.status_code == 200
assert response.data['num_items'] == 1
assert response.data['total_quantity'] == 2
assert response.data['subtotal'] == str(filled_cart.subtotal)
assert response.data['total'] == str(filled_cart.total)
@pytest.mark.django_db
def test_unowned_cart(customer_factory, api_rf, filled_cart):
request = api_rf.get('/shop/api/cart')
request.customer = customer_factory()
response = CartViewSet.as_view({'get': 'list'})(request)
assert response.data['num_items'] == 0
@pytest.mark.django_db
def test_change_quantity(api_rf, filled_cart):
product = filled_cart.items.all()[0].product
data = {'quantity': 3, 'product': product.id}
request = api_rf.put('/shop/api/cart', data)
request.customer = filled_cart.customer
response = CartViewSet.as_view({'put': 'update'})(request, pk=product.id)
assert response.status_code == 200
filled_cart.refresh_from_db()
assert filled_cart.num_items == 1
assert filled_cart.items.all()[0].quantity == 3
@pytest.mark.django_db
def test_too_greedy(session, api_rf, filled_cart):
product = filled_cart.items.all()[0].product
data = {'quantity': 10, 'product': product.id}
request = api_rf.put('/shop/api/cart', data)
request.customer = filled_cart.customer
request.session = session
request._messages = default_storage(request)
response = CartViewSet.as_view({'put': 'update'})(request, pk=product.id)
assert response.status_code == 200
filled_cart.refresh_from_db()
assert filled_cart.num_items == 1
assert filled_cart.items.all()[0].quantity == 5 # not 10, as requested
@pytest.mark.django_db
def test_remove_item(api_rf, filled_cart):
product = filled_cart.items.all()[0].product
request = api_rf.delete(reverse('shop:cart-list'))
request.customer = filled_cart.customer
response = CartViewSet.as_view({'delete': 'destroy'})(request, pk=product.id)
assert response.status_code == 200
filled_cart.refresh_from_db()
assert filled_cart.num_items == 0
assert filled_cart.items.count() == 0
@pytest.fixture(name='watch_list')
@pytest.mark.django_db
def test_watch_cart_item(api_rf, filled_cart):
product = filled_cart.items.all()[0].product
data = {'quantity': 0, 'product': product.id}
request = api_rf.put('/shop/api/cart', data)
request.customer = filled_cart.customer
response = WatchViewSet.as_view({'put': 'update'})(request, pk=product.id)
assert response.status_code == 200
filled_cart.refresh_from_db()
assert filled_cart.num_items == 0
assert filled_cart.items.all()[0].quantity == 0
return filled_cart
@pytest.mark.django_db
def test_add_watch_item(api_rf, watch_list):
product = watch_list.items.all()[0].product
data = {'quantity': 1, 'product': product.id}
request = api_rf.put('/shop/api/cart', data)
request.customer = watch_list.customer
response = CartViewSet.as_view({'put': 'update'})(request, pk=product.id)
assert response.status_code == 200
watch_list.refresh_from_db()
assert watch_list.num_items == 1
assert watch_list.items.all()[0].quantity == 1
@pytest.mark.django_db
def test_include_tax_modifier(api_rf, filled_cart):
request = api_rf.get('/shop/api/cart')
request.customer = filled_cart.customer
response = CartViewSet.as_view({'get': 'list'})(request)
assert response.status_code == 200
assert response.data['subtotal'] == str(filled_cart.subtotal)
tax_rate = 1 + app_settings.SHOP_VALUE_ADDED_TAX / 100
assert response.data['total'] == str(filled_cart.subtotal * tax_rate)
@pytest.mark.django_db
def test_payment_modifiers_with_same_processors(api_rf, filled_cart):
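    # Each payment modifier must respond only to its own identifier; if two
    # modifiers answered to the same identifier, the wrong one could be
    # activated during checkout.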
for modifier_to_test in cart_modifiers_pool.get_payment_modifiers():
for modifier_for_id in cart_modifiers_pool.get_payment_modifiers():
if modifier_to_test.is_active(modifier_for_id.identifier):
assert modifier_for_id.identifier == modifier_to_test.identifier
|
{
"content_hash": "d6040daf6a49efc21552f757de244d07",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 81,
"avg_line_length": 39.34013605442177,
"alnum_prop": 0.6979076603838837,
"repo_name": "awesto/django-shop",
"id": "5491f8d1f8cbeb4e573ccf372fe77830f4a7dd70",
"size": "5783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cart.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "107122"
},
{
"name": "JavaScript",
"bytes": "51946"
},
{
"name": "Python",
"bytes": "588560"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import authenticate, login
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
def login_user(request):
    err = ''
    username = password = ''
if request.POST:
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('../ws/%d' % user.id)
else:
err = "Dein Account is nicht gültig, bitte kontaktieren Sie den Administrator"
else:
            err = 'Your username and/or password do not match.'
return render_to_response('auth/login.html', context_instance=RequestContext(request, {'err': err}))
# Create your views here.
|
{
"content_hash": "6715c15a9be140388b52a91b18ff3efa",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 104,
"avg_line_length": 35.2,
"alnum_prop": 0.6852272727272727,
"repo_name": "wegtam/weather-api",
"id": "ae2d49eb5107dc9f716fcdc7b5f00210166a4ee9",
"size": "882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auth/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1816"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "14950"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudtrail
short_description: manage CloudTrail create, delete, update
description:
- Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled.
version_added: "2.0"
author:
- Ansible Core Team
- Ted Timmons (@tedder)
- Daniel Shepherd (@shepdelacreme)
requirements:
- boto3
- botocore
options:
state:
description:
- Add or remove CloudTrail configuration.
      - C(state=enabled) and C(state=disabled) have been preserved for backwards compatibility.
      - They are equivalent to C(state=present) and C(state=absent) respectively.
required: true
choices: ['present', 'absent', 'enabled', 'disabled']
name:
description:
- Name for the CloudTrail.
- Names are unique per-region unless the CloudTrail is a multi-region trail, in which case it is unique per-account.
required: true
enable_logging:
description:
- Start or stop the CloudTrail logging. If stopped the trail will be paused and will not record events or deliver log files.
default: true
type: bool
version_added: "2.4"
s3_bucket_name:
description:
- An existing S3 bucket where CloudTrail will deliver log files.
- This bucket should exist and have the proper policy.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html).
- Required when C(state=present).
version_added: "2.4"
s3_key_prefix:
description:
- S3 Key prefix for delivered log files. A trailing slash is not necessary and will be removed.
is_multi_region_trail:
description:
- Specify whether the trail belongs only to one region or exists in all regions.
default: false
type: bool
version_added: "2.4"
enable_log_file_validation:
description:
- Specifies whether log file integrity validation is enabled.
      - CloudTrail will create a hash for every log file delivered and produce a signed digest file that can be used to ensure log files have not been tampered with.
version_added: "2.4"
type: bool
aliases: [ "log_file_validation_enabled" ]
include_global_events:
description:
- Record API calls from global services such as IAM and STS.
default: true
type: bool
aliases: [ "include_global_service_events" ]
sns_topic_name:
description:
- SNS Topic name to send notifications to when a log file is delivered.
version_added: "2.4"
cloudwatch_logs_role_arn:
description:
- Specifies a full ARN for an IAM role that assigns the proper permissions for CloudTrail to create and write to the log group.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
      - Required when C(cloudwatch_logs_log_group_arn) is specified.
version_added: "2.4"
cloudwatch_logs_log_group_arn:
description:
- A full ARN specifying a valid CloudWatch log group to which CloudTrail logs will be delivered. The log group should already exist.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
      - Required when C(cloudwatch_logs_role_arn) is specified.
version_added: "2.4"
kms_key_id:
description:
- Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. This also has the effect of enabling log file encryption.
- The value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html).
version_added: "2.4"
tags:
description:
- A hash/dictionary of tags to be applied to the CloudTrail resource.
- Remove completely or specify an empty dictionary to remove all tags.
default: {}
version_added: "2.4"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: create single region cloudtrail
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
s3_key_prefix: cloudtrail
region: us-east-1
- name: create multi-region trail with validation and tags
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
region: us-east-1
is_multi_region_trail: true
enable_log_file_validation: true
cloudwatch_logs_role_arn: "arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role"
cloudwatch_logs_log_group_arn: "arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*"
kms_key_id: "alias/MyAliasName"
tags:
environment: dev
Name: default
- name: show another valid kms_key_id
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
kms_key_id: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
# simply "12345678-1234-1234-1234-123456789012" would be valid too.
- name: pause logging the trail we just created
cloudtrail:
state: present
name: default
enable_logging: false
s3_bucket_name: mylogbucket
region: us-east-1
is_multi_region_trail: true
enable_log_file_validation: true
tags:
environment: dev
Name: default
- name: delete a trail
cloudtrail:
state: absent
name: default
'''
RETURN = '''
exists:
description: whether the resource exists
returned: always
type: bool
sample: true
trail:
description: CloudTrail resource details
returned: always
type: complex
sample: hash/dictionary of values
contains:
trail_arn:
description: Full ARN of the CloudTrail resource
returned: success
type: str
sample: arn:aws:cloudtrail:us-east-1:123456789012:trail/default
name:
description: Name of the CloudTrail resource
returned: success
type: str
sample: default
is_logging:
description: Whether logging is turned on or paused for the Trail
returned: success
type: bool
sample: True
s3_bucket_name:
description: S3 bucket name where log files are delivered
returned: success
type: str
sample: myBucket
s3_key_prefix:
description: Key prefix in bucket where log files are delivered (if any)
returned: success when present
type: str
sample: myKeyPrefix
log_file_validation_enabled:
description: Whether log file validation is enabled on the trail
returned: success
type: bool
sample: true
include_global_service_events:
description: Whether global services (IAM, STS) are logged with this trail
returned: success
type: bool
sample: true
is_multi_region_trail:
description: Whether the trail applies to all regions or just one
returned: success
type: bool
sample: true
has_custom_event_selectors:
description: Whether any custom event selectors are used for this trail.
returned: success
type: bool
sample: False
home_region:
description: The home region where the trail was originally created and must be edited.
returned: success
type: str
sample: us-east-1
sns_topic_name:
description: The SNS topic name where log delivery notifications are sent.
returned: success when present
type: str
sample: myTopic
sns_topic_arn:
description: Full ARN of the SNS topic where log delivery notifications are sent.
returned: success when present
type: str
sample: arn:aws:sns:us-east-1:123456789012:topic/myTopic
cloud_watch_logs_log_group_arn:
description: Full ARN of the CloudWatch Logs log group where events are delivered.
returned: success when present
type: str
sample: arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*
cloud_watch_logs_role_arn:
description: Full ARN of the IAM role that CloudTrail assumes to deliver events.
returned: success when present
type: str
sample: arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role
kms_key_id:
description: Full ARN of the KMS Key used to encrypt log files.
returned: success when present
type: str
sample: arn:aws:kms::123456789012:key/12345678-1234-1234-1234-123456789012
tags:
description: hash/dictionary of tags applied to this resource
returned: success
type: dict
sample: {'environment': 'dev', 'Name': 'default'}
'''
import traceback
try:
from botocore.exceptions import ClientError
except ImportError:
# Handled in main() by imported HAS_BOTO3
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info,
HAS_BOTO3, ansible_dict_to_boto3_tag_list,
boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict)
def create_trail(module, client, ct_params):
"""
Creates a CloudTrail
module : AnsibleModule object
client : boto3 client connection object
ct_params : The parameters for the Trail to create
"""
resp = {}
try:
resp = client.create_trail(**ct_params)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
return resp
def tag_trail(module, client, tags, trail_arn, curr_tags=None, dry_run=False):
"""
Creates, updates, removes tags on a CloudTrail resource
module : AnsibleModule object
client : boto3 client connection object
tags : Dict of tags converted from ansible_dict to boto3 list of dicts
trail_arn : The ARN of the CloudTrail to operate on
curr_tags : Dict of the current tags on resource, if any
dry_run : true/false to determine if changes will be made if needed
"""
adds = []
removes = []
updates = []
changed = False
if curr_tags is None:
# No current tags so just convert all to a tag list
adds = ansible_dict_to_boto3_tag_list(tags)
else:
curr_keys = set(curr_tags.keys())
new_keys = set(tags.keys())
add_keys = new_keys - curr_keys
remove_keys = curr_keys - new_keys
update_keys = dict()
for k in curr_keys.intersection(new_keys):
if curr_tags[k] != tags[k]:
update_keys.update({k: tags[k]})
adds = get_tag_list(add_keys, tags)
removes = get_tag_list(remove_keys, curr_tags)
updates = get_tag_list(update_keys, tags)
if removes or updates:
changed = True
if not dry_run:
try:
client.remove_tags(ResourceId=trail_arn, TagsList=removes + updates)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
if updates or adds:
changed = True
if not dry_run:
try:
client.add_tags(ResourceId=trail_arn, TagsList=updates + adds)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
return changed
def get_tag_list(keys, tags):
"""
Returns a list of dicts with tags to act on
keys : set of keys to get the values for
tags : the dict of tags to turn into a list
"""
tag_list = []
for k in keys:
tag_list.append({'Key': k, 'Value': tags[k]})
return tag_list
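# A minimal sketch (not part of the module) of the set-difference
# reconciliation performed by tag_trail() above; the helper name and the
# plain-dict in/out types are illustrative only.
def _demo_tag_diff(curr_tags, new_tags):
    curr_keys, new_keys = set(curr_tags), set(new_tags)
    adds = {k: new_tags[k] for k in new_keys - curr_keys}
    removes = {k: curr_tags[k] for k in curr_keys - new_keys}
    updates = {k: new_tags[k] for k in curr_keys & new_keys
               if curr_tags[k] != new_tags[k]}
    return adds, removes, updates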
def set_logging(module, client, name, action):
"""
Starts or stops logging based on given state
module : AnsibleModule object
client : boto3 client connection object
name : The name or ARN of the CloudTrail to operate on
action : start or stop
"""
if action == 'start':
try:
client.start_logging(Name=name)
return client.get_trail_status(Name=name)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
elif action == 'stop':
try:
client.stop_logging(Name=name)
return client.get_trail_status(Name=name)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
else:
module.fail_json(msg="Unsupported logging action")
def get_trail_facts(module, client, name):
"""
Describes existing trail in an account
module : AnsibleModule object
client : boto3 client connection object
name : Name of the trail
"""
# get Trail info
try:
trail_resp = client.describe_trails(trailNameList=[name])
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
# Now check to see if our trail exists and get status and tags
if len(trail_resp['trailList']):
trail = trail_resp['trailList'][0]
try:
status_resp = client.get_trail_status(Name=trail['Name'])
tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']])
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
trail['IsLogging'] = status_resp['IsLogging']
trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList'])
# Check for non-existent values and populate with None
optional_vals = set(['S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn', 'CloudWatchLogsRoleArn', 'KmsKeyId'])
for v in optional_vals - set(trail.keys()):
trail[v] = None
return trail
else:
# trail doesn't exist return None
return None
def delete_trail(module, client, trail_arn):
"""
Delete a CloudTrail
module : AnsibleModule object
client : boto3 client connection object
trail_arn : Full CloudTrail ARN
"""
try:
client.delete_trail(Name=trail_arn)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
def update_trail(module, client, ct_params):
"""
    Update an existing CloudTrail
module : AnsibleModule object
client : boto3 client connection object
ct_params : The parameters for the Trail to update
"""
try:
client.update_trail(**ct_params)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
name=dict(default='default'),
enable_logging=dict(default=True, type='bool'),
s3_bucket_name=dict(),
s3_key_prefix=dict(),
sns_topic_name=dict(),
is_multi_region_trail=dict(default=False, type='bool'),
enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']),
include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']),
cloudwatch_logs_role_arn=dict(),
cloudwatch_logs_log_group_arn=dict(),
kms_key_id=dict(),
tags=dict(default={}, type='dict'),
))
required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module')
# collect parameters
if module.params['state'] in ('present', 'enabled'):
state = 'present'
elif module.params['state'] in ('absent', 'disabled'):
state = 'absent'
tags = module.params['tags']
enable_logging = module.params['enable_logging']
ct_params = dict(
Name=module.params['name'],
S3BucketName=module.params['s3_bucket_name'],
IncludeGlobalServiceEvents=module.params['include_global_events'],
IsMultiRegionTrail=module.params['is_multi_region_trail'],
)
if module.params['s3_key_prefix']:
ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')
if module.params['sns_topic_name']:
ct_params['SnsTopicName'] = module.params['sns_topic_name']
if module.params['cloudwatch_logs_role_arn']:
ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']
if module.params['cloudwatch_logs_log_group_arn']:
ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']
if module.params['enable_log_file_validation'] is not None:
ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation']
if module.params['kms_key_id']:
ct_params['KmsKeyId'] = module.params['kms_key_id']
try:
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='cloudtrail', region=region, endpoint=ec2_url, **aws_connect_params)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
results = dict(
changed=False,
exists=False
)
# Get existing trail facts
trail = get_trail_facts(module, client, ct_params['Name'])
# If the trail exists set the result exists variable
if trail is not None:
results['exists'] = True
if state == 'absent' and results['exists']:
# If Trail exists go ahead and delete
results['changed'] = True
results['exists'] = False
results['trail'] = dict()
if not module.check_mode:
delete_trail(module, client, trail['TrailARN'])
elif state == 'present' and results['exists']:
# If Trail exists see if we need to update it
do_update = False
for key in ct_params:
tkey = str(key)
# boto3 has inconsistent parameter naming so we handle it here
if key == 'EnableLogFileValidation':
tkey = 'LogFileValidationEnabled'
# We need to make an empty string equal None
if ct_params.get(key) == '':
val = None
else:
val = ct_params.get(key)
if val != trail.get(tkey):
do_update = True
results['changed'] = True
# If we are in check mode copy the changed values to the trail facts in result output to show what would change.
if module.check_mode:
trail.update({tkey: ct_params.get(key)})
if not module.check_mode and do_update:
update_trail(module, client, ct_params)
trail = get_trail_facts(module, client, ct_params['Name'])
# Check if we need to start/stop logging
if enable_logging and not trail['IsLogging']:
results['changed'] = True
trail['IsLogging'] = True
if not module.check_mode:
set_logging(module, client, name=ct_params['Name'], action='start')
if not enable_logging and trail['IsLogging']:
results['changed'] = True
trail['IsLogging'] = False
if not module.check_mode:
set_logging(module, client, name=ct_params['Name'], action='stop')
# Check if we need to update tags on resource
tag_dry_run = False
if module.check_mode:
tag_dry_run = True
tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], dry_run=tag_dry_run)
if tags_changed:
results['changed'] = True
trail['tags'] = tags
# Populate trail facts in output
results['trail'] = camel_dict_to_snake_dict(trail)
elif state == 'present' and not results['exists']:
# Trail doesn't exist just go create it
results['changed'] = True
if not module.check_mode:
# If we aren't in check_mode then actually create it
created_trail = create_trail(module, client, ct_params)
# Apply tags
tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
# Get the trail status
try:
status_resp = client.get_trail_status(Name=created_trail['Name'])
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
# Set the logging state for the trail to desired value
if enable_logging and not status_resp['IsLogging']:
set_logging(module, client, name=ct_params['Name'], action='start')
if not enable_logging and status_resp['IsLogging']:
set_logging(module, client, name=ct_params['Name'], action='stop')
# Get facts for newly created Trail
trail = get_trail_facts(module, client, ct_params['Name'])
# If we are in check mode create a fake return structure for the newly minted trail
if module.check_mode:
acct_id = '123456789012'
try:
sts_client = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_params)
acct_id = sts_client.get_caller_identity()['Account']
except ClientError:
pass
trail = dict()
trail.update(ct_params)
if 'EnableLogFileValidation' not in ct_params:
ct_params['EnableLogFileValidation'] = False
            # Rename to match the key returned by describe_trails
            trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
            trail.pop('EnableLogFileValidation', None)
fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
trail['HasCustomEventSelectors'] = False
trail['HomeRegion'] = region
trail['TrailARN'] = fake_arn
trail['IsLogging'] = enable_logging
trail['tags'] = tags
# Populate trail facts in output
results['trail'] = camel_dict_to_snake_dict(trail)
module.exit_json(**results)
if __name__ == '__main__':
main()
|
{
"content_hash": "087e18d76406f99ba9a5f807c17d216d",
"timestamp": "",
"source": "github",
"line_count": 615,
"max_line_length": 160,
"avg_line_length": 38.40813008130081,
"alnum_prop": 0.634985817704585,
"repo_name": "SergeyCherepanov/ansible",
"id": "932d2effcea9cc6259d2c92f13e454d7dfc2e55c",
"size": "23762",
"binary": false,
"copies": "40",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/cloud/amazon/cloudtrail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
import json
from climate.api import context as api_context
from climate import context
from climate import exceptions
from climate import tests
class ContextTestCase(tests.TestCase):
def setUp(self):
super(ContextTestCase, self).setUp()
self.fake_headers = {u'X-User-Id': u'1',
u'X-Project-Id': u'1',
u'X-Auth-Token': u'111-111-111',
u'X-User-Name': u'user_name',
u'X-Project-Name': u'project_name',
u'X-Roles': u'user_name0, user_name1'}
def test_ctx_from_headers(self):
self.context = self.patch(context, 'ClimateContext')
catalog = json.dumps({'nova': 'catalog'})
self.fake_headers[u'X-Service-Catalog'] = catalog
api_context.ctx_from_headers(self.fake_headers)
self.context.assert_called_once_with(user_id=u'1',
roles=[u'user_name0',
u'user_name1'],
project_name=u'project_name',
auth_token=u'111-111-111',
service_catalog={
u'nova': u'catalog'},
project_id=u'1',
user_name=u'user_name')
def test_ctx_from_headers_no_catalog(self):
self.assertRaises(
exceptions.ServiceCatalogNotFound,
api_context.ctx_from_headers,
self.fake_headers)
def test_ctx_from_headers_wrong_format(self):
catalog = ['etc']
self.fake_headers[u'X-Service-Catalog'] = catalog
self.assertRaises(
exceptions.WrongFormat,
api_context.ctx_from_headers,
self.fake_headers)
|
{
"content_hash": "6102e275ecd7284593ea109b2d6b22e8",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 74,
"avg_line_length": 41.361702127659576,
"alnum_prop": 0.48199588477366256,
"repo_name": "paramite/blazar",
"id": "ef4d6f76d253fa592c04b6abe99c4683dd38a42b",
"size": "2527",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "climate/tests/api/test_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1014"
},
{
"name": "Python",
"bytes": "907331"
},
{
"name": "Shell",
"bytes": "786"
}
],
"symlink_target": ""
}
|
'''
Taken from https://github.com/xzela/prawns
'''
from django.core.management import setup_environ
from djprawn import settings
from optparse import OptionParser
setup_environ(settings)
from rtube import api
# TODO store this in the database
PROVIDERS = ['rtube', 'kink']
api_urls = {
'rtube': {
'videos': {
'url': 'http://api.redtube.com/?data=redtube.Videos.searchVideos',
'params': {'category': None, 'output': 'json', 'thumbsize': 'big', 'stars': True}
},
'categories': {
'url': 'http://api.redtube.com/?data=redtube.Categories.getCategoriesList',
'params': {'output': 'json'}
}
},
'kink': {
'videos': {
'url': '',
'params': {}
},
'categories': {
'url': '',
'params': {}
}
}
}
def main():
usage = "%prog [-a action] [-c type of content] [-s starting category]"
version = "%prog 0.0.1a"
parser = OptionParser(usage=usage, version=version)
parser.add_option("-a", "--action",
action="store", type="string", dest="action",
help="The action you wish to perform: fetch|get|list. [fetch pulls from web, get pulls from database]")
parser.add_option('-c', "--content",
action="store", type="string", dest="content", default=None,
help="Which type of content you want to get: videos|categories")
parser.add_option('-s', '--start',
action="store", type="string", dest="starting", default=None,
help="Starting Category you wish to start at. Try listing to see available categories")
parser.add_option('-p', '--provider',
action="store", type="string", dest="provider", default=None,
help="The content Provider you wish to slurp")
(options, args) = parser.parse_args()
categories = []
if options.action == "fetch":
fetch_objects(options)
if options.content == "categories":
if options.provider == 'rtube':
categories = fetch_categories()
else:
print "Please provider a provider or run: crab.py -a list -c providers"
if options.content == "videos":
categories = get_categories()
if options.starting != None:
if options.starting in parse_categories(categories):
categories = slice_category(options.starting, categories)
else:
print "coud not find %s in category list. try --list" % options.starting
exit()
for c in categories:
api_urls['rtube']['videos']['params']['category'] = c.title
# print api_urls['rtube']['videos']['params']['category']
url = api.format_url(api_urls['rtube']['videos']['url'], api_urls['rtube']['videos']['params'])
# print url
# fetch api call
json = api.fetch(url)
pages = json['count'] / len(json['videos'])
print pages
for i in range(pages):
api_urls['rtube']['videos']['params']['page'] = i + 1
p_url = api.format_url(api_urls['rtube']['videos']['url'], api_urls['rtube']['videos']['params'])
print p_url
p_json = api.fetch(p_url)
api.insert_content(p_json, 'videos')
if i > 5:
break
# break
# exit()
elif options.action == "get":
if options.content == "categories":
categories = get_categories()
elif options.action == "list":
if options.content == "categories":
categories = get_categories()
print "Here is a list of the known categories:"
for c in categories:
print c.title
if options.content == "providers":
print "here are the known providers:"
for p in PROVIDERS:
print p
else:
print "No content type specified. Try: categories|videos|providers"
else:
print "No action speficied. try --help"
def fetch_objects(options):
'''
Attempts to fetch objects from a provider
assuming you've provided one
return: None
'''
if options.content == "categories":
if options.provider != None:
fetch_categories(options.provider)
else:
print "Please provider a provider or run: crab.py -a list -c providers"
elif options.content == "videos":
categories = get_categories()
if options.starting != None:
if options.starting in parse_categories(categories):
categories = slice_category(options.starting, categories)
else:
print "Could not find %s in category list. run crab.py -a list -c categories to see available categories" % options.starting
exit()
for c in categories:
api_urls['rtube']['videos']['params']['category'] = c.title
# print api_urls['rtube']['videos']['params']['category']
url = api.format_url(api_urls['rtube']['videos']['url'], api_urls['rtube']['videos']['params'])
# print url
# fetch api call
json = api.fetch(url)
pages = json['count'] / len(json['videos'])
print pages
for i in range(pages):
api_urls['rtube']['videos']['params']['page'] = i + 1
p_url = api.format_url(api_urls['rtube']['videos']['url'], api_urls['rtube']['videos']['params'])
print p_url
p_json = api.fetch(p_url)
api.insert_content(p_json, 'videos')
if i > 5:
break
return None
def fetch_categories(provider="rtube"):
'''
    Attempts to fetch and return a list of categories for
a given provider
return: list of categories
'''
if provider in PROVIDERS:
url = api.format_url(api_urls[provider]['categories']['url'], api_urls[provider]['categories']['params'])
json = api.fetch(url)
api.insert_content(json, 'categories')
return get_categories()
def parse_categories(categories):
'''
Attempts to return a list of category titles
return list of category titles
'''
list_ = []
for c in categories:
list_.append(c.title)
return list_
def get_categories():
'''
    Attempts to fetch and return all of the known
categories
return: list of Categories
'''
categories = []
for cat in api.get_categories():
categories.append(cat)
return categories
def slice_category(category, categories):
start_from = category
list_ = []
temp_list = []
for c in categories:
temp_list.append(c.title)
for c in temp_list[temp_list.index(start_from):]:
list_.append(c)
print list_
return list_
if __name__ == "__main__":
main()
|
{
"content_hash": "50fd5010ad0b40cd028e673ae72d2deb",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 140,
"avg_line_length": 35.34825870646766,
"alnum_prop": 0.5452498240675581,
"repo_name": "xzela/code-samples",
"id": "2c550e411a0a502ec4f683035e55dca86bba16cf",
"size": "7105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/crab.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11703"
}
],
"symlink_target": ""
}
|
from panda3d.core import NodePath, Point3, VBase4
from direct.fsm.FSM import FSM
from direct.interval.IntervalGlobal import Sequence, Parallel, ActorInterval, Func, Wait, ParticleInterval, Track, LerpColorScaleInterval, LerpScaleInterval, LerpHprInterval
from direct.task.Task import Task
from toontown.battle import BattleParticles
from toontown.battle import MovieUtil
from toontown.minigame.MazeSuit import MazeSuit
from CogdoMazeGameObjects import CogdoMazeSplattable
import CogdoMazeGameGlobals as Globals
import random
class CogdoMazeSuit(MazeSuit, FSM, CogdoMazeSplattable):
GagHitEventName = 'CogdoMazeSuit_GagHit'
DeathEventName = 'CogdoMazeSuit_Death'
ThinkEventName = 'CogdoMazeSuit_Think'
def __init__(self, serialNum, maze, randomNumGen, difficulty, startTile, cogdoSuitType, walkAnimName = None):
data = Globals.SuitData[cogdoSuitType]
MazeSuit.__init__(self, serialNum, maze, randomNumGen, data['cellWalkPeriod'], difficulty, data['dnaName'], startTile=startTile, walkSameDirectionProb=Globals.SuitWalkSameDirectionProb, walkTurnAroundProb=Globals.SuitWalkTurnAroundProb, uniqueRandomNumGen=False, walkAnimName=walkAnimName)
FSM.__init__(self, 'CogdoMazeSuit')
CogdoMazeSplattable.__init__(self, self.suit, '%s-%i' % (Globals.SuitCollisionName, self.serialNum), 1.5)
if 'scale' in data:
self.suit.setScale(data['scale'])
self.hp = data['hp']
self.type = cogdoSuitType
self.memos = data['memos']
self.deathSuit = self.suit.getLoseActor()
self.deathSuit.pose('lose', 0)
BattleParticles.loadParticles()
self._initSfx()
def _initSfx(self):
audioMgr = base.cogdoGameAudioMgr
self._deathSoundIval = Sequence(audioMgr.createSfxIval('cogSpin', duration=1.6, startTime=0.6, volume=0.8, source=self.deathSuit), audioMgr.createSfxIval('cogDeath', volume=0.32, source=self.deathSuit))
def _destroySfx(self):
if self._deathSoundIval.isPlaying():
self._deathSoundIval.finish()
del self._deathSoundIval
def destroy(self):
BattleParticles.unloadParticles()
self.ignoreAll()
self._destroySfx()
CogdoMazeSplattable.destroy(self)
MazeSuit.destroy(self)
def handleEnterSphere(self, collEntry):
messenger.send(self.COLLISION_EVENT_NAME, [self.type, self.serialNum])
def gameStart(self, gameStartTime):
MazeSuit.gameStart(self, gameStartTime)
self.accept(Globals.GagCollisionName + '-into-' + self.gagCollisionName, self.handleGagHit)
messenger.send(self.ThinkEventName, [self, self.TX, self.TY])
def initCollisions(self):
MazeSuit.initCollisions(self)
self.collNodePath.setScale(0.75)
self.accept(self.uniqueName('again' + self.COLL_SPHERE_NAME), self.handleEnterSphere)
def think(self, curTic, curT, unwalkables):
MazeSuit.think(self, curTic, curT, unwalkables)
messenger.send(self.ThinkEventName, [self, self.TX, self.TY])
def handleGagHit(self, collEntry):
gagNodePath = collEntry.getFromNodePath().getParent()
messenger.send(self.GagHitEventName, [self.type, self.serialNum, gagNodePath])
def _getSuitAnimationIval(self, animName, startFrame = 0, duration = 1, partName = None, nextState = None):
totalFrames = self.suit.getNumFrames(animName)
frames = totalFrames - 1 - startFrame
frameRate = self.suit.getFrameRate(animName)
newRate = frames / duration
playRate = newRate / frameRate
ival = Sequence(ActorInterval(self.suit, animName, startTime=startFrame / newRate, endTime=totalFrames / newRate, playRate=playRate, partName=partName))
if nextState is not None:
def done():
self.request(nextState)
ival.append(Func(done))
return ival
def hitByGag(self):
self.hp = self.hp - 1
self.doSplat()
if self.hp <= 0:
self.explode()
def explode(self):
self.doDeathTrack()
messenger.send(self.DeathEventName, [self.type, self.serialNum])
def doDeathTrack(self):
def removeDeathSuit(suit, deathSuit):
if not deathSuit.isEmpty():
deathSuit.detachNode()
suit.cleanupLoseActor()
self.deathSuit.reparentTo(self.suit.getParent())
self.deathSuit.setScale(self.suit.getScale())
self.deathSuit.setPos(render, self.suit.getPos(render))
self.deathSuit.setHpr(render, self.suit.getHpr(render))
self.suit.hide()
self.collNodePath.reparentTo(self.deathSuit)
gearPoint = Point3(0, 0, self.suit.height / 2.0 + 2.0)
smallGears = BattleParticles.createParticleEffect(file='gearExplosionSmall')
singleGear = BattleParticles.createParticleEffect('GearExplosion', numParticles=1)
smallGearExplosion = BattleParticles.createParticleEffect('GearExplosion', numParticles=10)
bigGearExplosion = BattleParticles.createParticleEffect('BigGearExplosion', numParticles=30)
smallGears.setPos(gearPoint)
singleGear.setPos(gearPoint)
smallGearExplosion.setPos(gearPoint)
bigGearExplosion.setPos(gearPoint)
smallGears.setDepthWrite(False)
singleGear.setDepthWrite(False)
smallGearExplosion.setDepthWrite(False)
bigGearExplosion.setDepthWrite(False)
suitTrack = Sequence(Func(self.collNodePath.stash), ActorInterval(self.deathSuit, 'lose', startFrame=80, endFrame=140), Func(removeDeathSuit, self.suit, self.deathSuit, name='remove-death-suit'))
explosionTrack = Sequence(Wait(1.5), MovieUtil.createKapowExplosionTrack(self.deathSuit, explosionPoint=gearPoint))
gears1Track = Sequence(ParticleInterval(smallGears, self.deathSuit, worldRelative=0, duration=4.3, cleanup=True), name='gears1Track')
gears2MTrack = Track((0.0, explosionTrack), (0.7, ParticleInterval(singleGear, self.deathSuit, worldRelative=0, duration=5.7, cleanup=True)), (5.2, ParticleInterval(smallGearExplosion, self.deathSuit, worldRelative=0, duration=1.2, cleanup=True)), (5.4, ParticleInterval(bigGearExplosion, self.deathSuit, worldRelative=0, duration=1.0, cleanup=True)), name='gears2MTrack')
def removeParticle(particle):
if particle and hasattr(particle, 'renderParent'):
particle.cleanup()
del particle
removeParticles = Sequence(Func(removeParticle, smallGears), Func(removeParticle, singleGear), Func(removeParticle, smallGearExplosion), Func(removeParticle, bigGearExplosion))
self.deathTrack = Sequence(Parallel(suitTrack, gears2MTrack, gears1Track, self._deathSoundIval), removeParticles)
self.deathTrack.start()
class CogdoMazeSlowMinionSuit(CogdoMazeSuit):
def __init__(self, serialNum, maze, randomNumGen, difficulty, startTile = None):
CogdoMazeSuit.__init__(self, serialNum, maze, randomNumGen, difficulty, startTile, Globals.SuitTypes.SlowMinion)
self.defaultTransitions = {'Off': ['Normal'],
'Normal': ['Attack', 'Off'],
'Attack': ['Normal']}
def gameStart(self, gameStartTime):
CogdoMazeSuit.gameStart(self, gameStartTime)
self.request('Normal')
def enterNormal(self):
self.startWalkAnim()
def exitNormal(self):
pass
def enterAttack(self, elapsedTime):
self._attackIval = self._getSuitAnimationIval('finger-wag', duration=2.0, nextState='Normal')
self._attackIval.start(elapsedTime)
def filterAttack(self, request, args):
if request == 'Attack':
return None
else:
return self.defaultFilter(request, args)
return None
def exitAttack(self):
self._attackIval.pause()
del self._attackIval
class CogdoMazeFastMinionSuit(CogdoMazeSuit):
def __init__(self, serialNum, maze, randomNumGen, difficulty, startTile = None):
CogdoMazeSuit.__init__(self, serialNum, maze, randomNumGen, difficulty, startTile, Globals.SuitTypes.FastMinion)
class CogdoMazeBossSuit(CogdoMazeSuit):
BlinkTaskName = 'CogdoMazeBossBlinkTask'
ShakeTaskName = 'CogdoMazeBossShakeTask'
StartWalkTaskName = 'CogdoMazeBossStartWalkTask'
ShakeEventName = 'CogdoMazeSuitShake'
def __init__(self, serialNum, maze, randomNumGen, difficulty, startTile = None):
CogdoMazeSuit.__init__(self, serialNum, maze, randomNumGen, difficulty, startTile, Globals.SuitTypes.Boss, walkAnimName='stomp')
self.dropTimer = 0
self._walkSpeed = float(self.maze.cellWidth) / self.cellWalkDuration * 0.5
def _initSfx(self):
CogdoMazeSuit._initSfx(self)
audioMgr = base.cogdoGameAudioMgr
self._stompSfxIval = audioMgr.createSfxIval('cogStomp', source=self.suit, cutoff=Globals.BossStompSfxCutoff, volume=0.3)
self._hitSfx = audioMgr.createSfx('bossCogAngry', self.suit)
def _destroySfx(self):
del self._hitSfx
if self._stompSfxIval.isPlaying():
self._stompSfxIval.finish()
del self._stompSfxIval
CogdoMazeSuit._destroySfx(self)
def spin(self):
part = self.suit
time = Globals.BossSpinTime
degrees = 360 * Globals.BossSpinCount
spinIval = LerpHprInterval(part, time, (self.suit.getH() + degrees, 0, 0), blendType='easeOut')
spinIval.start()
def hitByGag(self):
if self.hp >= 2:
self._hitSfx.play()
self.spin()
self.suit.setColorScale(Globals.BlinkColor)
self.__startBlinkTask()
elif self.hp == 1:
self.__stopBlinkTask()
CogdoMazeSuit.hitByGag(self)
def gameStart(self, gameStartTime):
CogdoMazeSuit.gameStart(self, gameStartTime)
def startWalkAnim(self):
self.suit.loop(self._walkAnimName, fromFrame=43, toFrame=81)
self.suit.setPlayRate(self._walkSpeed * Globals.BossCogStompAnimationPlayrateFactor, self._walkAnimName)
self.__startShakeTask()
def destroy(self):
CogdoMazeSuit.destroy(self)
self.__stopShakeTask()
self.__stopBlinkTask()
def pickRandomValidSpot(self, r = 5):
validSpots = []
for x in xrange(self.TX - r, self.TX + r):
for y in xrange(self.TY - r, self.TY + r):
if self.maze.isWalkable(x, y):
validSpots.append([x, y])
return self.rng.choice(validSpots)
def __startShakeTask(self):
self.__stopShakeTask()
taskMgr.doMethodLater(Globals.BossShakeTime, self.__shake, self.uniqueName(CogdoMazeBossSuit.ShakeTaskName))
self.bossShakeLastTime = 0
def __stopShakeTask(self):
taskMgr.remove(self.uniqueName(CogdoMazeBossSuit.ShakeTaskName))
def __shake(self, task):
if task.time - self.bossShakeLastTime > Globals.BossShakeTime:
self.suit.setPlayRate(self._walkSpeed * Globals.BossCogStompAnimationPlayrateFactor, self._walkAnimName)
self._stompSfxIval.start()
messenger.send(self.ShakeEventName, [self, Globals.BossShakeStrength])
self.bossShakeLastTime = task.time
return task.cont
def __startBlinkTask(self):
self.__stopBlinkTask()
taskMgr.doMethodLater(Globals.BlinkFrequency, self.__blink, CogdoMazeBossSuit.BlinkTaskName)
def __stopBlinkTask(self):
taskMgr.remove(CogdoMazeBossSuit.BlinkTaskName)
def __blink(self, task):
blink = Sequence(LerpColorScaleInterval(self.suit, Globals.BlinkSpeed, VBase4(1.0, 1.0, 1.0, 1.0)), LerpColorScaleInterval(self.suit, Globals.BlinkSpeed, Globals.BlinkColor))
blink.start()
return Task.again
|
{
"content_hash": "59f66dd78b1a19bd6dac32fc2e149878",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 380,
"avg_line_length": 45.48863636363637,
"alnum_prop": 0.6724123573986177,
"repo_name": "DedMemez/ODS-August-2017",
"id": "b31b4572ecce1f354836324741a1e9babc45eef2",
"size": "12105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cogdominium/CogdoMazeSuits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10152014"
},
{
"name": "Shell",
"bytes": "707"
}
],
"symlink_target": ""
}
|
import hashlib
import logging
import time
from mako.template import Template
from pyramid.response import Response
from pyramid.view import view_config
from pyramid.config import Configurator
from pyramid.httpexceptions import HTTPBadRequest
from waitress import serve
main_template = Template("""<html><head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
</head><body>
<h1>TRICYCLES</h1>
<a href="${ request.route_url('root') }">HOME</a><br/>
<hr/>
<a href="${ request.route_url('login', _query={'userid': 'john'}) }">
Login as john</a><br/>
<a href="${ request.route_url('login', _query={'userid': 'bob'}) }">
Login as bob</a><br/>
<hr/>
<a href="${ request.route_url('logout') }">Logout</a><br/>
<hr/>
${ msg }
<hr/>
User ID: ${ identity if not identity is None else "--not-set--" }
</body></html>""")
def calculate_digest(secret, userid, timestamp, ip):
m = hashlib.sha1()
m.update(str(secret).encode("utf8"))
m.update(str(userid).encode("utf8"))
m.update(str(timestamp).encode("utf8"))
m.update(str(ip).encode("utf8"))
return m.hexdigest()
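# Illustrative sketch of the resulting cookie format (values hypothetical):
#
#   digest = calculate_digest('verysecretstring', 'john', 1400000000, '')
#   cookie = '%s-%s-%s' % (digest, 1400000000, 'john')
#   # -> '<40 hex chars>-1400000000-john'
#
# _decode_cookie() below splits on '-' and recomputes the digest; any
# tampering with the userid or timestamp changes the SHA-1 and is rejected.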
class View(object):
SECRET = "verysecretstring"
def _encode_cookie(self, userid):
ip = ""
ts = int(time.time())
digest = calculate_digest(self.SECRET, userid, ts, ip)
return "%s-%s-%s" % (digest, ts, userid)
def _decode_cookie(self):
cookie = self.request.cookies.get("userid", None)
if not cookie:
return None
# try to extract a userid and timestamp from the cookie
try:
(digest, ts, userid) = cookie.split("-", 2)
logging.info("cookie splitted up:%s-%s-%s" % (digest, ts, userid, ))
except:
logging.error("BAD COOKIE FORMAT:%s|" % cookie)
response = HTTPBadRequest()
response.delete_cookie("userid")
raise response
ip = ""
d2 = calculate_digest(self.SECRET, userid, ts, ip)
if d2 == digest:
return userid
logging.error("bad digest")
response = HTTPBadRequest()
response.delete_cookie("userid")
raise response
def __init__(self, request):
self.request = request
self.identity = self._decode_cookie()
def response(self, msg):
return Response(main_template.render(
request=self.request,
msg=msg,
identity=self.identity
))
@view_config(route_name="root", )
def root_view(self):
return self.response(["HOME"])
@view_config(route_name="login", )
def login_view(self):
userid = self.request.params.get("userid")
response = self.response(["LOGGED IN", userid])
# session - cleared with browser exit:
response.set_cookie("userid", self._encode_cookie(userid), httponly=1)
return response
@view_config(route_name="logout", )
def logout_view(self):
response = self.response(["LOGGED OUT"])
response.delete_cookie("userid")
return response
if __name__ == '__main__':
config = Configurator()
config.add_route('root', '')
config.add_route('login', '/login')
config.add_route('logout', '/logout')
config.scan()
app = config.make_wsgi_app()
serve(app)
|
{
"content_hash": "f7e2ac8f341e10077af47e3debcb8e60",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 80,
"avg_line_length": 31.64485981308411,
"alnum_prop": 0.5856467808623745,
"repo_name": "petrblahos/tricycles",
"id": "747e1deb884febf226027a8181fbbce97d8f04b4",
"size": "3409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "step03.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13346"
}
],
"symlink_target": ""
}
|
'''XML Canonicalization
Patches Applied to xml.dom.ext.c14n:
http://sourceforge.net/projects/pyxml/
[ 1444526 ] c14n.py: http://www.w3.org/TR/xml-exc-c14n/ fix
-- includes [ 829905 ] c14n.py fix for bug #825115,
Date Submitted: 2003-10-24 23:43
-- include dependent namespace declarations declared in ancestor nodes
(checking attributes and tags),
-- handle InclusiveNamespaces PrefixList parameter
This module generates canonical XML of a document or element.
http://www.w3.org/TR/2001/REC-xml-c14n-20010315
and includes a prototype of exclusive canonicalization
http://www.w3.org/Signature/Drafts/xml-exc-c14n
Requires PyXML 0.7.0 or later.
Known issues if using Ft.Lib.pDomlette:
1. Unicode
2. does not white space normalize attributes of type NMTOKEN and ID?
    3. seems to include "\n" after importing external entities?
Note, this version processes a DOM tree, and consequently it processes
namespace nodes as attributes, not from a node's namespace axis. This
permits simple document and element canonicalization without
XPath. When XPath is used, the XPath result node list is passed and used to
determine if the node is in the XPath result list, but little else.
Authors:
"Joseph M. Reagle Jr." <reagle@w3.org>
"Rich Salz" <rsalz@zolera.com>
$Date$ by $Author$
'''
_copyright = '''Copyright 2001, Zolera Systems Inc. All Rights Reserved.
Copyright 2001, MIT. All Rights Reserved.
Distributed under the terms of:
Python 2.0 License or later.
http://www.python.org/2.0.1/license.html
or
W3C Software License
http://www.w3.org/Consortium/Legal/copyright-software-19980720
'''
import string
from xml.dom import Node
try:
from xml.ns import XMLNS
except:
class XMLNS:
BASE = "http://www.w3.org/2000/xmlns/"
XML = "http://www.w3.org/XML/1998/namespace"
try:
import cStringIO
StringIO = cStringIO
except ImportError:
import StringIO
_attrs = lambda E: (E.attributes and E.attributes.values()) or []
_children = lambda E: E.childNodes or []
_IN_XML_NS = lambda n: n.name.startswith("xmlns")
_inclusive = lambda n: n.unsuppressedPrefixes == None
# Does a document/PI has lesser/greater document order than the
# first element?
_LesserElement, _Element, _GreaterElement = range(3)
def _sorter(n1,n2):
'''_sorter(n1,n2) -> int
Sorting predicate for non-NS attributes.'''
i = cmp(n1.namespaceURI, n2.namespaceURI)
if i: return i
return cmp(n1.localName, n2.localName)
def _sorter_ns(n1,n2):
'''_sorter_ns((n,v),(n,v)) -> int
"(an empty namespace URI is lexicographically least)."'''
if n1[0] == 'xmlns': return -1
if n2[0] == 'xmlns': return 1
return cmp(n1[0], n2[0])
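# Illustrative ordering: sorting namespace (name, value) pairs with
# _sorter_ns puts the default declaration 'xmlns' first, then 'xmlns:a',
# 'xmlns:b', ... -- i.e. the empty prefix is treated as lexicographically
# least, as the docstring above requires.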
def _utilized(n, node, other_attrs, unsuppressedPrefixes):
'''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
Return true if that nodespace is utilized within the node'''
if n.startswith('xmlns:'):
n = n[6:]
elif n.startswith('xmlns'):
n = n[5:]
if (n=="" and node.prefix in ["#default", None]) or \
n == node.prefix or n in unsuppressedPrefixes:
return 1
for attr in other_attrs:
if n == attr.prefix: return 1
# For exclusive need to look at attributes
if unsuppressedPrefixes is not None:
for attr in _attrs(node):
if n == attr.prefix: return 1
return 0
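# For example (illustrative): when rendering <x:elem y:attr="1"/> under
# exclusive c14n with unsuppressedPrefixes=['z'], the declarations for the
# prefixes x (the element), y (an attribute) and z (explicitly listed) all
# count as utilized, while an inherited but unused xmlns:w declaration does
# not, and is therefore omitted.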
def _inclusiveNamespacePrefixes(node, context, unsuppressedPrefixes):
'''http://www.w3.org/TR/xml-exc-c14n/
InclusiveNamespaces PrefixList parameter, which lists namespace prefixes that
are handled in the manner described by the Canonical XML Recommendation'''
inclusive = []
if node.prefix:
usedPrefixes = ['xmlns:%s' %node.prefix]
else:
usedPrefixes = ['xmlns']
for a in _attrs(node):
if a.nodeName.startswith('xmlns') or not a.prefix: continue
usedPrefixes.append('xmlns:%s' %a.prefix)
unused_namespace_dict = {}
for attr in context:
n = attr.nodeName
if n in unsuppressedPrefixes:
inclusive.append(attr)
elif n.startswith('xmlns:') and n[6:] in unsuppressedPrefixes:
inclusive.append(attr)
elif n.startswith('xmlns') and n[5:] in unsuppressedPrefixes:
inclusive.append(attr)
elif attr.nodeName in usedPrefixes:
inclusive.append(attr)
elif n.startswith('xmlns:'):
unused_namespace_dict[n] = attr.value
return inclusive, unused_namespace_dict
#_in_subset = lambda subset, node: not subset or node in subset
_in_subset = lambda subset, node: subset is None or node in subset # rich's tweak
class _implementation:
    '''Implementation class for C14N. This accompanies a node during its
processing and includes the parameters and processing state.'''
# Handler for each node type; populated during module instantiation.
handlers = {}
def __init__(self, node, write, **kw):
'''Create and run the implementation.'''
self.write = write
self.subset = kw.get('subset')
self.comments = kw.get('comments', 0)
self.unsuppressedPrefixes = kw.get('unsuppressedPrefixes')
nsdict = kw.get('nsdict', { 'xml': XMLNS.XML, 'xmlns': XMLNS.BASE })
# Processing state.
self.state = (nsdict, {'xml':''}, {}, {}) #0422
if node.nodeType == Node.DOCUMENT_NODE:
self._do_document(node)
elif node.nodeType == Node.ELEMENT_NODE:
self.documentOrder = _Element # At document element
if not _inclusive(self):
inherited,unused = _inclusiveNamespacePrefixes(node, self._inherit_context(node),
self.unsuppressedPrefixes)
self._do_element(node, inherited, unused=unused)
else:
inherited = self._inherit_context(node)
self._do_element(node, inherited)
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
pass
else:
raise TypeError, str(node)
def _inherit_context(self, node):
'''_inherit_context(self, node) -> list
Scan ancestors of attribute and namespace context. Used only
for single element node canonicalization, not for subset
canonicalization.'''
# Collect the initial list of xml:foo attributes.
xmlattrs = filter(_IN_XML_NS, _attrs(node))
# Walk up and get all xml:XXX attributes we inherit.
inherited, parent = [], node.parentNode
while parent and parent.nodeType == Node.ELEMENT_NODE:
for a in filter(_IN_XML_NS, _attrs(parent)):
n = a.localName
if n not in xmlattrs:
xmlattrs.append(n)
inherited.append(a)
parent = parent.parentNode
return inherited
def _do_document(self, node):
'''_do_document(self, node) -> None
Process a document node. documentOrder holds whether the document
element has been encountered such that PIs/comments can be written
as specified.'''
self.documentOrder = _LesserElement
for child in node.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.documentOrder = _Element # At document element
self._do_element(child)
self.documentOrder = _GreaterElement # After document element
elif child.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
self._do_pi(child)
elif child.nodeType == Node.COMMENT_NODE:
self._do_comment(child)
elif child.nodeType == Node.DOCUMENT_TYPE_NODE:
pass
else:
raise TypeError, str(child)
handlers[Node.DOCUMENT_NODE] = _do_document
def _do_text(self, node):
'''_do_text(self, node) -> None
Process a text or CDATA node. Render various special characters
as their C14N entity representations.'''
if not _in_subset(self.subset, node): return
        s = string.replace(node.data, "&", "&amp;")
        s = string.replace(s, "<", "&lt;")
        s = string.replace(s, ">", "&gt;")
        s = string.replace(s, "\015", "&#xD;")
if s: self.write(s)
handlers[Node.TEXT_NODE] = _do_text
handlers[Node.CDATA_SECTION_NODE] = _do_text
def _do_pi(self, node):
'''_do_pi(self, node) -> None
Process a PI node. Render a leading or trailing #xA if the
document order of the PI is greater or lesser (respectively)
than the document element.
'''
if not _in_subset(self.subset, node): return
W = self.write
if self.documentOrder == _GreaterElement: W('\n')
W('<?')
W(node.nodeName)
s = node.data
if s:
W(' ')
W(s)
W('?>')
if self.documentOrder == _LesserElement: W('\n')
handlers[Node.PROCESSING_INSTRUCTION_NODE] = _do_pi
def _do_comment(self, node):
'''_do_comment(self, node) -> None
Process a comment node. Render a leading or trailing #xA if the
document order of the comment is greater or lesser (respectively)
than the document element.
'''
if not _in_subset(self.subset, node): return
if self.comments:
W = self.write
if self.documentOrder == _GreaterElement: W('\n')
W('<!--')
W(node.data)
W('-->')
if self.documentOrder == _LesserElement: W('\n')
handlers[Node.COMMENT_NODE] = _do_comment
def _do_attr(self, n, value):
        '''_do_attr(self, node) -> None
Process an attribute.'''
W = self.write
W(' ')
W(n)
W('="')
        s = string.replace(value, "&", "&amp;")
        s = string.replace(s, "<", "&lt;")
        s = string.replace(s, '"', '&quot;')
        s = string.replace(s, '\011', '&#x9;')
        s = string.replace(s, '\012', '&#xA;')
        s = string.replace(s, '\015', '&#xD;')
W(s)
W('"')
def _do_element(self, node, initial_other_attrs = [], unused = None):
'''_do_element(self, node, initial_other_attrs = [], unused = {}) -> None
Process an element (and its children).'''
# Get state (from the stack) make local copies.
# ns_parent -- NS declarations in parent
# ns_rendered -- NS nodes rendered by ancestors
# ns_local -- NS declarations relevant to this element
# xml_attrs -- Attributes in XML namespace from parent
# xml_attrs_local -- Local attributes in XML namespace.
# ns_unused_inherited -- not rendered namespaces, used for exclusive
ns_parent, ns_rendered, xml_attrs = \
self.state[0], self.state[1].copy(), self.state[2].copy() #0422
ns_unused_inherited = unused
if unused is None:
ns_unused_inherited = self.state[3].copy()
ns_local = ns_parent.copy()
inclusive = _inclusive(self)
xml_attrs_local = {}
# Divide attributes into NS, XML, and others.
other_attrs = []
in_subset = _in_subset(self.subset, node)
for a in initial_other_attrs + _attrs(node):
if a.namespaceURI == XMLNS.BASE:
n = a.nodeName
if n == "xmlns:": n = "xmlns" # DOM bug workaround
ns_local[n] = a.nodeValue
elif a.namespaceURI == XMLNS.XML:
if inclusive or (in_subset and _in_subset(self.subset, a)): #020925 Test to see if attribute node in subset
xml_attrs_local[a.nodeName] = a #0426
else:
if _in_subset(self.subset, a): #020925 Test to see if attribute node in subset
other_attrs.append(a)
# # TODO: exclusive, might need to define xmlns:prefix here
# if not inclusive and a.prefix is not None and not ns_rendered.has_key('xmlns:%s' %a.prefix):
# ns_local['xmlns:%s' %a.prefix] = ??
#add local xml:foo attributes to ancestor's xml:foo attributes
xml_attrs.update(xml_attrs_local)
# Render the node
W, name = self.write, None
if in_subset:
name = node.nodeName
if not inclusive:
if node.prefix is not None:
prefix = 'xmlns:%s' %node.prefix
else:
prefix = 'xmlns'
if not ns_rendered.has_key(prefix) and not ns_local.has_key(prefix):
if not ns_unused_inherited.has_key(prefix):
raise RuntimeError,\
'For exclusive c14n, unable to map prefix "%s" in %s' %(
prefix, node)
ns_local[prefix] = ns_unused_inherited[prefix]
del ns_unused_inherited[prefix]
W('<')
W(name)
# Create list of NS attributes to render.
ns_to_render = []
for n,v in ns_local.items():
# If default namespace is XMLNS.BASE or empty,
# and if an ancestor was the same
if n == "xmlns" and v in [ XMLNS.BASE, '' ] \
and ns_rendered.get('xmlns') in [ XMLNS.BASE, '', None ]:
continue
# "omit namespace node with local name xml, which defines
# the xml prefix, if its string value is
# http://www.w3.org/XML/1998/namespace."
if n in ["xmlns:xml", "xml"] \
and v in [ 'http://www.w3.org/XML/1998/namespace' ]:
continue
# If not previously rendered
# and it's inclusive or utilized
if (n,v) not in ns_rendered.items():
if inclusive or _utilized(n, node, other_attrs, self.unsuppressedPrefixes):
ns_to_render.append((n, v))
elif not inclusive:
ns_unused_inherited[n] = v
# Sort and render the ns, marking what was rendered.
ns_to_render.sort(_sorter_ns)
for n,v in ns_to_render:
self._do_attr(n, v)
ns_rendered[n]=v #0417
# If exclusive or the parent is in the subset, add the local xml attributes
# Else, add all local and ancestor xml attributes
# Sort and render the attributes.
if not inclusive or _in_subset(self.subset,node.parentNode): #0426
other_attrs.extend(xml_attrs_local.values())
else:
other_attrs.extend(xml_attrs.values())
other_attrs.sort(_sorter)
for a in other_attrs:
self._do_attr(a.nodeName, a.value)
W('>')
# Push state, recurse, pop state.
state, self.state = self.state, (ns_local, ns_rendered, xml_attrs, ns_unused_inherited)
for c in _children(node):
_implementation.handlers[c.nodeType](self, c)
self.state = state
if name: W('</%s>' % name)
handlers[Node.ELEMENT_NODE] = _do_element
def Canonicalize(node, output=None, **kw):
'''Canonicalize(node, output=None, **kw) -> UTF-8
Canonicalize a DOM document/element node and all descendents.
Return the text; if output is specified then output.write will
be called to output the text and None will be returned
Keyword parameters:
nsdict: a dictionary of prefix:uri namespace entries
assumed to exist in the surrounding context
comments: keep comments if non-zero (default is 0)
subset: Canonical XML subsetting resulting from XPath
(default is [])
unsuppressedPrefixes: do exclusive C14N, and this specifies the
prefixes that should be inherited.
'''
if output:
apply(_implementation, (node, output.write), kw)
else:
s = StringIO.StringIO()
apply(_implementation, (node, s.write), kw)
return s.getvalue()
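# Usage sketch (illustrative; any DOM document works):
#
#   from xml.dom import minidom
#   doc = minidom.parseString('<a xmlns="urn:x"><b/></a>')
#   canonical = Canonicalize(doc, comments=1)
#
# Passing unsuppressedPrefixes=[] (or a list of prefixes) switches to
# exclusive c14n as described above.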
|
{
"content_hash": "12d66d4e7a09d9e4ef794be5af1dbd35",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 124,
"avg_line_length": 37.91203703703704,
"alnum_prop": 0.5802295762608377,
"repo_name": "acigna/pywez",
"id": "33305bf2b162e92b8df121a1f1200af6b204a843",
"size": "16401",
"binary": false,
"copies": "290",
"ref": "refs/heads/master",
"path": "wstools/c14n.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "338"
},
{
"name": "CSS",
"bytes": "11480"
},
{
"name": "Python",
"bytes": "1095192"
},
{
"name": "Shell",
"bytes": "278"
},
{
"name": "TeX",
"bytes": "152117"
}
],
"symlink_target": ""
}
|
"""
ZMQ example using python3's asyncio
Litecoind should be started with the command line arguments:
litecoind -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if (sys.version_info.major, sys.version_info.minor) < (3, 5):
print("This example only works with Python 3.5 and greater")
sys.exit(1)
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = asyncio.get_event_loop()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
async def handle(self) :
msg = await self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
daemon = ZMQHandler()
daemon.start()
|
{
"content_hash": "1a3b7883a0efaa87dbef1ccad5da89d4",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 107,
"avg_line_length": 36.1375,
"alnum_prop": 0.6281563472846766,
"repo_name": "untrustbank/litecoin",
"id": "acb6d6dee293110c0e37b00ad22407ff2d3e9b3c",
"size": "3106",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/zmq/zmq_sub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "685657"
},
{
"name": "C++",
"bytes": "5481429"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "195367"
},
{
"name": "Makefile",
"bytes": "114437"
},
{
"name": "Objective-C",
"bytes": "35416"
},
{
"name": "Objective-C++",
"bytes": "6755"
},
{
"name": "Python",
"bytes": "1319104"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "66256"
}
],
"symlink_target": ""
}
|
import os
import sys
import re
from collections import namedtuple
import GeoIP
OUTPUT_FMT = "{0} - {country_code}, {region}, {region_name}, {city}, " + \
"{area_code}"
# borrowed from https://gist.github.com/nathangrigg/2363393
regex = r'(?P<owner>\S+) (?P<bucket>\S+) (?P<time>\[[^]]*\]) (?P<ip>\S+) ' + \
r'(?P<requester>\S+) (?P<reqid>\S+) (?P<operation>\S+) ' + \
r'(?P<key>\S+) (?P<request>"[^"]*") (?P<status>\S+) ' + \
r'(?P<error>\S+) (?P<bytes>\S+) (?P<size>\S+) (?P<totaltime>\S+) ' + \
r'(?P<turnaround>\S+) (?P<referer>"[^"]*") ' + \
r'(?P<useragent>"+?[^"]*"+?) (?P<version>\S)'
pattern = re.compile(regex)
fields = ['owner', 'bucket', 'time', 'ip', 'requester', 'reqid', 'operation',
'key', 'request', 'status', 'error', 'bytes', 'size', 'totaltime',
'turnaround', 'referer', 'useragent', 'version']
Message = namedtuple('Message', fields)
def handle_file(f):
"""Loop over lines in f and return list of log meessage tuples
"""
msgs = []
for line in f:
m = pattern.match(line)
if not m:
            print line
            raise Exception('unparseable log line')
else:
msgs.append(Message(**m.groupdict()))
return msgs
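# Illustrative input (hypothetical log line): each line such as
#   'OWNER bucket [06/Feb/2014:00:00:38 +0000] 1.2.3.4 requester reqid '
#   'WEBSITE.GET.OBJECT index.html "GET /index.html HTTP/1.1" 200 - 1234 '
#   '1234 10 9 "-" "Mozilla/5.0" -'
# is parsed into one Message namedtuple, so the filters below can test
# fields like m.operation, m.useragent and m.status by name.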
def f1(m):
"""Filter messages that correspond to HTTP GET
"""
return m.operation.startswith('WEBSITE.GET')
def f2(m):
"""Filter message where the useragent doesn't look like a bot
"""
keywords = ['bot', 'spider', 'feedly', 'slurp']
return not any(t in m.useragent.lower() for t in keywords)
def f3(m):
"""Filter messages for non-errors
"""
return int(m.status) == 200
def main(argv):
# object for geolocating requests
gi = GeoIP.open(
"/usr/local/var/GeoIP/GeoIPCity.dat",
GeoIP.GEOIP_MEMORY_CACHE
)
# filtered list of message tuples
log = []
# loop over local directory containing AWS S3 logs, parsing into message
# tuples before filtering
for i in os.listdir(argv[1]):
filename = os.path.join(argv[1], i)
with open(filename, 'r') as f:
log += filter(f3, filter(f2, filter(f1, handle_file(f))))
# find the set of unique request IP addresses and geolocate them
unique_ips = set(l.ip for l in log)
for ip in unique_ips:
gir = gi.record_by_addr(ip)
if gir is not None:
print(OUTPUT_FMT.format(ip, **gir))
if __name__ == '__main__':
main(sys.argv)
|
{
"content_hash": "42bf5cc921176710cac8a5748840247c",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 78,
"avg_line_length": 28.627906976744185,
"alnum_prop": 0.5641754670999187,
"repo_name": "bradfordboyle/python-projects",
"id": "4ebebaea65e36e279109dd735197ec7b846b44f9",
"size": "2508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "s3-log-parser/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "643"
},
{
"name": "Python",
"bytes": "45644"
},
{
"name": "Shell",
"bytes": "971"
}
],
"symlink_target": ""
}
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from io import open
from os import path
from re import search, MULTILINE
def read(filename, encoding='utf-8'):
base_dir = path.abspath(path.dirname(__file__))
with open(path.join(base_dir, filename), 'r', encoding=encoding) as f:
return f.read()
def find_version(filename, encoding='utf-8'):
"""
Reads the version string from the specified file. It should be in the
format:
.. code-block:: python
__version__ = 'version-string'
Where version-string should be the version number.
Adapted from packaging.python.org
"""
version_file = read(filename, encoding)
version_match = search(
r'^__version__\s+=\s+[\'"](?P<version>[^\'"]+)[\'"]',
version_file,
MULTILINE)
if version_match:
return version_match.group('version')
raise RuntimeError('Unable to find version string.')
requirements = [
'numpy>=1.9,<1.11',
'six>=1.8,<2',
'astropy>=1.3,<2.0',
]
setup(
name='nanoscope',
version=find_version('nanoscope/__init__.py'),
description='Library to parse and process of Nanoscope Dimension AFM files',
long_description=read('README.rst'),
url='https://github.com/jmarini/nanoscope',
author='Jonathan Marini',
author_email='jonathan.r.marini@gmail.com',
packages=['nanoscope'],
install_requires=requirements,
test_suite='tests',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
],
)
|
{
"content_hash": "2c49e32f1f510d0e3234c174137a68f0",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 80,
"avg_line_length": 28.97142857142857,
"alnum_prop": 0.621301775147929,
"repo_name": "jmarini/nanoscope",
"id": "07abb39dc737f96e8f5484386a4b736a7a72eb57",
"size": "2074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63215"
}
],
"symlink_target": ""
}
|
import pytest
import masfotos
import os
from urllib.parse import urlparse
"""
Fixtures
"""
@pytest.fixture
def app():
return masfotos.app.test_client()
"""
Helpers
"""
def toBytes(s):
return bytes(s, 'UTF-8')
def test_should_redirect_to_about_with_signin_when_request_to_home_is_from_unauthenticated_user(app):
"""Given a user has requested their home page
- when the user is un-authenticated, redirect to about page with signin
"""
with app.session_transaction() as session:
session.clear()
resp = app.get('/')
assert resp._status_code == 200
#assert resp._status_code == 302
#assert urlparse(resp.location).path == "/about"
#resp = app.get('/', follow_redirects=True)
assert toBytes('masfotos is a simple application for aggregating your Flickr and Youtube content.') in resp.data
assert toBytes('Signin with Google') in resp.data
"""Given a user that's authenticated but unconfirmed,
when home page requested, send them to sign up page."""
def test_should_redirect_to_signup_when_request_from_authenticated_user_with_unconfirmed_role(app):
with app.session_transaction() as session:
session['user'] = {
'email': 'user@mail.com',
'name': 'daryl zero',
'role': masfotos.ROLE_UNCONFIRMED
}
resp = app.get('/', follow_redirects = False)
assert resp._status_code == 302
assert urlparse(resp.location).path == '/signup'
def test_should_serve_home_when_request_is_from_authenticated_user(app):
"""Given a user has requested their home page
- when the user is authenticated, then serve the home page
"""
with app.session_transaction() as session:
# fake user in session
session['user'] = {
'name': 'daryl zero',
'role': masfotos.ROLE_USER
}
resp = app.get('/')
assert resp._status_code == 200
assert toBytes('<h1>Home</h1>') in resp.data
def test_should_redirect_to_oauth_provider_on_signin(app):
"""Given a user is on the login page
- when the user signs in, then redirect them to OAuth provider for authentication
"""
resp = app.get('/signin/google', follow_redirects=False)
assert resp._status_code == 302
assert resp.location.startswith('https://accounts.google.com/o/oauth2/auth?response_type=code')
def test_should_get_user_profile_and_serve_home_page_when_user_grants_authorization(monkeypatch, app):
"""given a user consent oresponse from an OAuth provider
- when the response is success, then save the user profile and serve the home page
"""
# save the original
get_google_oauth_orig = masfotos.get_google_oauth
mock_user = {'name': 'daryl zero', 'email': 'usersd@email.com', 'role': masfotos.ROLE_USER }
# mocks the response from get profile request
class MockGetProfileResponse():
def __init__(self, status=200):
self.status_code = status
def json(self):
return mock_user
# we replace the get_google_oauth with one that returns
# a monkeypatched version of the OAuth2Session
def mock_get_google_oauth():
oauth_config, oauth_session = get_google_oauth_orig()
oauth_session.get = lambda p: MockGetProfileResponse() if p == oauth_config['profile_url'] else None
oauth_session.fetch_token = lambda t,client_secret,authorization_response: {
'access_token': '__access_token__',
'token_type': 'Bearer',
'refresh_token': '__refresh_token__',
'id_token': '__id_token__'
}
return (oauth_config, oauth_session)
monkeypatch.setattr(masfotos, "get_google_oauth", mock_get_google_oauth)
monkeypatch.setattr(masfotos, "extract_jwt_info", lambda t: mock_user)
monkeypatch.setattr(masfotos, "find_or_create_user", lambda e: mock_user)
# set the 'state' that is used to cover CSRF attack
with app.session_transaction() as session:
session['signin_oauth_state'] = '_oauth_state_'
# simulate the callback from Google
resp = app.get('/signin/google/complete?code=_code_&state=_oauth_state_', follow_redirects=True)
# if everything checks out, we should be redirected to home page
assert toBytes('<h1>Home</h1>') in resp.data
def test_should_serve_unauthorized_page_when_user_denies_authorization(app):
"""given a user consent oresponse from an OAuth provider
- when the response is failure, then serve the user an "unauthorized" error page
"""
# set the 'state' that is used to cover CSRF attack
with app.session_transaction() as session:
session['signin_oauth_state'] = '_oauth_state_'
# when callback from Google contains error response
resp = app.get('/signin/google/complete?error=accessdenied&state=_oauth_state_#')
assert resp._status_code == 401
# other possible scenarios not defined in our spec:
# missing code/state params from callback
resp = app.get('/signin/google/complete')
assert resp._status_code == 401
# mismatch state
resp = app.get('/signin/google/complete?code=xxxx&state=not_equal_to_one_in_session')
assert resp._status_code == 401
if __name__ == '__main__':
pytest.main()
|
{
"content_hash": "0385cbf02c2d3b61fd17f13601dd0112",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 113,
"avg_line_length": 32.22516556291391,
"alnum_prop": 0.7184545828195643,
"repo_name": "ikumen/masfotos",
"id": "d308b7baf96c87898702f03ad7f8d2a35ee30d99",
"size": "4866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "masfotos_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "16389"
},
{
"name": "Python",
"bytes": "35163"
},
{
"name": "Shell",
"bytes": "9193"
}
],
"symlink_target": ""
}
|
import hashlib
from neutron.api.v2 import attributes
from neutron.openstack.common import log
from neutron import version
LOG = log.getLogger(__name__)
MAX_DISPLAY_NAME_LEN = 40
NEUTRON_VERSION = version.version_info.release_string()
# Allowed network types for the NSX Plugin
class NetworkTypes:
"""Allowed provider network types for the NSX Plugin."""
L3_EXT = 'l3_ext'
STT = 'stt'
GRE = 'gre'
FLAT = 'flat'
VLAN = 'vlan'
BRIDGE = 'bridge'
def get_tags(**kwargs):
tags = ([dict(tag=value, scope=key)
for key, value in kwargs.iteritems()])
tags.append({"tag": NEUTRON_VERSION, "scope": "quantum"})
return sorted(tags)
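# Example (illustrative): get_tags(os_tid='tenant-1') returns tag dicts of
# the form {'tag': 'tenant-1', 'scope': 'os_tid'} together with the version
# tag {'tag': NEUTRON_VERSION, 'scope': 'quantum'}, sorted so that tag sets
# compare deterministically.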
def device_id_to_vm_id(device_id, obfuscate=False):
# device_id can be longer than 40 characters, for example
# a device_id for a dhcp port is like the following:
#
# dhcp83b5fdeb-e3b4-5e18-ac5f-55161...80747326-47d7-46c2-a87a-cf6d5194877c
#
# To fit it into an NSX tag we need to hash it, however device_id
# used for ports associated to VM's are small enough so let's skip the
# hashing
if len(device_id) > MAX_DISPLAY_NAME_LEN or obfuscate:
return hashlib.sha1(device_id).hexdigest()
else:
return device_id
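# Example (illustrative): a 36-character VM uuid such as
# '80747326-47d7-46c2-a87a-cf6d5194877c' is returned unchanged, while the
# longer dhcp* identifier shown above exceeds MAX_DISPLAY_NAME_LEN and is
# replaced by its 40-character SHA-1 hex digest.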
def check_and_truncate(display_name):
if (attributes.is_attr_set(display_name) and
len(display_name) > MAX_DISPLAY_NAME_LEN):
LOG.debug("Specified name:'%s' exceeds maximum length. "
"It will be truncated on NSX", display_name)
return display_name[:MAX_DISPLAY_NAME_LEN]
return display_name or ''
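# E.g. (illustrative) a 60-character display name is truncated to its first
# 40 characters before being sent to NSX, and an unset name becomes ''.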
|
{
"content_hash": "cf17f20504b3374072d3f8ba282451b9",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 78,
"avg_line_length": 30.75,
"alnum_prop": 0.6647904940587868,
"repo_name": "leeseuljeong/leeseulstack_neutron",
"id": "49f5cf01d91176cfb3b5f353c7db161834292d21",
"size": "2226",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "neutron/plugins/vmware/common/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60527"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "8816599"
},
{
"name": "Shell",
"bytes": "11768"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
def is_prime(num):
    if num < 2:
        return False
    for i in range(2, num):
        if num % i == 0:
            return False
    return True
def get_prime(last_prime):
num = last_prime + 1
while True:
if is_prime(num):
return num
num = num + 1
def main():
last_prime = 0
while True:
        c = raw_input('Enter Y if you want to get the next prime number - ')
if c == 'Y':
last_prime = get_prime(last_prime)
print last_prime
else:
break
if __name__ == '__main__':
main()
|
{
"content_hash": "ee136e85bdb1d0bfe22b47c59a593ecc",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 69,
"avg_line_length": 19.357142857142858,
"alnum_prop": 0.48154981549815495,
"repo_name": "igleyy/Python-Projects",
"id": "13aafbd67f7d2d5dfcc55f5ed54566e6d87f3b76",
"size": "562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Numbers/next_prime_number.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10253"
}
],
"symlink_target": ""
}
|
from .server import Application
|
{
"content_hash": "e204721cdefff123fb6a48f03cafffeb",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 31,
"alnum_prop": 0.8709677419354839,
"repo_name": "chakki-works/arXivTimesIndicator",
"id": "70ecc3bb253029a64b550829a901f26d30203690",
"size": "31",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arxivtimes_indicator/server/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2133"
},
{
"name": "HTML",
"bytes": "14391"
},
{
"name": "JavaScript",
"bytes": "7392"
},
{
"name": "Python",
"bytes": "37992"
}
],
"symlink_target": ""
}
|
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
from environ import Env
########## PATH CONFIGURATION
PACKAGE_PATH = dirname(dirname(abspath(__file__)))
PACKAGE_NAME = basename(PACKAGE_PATH)
PROJECT_PATH = dirname(PACKAGE_PATH)
PROJECT_NAME = "DB delle Delibere CIPE" or PACKAGE_NAME
PROJECT_PACKAGE = "delibere" or PACKAGE_NAME
REPO_PATH = dirname(PROJECT_PATH)
REPO_NAME = "cipe-db-delibere" or basename(REPO_PATH)
CONFIG_DIR = 'config'
CONFIG_PATH = join(REPO_PATH, CONFIG_DIR)
RESOURCE_DIR = 'resources'
RESOURCES_PATH = join(REPO_PATH, RESOURCE_DIR)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(PROJECT_PATH)
# load environment variables
Env.read_env(normpath(join(CONFIG_PATH, '.env')))
env = Env()
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DEBUG', False)
DEBUG_TOOLBAR = env.bool('DEBUG_TOOLBAR', False)
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
ADMIN_EMAIL = env('ADMIN_EMAIL', default='admin@%s.com' % PROJECT_NAME)
ADMIN_NAME = env('ADMIN_NAME', default=ADMIN_EMAIL.split('@')[0])
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
(ADMIN_EMAIL, ADMIN_NAME),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# See: https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL', default=ADMIN_EMAIL)
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db(default='sqlite:///{0}'.format(normpath(join(RESOURCES_PATH, 'db', 'default.db'))))
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'Europe/Rome'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'it-IT'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = False
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = env('MEDIA_ROOT', default=normpath(join(RESOURCES_PATH, 'media')))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = env('STATIC_ROOT', default=normpath(join(RESOURCES_PATH, 'static')))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(PACKAGE_PATH, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = r"!^8(s+)^#^r@qmptaajqf+d#dgpn=%(ds56x0_q17cwx$zytu_"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(RESOURCES_PATH, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
normpath(join(PACKAGE_PATH, 'templates')),
],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
},
},
]
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % PACKAGE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
# Django helper
'django_extensions',
# Hierarchic tree manager
'mptt',
# Sortable
'adminsortable2',
'django_admin_listfilter_dropdown',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'rest_framework',
'rest_framework_swagger',
'haystack',
'delibere',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
)
########## END AUTHENTICATION CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': normpath(join(RESOURCES_PATH, 'logs', 'cipe-db-delibere.log')),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 7,
'formatter': 'verbose',
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'delibere': {
'handlers': ['file', ],
'level': 'INFO',
'propagate': True,
}
},
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % PACKAGE_NAME
########## END WSGI CONFIGURATION
########## TESTING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/releases/1.6/#new-test-runner
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
########## END TESTING CONFIGURATION
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': 'http://127.0.0.1:8983/solr/delibere',
'TIMEOUT': 60 * 2,
},
}
REST_FRAMEWORK = {
# Only authenticated users can access the API
# Authentication may be passed through:
# - a jwt header (json web token)
# - a basic_auth header
# - a cookie,
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 100
}
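# Illustrative request (hypothetical host and path) authenticating with a
# JSON web token; HTTP Basic credentials or a session cookie work as
# fallbacks:
#
#   curl -H "Authorization: JWT <token>" \
#       https://example.org/api/delibere/?limit=100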
MPTT_ADMIN_LEVEL_INDENT = 20
|
{
"content_hash": "82f0dabd9ed8764014d0b87756c4088e",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 105,
"avg_line_length": 30.244514106583072,
"alnum_prop": 0.6566127694859039,
"repo_name": "guglielmo/mosic2-db-delibere",
"id": "c9cc627d65f7385af74b85b5111edf2b33633874",
"size": "9648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/delibere/settings/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "132694"
},
{
"name": "HTML",
"bytes": "31797"
},
{
"name": "JavaScript",
"bytes": "16111"
},
{
"name": "Python",
"bytes": "135060"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import argparse
from typing import Iterable
from typing import Iterator
from typing import Sequence
from pre_commit_hooks.util import added_files
from pre_commit_hooks.util import cmd_output
def lower_set(iterable: Iterable[str]) -> set[str]:
return {x.lower() for x in iterable}
def parents(file: str) -> Iterator[str]:
path_parts = file.split('/')
path_parts.pop()
while path_parts:
yield '/'.join(path_parts)
path_parts.pop()
def directories_for(files: set[str]) -> set[str]:
return {parent for file in files for parent in parents(file)}
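# Illustrative example: parents('a/b/c.txt') yields 'a/b' then 'a', so
# directories_for({'a/b/c.txt'}) == {'a/b', 'a'}; directories therefore take
# part in the case-insensitivity check just like files.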
def find_conflicting_filenames(filenames: Sequence[str]) -> int:
repo_files = set(cmd_output('git', 'ls-files').splitlines())
repo_files |= directories_for(repo_files)
relevant_files = set(filenames) | added_files()
relevant_files |= directories_for(relevant_files)
repo_files -= relevant_files
retv = 0
# new file conflicts with existing file
conflicts = lower_set(repo_files) & lower_set(relevant_files)
# new file conflicts with other new file
lowercase_relevant_files = lower_set(relevant_files)
for filename in set(relevant_files):
if filename.lower() in lowercase_relevant_files:
lowercase_relevant_files.remove(filename.lower())
else:
conflicts.add(filename.lower())
if conflicts:
conflicting_files = [
x for x in repo_files | relevant_files
if x.lower() in conflicts
]
for filename in sorted(conflicting_files):
print(f'Case-insensitivity conflict found: {filename}')
retv = 1
return retv
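# Illustrative run (hypothetical filenames): with 'README.md' already tracked
# and 'readme.md' newly added, the lowercase intersection above flags the
# pair, both spellings are printed, and the hook returns 1.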
def main(argv: Sequence[str] | None = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
'filenames', nargs='*',
help='Filenames pre-commit believes are changed.',
)
args = parser.parse_args(argv)
return find_conflicting_filenames(args.filenames)
if __name__ == '__main__':
raise SystemExit(main())
|
{
"content_hash": "ef21ba254d12c75eb8f70c5fc805abbc",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 67,
"avg_line_length": 28.430555555555557,
"alnum_prop": 0.6580361504640938,
"repo_name": "pre-commit/pre-commit-hooks",
"id": "33a13f1b94709a6c756ef46ead60eb9b1ade2efb",
"size": "2047",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pre_commit_hooks/check_case_conflict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "145874"
}
],
"symlink_target": ""
}
|
"""Tests for extensions module."""
import unittest
import zlib
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import common
from mod_pywebsocket import extensions
class ExtensionsTest(unittest.TestCase):
"""A unittest for non-class methods in extensions.py"""
def test_parse_window_bits(self):
self.assertRaises(ValueError, extensions._parse_window_bits, None)
self.assertRaises(ValueError, extensions._parse_window_bits, 'foobar')
self.assertRaises(ValueError, extensions._parse_window_bits, ' 8 ')
self.assertRaises(ValueError, extensions._parse_window_bits, 'a8a')
self.assertRaises(ValueError, extensions._parse_window_bits, '00000')
self.assertRaises(ValueError, extensions._parse_window_bits, '00008')
self.assertRaises(ValueError, extensions._parse_window_bits, '0x8')
self.assertRaises(ValueError, extensions._parse_window_bits, '9.5')
self.assertRaises(ValueError, extensions._parse_window_bits, '8.0')
self.assertTrue(extensions._parse_window_bits, '8')
self.assertTrue(extensions._parse_window_bits, '15')
self.assertRaises(ValueError, extensions._parse_window_bits, '-8')
self.assertRaises(ValueError, extensions._parse_window_bits, '0')
self.assertRaises(ValueError, extensions._parse_window_bits, '7')
self.assertRaises(ValueError, extensions._parse_window_bits, '16')
self.assertRaises(
ValueError, extensions._parse_window_bits, '10000000')
class CompressionMethodParameterParserTest(unittest.TestCase):
"""A unittest for _parse_compression_method which parses the compression
    method description used by the perframe-compression and
    permessage-compression extensions in their "method" extension parameter.
"""
def test_parse_method_simple(self):
method_list = extensions._parse_compression_method('foo')
self.assertEqual(1, len(method_list))
method = method_list[0]
self.assertEqual('foo', method.name())
self.assertEqual(0, len(method.get_parameters()))
def test_parse_method_with_parameter(self):
method_list = extensions._parse_compression_method('foo; x; y=10')
self.assertEqual(1, len(method_list))
method = method_list[0]
self.assertEqual('foo', method.name())
self.assertEqual(2, len(method.get_parameters()))
self.assertTrue(method.has_parameter('x'))
self.assertEqual(None, method.get_parameter_value('x'))
self.assertTrue(method.has_parameter('y'))
self.assertEqual('10', method.get_parameter_value('y'))
def test_parse_method_with_quoted_parameter(self):
method_list = extensions._parse_compression_method(
'foo; x="Hello World"; y=10')
self.assertEqual(1, len(method_list))
method = method_list[0]
self.assertEqual('foo', method.name())
self.assertEqual(2, len(method.get_parameters()))
self.assertTrue(method.has_parameter('x'))
self.assertEqual('Hello World', method.get_parameter_value('x'))
self.assertTrue(method.has_parameter('y'))
self.assertEqual('10', method.get_parameter_value('y'))
def test_parse_method_multiple(self):
method_list = extensions._parse_compression_method('foo, bar')
self.assertEqual(2, len(method_list))
self.assertEqual('foo', method_list[0].name())
self.assertEqual(0, len(method_list[0].get_parameters()))
self.assertEqual('bar', method_list[1].name())
self.assertEqual(0, len(method_list[1].get_parameters()))
def test_parse_method_multiple_methods_with_quoted_parameter(self):
method_list = extensions._parse_compression_method(
'foo; x="Hello World", bar; y=10')
self.assertEqual(2, len(method_list))
self.assertEqual('foo', method_list[0].name())
self.assertEqual(1, len(method_list[0].get_parameters()))
self.assertTrue(method_list[0].has_parameter('x'))
self.assertEqual('Hello World',
method_list[0].get_parameter_value('x'))
self.assertEqual('bar', method_list[1].name())
self.assertEqual(1, len(method_list[1].get_parameters()))
self.assertTrue(method_list[1].has_parameter('y'))
self.assertEqual('10', method_list[1].get_parameter_value('y'))
def test_create_method_desc_simple(self):
params = common.ExtensionParameter('foo')
desc = extensions._create_accepted_method_desc('foo',
params.get_parameters())
self.assertEqual('foo', desc)
def test_create_method_desc_with_parameters(self):
params = common.ExtensionParameter('foo')
params.add_parameter('x', 'Hello, World')
params.add_parameter('y', '10')
desc = extensions._create_accepted_method_desc('foo',
params.get_parameters())
self.assertEqual('foo; x="Hello, World"; y=10', desc)
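# Taken together, the tests above pin down a roundtrip property of the method
# description grammar (illustrative, assuming the quoting produced by
# _create_accepted_method_desc is canonical):
#   desc = 'foo; x="Hello, World"; y=10'
#   method = extensions._parse_compression_method(desc)[0]
#   extensions._create_accepted_method_desc(
#       method.name(), method.get_parameters()) == desc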
class DeflateFrameExtensionProcessorParsingTest(unittest.TestCase):
"""A unittest for checking that DeflateFrameExtensionProcessor parses given
extension parameter correctly.
"""
def test_registry(self):
processor = extensions.get_extension_processor(
common.ExtensionParameter('deflate-frame'))
self.assertIsInstance(processor,
extensions.DeflateFrameExtensionProcessor)
processor = extensions.get_extension_processor(
common.ExtensionParameter('x-webkit-deflate-frame'))
self.assertIsInstance(processor,
extensions.DeflateFrameExtensionProcessor)
def test_minimal_offer(self):
processor = extensions.DeflateFrameExtensionProcessor(
common.ExtensionParameter('perframe-deflate'))
response = processor.get_extension_response()
self.assertEqual('perframe-deflate', response.name())
self.assertEqual(0, len(response.get_parameters()))
self.assertEqual(zlib.MAX_WBITS,
processor._rfc1979_deflater._window_bits)
self.assertFalse(processor._rfc1979_deflater._no_context_takeover)
def test_offer_with_max_window_bits(self):
parameter = common.ExtensionParameter('perframe-deflate')
parameter.add_parameter('max_window_bits', '10')
processor = extensions.DeflateFrameExtensionProcessor(parameter)
response = processor.get_extension_response()
self.assertEqual('perframe-deflate', response.name())
self.assertEqual(0, len(response.get_parameters()))
self.assertEqual(10, processor._rfc1979_deflater._window_bits)
def test_offer_with_out_of_range_max_window_bits(self):
parameter = common.ExtensionParameter('perframe-deflate')
parameter.add_parameter('max_window_bits', '0')
processor = extensions.DeflateFrameExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
def test_offer_with_max_window_bits_without_value(self):
parameter = common.ExtensionParameter('perframe-deflate')
parameter.add_parameter('max_window_bits', None)
processor = extensions.DeflateFrameExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
def test_offer_with_no_context_takeover(self):
parameter = common.ExtensionParameter('perframe-deflate')
parameter.add_parameter('no_context_takeover', None)
processor = extensions.DeflateFrameExtensionProcessor(parameter)
response = processor.get_extension_response()
self.assertEqual('perframe-deflate', response.name())
self.assertEqual(0, len(response.get_parameters()))
self.assertTrue(processor._rfc1979_deflater._no_context_takeover)
def test_offer_with_no_context_takeover_with_value(self):
parameter = common.ExtensionParameter('perframe-deflate')
parameter.add_parameter('no_context_takeover', 'foobar')
processor = extensions.DeflateFrameExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
def test_offer_with_unknown_parameter(self):
parameter = common.ExtensionParameter('perframe-deflate')
parameter.add_parameter('foo', 'bar')
processor = extensions.DeflateFrameExtensionProcessor(parameter)
response = processor.get_extension_response()
self.assertEqual('perframe-deflate', response.name())
self.assertEqual(0, len(response.get_parameters()))
class PerFrameCompressExtensionProcessorTest(unittest.TestCase):
def test_registry(self):
processor = extensions.get_extension_processor(
common.ExtensionParameter('perframe-compress'))
self.assertIsInstance(processor,
extensions.PerFrameCompressExtensionProcessor)
class PerMessageDeflateExtensionProcessorParsingTest(unittest.TestCase):
"""A unittest for checking that PerMessageDeflateExtensionProcessor parses
given extension parameter correctly.
"""
def test_registry(self):
processor = extensions.get_extension_processor(
common.ExtensionParameter('permessage-deflate'))
self.assertIsInstance(processor,
extensions.PerMessageDeflateExtensionProcessor)
def test_minimal_offer(self):
processor = extensions.PerMessageDeflateExtensionProcessor(
common.ExtensionParameter('permessage-deflate'))
response = processor.get_extension_response()
self.assertEqual('permessage-deflate', response.name())
self.assertEqual(0, len(response.get_parameters()))
self.assertEqual(zlib.MAX_WBITS,
processor._rfc1979_deflater._window_bits)
self.assertFalse(processor._rfc1979_deflater._no_context_takeover)
def test_offer_with_max_window_bits(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('s2c_max_window_bits', '10')
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
response = processor.get_extension_response()
self.assertEqual('permessage-deflate', response.name())
self.assertEqual([('s2c_max_window_bits', '10')],
response.get_parameters())
self.assertEqual(10, processor._rfc1979_deflater._window_bits)
def test_offer_with_out_of_range_max_window_bits(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('s2c_max_window_bits', '0')
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
def test_offer_with_max_window_bits_without_value(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('s2c_max_window_bits', None)
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
def test_offer_with_no_context_takeover(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('s2c_no_context_takeover', None)
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
response = processor.get_extension_response()
self.assertEqual('permessage-deflate', response.name())
self.assertEqual([('s2c_no_context_takeover', None)],
response.get_parameters())
self.assertTrue(processor._rfc1979_deflater._no_context_takeover)
def test_offer_with_no_context_takeover_with_value(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('s2c_no_context_takeover', 'foobar')
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
def test_offer_with_unknown_parameter(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('foo', 'bar')
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
class PerMessageDeflateExtensionProcessorBuildingTest(unittest.TestCase):
"""A unittest for checking that PerMessageDeflateExtensionProcessor builds
a response based on specified options correctly.
"""
def test_response_with_max_window_bits(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('c2s_max_window_bits', None)
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
processor.set_c2s_max_window_bits(10)
response = processor.get_extension_response()
self.assertEqual('permessage-deflate', response.name())
self.assertEqual([('c2s_max_window_bits', '10')],
response.get_parameters())
def test_response_with_max_window_bits_without_client_permission(self):
processor = extensions.PerMessageDeflateExtensionProcessor(
common.ExtensionParameter('permessage-deflate'))
processor.set_c2s_max_window_bits(10)
response = processor.get_extension_response()
self.assertIsNone(response)
def test_response_with_true_for_no_context_takeover(self):
processor = extensions.PerMessageDeflateExtensionProcessor(
common.ExtensionParameter('permessage-deflate'))
processor.set_c2s_no_context_takeover(True)
response = processor.get_extension_response()
self.assertEqual('permessage-deflate', response.name())
self.assertEqual([('c2s_no_context_takeover', None)],
response.get_parameters())
def test_response_with_false_for_no_context_takeover(self):
processor = extensions.PerMessageDeflateExtensionProcessor(
common.ExtensionParameter('permessage-deflate'))
processor.set_c2s_no_context_takeover(False)
response = processor.get_extension_response()
self.assertEqual('permessage-deflate', response.name())
self.assertEqual(0, len(response.get_parameters()))
class PerMessageCompressExtensionProcessorTest(unittest.TestCase):
def test_registry(self):
processor = extensions.get_extension_processor(
common.ExtensionParameter('permessage-compress'))
self.assertIsInstance(processor,
extensions.PerMessageCompressExtensionProcessor)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
|
{
"content_hash": "00ef558a24065d1b30b8be9957913322",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 79,
"avg_line_length": 44.24404761904762,
"alnum_prop": 0.682900578501278,
"repo_name": "haoxli/web-testing-service",
"id": "b50d74a244ec5cd3cc19f70fc13390cd60cd03c9",
"size": "16420",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tools/pywebsocket/src/test/test_extensions.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "28136"
},
{
"name": "CSS",
"bytes": "123435"
},
{
"name": "Cucumber",
"bytes": "1479"
},
{
"name": "GLSL",
"bytes": "3495"
},
{
"name": "HTML",
"bytes": "22604652"
},
{
"name": "JavaScript",
"bytes": "2513665"
},
{
"name": "Python",
"bytes": "480857"
},
{
"name": "Shell",
"bytes": "46586"
}
],
"symlink_target": ""
}
|
import os
from unittest import SkipTest
from django.contrib.staticfiles.testing import StaticLiveServerCase
from django.utils.module_loading import import_by_path
from django.utils.translation import ugettext as _
class AdminSeleniumWebDriverTestCase(StaticLiveServerCase):
available_apps = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
]
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
@classmethod
def setUpClass(cls):
if not os.environ.get('DJANGO_SELENIUM_TESTS', False):
raise SkipTest('Selenium tests not requested')
try:
cls.selenium = import_by_path(cls.webdriver_class)()
except Exception as e:
raise SkipTest('Selenium webdriver "%s" not installed or not '
'operational: %s' % (cls.webdriver_class, str(e)))
super(AdminSeleniumWebDriverTestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'selenium'):
cls.selenium.quit()
super(AdminSeleniumWebDriverTestCase, cls).tearDownClass()
def wait_until(self, callback, timeout=10):
"""
Helper function that blocks the execution of the tests until the
specified callback returns a value that is not falsy. This function can
be called, for example, after clicking a link or submitting a form.
See the other public methods that call this function for more details.
"""
from selenium.webdriver.support.wait import WebDriverWait
WebDriverWait(self.selenium, timeout).until(callback)
def wait_loaded_tag(self, tag_name, timeout=10):
"""
Helper function that blocks until the element with the given tag name
is found on the page.
"""
self.wait_until(
lambda driver: driver.find_element_by_tag_name(tag_name),
timeout
)
def wait_page_loaded(self):
"""
Block until page has started to load.
"""
from selenium.common.exceptions import TimeoutException
try:
# Wait for the next page to be loaded
self.wait_loaded_tag('body')
except TimeoutException:
            # IE7 occasionally returns an error "Internet Explorer cannot
# display the webpage" and doesn't load the next page. We just
# ignore it.
pass
def admin_login(self, username, password, login_url='/admin/'):
"""
Helper function to log into the admin.
"""
self.selenium.get('%s%s' % (self.live_server_url, login_url))
username_input = self.selenium.find_element_by_name('username')
username_input.send_keys(username)
password_input = self.selenium.find_element_by_name('password')
password_input.send_keys(password)
login_text = _('Log in')
self.selenium.find_element_by_xpath(
'//input[@value="%s"]' % login_text).click()
self.wait_page_loaded()
def get_css_value(self, selector, attribute):
"""
        Helper function that returns the value for the CSS attribute of a
        DOM element specified by the given selector. Uses the jQuery that ships
with Django.
"""
return self.selenium.execute_script(
'return django.jQuery("%s").css("%s")' % (selector, attribute))
def get_select_option(self, selector, value):
"""
Returns the <OPTION> with the value `value` inside the <SELECT> widget
identified by the CSS selector `selector`.
"""
from selenium.common.exceptions import NoSuchElementException
options = self.selenium.find_elements_by_css_selector('%s > option' % selector)
for option in options:
if option.get_attribute('value') == value:
return option
raise NoSuchElementException('Option "%s" not found in "%s"' % (value, selector))
def assertSelectOptions(self, selector, values):
"""
Asserts that the <SELECT> widget identified by `selector` has the
options with the given `values`.
"""
options = self.selenium.find_elements_by_css_selector('%s > option' % selector)
actual_values = []
for option in options:
actual_values.append(option.get_attribute('value'))
self.assertEqual(values, actual_values)
def has_css_class(self, selector, klass):
"""
Returns True if the element identified by `selector` has the CSS class
`klass`.
"""
return (self.selenium.find_element_by_css_selector(selector)
.get_attribute('class').find(klass) != -1)
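# Illustrative usage sketch (the subclass, credentials, and selectors below
# are hypothetical, not part of Django):
#
#     class MyAdminSeleniumTests(AdminSeleniumWebDriverTestCase):
#         def test_dashboard_loads(self):
#             self.admin_login('admin', 'secret')
#             self.assertTrue(self.has_css_class('#container', 'dashboard'))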
|
{
"content_hash": "a6d2475ea7fdbdfcef6619c7a4ecb6e9",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 89,
"avg_line_length": 39.26829268292683,
"alnum_prop": 0.6277432712215321,
"repo_name": "adambrenecki/django",
"id": "4b424b72cd4cb097100f3ed21a679753bc523c32",
"size": "4830",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/contrib/admin/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50381"
},
{
"name": "JavaScript",
"bytes": "100819"
},
{
"name": "Python",
"bytes": "8829204"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
}
|
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname(os.path.realpath(__file__ ))
>>> datadir = os.path.realpath(os.path.join(filepath,
... '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import os.path as op
from ..traits_extension import isdefined
from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec,
File)
from .base import MRTrix3BaseInputSpec, MRTrix3Base
class ResponseSDInputSpec(MRTrix3BaseInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-2,
desc='input diffusion weighted images')
out_file = File(
'response.txt', argstr='%s', mandatory=True, position=-1,
usedefault=True, desc='output file containing SH coefficients')
# DW Shell selection options
shell = traits.List(traits.Float, sep=',', argstr='-shell %s',
desc='specify one or more dw gradient shells')
in_mask = File(exists=True, argstr='-mask %s',
desc='provide initial mask image')
max_sh = traits.Int(8, argstr='-lmax %d',
desc='maximum harmonic degree of response function')
out_sf = File('sf_mask.nii.gz', argstr='-sf %s',
desc='write a mask containing single-fibre voxels')
test_all = traits.Bool(False, argstr='-test_all',
desc='re-test all voxels at every iteration')
# Optimization
iterations = traits.Int(0, argstr='-max_iters %d',
desc='maximum number of iterations per pass')
max_change = traits.Float(
argstr='-max_change %f',
desc=('maximum percentile change in any response function coefficient;'
' if no individual coefficient changes by more than this '
'fraction, the algorithm is terminated.'))
# Thresholds
vol_ratio = traits.Float(
.15, argstr='-volume_ratio %f',
desc=('maximal volume ratio between the sum of all other positive'
' lobes in the voxel and the largest FOD lobe'))
disp_mult = traits.Float(
1., argstr='-dispersion_multiplier %f',
desc=('dispersion of FOD lobe must not exceed some threshold as '
'determined by this multiplier and the FOD dispersion in other '
'single-fibre voxels. The threshold is: (mean + (multiplier * '
'(mean - min))); default = 1.0. Criterion is only applied in '
'second pass of RF estimation.'))
int_mult = traits.Float(
2., argstr='-integral_multiplier %f',
desc=('integral of FOD lobe must not be outside some range as '
'determined by this multiplier and FOD lobe integral in other'
' single-fibre voxels. The range is: (mean +- (multiplier * '
'stdev)); default = 2.0. Criterion is only applied in second '
'pass of RF estimation.'))
class ResponseSDOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='the output response file')
out_sf = File(desc=('mask containing single-fibre voxels'))
class ResponseSD(MRTrix3Base):
"""
Generate an appropriate response function from the image data for
spherical deconvolution.
.. [1] Tax, C. M.; Jeurissen, B.; Vos, S. B.; Viergever, M. A. and
Leemans, A., Recursive calibration of the fiber response function
for spherical deconvolution of diffusion MRI data. NeuroImage,
2014, 86, 67-80
Example
-------
>>> import nipype.interfaces.mrtrix3 as mrt
>>> resp = mrt.ResponseSD()
>>> resp.inputs.in_file = 'dwi.mif'
>>> resp.inputs.in_mask = 'mask.nii.gz'
>>> resp.inputs.grad_fsl = ('bvecs', 'bvals')
>>> resp.cmdline # doctest: +ELLIPSIS +IGNORE_UNICODE
'dwi2response -fslgrad bvecs bvals -mask mask.nii.gz dwi.mif response.txt'
>>> resp.run() # doctest: +SKIP
"""
_cmd = 'dwi2response'
input_spec = ResponseSDInputSpec
output_spec = ResponseSDOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = op.abspath(self.inputs.out_file)
if isdefined(self.inputs.out_sf):
outputs['out_sf'] = op.abspath(self.inputs.out_sf)
return outputs
class ACTPrepareFSLInputSpec(CommandLineInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-2,
desc='input anatomical image')
out_file = File(
'act_5tt.mif', argstr='%s', mandatory=True, position=-1,
usedefault=True, desc='output file after processing')
class ACTPrepareFSLOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='the output response file')
class ACTPrepareFSL(CommandLine):
"""
Generate anatomical information necessary for Anatomically
Constrained Tractography (ACT).
Example
-------
>>> import nipype.interfaces.mrtrix3 as mrt
>>> prep = mrt.ACTPrepareFSL()
>>> prep.inputs.in_file = 'T1.nii.gz'
>>> prep.cmdline # doctest: +ELLIPSIS +IGNORE_UNICODE
'act_anat_prepare_fsl T1.nii.gz act_5tt.mif'
>>> prep.run() # doctest: +SKIP
"""
_cmd = 'act_anat_prepare_fsl'
input_spec = ACTPrepareFSLInputSpec
output_spec = ACTPrepareFSLOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = op.abspath(self.inputs.out_file)
return outputs
class ReplaceFSwithFIRSTInputSpec(CommandLineInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-4,
desc='input anatomical image')
in_t1w = File(exists=True, argstr='%s', mandatory=True, position=-3,
desc='input T1 image')
in_config = File(exists=True, argstr='%s', position=-2,
desc='connectome configuration file')
out_file = File(
'aparc+first.mif', argstr='%s', mandatory=True, position=-1,
usedefault=True, desc='output file after processing')
class ReplaceFSwithFIRSTOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='the output response file')
class ReplaceFSwithFIRST(CommandLine):
"""
Replace deep gray matter structures segmented with FSL FIRST in a
FreeSurfer parcellation.
Example
-------
>>> import nipype.interfaces.mrtrix3 as mrt
>>> prep = mrt.ReplaceFSwithFIRST()
>>> prep.inputs.in_file = 'aparc+aseg.nii'
>>> prep.inputs.in_t1w = 'T1.nii.gz'
>>> prep.inputs.in_config = 'mrtrix3_labelconfig.txt'
>>> prep.cmdline # doctest: +ELLIPSIS +IGNORE_UNICODE
'fs_parc_replace_sgm_first aparc+aseg.nii T1.nii.gz \
mrtrix3_labelconfig.txt aparc+first.mif'
>>> prep.run() # doctest: +SKIP
"""
_cmd = 'fs_parc_replace_sgm_first'
input_spec = ReplaceFSwithFIRSTInputSpec
output_spec = ReplaceFSwithFIRSTOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = op.abspath(self.inputs.out_file)
return outputs
|
{
"content_hash": "0303c45fa6a1e4fb7ba2ab101901b808",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 87,
"avg_line_length": 37.370558375634516,
"alnum_prop": 0.6154577560445531,
"repo_name": "carolFrohlich/nipype",
"id": "8f96154909de37b54d29b53ae813ae98e90fbafd",
"size": "7501",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipype/interfaces/mrtrix3/preprocess.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2320"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "5451077"
},
{
"name": "Shell",
"bytes": "3302"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
# setuptools is required here: distutils' setup() does not understand the
# install_requires keyword and would silently ignore the dependencies.
from setuptools import setup
setup(name='itunes-web',
version='1.0',
description='control itunes over rest api',
author='Anuj Patel',
author_email='patelanuj28@gmail.com',
url='https://github.com/patelanuj28/itunes-web.git',
package_dir={'': 'src'},
py_modules=['itunes_api'],
license='MIT',
packages=['itunes-web'],
install_requires=[
'bottle',
'osascript'
],
)
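# With setuptools, `pip install .` resolves the install_requires dependencies
# (bottle, osascript); under plain distutils they would never be installed.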
|
{
"content_hash": "f56396693da3f798c6bac35fe128119c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 58,
"avg_line_length": 26.941176470588236,
"alnum_prop": 0.574235807860262,
"repo_name": "patelanuj28/itunes-web",
"id": "b035e15b755148f2ef5eb28051d3eb9466bb4f15",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8763"
}
],
"symlink_target": ""
}
|
import os
from mattermost_bot import get_version
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'mattermost_bot'
copyright = u'2016, '
version = get_version()
release = get_version()
exclude_patterns = []
pygments_style = 'sphinx'
html_theme = 'default'
htmlhelp_basename = 'mattermost_botdoc'
latex_documents = [
('index', 'mattermost_bot.tex', u'mattermost_bot Documentation',
u'', 'manual'),
]
man_pages = [
('index', 'mattermost_bot', u'mattermost_bot Documentation',
[u'gotlium'], 1)
]
texinfo_documents = [
('index', 'mattermost_bot', u'Mattermost-bot Documentation',
u'gotlium', 'mattermost_bot', 'One line description of project.',
'Miscellaneous'),
]
|
{
"content_hash": "87c3fcc389883cf7651ac3737def447c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 70,
"avg_line_length": 27.096774193548388,
"alnum_prop": 0.680952380952381,
"repo_name": "tuwmft/MatterBot",
"id": "c0ab149969e017a477f4ee66b161757dd8c1b954",
"size": "865",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1076"
},
{
"name": "Python",
"bytes": "52818"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/food/shared_ingredient_marinade.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "92ed3e960f2389254049745c49f87e96",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 22.615384615384617,
"alnum_prop": 0.6904761904761905,
"repo_name": "obi-two/Rebelion",
"id": "b3d637ecfd47b1bf710a05c40186bd223c2992bf",
"size": "439",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/component/food/shared_ingredient_marinade.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""Tests for the MacOS Document Versions plugin."""
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import mac_document_versions
from tests.parsers.sqlite_plugins import test_lib
class MacDocumentVersionsTest(test_lib.SQLitePluginTestCase):
"""Tests for the MacOS Document Versions plugin."""
def testProcess(self):
"""Tests the Process function on a MacOS Document Versions file."""
plugin = mac_document_versions.MacDocumentVersionsPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['document_versions.sql'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 4)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'mac:document_versions:file',
'name': 'Spain is beautiful.rtf',
'path': '/Users/moxilo/Documents',
'timestamp': '2014-01-21 02:03:00.000000',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
'user_sid': '501',
'version_path': (
'/.DocumentRevisions-V100/PerUID/501/1/com.apple.documentVersions/'
'08CFEB5A-5CDA-486F-AED5-EA35BF3EE4C2.rtf')}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "71930f54501b1ad3c3fd28d3cb19c420",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 33.925,
"alnum_prop": 0.6949152542372882,
"repo_name": "Onager/plaso",
"id": "9bb71bd9ad21be84b5b8092bdb7761d1f6034103",
"size": "1404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parsers/sqlite_plugins/mac_document_versions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1172"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1270"
},
{
"name": "Python",
"bytes": "4816953"
},
{
"name": "Shell",
"bytes": "22891"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
__author__ = "czerwin@scalyr.com"
import os
from scalyr_agent.__scalyr__ import get_install_root, get_package_root, SCALYR_VERSION
from scalyr_agent.test_base import ScalyrTestCase
class TestUtil(ScalyrTestCase):
def test_version(self):
self.assertTrue(SCALYR_VERSION.startswith("2."))
def test_get_install_root(self):
self.assertEquals(os.path.basename(get_install_root()), "scalyr-agent-2")
def test_get_package_root(self):
self.assertEquals(os.path.basename(get_package_root()), "scalyr_agent")
|
{
"content_hash": "be2cac43bb8b872da06636f85244c182",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 86,
"avg_line_length": 30.526315789473685,
"alnum_prop": 0.7155172413793104,
"repo_name": "imron/scalyr-agent-2",
"id": "ff316f4d2035496695bc22665e5f38a983fe85d2",
"size": "1281",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/__scalyr__test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1297"
},
{
"name": "Dockerfile",
"bytes": "1461"
},
{
"name": "Python",
"bytes": "2093708"
}
],
"symlink_target": ""
}
|
"""This example gets all custom channels in an ad client.
To get ad clients, run get_all_ad_clients.py.
Tags: customchannels.list
"""
__author__ = 'api.Dean.Lukies@gmail.com (Dean Lukies)'
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'--ad_client_id',
help='The ad client ID for which to get custom channels')
MAX_PAGE_SIZE = 50
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'adexchangeseller', 'v2.0', __doc__, __file__, parents=[argparser],
scope='https://www.googleapis.com/auth/adexchange.seller.readonly')
ad_client_id = flags.ad_client_id
try:
# Retrieve custom channel list in pages and display data as we receive it.
request = service.accounts().customchannels().list(
        accountId='myaccount', adClientId=ad_client_id, maxResults=MAX_PAGE_SIZE)
while request:
result = request.execute()
if 'items' in result:
custom_channels = result['items']
for custom_channel in custom_channels:
print ('Custom channel with id "%s" and name "%s" was found. '
% (custom_channel['id'], custom_channel['name']))
if 'targetingInfo' in custom_channel:
print ' Targeting info:'
targeting_info = custom_channel['targetingInfo']
if 'adsAppearOn' in targeting_info:
print ' Ads appear on: %s' % targeting_info['adsAppearOn']
if 'location' in targeting_info:
print ' Location: %s' % targeting_info['location']
if 'description' in targeting_info:
print ' Description: %s' % targeting_info['description']
if 'siteLanguage' in targeting_info:
print ' Site language: %s' % targeting_info['siteLanguage']
request = service.accounts().customchannels().list_next(request, result)
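      # list_next() returns None once the final page has been fetched,
      # which is what terminates the pagination loop above.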
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
|
{
"content_hash": "c79bef35f4540afd71743dbd6b0a68c7",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 80,
"avg_line_length": 34.984375,
"alnum_prop": 0.6493970522554712,
"repo_name": "googleads/googleads-adxseller-examples",
"id": "d769da02738a8247f85d1689b64057b1913fc220",
"size": "2873",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/v2.0/get_all_custom_channels.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2752"
},
{
"name": "HTML",
"bytes": "3706"
},
{
"name": "Java",
"bytes": "103028"
},
{
"name": "PHP",
"bytes": "73791"
},
{
"name": "Python",
"bytes": "59607"
},
{
"name": "Ruby",
"bytes": "59513"
}
],
"symlink_target": ""
}
|
import textwrap
from pkg_resources import parse_version
from setuptools import __version__ as SETUPTOOLS_VERSION
import sympy as sm
from sympy.core.function import AppliedUndef
from sympy.utilities.iterables import iterable
from sympy.physics.mechanics import dynamicsymbols
SYMPY_VERSION = sm.__version__
def sympy_equal_to_or_newer_than(version, installed_version=None):
"""Returns true if the installed version of SymPy is equal to or newer
than the provided version string."""
if installed_version is None:
v = SYMPY_VERSION
else:
v = installed_version
if v.endswith('-git') and \
parse_version(SETUPTOOLS_VERSION) >= parse_version('8.0'):
msg = ('You are using an older development version of SymPy with a '
'non-PEP440 compliant version number: {}. Please install '
'setuptools < 8.0 or a newer development version of SymPy.')
raise ValueError(msg.format(v))
return parse_version(v) >= parse_version(version)
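# For example, with SymPy 0.7.6 installed:
#   sympy_equal_to_or_newer_than('0.7.5')  # -> True
#   sympy_equal_to_or_newer_than('1.0')    # -> False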
def wrap_and_indent(lines, indentation=4, width=79):
"""Returns a single string in which the lines have been indented and
wrapped into a block of text."""
# TODO : This will indent any lines that only contain a new line. Which
# may not be preferable.
new_lines = []
for line in lines:
if line != '\n':
wrapped = textwrap.wrap(line, width=width-indentation)
else:
wrapped = [line]
new_lines += wrapped
spacer = '\n' + ' ' * indentation
return ' ' * indentation + spacer.join(new_lines)
# This is a copy of the function in SymPy. It doesn't exist in SymPy 0.7.4
# so we keep it here for now.
def find_dynamicsymbols(expression, exclude=None):
"""Find all dynamicsymbols in expression.
>>> from sympy.physics.mechanics import dynamicsymbols, find_dynamicsymbols
>>> x, y = dynamicsymbols('x, y')
>>> expr = x + x.diff()*y
>>> find_dynamicsymbols(expr)
set([x(t), y(t), Derivative(x(t), t)])
If the optional ``exclude`` kwarg is used, only dynamicsymbols
not in the iterable ``exclude`` are returned.
>>> find_dynamicsymbols(expr, [x, y])
set([Derivative(x(t), t)])
"""
t_set = set([dynamicsymbols._t])
if exclude:
if iterable(exclude):
exclude_set = set(exclude)
else:
raise TypeError("exclude kwarg must be iterable")
else:
exclude_set = set()
return set([i for i in expression.atoms(AppliedUndef, sm.Derivative) if
i.free_symbols == t_set]) - exclude_set
class PyDyDeprecationWarning(DeprecationWarning):
pass
class PyDyImportWarning(ImportWarning):
pass
class PyDyFutureWarning(FutureWarning):
pass
class PyDyUserWarning(UserWarning):
pass
|
{
"content_hash": "e6ddffac4adc15e1b2581de813d9c3a7",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 31.75,
"alnum_prop": 0.65748031496063,
"repo_name": "Shekharrajak/pydy",
"id": "14ef67c43c764e9b972b7bd6b188404181d1a165",
"size": "2817",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pydy/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "102"
},
{
"name": "CSS",
"bytes": "14810"
},
{
"name": "HTML",
"bytes": "15405"
},
{
"name": "JavaScript",
"bytes": "49934"
},
{
"name": "Python",
"bytes": "279080"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
}
|
from .base import REGISTERED_ENVS, MujocoEnv
ALL_ENVIRONMENTS = REGISTERED_ENVS.keys()
|
{
"content_hash": "4d0beafbd29b5ef38859f9364ec5d57f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 44,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.7954545454545454,
"repo_name": "ARISE-Initiative/robosuite",
"id": "fff6081f09064994187ac1154f62c57066bd455c",
"size": "88",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robosuite/environments/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "552"
},
{
"name": "Python",
"bytes": "1197777"
}
],
"symlink_target": ""
}
|
import math
import pytest
import numpy as np
import dartpy as dart
def kinematics_tester(joint):
num_tests = 2
joint.setTransformFromChildBodyNode(dart.math.expMap(np.random.rand(6)))
joint.setTransformFromParentBodyNode(dart.math.expMap(np.random.rand(6)))
dof = joint.getNumDofs()
q = np.zeros(dof)
dq = np.zeros(dof)
for _ in range(num_tests):
q_delta = 1e-5
for i in range(dof):
q[i] = dart.math.Random.uniform(-math.pi, math.pi)
dq[i] = dart.math.Random.uniform(-math.pi, math.pi)
joint.setPositions(q)
joint.setVelocities(dq)
if dof == 0:
return
T = joint.getRelativeTransform()
J = joint.getRelativeJacobian(q)
dJ = joint.getRelativeJacobianTimeDeriv()
# Verify transform
assert dart.math.verifyTransform(T)
# Test analytic Jacobian and numerical Jacobian
numericJ = np.zeros((6, dof))
for i in range(dof):
q_a = q.copy()
joint.setPositions(q_a)
T_a = joint.getRelativeTransform()
q_b = q.copy()
q_b[i] += q_delta
joint.setPositions(q_b)
T_b = joint.getRelativeTransform()
Tinv_a = T_a.inverse()
dTdq = (T_b.matrix() - T_a.matrix()) / q_delta
Ji_4x4matrix = np.matmul(Tinv_a.matrix(), dTdq)
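            # Numerically, T_a^{-1} * dT/dq_i approximates an element of
            # se(3): the upper-left 3x3 block is skew-symmetric (angular part)
            # and the last column is the linear part, which Ji unpacks below
            # as a twist for comparison against the analytic Jacobian column.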
Ji = np.zeros(6)
Ji[0] = Ji_4x4matrix[2, 1]
Ji[1] = Ji_4x4matrix[0, 2]
Ji[2] = Ji_4x4matrix[1, 0]
Ji[3] = Ji_4x4matrix[0, 3]
Ji[4] = Ji_4x4matrix[1, 3]
Ji[5] = Ji_4x4matrix[2, 3]
numericJ[:, i] = Ji
assert np.allclose(J, numericJ, atol=1e-5)
def test_kinematics():
skel = dart.dynamics.Skeleton()
joint, _ = skel.createWeldJointAndBodyNodePair()
kinematics_tester(joint)
skel = dart.dynamics.Skeleton()
joint, _ = skel.createRevoluteJointAndBodyNodePair()
kinematics_tester(joint)
skel = dart.dynamics.Skeleton()
joint, _ = skel.createPrismaticJointAndBodyNodePair()
kinematics_tester(joint)
skel = dart.dynamics.Skeleton()
joint, _ = skel.createScrewJointAndBodyNodePair()
kinematics_tester(joint)
skel = dart.dynamics.Skeleton()
joint, _ = skel.createUniversalJointAndBodyNodePair()
kinematics_tester(joint)
skel = dart.dynamics.Skeleton()
joint, _ = skel.createTranslationalJoint2DAndBodyNodePair()
kinematics_tester(joint)
skel = dart.dynamics.Skeleton()
joint, _ = skel.createEulerJointAndBodyNodePair()
kinematics_tester(joint)
skel = dart.dynamics.Skeleton()
joint, _ = skel.createTranslationalJointAndBodyNodePair()
kinematics_tester(joint)
skel = dart.dynamics.Skeleton()
joint, _ = skel.createPlanarJointAndBodyNodePair()
kinematics_tester(joint)
def test_access_to_parent_child_transforms():
skel = dart.dynamics.Skeleton()
joint, _ = skel.createRevoluteJointAndBodyNodePair()
parentToJointTf = dart.math.Isometry3.Identity()
parentToJointTf.set_translation(np.random.rand(3, 1))
childToJointTf = dart.math.Isometry3.Identity()
childToJointTf.set_translation(np.random.rand(3, 1))
joint.setTransformFromParentBodyNode(parentToJointTf)
joint.setTransformFromChildBodyNode(childToJointTf)
storedParentTf = joint.getTransformFromParentBodyNode()
storedChildTf = joint.getTransformFromChildBodyNode()
assert np.allclose(parentToJointTf.matrix(), storedParentTf.matrix())
assert np.allclose(childToJointTf.matrix(), storedChildTf.matrix())
def test_BallJoint_positions_conversion():
assert np.allclose(
dart.dynamics.BallJoint.convertToPositions(np.eye(3)),
np.zeros((1, 3))
)
assert np.allclose(
dart.dynamics.BallJoint.convertToPositions(
np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])),
np.array([0, 0, -np.pi/2])
)
for i in range(30):
ballJointPos = np.random.uniform(-np.pi/2, np.pi/2, 3)
assert np.allclose(
dart.dynamics.BallJoint.convertToRotation(
dart.dynamics.BallJoint.convertToPositions(
dart.dynamics.BallJoint.convertToRotation(ballJointPos)
)),
dart.dynamics.BallJoint.convertToRotation(ballJointPos)
)
if __name__ == "__main__":
pytest.main()
|
{
"content_hash": "f4f6418358773e1d4c5e8d81b3c7a92d",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 77,
"avg_line_length": 30.040816326530614,
"alnum_prop": 0.6329257246376812,
"repo_name": "dartsim/dart",
"id": "a94384f3ec314caa4eb4833fc3428644bd7b0c4f",
"size": "4416",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/tests/unit/dynamics/test_joint.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "13809"
},
{
"name": "C++",
"bytes": "14864431"
},
{
"name": "CMake",
"bytes": "177235"
},
{
"name": "Dockerfile",
"bytes": "1493"
},
{
"name": "Python",
"bytes": "81308"
},
{
"name": "Roff",
"bytes": "1046"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "18128"
},
{
"name": "TeX",
"bytes": "1801"
}
],
"symlink_target": ""
}
|
'''Manages MongoR from the command line.
'''
from mongor import Maintenence
import argparse
from pprint import pprint
from bson.json_util import loads, dumps
from distutils.util import strtobool
def list_nodes(args):
m = Maintenence(args.config_host, args.config_port, args.config_ssl)
nodes = m.config.get_nodes({})
for node in nodes:
del(node["_id"])
print dumps(node)
return
def add_node(args):
m = Maintenence(args.config_host, args.config_port, args.config_ssl)
m.config.add_node(uid = args.uid,
name = args.name,
db_type = args.db_type,
db_tags = loads(args.db_tags),
capability = args.capability,
host = args.host,
port = args.port,
max_size = args.max_size,
ssl = bool(strtobool(args.ssl)),
passwd_file = args.passwd_file
)
return
def remove_node(args):
m = Maintenence(args.config_host, args.config_port, args.config_ssl)
m.config.remove_node(args.db_type, args.uid)
return
def set_db_tags(args):
m = Maintenence(args.config_host, args.config_port, args.config_ssl)
tags = loads(args.db_tags)
m.config.set_node_tags(args.uid,
tags)
return
def add_index(args):
m = Maintenence(args.config_host, args.config_port, args.config_ssl)
fields = loads(args.fields)
m.config.add_index(db_type = args.db_type,
fields = fields,
background = bool(strtobool(args.background)), #bool
unique = bool(strtobool(args.unique)),
sparse = bool(strtobool(args.sparse)),
text = bool(strtobool(args.text)))
if bool(strtobool(args.rebuild)):
build_index(args)
return
def build_index(args):
m = Maintenence(args.config_host, args.config_port, args.config_ssl)
m.ensure_indexes(args.collection,
m.config.get_indexes(args.db_type),
db_type=args.db_type)
return
def remove_index(args):
m = Maintenence(args.config_host, args.config_port, args.config_ssl)
fields = loads(args.fields)
m.remove_index(args.db_type,
fields)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser("MongoR manager")
#parser.add_argument('-o', '--output')
#parser.add_argument('-v', dest='verbose', action='store_true')
parser.add_argument('--host', type=str, dest='config_host', required=True)
parser.add_argument('--port', type=int, dest='config_port', required=True)
parser.add_argument('--ssl', dest='config_ssl', default=False, action='store_true')
subparsers = parser.add_subparsers(help='python manage.py <command> -h')
#add a node
parser_add_node = subparsers.add_parser('addnode', help='adds a node to mongor')
parser_add_node.set_defaults(which='addnode')
parser_add_node.add_argument('uid', type=str, help="uniqueID for the node (unique)")
parser_add_node.add_argument('name', type=str, help="name of the node(unique)")
parser_add_node.add_argument('db_type', type=str, help="the major category of the database")
parser_add_node.add_argument('db_tags', type=str, help="json list '[\"tag1\",\"tag2\",etc]'")
parser_add_node.add_argument('capability', type=str, help="r=readonly, rw=read/write")
parser_add_node.add_argument('max_size', type=int, help="size after which should rotate")
parser_add_node.add_argument('passwd_file', type=str, help="the local (to the caller) file containing auth creds")
parser_add_node.add_argument('host', type=str, help="dns name or IP address of the mongod")
parser_add_node.add_argument('port', type=int, help="port of the mongod")
parser_add_node.add_argument('ssl', type=str, help="use SSL for connection")
parser_rm_node = subparsers.add_parser('removenode', help='removes a node to mongor')
parser_rm_node.set_defaults(which='removenode')
parser_rm_node.add_argument('uid', type=str, help="the unique identifier of the node to remove")
parser_rm_node.add_argument('db_type', type=str, help="the major category of the database")
parser_add_index = subparsers.add_parser('addindex', help='adds an index to mongor')
parser_add_index.set_defaults(which='addindex')
parser_add_index.add_argument('db_type', type=str, help="db_type the index applies")
parser_add_index.add_argument('collection', type=str, help="collection the index applies")
parser_add_index.add_argument('fields', type=str, help="JSON list of namespaces")
parser_add_index.add_argument('background', type=str, help="build index in background")
parser_add_index.add_argument('unique', type=str, help="force a unique index")
parser_add_index.add_argument('sparse', type=str, help="use a sparse index")
parser_add_index.add_argument('text', type=str, help="use text index engine")
parser_add_index.add_argument('rebuild', type=str, help="rebuild indexes on previous databases")
parser_rm_index = subparsers.add_parser('removeindex', help='removes index from future built buckets')
parser_rm_index.set_defaults(which='removeindex')
parser_rm_index.add_argument('db_type', type=str, help="db_type the index applies")
parser_rm_index.add_argument('fields', type=str, help="JSON list of namespaces")
parser_build_index = subparsers.add_parser('buildindex', help='build the index on all databases')
parser_build_index.set_defaults(which='buildindex')
parser_build_index.add_argument('db_type', type=str, help="db_type the index applies")
parser_build_index.add_argument('collection', type=str, help="collection the index applies")
parser_set_db_tags = subparsers.add_parser('setdbtags', help='sets the db_tags field for an existing node')
parser_set_db_tags.set_defaults(which='setdbtags')
parser_set_db_tags.add_argument('uid', type=str, help="unique id for the node to change")
parser_set_db_tags.add_argument('db_tags', type=str, help="JSON list of namespaces")
parser_list_nodes = subparsers.add_parser('listnodes', help='list the current mongor configuration')
parser_list_nodes.set_defaults(which='listnodes')
args = parser.parse_args()
    if args.which == "listnodes":
        list_nodes(args)
    elif args.which == "addnode":
        add_node(args)
    elif args.which == "removenode":
        remove_node(args)
    elif args.which == "addindex":
        add_index(args)
    elif args.which == "removeindex":
        remove_index(args)
    elif args.which == "setdbtags":
        set_db_tags(args)
    elif args.which == "buildindex":
        build_index(args)
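    # Example invocation (hostname and port below are hypothetical):
    #   python mongor_manage.py --host cfg01.example.com --port 27017 listnodes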
|
{
"content_hash": "f7e02d316bb40b28794a86e0cae521fa",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 118,
"avg_line_length": 45.390728476821195,
"alnum_prop": 0.6477969069156697,
"repo_name": "lmco/python-mongor",
"id": "8ac51fb3117d27eeb82b99384743d92e0d3899a2",
"size": "6872",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/mongor_manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32934"
},
{
"name": "Shell",
"bytes": "596"
}
],
"symlink_target": ""
}
|
import unittest
import requests
import requests_mock
from airflow.models.dag import DAG
from airflow.providers.apache.druid.transfers.hive_to_druid import HiveToDruidOperator
class TestHiveToDruidOperator(unittest.TestCase):
# To debug the large json diff
maxDiff = None
hook_config = {
'sql': 'SELECT * FROM table',
'druid_datasource': 'our_datasource',
'ts_dim': 'timedimension_column',
'metric_spec': [
{"name": "count", "type": "count"},
{"name": "amountSum", "type": "doubleSum", "fieldName": "amount"},
],
'hive_cli_conn_id': 'hive_cli_custom',
'druid_ingest_conn_id': 'druid_ingest_default',
'metastore_conn_id': 'metastore_default',
'hadoop_dependency_coordinates': 'org.apache.spark:spark-core_2.10:1.5.2-mmx1',
'intervals': '2016-01-01/2017-01-01',
'num_shards': -1,
'target_partition_size': 1925,
'query_granularity': 'month',
'segment_granularity': 'week',
'job_properties': {
"mapreduce.job.user.classpath.first": "false",
"mapreduce.map.output.compress": "false",
"mapreduce.output.fileoutputformat.compress": "false",
},
}
index_spec_config = {'static_path': '/apps/db/warehouse/hive/', 'columns': ['country', 'segment']}
def setUp(self):
super().setUp()
args = {'owner': 'airflow', 'start_date': '2017-01-01'}
self.dag = DAG('hive_to_druid', default_args=args)
session = requests.Session()
adapter = requests_mock.Adapter()
session.mount('mock', adapter)
def test_construct_ingest_query(self):
operator = HiveToDruidOperator(task_id='hive_to_druid', dag=self.dag, **self.hook_config)
provided_index_spec = operator.construct_ingest_query(**self.index_spec_config)
expected_index_spec = {
"hadoopDependencyCoordinates": self.hook_config['hadoop_dependency_coordinates'],
"type": "index_hadoop",
"spec": {
"dataSchema": {
"metricsSpec": self.hook_config['metric_spec'],
"granularitySpec": {
"queryGranularity": self.hook_config['query_granularity'],
"intervals": self.hook_config['intervals'],
"type": "uniform",
"segmentGranularity": self.hook_config['segment_granularity'],
},
"parser": {
"type": "string",
"parseSpec": {
"columns": self.index_spec_config['columns'],
"dimensionsSpec": {
"dimensionExclusions": [],
"dimensions": self.index_spec_config['columns'],
"spatialDimensions": [],
},
"timestampSpec": {"column": self.hook_config['ts_dim'], "format": "auto"},
"format": "tsv",
},
},
"dataSource": self.hook_config['druid_datasource'],
},
"tuningConfig": {
"type": "hadoop",
"jobProperties": self.hook_config['job_properties'],
"partitionsSpec": {
"type": "hashed",
"targetPartitionSize": self.hook_config['target_partition_size'],
"numShards": self.hook_config['num_shards'],
},
},
"ioConfig": {
"inputSpec": {"paths": self.index_spec_config['static_path'], "type": "static"},
"type": "hadoop",
},
},
}
# Make sure it is like we expect it
self.assertEqual(provided_index_spec, expected_index_spec)
|
{
"content_hash": "3f5b74d132df5dadaa6f3e625e473287",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 102,
"avg_line_length": 39.97,
"alnum_prop": 0.49862396797598196,
"repo_name": "mrkm4ntr/incubator-airflow",
"id": "968dd1d78f90ab0b4d734cc1d7387391120403f4",
"size": "4787",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/providers/apache/druid/transfers/test_hive_to_druid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22581"
},
{
"name": "Dockerfile",
"bytes": "31475"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "221101"
},
{
"name": "JavaScript",
"bytes": "32643"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "14407542"
},
{
"name": "Shell",
"bytes": "541811"
}
],
"symlink_target": ""
}
|
import sys
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
longest = 0
def longestUnivaluePath(self, root: TreeNode) -> int:
self.longestUnivaluePathHelper(sys.maxsize, root)
return self.longest
def longestUnivaluePathHelper(self, parentVal, node):
if not node:
return 0
leftNodes = self.longestUnivaluePathHelper(node.val, node.left)
rightNodes = self.longestUnivaluePathHelper(node.val, node.right)
self.longest = max(self.longest, leftNodes + rightNodes)
if node.val == parentVal:
return 1 + max(leftNodes, rightNodes)
return 0
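# How the recursion works: leftNodes/rightNodes count the longest chain of
# equal values extending down each subtree. A path *through* the current node
# may join both chains (leftNodes + rightNodes edges), which updates
# self.longest, while the value returned to the parent can extend only one
# side, and only when node.val matches the parent's value.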
root = TreeNode(1)
root.left = TreeNode(4)
root.left.left = TreeNode(4)
root.left.right = TreeNode(4)
root.right = TreeNode(5)
root.right.right = TreeNode(5)
ob = Solution()
print(ob.longestUnivaluePath(root))
|
{
"content_hash": "c761098f8a59224307d179bbaa2245be",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 73,
"avg_line_length": 28.08823529411765,
"alnum_prop": 0.6387434554973822,
"repo_name": "shobhitmishra/CodingProblems",
"id": "e7ae9c48e6fc36c36c1f055aaa7d7213ca859b3e",
"size": "955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LeetCode/Session3/LongestUnival.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "854"
},
{
"name": "Makefile",
"bytes": "31844"
},
{
"name": "Python",
"bytes": "437556"
}
],
"symlink_target": ""
}
|
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_bfd_global
version_added: "2.4"
short_description: Manages BFD global configuration on HUAWEI CloudEngine devices.
description:
- Manages BFD global configuration on HUAWEI CloudEngine devices.
author: QijunPan (@QijunPan)
options:
bfd_enable:
description:
- Enables the global Bidirectional Forwarding Detection (BFD) function.
choices: ['enable', 'disable']
default_ip:
description:
- Specifies the default multicast IP address.
The value ranges from 224.0.0.107 to 224.0.0.250.
tos_exp_dynamic:
description:
- Indicates the priority of BFD control packets for dynamic BFD sessions.
The value is an integer ranging from 0 to 7.
The default priority is 7, which is the highest priority of BFD control packets.
tos_exp_static:
description:
- Indicates the priority of BFD control packets for static BFD sessions.
The value is an integer ranging from 0 to 7.
The default priority is 7, which is the highest priority of BFD control packets.
damp_init_wait_time:
description:
- Specifies an initial flapping suppression time for a BFD session.
The value is an integer ranging from 1 to 3600000, in milliseconds.
The default value is 2000.
damp_max_wait_time:
description:
- Specifies a maximum flapping suppression time for a BFD session.
The value is an integer ranging from 1 to 3600000, in milliseconds.
The default value is 15000.
damp_second_wait_time:
description:
- Specifies a secondary flapping suppression time for a BFD session.
The value is an integer ranging from 1 to 3600000, in milliseconds.
The default value is 5000.
delay_up_time:
description:
- Specifies the delay before a BFD session becomes Up.
The value is an integer ranging from 1 to 600, in seconds.
The default value is 0, indicating that a BFD session immediately becomes Up.
state:
description:
- Determines whether the config should be present or not on the device.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: bfd global module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Enable the global BFD function
ce_bfd_global:
bfd_enable: enable
provider: '{{ cli }}'
- name: Set the default multicast IP address to 224.0.0.150
ce_bfd_global:
bfd_enable: enable
default_ip: 224.0.0.150
state: present
provider: '{{ cli }}'
- name: Set the priority of BFD control packets for dynamic and static BFD sessions
ce_bfd_global:
bfd_enable: enable
tos_exp_dynamic: 5
tos_exp_static: 6
state: present
provider: '{{ cli }}'
- name: Disable the global BFD function
ce_bfd_global:
bfd_enable: disable
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {
"bfd_enalbe": "enable",
"damp_init_wait_time": null,
"damp_max_wait_time": null,
"damp_second_wait_time": null,
"default_ip": null,
"delayUpTimer": null,
"state": "present",
"tos_exp_dynamic": null,
"tos_exp_static": null
}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {
"global": {
"bfdEnable": "false",
"dampInitWaitTime": "2000",
"dampMaxWaitTime": "12000",
"dampSecondWaitTime": "5000",
"defaultIp": "224.0.0.184",
"delayUpTimer": null,
"tosExp": "7",
"tosExpStatic": "7"
}
}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {
"global": {
"bfdEnable": "true",
"dampInitWaitTime": "2000",
"dampMaxWaitTime": "12000",
"dampSecondWaitTime": "5000",
"defaultIp": "224.0.0.184",
"delayUpTimer": null,
"tosExp": "7",
"tosExpStatic": "7"
}
}
updates:
description: commands sent to the device
returned: always
type: list
sample: [ "bfd" ]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import sys
import socket
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
CE_NC_GET_BFD = """
<filter type="subtree">
<bfd xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
%s
</bfd>
</filter>
"""
CE_NC_GET_BFD_GLB = """
<bfdSchGlobal>
<bfdEnable></bfdEnable>
<defaultIp></defaultIp>
<tosExp></tosExp>
<tosExpStatic></tosExpStatic>
<dampInitWaitTime></dampInitWaitTime>
<dampMaxWaitTime></dampMaxWaitTime>
<dampSecondWaitTime></dampSecondWaitTime>
<delayUpTimer></delayUpTimer>
</bfdSchGlobal>
"""
def check_default_ip(ipaddr):
"""check the default multicast IP address"""
# The value ranges from 224.0.0.107 to 224.0.0.250
if not check_ip_addr(ipaddr):
return False
if ipaddr.count(".") != 3:
return False
ips = ipaddr.split(".")
if ips[0] != "224" or ips[1] != "0" or ips[2] != "0":
return False
if not ips[3].isdigit() or int(ips[3]) < 107 or int(ips[3]) > 250:
return False
return True
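# Illustrative checks for the validator above (the addresses are assumptions
# chosen only for demonstration):
#   check_default_ip("224.0.0.150")  ->  True   (within 224.0.0.107-224.0.0.250)
#   check_default_ip("224.0.1.150")  ->  False  (third octet must be 0)
#   check_default_ip("10.1.1.1")     ->  False  (not in the 224.0.0.x block)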
class BfdGlobal(object):
"""Manages BFD Global"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.__init_module__()
# module input info
self.bfd_enable = self.module.params['bfd_enable']
self.default_ip = self.module.params['default_ip']
self.tos_exp_dynamic = self.module.params['tos_exp_dynamic']
self.tos_exp_static = self.module.params['tos_exp_static']
self.damp_init_wait_time = self.module.params['damp_init_wait_time']
self.damp_max_wait_time = self.module.params['damp_max_wait_time']
self.damp_second_wait_time = self.module.params['damp_second_wait_time']
self.delay_up_time = self.module.params['delay_up_time']
self.state = self.module.params['state']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.changed = False
self.bfd_dict = dict()
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def __init_module__(self):
"""init module"""
required_together = [('damp_init_wait_time', 'damp_max_wait_time', 'damp_second_wait_time')]
self.module = AnsibleModule(argument_spec=self.spec,
required_together=required_together,
supports_check_mode=True)
def get_bfd_dict(self):
"""bfd config dict"""
bfd_dict = dict()
bfd_dict["global"] = dict()
conf_str = CE_NC_GET_BFD % CE_NC_GET_BFD_GLB
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return bfd_dict
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
# get bfd global info
glb = root.find("data/bfd/bfdSchGlobal")
        if glb is not None:
for attr in glb:
bfd_dict["global"][attr.tag] = attr.text
return bfd_dict
def config_global(self):
"""configures bfd global params"""
xml_str = ""
damp_chg = False
# bfd_enable
if self.bfd_enable:
if bool(self.bfd_dict["global"].get("bfdEnable", "false") == "true") != bool(self.bfd_enable == "enable"):
if self.bfd_enable == "enable":
xml_str = "<bfdEnable>true</bfdEnable>"
self.updates_cmd.append("bfd")
else:
xml_str = "<bfdEnable>false</bfdEnable>"
self.updates_cmd.append("undo bfd")
# get bfd end state
bfd_state = "disable"
if self.bfd_enable:
bfd_state = self.bfd_enable
elif self.bfd_dict["global"].get("bfdEnable", "false") == "true":
bfd_state = "enable"
# default_ip
if self.default_ip:
if bfd_state == "enable":
if self.state == "present" and self.default_ip != self.bfd_dict["global"].get("defaultIp"):
xml_str += "<defaultIp>%s</defaultIp>" % self.default_ip
if "bfd" not in self.updates_cmd:
self.updates_cmd.append("bfd")
self.updates_cmd.append("default-ip-address %s" % self.default_ip)
elif self.state == "absent" and self.default_ip == self.bfd_dict["global"].get("defaultIp"):
xml_str += "<defaultIp/>"
if "bfd" not in self.updates_cmd:
self.updates_cmd.append("bfd")
self.updates_cmd.append("undo default-ip-address")
# tos_exp_dynamic
if self.tos_exp_dynamic is not None:
if bfd_state == "enable":
if self.state == "present" and self.tos_exp_dynamic != int(self.bfd_dict["global"].get("tosExp", "7")):
xml_str += "<tosExp>%s</tosExp>" % self.tos_exp_dynamic
if "bfd" not in self.updates_cmd:
self.updates_cmd.append("bfd")
self.updates_cmd.append("tos-exp %s dynamic" % self.tos_exp_dynamic)
elif self.state == "absent" and self.tos_exp_dynamic == int(self.bfd_dict["global"].get("tosExp", "7")):
xml_str += "<tosExp/>"
if "bfd" not in self.updates_cmd:
self.updates_cmd.append("bfd")
self.updates_cmd.append("undo tos-exp dynamic")
# tos_exp_static
if self.tos_exp_static is not None:
if bfd_state == "enable":
if self.state == "present" \
and self.tos_exp_static != int(self.bfd_dict["global"].get("tosExpStatic", "7")):
xml_str += "<tosExpStatic>%s</tosExpStatic>" % self.tos_exp_static
if "bfd" not in self.updates_cmd:
self.updates_cmd.append("bfd")
self.updates_cmd.append("tos-exp %s static" % self.tos_exp_static)
elif self.state == "absent" \
and self.tos_exp_static == int(self.bfd_dict["global"].get("tosExpStatic", "7")):
xml_str += "<tosExpStatic/>"
if "bfd" not in self.updates_cmd:
self.updates_cmd.append("bfd")
self.updates_cmd.append("undo tos-exp static")
# delay_up_time
if self.delay_up_time is not None:
if bfd_state == "enable":
delay_time = self.bfd_dict["global"].get("delayUpTimer", "0")
if not delay_time or not delay_time.isdigit():
delay_time = "0"
if self.state == "present" \
and self.delay_up_time != int(delay_time):
xml_str += "<delayUpTimer>%s</delayUpTimer>" % self.delay_up_time
if "bfd" not in self.updates_cmd:
self.updates_cmd.append("bfd")
self.updates_cmd.append("delay-up %s" % self.delay_up_time)
elif self.state == "absent" \
and self.delay_up_time == int(delay_time):
xml_str += "<delayUpTimer/>"
if "bfd" not in self.updates_cmd:
self.updates_cmd.append("bfd")
self.updates_cmd.append("undo delay-up")
# damp_init_wait_time damp_max_wait_time damp_second_wait_time
        if self.damp_init_wait_time is not None and self.damp_max_wait_time is not None \
                and self.damp_second_wait_time is not None:
if bfd_state == "enable":
if self.state == "present":
if self.damp_max_wait_time != int(self.bfd_dict["global"].get("dampMaxWaitTime", "2000")):
xml_str += "<dampMaxWaitTime>%s</dampMaxWaitTime>" % self.damp_max_wait_time
damp_chg = True
if self.damp_init_wait_time != int(self.bfd_dict["global"].get("dampInitWaitTime", "12000")):
xml_str += "<dampInitWaitTime>%s</dampInitWaitTime>" % self.damp_init_wait_time
damp_chg = True
if self.damp_second_wait_time != int(self.bfd_dict["global"].get("dampSecondWaitTime", "5000")):
xml_str += "<dampSecondWaitTime>%s</dampSecondWaitTime>" % self.damp_second_wait_time
damp_chg = True
if damp_chg:
if "bfd" not in self.updates_cmd:
self.updates_cmd.append("bfd")
self.updates_cmd.append("dampening timer-interval maximum %s initial %s secondary %s" % (
self.damp_max_wait_time, self.damp_init_wait_time, self.damp_second_wait_time))
else:
damp_chg = True
if self.damp_max_wait_time != int(self.bfd_dict["global"].get("dampMaxWaitTime", "2000")):
damp_chg = False
if self.damp_init_wait_time != int(self.bfd_dict["global"].get("dampInitWaitTime", "12000")):
damp_chg = False
if self.damp_second_wait_time != int(self.bfd_dict["global"].get("dampSecondWaitTime", "5000")):
damp_chg = False
if damp_chg:
xml_str += "<dampMaxWaitTime/><dampInitWaitTime/><dampSecondWaitTime/>"
if "bfd" not in self.updates_cmd:
self.updates_cmd.append("bfd")
self.updates_cmd.append("undo dampening timer-interval maximum %s initial %s secondary %s" % (
self.damp_max_wait_time, self.damp_init_wait_time, self.damp_second_wait_time))
if xml_str:
return '<bfdSchGlobal operation="merge">' + xml_str + '</bfdSchGlobal>'
else:
return ""
def netconf_load_config(self, xml_str):
"""load bfd config by netconf"""
if not xml_str:
return
xml_cfg = """
<config>
<bfd xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
%s
</bfd>
</config>""" % xml_str
set_nc_config(self.module, xml_cfg)
self.changed = True
def check_params(self):
"""Check all input params"""
# check default_ip
if self.default_ip:
if not check_default_ip(self.default_ip):
self.module.fail_json(msg="Error: Default ip is invalid.")
# check tos_exp_dynamic
if self.tos_exp_dynamic is not None:
if self.tos_exp_dynamic < 0 or self.tos_exp_dynamic > 7:
self.module.fail_json(msg="Error: Session tos_exp_dynamic is not ranges from 0 to 7.")
# check tos_exp_static
if self.tos_exp_static is not None:
if self.tos_exp_static < 0 or self.tos_exp_static > 7:
self.module.fail_json(msg="Error: Session tos_exp_static is not ranges from 0 to 7.")
# check damp_init_wait_time
if self.damp_init_wait_time is not None:
if self.damp_init_wait_time < 1 or self.damp_init_wait_time > 3600000:
self.module.fail_json(msg="Error: Session damp_init_wait_time is not ranges from 1 to 3600000.")
# check damp_max_wait_time
if self.damp_max_wait_time is not None:
if self.damp_max_wait_time < 1 or self.damp_max_wait_time > 3600000:
self.module.fail_json(msg="Error: Session damp_max_wait_time is not ranges from 1 to 3600000.")
# check damp_second_wait_time
if self.damp_second_wait_time is not None:
if self.damp_second_wait_time < 1 or self.damp_second_wait_time > 3600000:
self.module.fail_json(msg="Error: Session damp_second_wait_time is not ranges from 1 to 3600000.")
# check delay_up_time
if self.delay_up_time is not None:
if self.delay_up_time < 1 or self.delay_up_time > 600:
self.module.fail_json(msg="Error: Session delay_up_time is not ranges from 1 to 600.")
def get_proposed(self):
"""get proposed info"""
self.proposed["bfd_enalbe"] = self.bfd_enable
self.proposed["default_ip"] = self.default_ip
self.proposed["tos_exp_dynamic"] = self.tos_exp_dynamic
self.proposed["tos_exp_static"] = self.tos_exp_static
self.proposed["damp_init_wait_time"] = self.damp_init_wait_time
self.proposed["damp_max_wait_time"] = self.damp_max_wait_time
self.proposed["damp_second_wait_time"] = self.damp_second_wait_time
self.proposed["delay_up_time"] = self.delay_up_time
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if not self.bfd_dict:
return
self.existing["global"] = self.bfd_dict.get("global")
def get_end_state(self):
"""get end state info"""
bfd_dict = self.get_bfd_dict()
if not bfd_dict:
return
self.end_state["global"] = bfd_dict.get("global")
def work(self):
"""worker"""
self.check_params()
self.bfd_dict = self.get_bfd_dict()
self.get_existing()
self.get_proposed()
# deal present or absent
xml_str = self.config_global()
# update to device
if xml_str:
self.netconf_load_config(xml_str)
self.changed = True
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
bfd_enable=dict(required=False, type='str', choices=['enable', 'disable']),
default_ip=dict(required=False, type='str'),
tos_exp_dynamic=dict(required=False, type='int'),
tos_exp_static=dict(required=False, type='int'),
damp_init_wait_time=dict(required=False, type='int'),
damp_max_wait_time=dict(required=False, type='int'),
damp_second_wait_time=dict(required=False, type='int'),
delay_up_time=dict(required=False, type='int'),
state=dict(required=False, default='present', choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = BfdGlobal(argument_spec)
module.work()
if __name__ == '__main__':
main()
|
{
"content_hash": "d818c49188fc39cabae033d5c5a0e4ba",
"timestamp": "",
"source": "github",
"line_count": 531,
"max_line_length": 120,
"avg_line_length": 38.578154425612055,
"alnum_prop": 0.5579204295826214,
"repo_name": "SergeyCherepanov/ansible",
"id": "c1416013d8e3f7c80959a501f59486238dedc025",
"size": "21160",
"binary": false,
"copies": "27",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/network/cloudengine/ce_bfd_global.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
"""File to convert the output of PATH to a list."""
from optparse import OptionParser
import os
import sys
def path2list(thePath):
"""Converts a path listing to a whitespace separated list."""
theItems = thePath.split( os.pathsep )
theResult = '\n'.join( theItems )
return theResult
if __name__ == '__main__':
usage = "Usage: %prog [options]"
parser = OptionParser(usage)
    parser.add_option('-p', '--path', dest='path', default='PATH',
                      help="Environment variable to list (default 'PATH'; use '-' for stdin)")
(options, args) = parser.parse_args()
if (len(args) > 0):
parser.error('No arguments expected: {0}'.format(args))
    thePath = (os.environ[options.path.upper()] if
               options.path != '-' else
               sys.stdin.read())
    theListing = path2list(thePath)
print(theListing)
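# Illustrative invocation (the output paths are assumptions):
#   $ python path2list.py --path PATH
#   /usr/local/bin
#   /usr/bin
#   $ echo "/a:/b" | python path2list.py --path -
#   /a
#   /b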
|
{
"content_hash": "31aea136c81c4043c4109e8f087b0e60",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 28.15625,
"alnum_prop": 0.5749167591564928,
"repo_name": "mrwizard82d1/pyutils",
"id": "e4134de92015354e4a27a0dd06c5bf3adcf4c8df",
"size": "912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "path2list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "61490"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
}
|
import logging
import datetime as dt
import wukong.errors as solr_errors
from wukong.request import SolrRequest
from wukong.zookeeper import Zookeeper
import json
logger = logging.getLogger(__name__)
def _add_scheme_if_not_there(url, scheme='http'):
if not (
url.startswith('http://')
or url.startswith('https://')
):
url = '{}://{}'.format(scheme, url)
return url
def _format_solr_url(url):
url = _add_scheme_if_not_there(url)
if not url.endswith('/solr') and not url.endswith('/solr/'):
url = '{}/solr/'.format(url)
if not url.endswith('/'):
url = '{}/'.format(url)
return url
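# Illustrative normalisation by the helpers above (the hosts are assumptions):
#   _format_solr_url('localhost:8983')            -> 'http://localhost:8983/solr/'
#   _format_solr_url('https://solr.example/solr') -> 'https://solr.example/solr/'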
class SolrAPI(object):
def __init__(self, solr_hosts, solr_collection,
zookeeper_hosts=None, timeout=15, zookeeper_timeout=5):
"""
Do all the interactions with SOLR server
(e.g. update, select, get and delete)
:param solr_hosts: the hosts for SOLR.
        :type solr_hosts: str or list
:param solr_collection: the name of the collection in SOLR.
:type solr_collection: str
:param zookeeper_hosts: the hosts for zookeeper.
:type zookeeper_hosts: str
:param timeout: the timeout for request to SOLR.
:type timeout: int
"""
if solr_hosts is None and zookeeper_hosts is not None:
logger.info(
'Getting solr hosts from zookeeper for collection %s',
solr_collection
)
zk = Zookeeper(zookeeper_hosts, zookeeper_timeout)
solr_hosts = zk.get_active_hosts(collection_name=solr_collection)
if solr_hosts is None or solr_collection is None:
logger.error('Neither solr_hosts nor solr_collection has been set')
raise solr_errors.SolrError(
"Either solr_hosts or solr_collection can not be None"
)
if not isinstance(solr_hosts, list):
solr_hosts = solr_hosts.split(",")
if zookeeper_hosts is not None:
            hostnames, sep, chroot = zookeeper_hosts.rpartition('/')
            # If hostnames is empty then there is no chroot; keep the full
            # host string and an empty chroot.
            if not hostnames:
                hostnames = zookeeper_hosts
                chroot = ''
            else:
                chroot = '/%s' % chroot
            logger.debug('Using solr via zookeeper at chroot %s', chroot)
            self.zookeeper_hosts = [
                "http://%s%s" % (host, chroot,)
                for host in hostnames.split(",")
            ]
logger.info(
'Connected to zookeeper hosts at %s',
self.zookeeper_hosts
)
else:
logger.debug('Not using zookeeper for SolrCloud')
self.zookeeper_hosts = None
logger.info('Connected to solr hosts %s', solr_hosts)
self.solr_hosts = [_format_solr_url(host) for host in solr_hosts]
self.solr_collection = solr_collection
self.client = SolrRequest(
solr_hosts=self.solr_hosts,
zookeeper_hosts=zookeeper_hosts,
timeout=timeout
)
def _get_collection_url(self, path):
return "%s/%s" % (self.solr_collection, path)
def is_alive(self):
"""
Check if current collection is live from zookeeper.
        :return: whether or not the collection is live
:rtype: boolean
"""
params = {'detail': 'true', 'path': '/clusterstate.json'}
try:
response = self.client.get('zookeeper', params)
except solr_errors.SolrError:
logger.exception('Failed to check zookeeper')
return False
else:
try:
data = json.loads(response['znode']['data'])
except ValueError:
return False
for name, collection in data.items():
shards = collection['shards']
for shard, shard_info in shards.items():
replicas = shard_info['replicas']
for replica, info in replicas.items():
state = info['state']
if name == self.solr_collection and state != 'active':
return False
return True
def update(self, docs, commit=False):
"""
Add new docs or updating existing docs.
:param docs: a list of instances of SolrDoc.
        :type docs: list
:param commit: whether or not we should commit the documents.
        :type commit: bool
"""
if not docs:
return
data = json.dumps(
docs,
default=lambda obj: obj.isoformat() if isinstance(
obj, dt.datetime) else None
)
params = {}
if commit:
params['commit'] = 'true'
return self.client.post(
self._get_collection_url('update/json'),
params=params,
body=data
)
def select(self,
query_dict,
groups=False,
facets=False,
stats=False,
**kwargs
):
"""
Query documents from SOLR.
:param query_dict: a dict containing the query params to SOLR
:type query_dict: dict
        :param groups: whether or not grouped results should be returned
        :type groups: bool
        :param facets: whether or not facet counts should be returned
        :type facets: bool
        :param stats: whether or not stats results should be returned
        :type stats: bool
:param kwargs: a dict of additional params for SOLR
:type kwargs: dict
:return: reformatted response from SOLR
:rtype: dict
"""
if kwargs:
query_dict.update(kwargs)
response = self.client.post(
self._get_collection_url('select'),
body=json.dumps({'params': query_dict})
)
data = {}
if groups and 'grouped' in response:
data['groups'] = response['grouped']
if facets and 'facet_counts' in response:
data['facets'] = response['facet_counts']
if stats and 'stats' in response:
data['stats'] = response['stats']
if 'response' in response and 'docs' in response['response']:
response_data = response['response']
data['docs'] = response_data['docs']
data['total'] = response_data.get('numFound', len(data['docs']))
return data
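    # A minimal sketch of calling select() (the host, collection and field
    # names are assumptions):
    #   api = SolrAPI(['localhost:8983'], 'my_collection')
    #   result = api.select({'q': 'name:foo', 'rows': 10})
    #   result['docs']   -> list of matching documents
    #   result['total']  -> numFound reported by SOLR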
def delete(self, unique_key, unique_key_value, commit=False):
"""
Deleting a document from SOLR.
:param unique_key: the unique key for the doc to delete
:param unique_key_value: the value for the unique_key
:param commit: whether or not we should commit the documents.
        :type commit: bool
"""
params = {}
if commit:
params['commit'] = 'true'
data = json.dumps({"delete": {"query": "%s:%s" %
(unique_key, unique_key_value)}})
return self.client.post(
self._get_collection_url('update/json'),
params=params,
body=data
)
def commit(self):
"""
Hard commit documents to SOLR.
"""
params = {'commit': 'true'}
return self.client.post(
self._get_collection_url('update/json'), params=params)
def get_schema(self):
"""
Get the SOLR schema for the solr collection.
:return: the schema for the current collection
:rtype: dict
"""
response = self.client.get(self._get_collection_url('schema'))
return response.get('schema', {})
def add_schema_fields(self, fields):
"""
Add new fields to the schema of current collection
:param fields: a list of dicts of fields.
:type fields: list
"""
if not fields:
return
data = json.dumps(fields)
try:
return self.client.post(
self._get_collection_url('schema/fields'),
body=data
)
except solr_errors.SolrError as e:
raise solr_errors.SolrSchemaUpdateError(fields, message=e.args[0])
|
{
"content_hash": "5f0fed1a12f0c72eaf885a1beb3d996b",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 79,
"avg_line_length": 29.15,
"alnum_prop": 0.5428816466552315,
"repo_name": "SurveyMonkey/wukong",
"id": "f5352362409c5f673ca76b36a03f6228c109d2bd",
"size": "8162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wukong/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "153988"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import json
from dusty import constants
from dusty.systems.docker.config import registry_from_image, get_authed_registries
from ....testcases import DustyTestCase
class TestDockerConfigSystem(DustyTestCase):
def setUp(self):
super(TestDockerConfigSystem, self).setUp()
self.temp_docker_config_path = tempfile.mkstemp()[1]
self.old_docker_config_path = constants.DOCKER_CONFIG_PATH
constants.DOCKER_CONFIG_PATH = self.temp_docker_config_path
def tearDown(self):
super(TestDockerConfigSystem, self).tearDown()
constants.DOCKER_CONFIG_PATH = self.old_docker_config_path
if os.path.exists(self.temp_docker_config_path):
os.remove(self.temp_docker_config_path)
    def _write_config(self, config):
        with open(self.temp_docker_config_path, 'w') as config_file:
            json.dump(config, config_file)
def test_authed_registries_from_empty_config(self):
os.remove(self.temp_docker_config_path)
self.assertEqual(get_authed_registries(), set())
def test_authed_registries_with_no_auth_key(self):
self._write_config({'some_stuff': 'not auth'})
self.assertEqual(get_authed_registries(), set())
def test_authed_registries_with_https_auth(self):
self._write_config({'auths': {'https://index.docker.io/v1/': {'stuff': 'irrelevant'}}})
self.assertEqual(get_authed_registries(), set(['index.docker.io']))
def test_authed_registries_with_multiple_styles(self):
self._write_config({'auths': {'https://index.docker.io/v1/': {'stuff': 'irrelevant'},
'gamechanger.io': {'stuff': 'irrelevant'}}})
self.assertEqual(get_authed_registries(), set(['index.docker.io', 'gamechanger.io']))
def test_registry_from_image_official(self):
self.assertEqual(registry_from_image('postgres:9.3'),
'index.docker.io')
def test_registry_from_image_public(self):
self.assertEqual(registry_from_image('library/postgres:9.3'),
'index.docker.io')
self.assertEqual(registry_from_image('thieman/clojure'),
'index.docker.io')
def test_registry_from_image_private(self):
self.assertEqual(registry_from_image('gamechanger.io/clojure:1.6'),
'gamechanger.io')
self.assertEqual(registry_from_image('a.b.c.com/clojure:1.6'),
'a.b.c.com')
|
{
"content_hash": "bcf90bb8b513d661d2227b801e99d68a",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 95,
"avg_line_length": 43.892857142857146,
"alnum_prop": 0.6419853539462979,
"repo_name": "gamechanger/dusty",
"id": "e831326ac32afe70ed30e59b0fdecd03a3a72aeb",
"size": "2458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/systems/docker/config_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "845"
},
{
"name": "JavaScript",
"bytes": "1675"
},
{
"name": "Python",
"bytes": "493669"
},
{
"name": "Ruby",
"bytes": "769"
},
{
"name": "Shell",
"bytes": "3875"
}
],
"symlink_target": ""
}
|
from numpy.testing import TestCase
from parameterized import parameterized
from .. import palindrome as pl
class ShortPalindromesTest(TestCase):
@parameterized.expand(
[("RACE", "ECARACE"), ("TOPCODER", "REDTOCPCOTDER"), ("Q", "Q"),
("MADAMIMADAM", "MADAMIMADAM"),
("ALRCAGOEUAOEURGCOEUOOIGFA",
"AFLRCAGIOEOUAEOCEGRURGECOEAUOEOIGACRLFA")
]
)
def test_shortest(self, base, expected):
self.assertEqual(pl.shortest_palindromes(base), expected)
@parameterized.expand(
[('ECA', '', 'ECA'),
("BCX", "DCYX", "BDCYX"),
('AGGTAB', 'GXTXAYB', 'AGGXTXAYB'),
# ('ALRCAGOEUAOE', 'AFGIOOUEOCGR', 'AFLRCAGIOEOUAEOCEGR'),
]
)
def test_shortest_common_super_sequence(self, a, b, expected):
self.assertEqual(pl.shortest_common_super_sequence(a, b), expected)
@parameterized.expand(
[("ABCDGH", "AEDFHR", "ADH"), ("AGGTAB", "GXTXAYB", "GTAB"),
('', '', ''), ('abc', '', '')]
)
def test_longest_common_sub_sequence(self, a, b, expected):
self.assertEqual(pl.longest_common_sub_sequence(a, b), expected)
@parameterized.expand(
[("ADH", False), ("AFDFDFA", True), ("ARRA", False), ("Q", True)]
)
def test_is_palindrome(self, s, expected):
self.assertEqual(pl.is_palindrome(s), expected)
|
{
"content_hash": "9da30dda156110414a5fd346a87a7dad",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 75,
"avg_line_length": 34.15,
"alnum_prop": 0.5980966325036603,
"repo_name": "aliciawyy/dmining",
"id": "b4a369f9523b796121310ec164fa4b9c74678335",
"size": "1366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "puzzle/tests/test_palindrome.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "74603"
}
],
"symlink_target": ""
}
|
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
import typing
from typing import Any, Callable, Iterable, List, Optional, Text, Tuple, Union
from absl import logging
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tf2xla.python import xla as tf2xla
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf.tpu import dynamic_padding_pb2 as dynamic_padding
from tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2 as embedding_pb2
from tensorflow.python.compiler.xla import xla
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import tpu_name_util
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.types import core as core_types
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util.tf_export import tf_export
ops.NotDifferentiable("TPUReplicatedInput")
# Operations that indicate some error in the users graph, e.g. a placeholder
# that's introduced outside of the infeed.
_DENYLISTED_OPS = set([
"Placeholder",
])
# XLA doesn't currently support reading of intermediate tensors, thus some ops
# are not supported.
_UNSUPPORTED_OPS = set([
"AudioSummary",
"AudioSummaryV2",
"HistogramSummary",
"ImageSummary",
"MergeSummary",
"Print",
"ScalarSummary",
"TensorSummary",
"TensorSummaryV2",
])
# Ops which can be safely pruned from XLA compile if they have no consumers.
# These ops should also have no inputs.
_UNCONNECTED_OPS_TO_PRUNE = set(["Placeholder", "VarHandleOp"])
_MAX_WARNING_LINES = 5
_TPU_REPLICATE_ATTR = "_tpu_replicate"
_POST_DEVICE_REWRITE_ATTR = "_post_device_rewrite"
_TPU_COMPILATION_STATUS_ATTR = "_tpu_compilation_status"
_OUTSIDE_COMPILATION_ATTR = "_xla_outside_compilation"
_PIVOT_FOR_CLUSTER = "_pivot_for_cluster"
core = tpu_name_util.core
def _tpu_system_device_name(job: Optional[Text]) -> Text:
"""Returns the device name for the TPU_SYSTEM device of `job`."""
if job is None:
return "/device:TPU_SYSTEM:0"
else:
return "/job:%s/device:TPU_SYSTEM:0" % job
@tf_export(v1=["tpu.initialize_system"])
def initialize_system(
embedding_config: Optional[embedding_pb2.TPUEmbeddingConfiguration] = None,
job: Optional[Text] = None,
compilation_failure_closes_chips: bool = True
) -> core_types.Tensor:
"""Initializes a distributed TPU system for use with TensorFlow.
Args:
embedding_config: If not None, a `TPUEmbeddingConfiguration` proto
describing the desired configuration of the hardware embedding lookup
tables. If embedding_config is None, no hardware embeddings can be used.
job: The job (the XXX in TensorFlow device specification /job:XXX) that
contains the TPU devices that will be initialized. If job=None it is
assumed there is only one job in the TensorFlow flock, and an error will
be returned if this assumption does not hold.
compilation_failure_closes_chips: Set the configuration whether
we want to close TPU chips when there is a compilation failure.
Returns:
A serialized `TopologyProto` that describes the TPU system. Note:
the topology must be evaluated using `Session.run` before it can be used.
"""
config_string = ("" if embedding_config is None else
embedding_config.SerializeToString())
with ops.device(_tpu_system_device_name(job)):
topology = tpu_ops.configure_distributed_tpu(
compilation_failure_closes_chips=compilation_failure_closes_chips)
if embedding_config is None:
return topology
# This set of control dependencies is needed as this function is expected to
# return an op which will return the topology when executed, but we need to
# call the embedding initialization op between initializing the TPU and
# returning the topology.
with ops.control_dependencies([topology]):
embedding_init = tpu_ops.configure_tpu_embedding(config=config_string)
with ops.control_dependencies([embedding_init]):
return array_ops.identity(topology, name="tpu_init_identity")
def initialize_system_for_tpu_embedding(
embedding_config: embedding_pb2.TPUEmbeddingConfiguration,
job: Optional[Text] = None,
) -> ops.Operation:
"""Initializes a distributed TPU Embedding system for use with TensorFlow.
The following two are equivalent:
1. initialize_system() with embedding_config.
2. initialize_system() without embedding_config, then
initialize_system_for_tpu_embedding().
initialize_system() should not be called with embedding_config if
initialize_system_for_tpu_embedding() is meant to be called later.
Args:
embedding_config: a `TPUEmbeddingConfiguration` proto describing the desired
configuration of the hardware embedding lookup tables.
job: The job (the XXX in TensorFlow device specification /job:XXX) that
contains the TPU devices that will be initialized. If job=None it is
assumed there is only one job in the TensorFlow flock, and an error will
be returned if this assumption does not hold.
Returns:
A no-op.
"""
config_string = embedding_config.SerializeToString()
with ops.device(_tpu_system_device_name(job)):
return tpu_ops.configure_tpu_embedding(config=config_string)
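# A sketch of the two-step initialization described in the docstring above
# (`my_embedding_config` is an assumed, pre-built TPUEmbeddingConfiguration):
#   topology = tf.compat.v1.tpu.initialize_system()
#   embedding_init = initialize_system_for_tpu_embedding(my_embedding_config)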
@tf_export(v1=["tpu.shutdown_system"])
def shutdown_system(job: Optional[Text] = None) -> ops.Operation:
"""Shuts down a running a distributed TPU system.
Args:
job: The job (the XXX in TensorFlow device specification /job:XXX) that
contains the TPU devices that will be shutdown. If job=None it is
assumed there is only one job in the TensorFlow flock, and an error will
be returned if this assumption does not hold.
"""
with ops.device(_tpu_system_device_name(job)):
shutdown_distributed_tpu = tpu_ops.shutdown_distributed_tpu()
return shutdown_distributed_tpu
def _enclosing_tpu_context_and_graph() -> Tuple[Any, Any]:
"""Returns the TPUReplicateContext and its associated graph."""
graph = ops.get_default_graph()
while graph is not None:
# pylint: disable=protected-access
context_ = graph._get_control_flow_context()
# pylint: enable=protected-access
while context_ is not None:
if isinstance(context_, TPUReplicateContext):
return context_, graph
context_ = context_.outer_context
graph = getattr(graph, "outer_graph", None)
raise ValueError("get_replicated_var_handle() called without "
"TPUReplicateContext. This shouldn't happen. Please file "
"a bug.")
def is_tpu_strategy(strategy: Any) -> bool:
is_tpu_strat = lambda k: k.__name__.startswith("TPUStrategy")
clz = strategy.__class__
return is_tpu_strat(clz) or any(map(is_tpu_strat, clz.__bases__))
def _enclosing_tpu_device_assignment(
) -> Optional[device_assignment_lib.DeviceAssignment]:
if not distribution_strategy_context.has_strategy():
return None
strategy = distribution_strategy_context.get_strategy()
if not is_tpu_strategy(strategy):
return None
return strategy.extended._device_assignment # pylint: disable=protected-access
@auto_control_deps.register_acd_resource_resolver
def tpu_replicated_input_resolver(
op: ops.Operation,
resource_reads: object_identity.ObjectIdentitySet,
resource_writes: object_identity.ObjectIdentitySet) -> bool:
"""Replaces TPUReplicatedInput outputs with its inputs in resource_inputs."""
# Ignore TPUReplicatedInput for ACD purposes since we will be directly adding
# control deps on the replicated inputs.
if op.type == "TPUReplicatedInput":
if resource_reads or resource_writes:
resource_reads.clear()
resource_writes.clear()
return True
else:
return False
# Replace tensors in `resource_inputs` which are outputs of TPUReplicatedInput
  # with the actual replicated inputs. This allows ACD to correctly add control
# deps when there are multiple calls to `run` in a
# `tf.function`.
def replace_with_unreplicated_resources(resource_inputs):
"""Replaces handles in `resource_inputs` with their unreplicated inputs."""
to_remove = []
to_add = []
for resource in resource_inputs:
if resource.op.type == "TPUReplicatedInput":
to_remove.append(resource)
to_add.extend(resource.op.inputs)
for t in to_remove:
resource_inputs.discard(t)
resource_inputs.update(to_add)
return to_add or to_remove
return bool(replace_with_unreplicated_resources(resource_reads) or
replace_with_unreplicated_resources(resource_writes))
class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside a TPU computation.
The primary role of `TPUReplicateContext` is to mark operators inside a
tpu.replicate() computation with the attribute "_tpu_replicate=XYZ", where XYZ
is a unique name.
We use a `ControlFlowContext` to perform the annotation since it integrates
with Tensorflow constructs like ResourceVariables. For example, if a
`ResourceVariable` is constructed inside a tpu.replicate() block, the
`ResourceVariable` implementation can use
`with ops.control_dependencies(None)` to build the variable's definition
outside the replicated computation.
"""
def __init__(self, name: Text, num_replicas: int, pivot: ops.Operation):
"""Builds a new TPUReplicateContext.
Args:
name: a unique name for the context, used to populate the `_tpu_replicate`
attribute.
num_replicas: an integer that gives the number of replicas for the
computation.
pivot: a pivot node. Nodes in the TPUReplicateContext that do not have any
inputs will have a control dependency on the pivot node. This ensures
that nodes are correctly included in any enclosing control flow
contexts.
"""
super(TPUReplicateContext, self).__init__()
self._num_replicas = num_replicas
self._outer_device_function_stack = None
self._oc_dev_fn_stack = None
self._outside_compilation_cluster = None
self._outside_compilation_v2_context = None
self._outside_compilation_counter = 0
self._in_gradient_colocation = None
self._gradient_colocation_stack = []
self._host_compute_core = []
self._name = name
self._name_as_bytes = compat.as_bytes(name)
    self._tpu_replicate_attr_buf = c_api_util.ScopedTFBuffer(
        attr_value_pb2.AttrValue(s=self._name_as_bytes).SerializeToString())
self._unsupported_ops = []
self._pivot = pivot
self._replicated_vars = {}
def get_replicated_var_handle(self,
name: Text,
vars_: Union[List[core_types.Tensor],
List[variables.Variable]],
is_mirrored: bool = False,
is_packed: bool = False) -> core_types.Tensor:
"""Returns a variable handle for replicated TPU variable 'var'.
This is a method used by an experimental replicated variable implementation
and is not intended as a public API.
Args:
name: The common name of the variable.
vars_: The replicated TPU variables or handles.
is_mirrored: Whether the variables are mirrored, which guarantees the
values in each replica are always the same.
is_packed: Whether the replicated variables are packed into one variable.
Returns:
The handle of the TPU replicated input node.
"""
device_assignment = _enclosing_tpu_device_assignment()
# We don't need to put device assignment as part of the replicated_vars key
# because each TPUReplicateContext will only have one device assignment.
handle = self._replicated_vars.get(name)
if handle is not None:
return handle
if device_assignment is not None and not is_packed:
# Find a variable copy for each replica in the device assignment.
# Note that the order of devices for replicas for the variable and the
# device assignment might not match.
job_name = pydev.DeviceSpec.from_string(vars_[0].device).job
devices_to_vars = {device_util.canonicalize(v.device): v for v in vars_}
replicated_vars = []
for replica_id in range(device_assignment.num_replicas):
for logical_core in range(device_assignment.num_cores_per_replica):
device = device_util.canonicalize(
device_assignment.tpu_device(
replica=replica_id, logical_core=logical_core, job=job_name))
if device in devices_to_vars:
replicated_vars.append(devices_to_vars[device])
break
else:
raise ValueError(
"Failed to find a variable on any device in replica {} for "
"current device assignment".format(replica_id))
else:
replicated_vars = vars_
# Builds a TPUReplicatedInput node for the variable, if one does not already
# exist. The TPUReplicatedInput node must belong to the enclosing
# control-flow scope of the TPUReplicateContext.
# TODO(phawkins): consider changing the contract of the TPU encapsulation
# so the TPUReplicatedInput nodes go inside the TPUReplicateContext scope
# instead.
_, graph = _enclosing_tpu_context_and_graph()
with graph.as_default():
# pylint: disable=protected-access
saved_context = graph._get_control_flow_context()
graph._set_control_flow_context(self.outer_context)
# If replicated_vars are variables, get the handles. Note that this can be
# done inside TPUReplicateContext because replicated_vars.handle may
# create new ops.
if isinstance(replicated_vars[0], variables.Variable):
replicated_vars = [v.handle for v in replicated_vars]
handle = tpu_ops.tpu_replicated_input(replicated_vars,
name=name + "/handle",
is_mirrored_variable=is_mirrored,
is_packed=is_packed)
graph._set_control_flow_context(saved_context)
# pylint: enable=protected-access
self._replicated_vars[name] = handle
return handle
def report_unsupported_operations(self) -> None:
if self._unsupported_ops:
op_str = "\n".join(" %s (%s)" % (op.type, op.name)
for op in self._unsupported_ops[:_MAX_WARNING_LINES])
logging.warning("%d unsupported operations found: \n%s",
len(self._unsupported_ops), op_str)
if len(self._unsupported_ops) > _MAX_WARNING_LINES:
logging.warning("... and %d more" %
(len(self._unsupported_ops) - _MAX_WARNING_LINES))
def EnterGradientColocation(self, op: ops.Operation, gradient_uid: Text):
if op is not None:
if ops.get_default_graph()._control_flow_context is None: # pylint: disable=protected-access
# If we are in TF 2 functions (control flow V2 functions, or
# tf.function()), we need to attach _xla_outside_compilation attribute
# directly because we are not in TPUReplicateContext.
try:
outside_attr = op.get_attr(_OUTSIDE_COMPILATION_ATTR).decode("ascii")
except ValueError:
# The attr was not present: do nothing.
return
parts = outside_attr.split(".")
cluster = parts[0] + "." + gradient_uid
self._outside_compilation_v2_context = OutsideCompilationV2Context(
cluster)
self._outside_compilation_v2_context.Enter()
return
self._gradient_colocation_stack.append(op)
if not self._outside_compilation_cluster:
try:
outside_attr = op.get_attr(_OUTSIDE_COMPILATION_ATTR).decode("ascii")
if self._in_gradient_colocation:
raise NotImplementedError(
"Cannot nest gradient colocation operations outside compilation"
)
if gradient_uid == "__unsupported__":
raise NotImplementedError(
"No gradient_uid calling gradient within outside_compilation")
# When we take the gradient of an op X in an outside_compilation
# cluster C in a forward computation we would like to put the ops
# corresponding to the gradient of X into a new outside_compilation
# cluster C'. However, if we take the gradient of X twice, the second
# one should get yet another new outside_compilation cluster C''.
#
# The mechanism we adopt is to use a 'root_cluster' which is the
# cluster that X was in before we took gradients, and a 'gradient_uid'
# which is different for every invocation of gradients, and put the
# gradient of X in cluster 'root_cluster.gradient_uid'.
#
# When taking a gradient of a gradient, some ops will be colocated
# with Op in the forward pass (e.g., cluster root_cluster) and some in
# the backward pass (e.g., cluster root_cluster.initial_gradient_uid).
# We need all of the grad-of-grad ops to be in the same cluster to
# avoid cyclic dependencies between clusters. We adopt a heuristic
# that puts any op clustered with root_cluster.<xxx> in
# root_cluster.gradient_uid, even if xxx was initial_gradient_uid.
self._in_gradient_colocation = op
parts = outside_attr.split(".")
cluster = parts[0] + "." + gradient_uid
self._EnterOutsideCompilationScope(cluster=cluster)
except ValueError:
# The attr was not present: do nothing.
pass
def ExitGradientColocation(self, op: ops.Operation, gradient_uid: Text):
if op is not None:
if ops.get_default_graph()._control_flow_context is None: # pylint: disable=protected-access
# Inside a TF2 tf.function or control flow graph and `op` was not
# marked to be outside compiled.
assert self._outside_compilation_v2_context is None
return
if self._outside_compilation_v2_context is not None:
# Inside a TF2 tf.function or control flow graph and `op` was
# marked to be outside compiled.
self._outside_compilation_v2_context.Exit()
self._outside_compilation_v2_context = None
return
if not self._gradient_colocation_stack:
raise errors.InternalError(
op.node_def, op,
"Badly nested gradient colocation: empty stack when popping Op " +
op.name)
last_op = self._gradient_colocation_stack.pop()
if op is last_op:
if op is self._in_gradient_colocation:
self._in_gradient_colocation = None
self._ExitOutsideCompilationScope()
else:
raise errors.InternalError(
op.node_def, op, "Badly nested gradient colocation, expected " +
last_op + ", got " + op.name)
def _EnterOutsideCompilationScope(self, cluster: Optional[Text] = None):
class FakeOp(object):
"""A helper class to determine the current device.
Supports only the type and device set/get methods needed to run the
graph's _apply_device_function method.
"""
def __init__(self):
self._device = ""
@property
def type(self):
return "FakeOp"
@property
def device(self):
return self._device
def _set_device(self, device):
if isinstance(device, pydev.DeviceSpec):
self._device = device.to_string()
else:
self._device = device
def _set_device_from_string(self, device_str):
self._device = device_str
if self._outside_compilation_cluster:
raise NotImplementedError("Cannot nest outside_compilation clusters")
if cluster:
self._outside_compilation_cluster = cluster
else:
self._outside_compilation_cluster = str(self._outside_compilation_counter)
self._outside_compilation_counter += 1
graph = ops.get_default_graph()
fake_op = FakeOp()
graph._apply_device_functions(fake_op) # pylint: disable=protected-access
device = pydev.DeviceSpec.from_string(fake_op.device)
if (device.device_type == "TPU_REPLICATED_CORE" and
device.device_index is not None):
self._host_compute_core.append(self._outside_compilation_cluster + ":" +
str(device.device_index))
self._oc_dev_fn_stack = graph._device_function_stack # pylint: disable=protected-access
graph._device_function_stack = self._outer_device_function_stack # pylint: disable=protected-access
def _ExitOutsideCompilationScope(self):
if not self._outside_compilation_cluster:
raise NotImplementedError(
"Attempted to exit outside_compilation scope when not in scope")
self._outside_compilation_cluster = None
graph = ops.get_default_graph()
graph._device_function_stack = self._oc_dev_fn_stack # pylint: disable=protected-access
def Enter(self) -> None:
if not self._outer_device_function_stack:
# Capture the device function stack at the time of first entry
# since that is the stack that will be used outside_compilation.
graph = ops.get_default_graph()
# pylint: disable=protected-access
self._outer_device_function_stack = graph._device_function_stack.copy()
# pylint: enable=protected-access
super(TPUReplicateContext, self).Enter()
def HostComputeCore(self) -> List[Text]:
return self._host_compute_core
def _RemoveExternalControlEdges(
self, op: ops.Operation
) -> Tuple[List[ops.Operation], List[ops.Operation]]:
"""Remove any external control dependency on this op."""
internal_control_inputs = []
external_control_inputs = []
for x in op.control_inputs:
# pylint: disable=protected-access
is_internal_op = False
ctxt = x._get_control_flow_context()
while ctxt is not None:
if ctxt == self:
is_internal_op = True
break
ctxt = ctxt._outer_context
if is_internal_op:
internal_control_inputs.append(x)
else:
external_control_inputs.append(x)
# pylint: enable=protected-access
# pylint: disable=protected-access
op._remove_all_control_inputs()
op._add_control_inputs(internal_control_inputs)
# pylint: enable=protected-access
return internal_control_inputs, external_control_inputs
def AddOp(self, op: ops.Operation) -> None:
# pylint: disable=protected-access
if op.type in _DENYLISTED_OPS:
logging.error("Operation of type %s (%s) is not supported on the TPU. "
"Execution will fail if this op is used in the graph. ",
op.type, op.name)
if op.type in _UNSUPPORTED_OPS:
self._unsupported_ops.append(op)
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
"Non-resource Variables are not supported inside TPU computations "
"(operator name: %s)" % op.name)
# TensorFlowOpLayer may clone nodes that are in tpu.rewrite()s. It'll add
# the "_cloned" attribute and we should continue in that case.
if (_TPU_REPLICATE_ATTR in op.node_def.attr and
"_cloned" not in op.node_def.attr):
raise ValueError("TPU computations cannot be nested on op (%s)" %
op)
    op._set_attr_with_buf(_TPU_REPLICATE_ATTR,
                          self._tpu_replicate_attr_buf.buffer)
if self._outside_compilation_cluster:
op._set_attr(
_OUTSIDE_COMPILATION_ATTR,
attr_value_pb2.AttrValue(
s=compat.as_bytes(self._outside_compilation_cluster)))
if self._num_replicas > 1 or not self._outside_compilation_cluster:
# Prevent feeding or fetching anything that is being compiled,
# and any replicated outside_compilation Op.
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
# Remove any control edges from outer control flow contexts. These may cause
# mismatched frame errors.
(internal_control_inputs,
external_control_inputs) = self._RemoveExternalControlEdges(op)
if not op.inputs:
# Add a control edge from the control pivot to this op.
if not internal_control_inputs:
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot())
# pylint: enable=protected-access
else:
for index in xrange(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x is not x:
op._update_input(index, real_x) # pylint: disable=protected-access
if external_control_inputs:
# Use an identity to pull control inputs as data inputs. Note that we
# ignore ops which don't have outputs. TODO(phawkins): fix that.
with ops.control_dependencies(None):
self.Enter()
external_control_inputs = [
array_ops.identity(x.outputs[0]).op
for x in external_control_inputs
if x.outputs
]
self.Exit()
# pylint: disable=protected-access
op._add_control_inputs(external_control_inputs)
# pylint: enable=protected-access
# Mark op's outputs as seen by this context and any outer contexts.
output_names = [x.name for x in op.outputs]
context = self
while context is not None:
# pylint: disable=protected-access
context._values.update(output_names)
context = context._outer_context
# pylint: enable=protected-access
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val: core_types.Tensor) -> core_types.Tensor:
"""Add `val` to the current context and its outer context recursively."""
if not self._outer_context:
return val
if val.name in self._values:
# Use the real value if it comes from outer context.
result = self._external_values.get(val.name)
return val if result is None else result
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddInnerOp(self, op: ops.Operation):
self.AddOp(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
@property
def grad_state(self):
# Define the gradient loop state associated with the TPUReplicateContext to
# be None as the TPUReplicateContext does not get nested nor does the
# grad_state outside the TPUReplicateContext affect the graph inside so the
# grad_state should be as if this is the top-level gradient state.
return None
@property
def back_prop(self):
"""Forwards to the enclosing while context, if any."""
if self.GetWhileContext():
return self.GetWhileContext().back_prop
return False
def GetControlPivot(self) -> ops.Operation:
return self._pivot
def RequiresUniqueFunctionRetracing(self):
# More context: b/158152827. TPU stack uses the TPUReplicateContext to
# create replicated variable handles and cluster TPU computations, thus we
# always retrace a tf.function when the wrapped TPUReplicateContext changes.
return True
class OutsideCompilationV2Context(control_flow_ops.ControlFlowContext):
"""The context for outside compilation in Tensorflow 2.0.
Every op added in this context will be assigned an _xla_outside_compilation
attribute.
"""
def __init__(self, name: Text):
control_flow_ops.ControlFlowContext.__init__(self)
self._name = name
def AddOp(self, op: ops.Operation) -> None:
if self._outer_context:
self._outer_context.AddOp(op)
# pylint: disable=protected-access
op._set_attr("_xla_outside_compilation",
attr_value_pb2.AttrValue(s=compat.as_bytes(self._name)))
# pylint: enable=protected-access
def AddInnerOp(self, op: ops.Operation) -> None:
if self._outer_context:
self._outer_context.AddInnerOp(op)
# pylint: disable=protected-access
op._set_attr("_xla_outside_compilation",
attr_value_pb2.AttrValue(s=compat.as_bytes(self._name)))
# pylint: enable=protected-access
def to_control_flow_context_def(self, context_def, export_scope=None):
raise NotImplementedError("to_control_flow_context_def not implemented")
@tf_export(v1=["tpu.outside_compilation"])
def outside_compilation(
computation: Callable[..., Any], *args, **kwargs
) -> Any:
"""Builds part of a computation outside any current TPU replicate scope.
`tf.tpu.outside_compilation()` is used to run ops in `computation` on CPU
instead of running on TPU. For example, users can run ops that are not
supported on TPU's (e.g. tf.summary.write()) by explicitly placing those
ops on CPU's. Below usage of outside compilation will place ops in
`computation_with_string_ops` on CPU.
Example usage:
```python
def computation_with_string_ops(x):
# strings types are not supported on TPU's and below ops must
# run on CPU instead.
output = tf.strings.format('1{}', x)
return tf.strings.to_number(output)
def tpu_computation():
# Expected output is 11.
output = tf.tpu.outside_compilation(computation_with_string_ops, 1)
```
Outside compilation should be called inside TPUReplicateContext. That is,
`tf.tpu.outside_compilation()` should be called inside a function that is
passed to `tpu.split_compile_and_replicate()` -- this is implied when
outside compilation is invoked inside a function passed to TPUStrategy
`run()`. If invoked outside of TPUReplicateContext,
then this simply returns the result of `computation`, and therefore,
would be a no-op. Note that outside compilation is different from
`tf.distribute.experimental.TPUStrategy.merge_call()` as logic in
outside compilation is replicated and executed separately for each
replica. On the other hand, `merge_call()` requires a `merge_fn`
to aggregate the inputs from different replicas and is executed only
once.
For variables placed in TPU device, which includes variables created inside
TPUStrategy scope, outside compilation logic must not include variable
read/write. For variables placed on host, which is the case when variables
created via TPUEstimator, variable read/write is only allowed if the variable
is not accessed by any other ops in the TPU computation. Variable read/write
from outside compilation cluster is not visible from TPU computation and
vice versa. Therefore, if outside compilation logic contains such host
variables read/write ops and if the variables are accessed by TPU
computation as well, then this may lead to deadlock.
Internally, `tf.tpu.outside_compilation()` adds outside compilation
  attributes to all ops in `computation`. During a later graph pass, these
  ops with the outside compilation attribute are extracted out and replicated
  into a host-side graph. Inputs to this extracted host-side graph are sent
  from the TPU computation graph to the host graph via a pair of XlaSendToHost and
XlaRecvFromHost ops. Note that using `tf.tpu.outside_compilation()`
may result in tensor transfer between TPU and CPU, leading to non-trivial
performance impact.
Args:
computation: A Python function that builds the computation to
place on the host.
*args: the positional arguments for the computation.
**kwargs: the keyword arguments for the computation.
Returns:
The Tensors returned by computation.
"""
args = [] if args is None else args
graph = ops.get_default_graph()
# If we are in TF 2 functions (control flow V2 functions, or tf.function()),
# we need to attach _xla_outside_compilation attribute directly because we are
# not in TPUReplicateContext.
if isinstance(graph, func_graph.FuncGraph):
try:
tpu_context, _ = _enclosing_tpu_context_and_graph()
except ValueError:
logging.warning(
"Outside compilation attempted outside TPUReplicateContext "
"scope. As no enclosing TPUReplicateContext can be found, "
"returning the result of `computation` as is.")
return computation(*args, **kwargs)
# pylint: disable=protected-access
outside_compilation_name = str(tpu_context._outside_compilation_counter)
tpu_context._outside_compilation_counter = (
tpu_context._outside_compilation_counter + 1)
# pylint: enable=protected-access
outside_compilation_context = OutsideCompilationV2Context(
outside_compilation_name)
outside_compilation_context.Enter()
args = [] if args is None else args
retval = computation(*args, **kwargs)
outside_compilation_context.Exit()
return retval
# If we are in a TPUReplicateContext, signal that we are now
# outside_compilation
initial_context = graph._get_control_flow_context() # pylint: disable=protected-access
context = initial_context
while context:
if isinstance(context, TPUReplicateContext):
context._EnterOutsideCompilationScope() # pylint: disable=protected-access
context = context.outer_context
retval = computation(*args, **kwargs)
# If we are in a TPUReplicateContext, signal that we are no longer
# outside_compilation
final_context = graph._get_control_flow_context() # pylint: disable=protected-access
if initial_context is not final_context:
raise NotImplementedError(
"Control-flow context cannot be different at start and end of an "
"outside_compilation scope")
context = initial_context
while context:
if isinstance(context, TPUReplicateContext):
context._ExitOutsideCompilationScope() # pylint: disable=protected-access
context = context.outer_context
return retval
@tf_export(v1=["tpu.PaddingSpec"])
class PaddingSpec(enum.IntEnum):
"""Represents the type of padding policies for tpu.replicate."""
# By default the policy is set to AUTO, the dynamic input shape dimension will
# be pad to maximum of all the replicas.
AUTO = 0
# Bucketize the dynamic input shape dimension into a power of 2.
POWER_OF_TWO = 1
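# Sketch of the two policies above for a dynamic dimension whose largest size
# across replicas in a step is 6 (the sizes are assumptions):
#   PaddingSpec.AUTO         -> pad the dimension to 6 (the per-step maximum)
#   PaddingSpec.POWER_OF_TWO -> pad the dimension to 8 (next power of two)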
@tf_export("tpu.XLAOptions")
class XLAOptions(
collections.namedtuple("XLAOptions", [
"use_spmd_for_xla_partitioning",
"enable_xla_dynamic_padder",
])):
"""XLA compilation options.
Attributes:
use_spmd_for_xla_partitioning: Boolean. Whether to use XLA's SPMD
partitioner instead of MPMD partitioner when compiler partitioning is
requested.
enable_xla_dynamic_padder: Boolean. Whether to enable XLA dynamic padder
infrastructure to handle dynamic shapes inputs inside XLA. True by
default. Disabling this may cause correctness issues with dynamic shapes
      inputs, as XLA will just assume the inputs have padded shapes. However,
users can optionally set it to False to improve device time if masking is
already handled in the user side.
"""
def __new__(cls,
use_spmd_for_xla_partitioning=True,
enable_xla_dynamic_padder=True):
return super(XLAOptions, cls).__new__(cls, use_spmd_for_xla_partitioning,
enable_xla_dynamic_padder)
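# Illustrative construction of the options above (the override is an
# assumption; both fields default to True):
#   opts = XLAOptions(use_spmd_for_xla_partitioning=False)
#   opts.enable_xla_dynamic_padder  -> True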
@tf_export(v1=["tpu.replicate"])
def replicate(
computation: Callable[..., Any],
inputs: Optional[List[List[core_types.Tensor]]] = None,
infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
name: Optional[Text] = None,
maximum_shapes: Optional[Any] = None,
padding_spec: Optional[PaddingSpec] = None,
xla_options: Optional[XLAOptions] = None) -> List[Any]:
"""Builds a graph operator that runs a replicated TPU computation.
Example for the basic usage that `inputs` has static shape:
```python
def computation(x):
x = x + 1
return tf.math.reduce_mean(x)
x = tf.convert_to_tensor([1., 2., 3.])
y = tf.convert_to_tensor([4., 5., 6.])
tf.compat.v1.tpu.replicate(computation, inputs=[[x], [y]])
```
  If `inputs` has dynamic shapes and you would like to automatically
  bucketize the inputs to avoid XLA recompilation, see the advanced example
  below:
```python
def computation(x):
x = x + 1
return tf.math.reduce_mean(x)
# Assume input tensors in two replicas `x` and `y` both have dynamic shape
# ([None, 2]).
tf.compat.v1.tpu.replicate(
computation,
inputs=[x, y],
maximum_shapes=[tf.TensorShape([None, None])],
padding_spec=tf.compat.v1.tpu.PaddingSpec.POWER_OF_TWO)
```
Args:
computation: A Python function that builds the computation to replicate.
inputs: A list of lists of input tensors or `None` (equivalent to
`[[]]`), indexed by `[replica_num][input_num]`. All replicas must
have the same number of inputs. Each input can be a nested structure
containing values that are convertible to tensors. Note that passing an
      N-dimension list of compatible values will result in an N-dimension list
      of scalar tensors rather than a single rank-N tensor. If you need
      different behavior, convert part of inputs to tensors with
      `tf.convert_to_tensor`.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to computation.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each replica of the computation uses
only one core, and there is either only one replica, or the number of
replicas is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
maximum_shapes: A nested structure of tf.TensorShape representing the shape
to which the respective component of each input element in each replica
should be padded. Any unknown dimensions (e.g.
tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
object) will be padded to the maximum size of that dimension over all
replicas. The structure of `maximum_shapes` needs to be the same as
`inputs[0]`.
padding_spec: An enum specified by `tpu.PaddingSpec`. This describes the
padding policy when the `inputs` to `tpu.replicate` is dynamic.
One usage is to enable automatic bucketizing on the inputs by setting the
value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce the
recompilation in the XLA side.
xla_options: An instance of `tpu.XLAOptions` which indicates the options
passed to XLA compiler. Use `None` for default options.
Returns:
    A list of outputs, indexed by `[replica_num]`. Each output can be a nested
    structure, the same as what computation() returns, with a few exceptions.
Exceptions include:
1) None output: a NoOp would be returned which control-depends on
computation.
2) Single value output: A tuple containing the value would be returned.
3) Operation-only outputs: a NoOp would be returned which
control-depends on computation.
TODO(b/121383831): Investigate into removing these special cases.
Raises:
ValueError: If all replicas do not have equal numbers of input tensors.
ValueError: If the number of inputs per replica does not match
the number of formal parameters to `computation`.
ValueError: If the static `inputs` dimensions don't match with the values
given in `maximum_shapes`.
ValueError: If the structure of inputs per replica does not match
the structure of `maximum_shapes`.
"""
return split_compile_and_replicate(
computation,
inputs,
infeed_queue,
device_assignment,
name,
maximum_shapes=maximum_shapes,
padding_spec=padding_spec,
xla_options=xla_options)[1]
def _ceil_to_pow_of_n(x, n):
"""Ceil input `x` to power of `n`."""
x = math_ops.cast(x, dtypes.float32)
lognx = math_ops.log(x) / math_ops.log(n * 1.0)
lognx = math_ops.ceil(lognx)
result = math_ops.pow(n * 1.0, lognx)
result = math_ops.cast(result, dtypes.int32)
return result
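# Worked example (an addition; a pure-Python equivalent of the TF ops above,
# for illustration only): for x == 9 and n == 2, ceil(log2(9)) == 4, so the
# result is 2 ** 4 == 16, the smallest power of two >= x. As with the TF
# version, floating-point rounding may affect exact powers of n.
def _ceil_to_pow_of_n_py(x, n):
  import math
  return int(math.pow(n, math.ceil(math.log(x) / math.log(n))))
# _ceil_to_pow_of_n_py(9, 2) == 16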
def _pad_all_input(
inputs: Iterable[core_types.Tensor],
padded_shapes: List[Optional[tensor_shape.TensorShape]],
padding_spec: PaddingSpec
) -> Tuple[List[List[Any]], List[dynamic_padding.PaddingMap]]:
"""Pad all input tensors given padded_shapes.
The real shape tensors will be concatenated with the padded original inputs.
Args:
inputs: The original inputs.
padded_shapes: A list of padded shapes for each input. If an entry is None,
no padding is performed.
padding_spec: An enum specified by `tpu.PaddingSpec`. This describes the
padding policy when the `inputs` to `tf.tpu.replicate` is dynamic.
One usage is to enable automatic bucketizing on the inputs by setting the
value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce the
recompilation in the XLA side.
Returns:
The padded inputs and a PaddingMap list which maps the padded input
dimension to the real shape argument index.
"""
# maximum_static_shapes[idx][i] indicates the maximum static size of ith
# dimension of the idx input among all the replicas.
maximum_static_shapes = []
# need_padding[idx][i] indicates whether the ith dimension of the idx input
# needs padding.
need_padding = []
input_shape_tensors = []
for core_idx, inputs_per_core in enumerate(inputs):
for idx, input_tensor in enumerate(inputs_per_core):
input_shape = input_tensor.get_shape().as_list()
if core_idx == 0:
input_shape_tensors.append([])
maximum_static_shapes.append(input_shape)
need_padding.append(np.full_like(input_shape, False, dtype=bool))
else:
for i, s in enumerate(input_shape):
if s is None or s != maximum_static_shapes[idx][i]:
need_padding[idx][i] = True
maximum_static_shapes[idx] = max(input_shape,
maximum_static_shapes[idx])
# Append _POST_DEVICE_REWRITE_ATTR attributes to the real shape ops.
real_input_shape = array_ops.shape(input_tensor)
real_input_shape.op._set_attr( # pylint: disable=protected-access
_POST_DEVICE_REWRITE_ATTR,
attr_value_pb2.AttrValue(b=True))
input_shape_tensors[idx].append(real_input_shape)
maximum_shapes = []
for shapes_per_input in input_shape_tensors:
maximum_shapes.append(
math_ops.reduce_max(array_ops.stack(shapes_per_input), axis=0))
padded_inputs = []
real_shapes = []
padding_maps = []
for core_idx, inputs_per_core in enumerate(inputs):
padded_inputs.append([])
real_shapes.append([])
real_shape_idx = len(inputs_per_core) - 1
for idx, input_tensor in enumerate(inputs_per_core):
input_shape_tensor = input_shape_tensors[idx][core_idx]
input_shape = input_tensor.get_shape().as_list()
padded_shape = padded_shapes[idx]
# If we have no padded_shape, then skip padding.
if any(need_padding[idx]) and padded_shape is not None:
for i, s in enumerate(input_shape):
if need_padding[idx][i]:
if core_idx == 0:
real_shape_idx += 1
padding_map = dynamic_padding.PaddingMap()
padding_map.arg_index = idx
padding_map.shape_index = i
padding_map.padding_arg_index = real_shape_idx
padding_maps.append(padding_map)
real_shapes[core_idx].append(
math_ops.cast(input_shape_tensor[i], dtypes.int32))
paddings = []
for i, s in enumerate(padded_shape.dims):
if need_padding[idx][i]:
            # The minimum padded dimension size is 2, as XLA doesn't support a
            # dynamic size of 1.
minimum_dynamic_dim_size = 2
if s.value is not None:
# Pad to the given maximum value.
max_dim_size = max(s.value, minimum_dynamic_dim_size)
else:
# If maximum value is not given, then pad to the maximum dimension
# among all the cores.
max_dim_size = math_ops.maximum(maximum_shapes[idx][i],
minimum_dynamic_dim_size)
if padding_spec == PaddingSpec.POWER_OF_TWO:
max_dim_size = _ceil_to_pow_of_n(max_dim_size, 2)
# Pad to the given maximum value.
padding = [0, max_dim_size - input_shape_tensor[i]]
else:
padding = [0, 0]
paddings.append(padding)
if input_tensor.get_shape().is_fully_defined():
# TODO(rxsang): This is a hack to make sure padded_input has dynamic
# shapes, so any tf.size/tf.shape op performed on it won't be constant
# folded. Do we have better ways to do it?
padded_input = control_flow_ops.cond(
array_ops.constant(True),
lambda: array_ops.pad(input_tensor, paddings), # pylint: disable=cell-var-from-loop
lambda: input_tensor)
else:
padded_input = array_ops.pad(input_tensor, paddings)
# Append _POST_DEVICE_REWRITE_ATTR attributes to all padded inputs.
padded_input.op._set_attr( # pylint: disable=protected-access
_POST_DEVICE_REWRITE_ATTR,
attr_value_pb2.AttrValue(b=True))
padded_inputs[core_idx].append(padded_input)
else:
padded_inputs[core_idx].append(input_tensor)
num_replicas = len(padded_inputs)
for i in range(num_replicas):
padded_inputs[i].extend(real_shapes[i])
return padded_inputs, padding_maps
def _flatten_and_filter_composite(maybe_composite, non_composite_output,
composite_output=None):
"""For an input, replaced the input by a tuple if the input is composite.
If `maybe_composite` is not composite, return the parameter
`non_composite_output` otherwise return a tuple which consists of the value of
the parameter `composite_output` the same number of times as there are
components of the composite tensor.
This is useful for computing a mask when flattening nested data with
`expand_composites=True`. For example
```python
nest.flatten(data, expand_composites=True)
```
and
```python
  nest.flatten(nest.map_structure(
      lambda x: _flatten_and_filter_composite(x, False, True), data))
```
  will have the same length, and the second will be True where the tensor in
  the first is derived from expanding a composite tensor.
Args:
maybe_composite: A value to test for being a composite tensor.
non_composite_output: The value to return when `maybe_composite` is not a
composite.
composite_output: the value to fill the output tuple with if
`maybe_composite` is a composite.
Returns:
`non_composite_output` or a tuple with multiple copies of
`composite_output`.
"""
if isinstance(maybe_composite, composite_tensor.CompositeTensor):
num_components = len(nest.flatten(maybe_composite, expand_composites=True))
return (composite_output,) * num_components
return non_composite_output
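# Hedged sketch (an addition): how the mask lines up with flattened inputs.
# Assumes `rt` is a composite tensor (e.g. a RaggedTensor) that expands to
# two component tensors, so the mask gets two True entries for it.
def _composite_mask_example(dense, rt):
  data = [dense, rt]
  mask = nest.flatten(nest.map_structure(
      lambda x: _flatten_and_filter_composite(x, False, True), data))
  # e.g. [False, True, True]: one entry per flattened component tensor.
  return mask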
def split_compile_and_replicate(
computation: Callable[..., Any],
inputs: Optional[List[List[core_types.Tensor]]] = None,
infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
name: Optional[Text] = None,
use_tpu: bool = True,
maximum_shapes: Optional[Any] = None,
padding_spec: Optional[PaddingSpec] = None,
xla_options: Optional[XLAOptions] = None,
) -> List[List[core_types.Tensor]]:
"""Builds graph operators that runs compilation and replicated computation.
This is a lower level interface than replicate that returns a separate compile
and execute output tensor. In the generated graph the compile op feeds into
the execute op and no additional compilation is incurred when running the
compile op before the execute op. The compile op returns additional
information about the compilation but does not return the compiled program.
Args:
computation: A Python function that builds the computation to replicate.
inputs: A list of lists of input tensors or `None` (equivalent to
`[[]]`), indexed by `[replica_num][input_num]`. All replicas must
have the same number of inputs. Each input can be a nested structure
containing values that are convertible to tensors. Note that passing an
      N-dimension list of compatible values will result in an N-dimension list
      of scalar tensors rather than a single rank-N tensor. If you need
      different behavior, convert part of inputs to tensors with
      `tf.convert_to_tensor`.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to computation.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each replica of the computation uses
only one core, and there is either only one replica, or the number of
replicas is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
use_tpu: When false, the input `computation` is executed on the XLA CPU/GPU
      backends. Currently, this only supports a default placement (computation
      is placed on GPU if one is available, and on CPU if not).
maximum_shapes: A nested structure of tf.TensorShape representing the shape
to which the respective component of each input element in each replica
should be padded. Any unknown dimensions (e.g.
tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
object) will be padded to the maximum size of that dimension over all
replicas. The structure of `maximum_shapes` needs to be the same as
`inputs[0]`.
padding_spec: An enum specified by `tf.tpu.PaddingSpec`. This describes the
padding policy when the `inputs` to `tf.tpu.replicate` is dynamic.
One usage is to enable automatic bucketizing on the inputs by setting the
value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce the
recompilation in the XLA side.
xla_options: An instance of `tpu.XLAOptions` which indicates the options
passed to XLA compiler. Use `None` for default options.
Returns:
A list of lists with the first list corresponding to the compile op and the
second a list of output tensors, indexed by `[replica_num][output_num]`.
Raises:
ValueError: If all replicas do not have equal numbers of input tensors.
ValueError: If the number of inputs per replica does not match
the number of formal parameters to `computation`.
ValueError: If the static `inputs` dimensions don't match with the values
given in `maximum_shapes`.
ValueError: If the structure of inputs per replica does not match
the structure of `maximum_shapes`.
"""
del name
inputs = [[]] if inputs is None else inputs
xla_options = xla_options or XLAOptions()
metadata_kwargs = {}
if device_assignment is not None:
# Turn the Numpy array into a flattened list so we can pass it as an
# operator attribute.
metadata_kwargs = {
"topology":
device_assignment.topology.serialized(),
"device_assignment":
device_assignment.core_assignment.flatten().tolist()
}
metadata_kwargs["num_cores_per_replica"] = (
device_assignment.num_cores_per_replica)
# This entry is used for enabling automatic outside compilation.
metadata_kwargs["allow_soft_placement"] = config.get_soft_device_placement()
if config.get_soft_device_placement():
logging.info("Automatic outside compilation is enabled. "
"Ops without XLA kernels will be automatically "
"placed on CPU.")
if ((not isinstance(inputs, list)) or
any(not isinstance(inp, (list, tuple)) for inp in inputs)):
raise TypeError("tpu.replicate() inputs must be a list of lists/tuples")
num_replicas = len(inputs)
# No replicas? Nothing to do.
if num_replicas == 0:
return []
# Checks all replicas have the same structure.
for i in xrange(1, num_replicas):
nest.assert_same_structure(inputs[0], inputs[i])
# Flatten inputs. This structure may contain None values, which will be
# handled later.
flat_inputs_with_nones = [
nest.flatten(per_replica_input, expand_composites=True)
for per_replica_input in inputs
]
# Mask parallel to one replica's inputs with True for tensors coming from
# composites.
is_composite = nest.flatten(nest.map_structure(
lambda x: _flatten_and_filter_composite(x, False, True), inputs[0]))
# Converts inputs to Tensors, replacing Nones with a placeholder 0 since
# tpu_ops.tpu_replicated_input() can't handle non-Tensor values.
flat_inputs = []
for inp in flat_inputs_with_nones:
flat_inputs.append([
constant_op.constant(0) if x is None else ops.convert_to_tensor(x)
for x in inp
])
# Verifies that all replicas have matching numbers and types of inputs
flat_input_types = [x.dtype for x in flat_inputs[0]]
input_arity = len(inputs[0])
flat_input_arity = len(flat_input_types)
for i in range(num_replicas):
if len(inputs[i]) != input_arity:
raise ValueError("Replicas must have the same number of inputs. "
"Replica 0 had {} inputs, replica {} had {} "
"inputs.".format(input_arity, i, len(inputs[i])))
types = [x.dtype for x in flat_inputs[i]]
if types != flat_input_types:
raise ValueError("Replicas must have matching input types. Replica 0 had "
"input types {}, replica {} had input types {}".format(
flat_input_types, i, types))
arg_error = xla.check_function_argument_count(
computation, input_arity, infeed_queue)
if arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s, but the computation needs %s" % (
input_arity, str([i.name for i in inputs[0]]), arg_error))
else:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s and %d additional inputs from infeed,"
" but the computation needs %s" % (input_arity, str(
[i.name
for i in inputs[0]]), infeed_queue.number_of_tuple_elements,
arg_error))
dynamic_shape_inputs = False
if maximum_shapes:
if infeed_queue:
raise ValueError(
"Dynamic input shapes are not supported with infeed queues")
# Make sure maximum_shapes has the same structure as inputs.
nest.assert_same_structure(inputs[0], maximum_shapes, check_types=False)
# Flatten padded shapes:
# For composite tensor components, we don't want to pad them. For each
# entry of maximum_shapes that corresponds to a composite tensor, replace it
# by a tuple of Nones of the same length as the number of components of the
# composite tensor. When we flatten a second time, this makes
# flat_maximum_shapes have the same length as flat_inputs[i]. We can then
# avoid padding these tensors. The assumption is that they will be used by
# outside compilation or that the components are statically shaped and will
# be used by tpu compatible ops.
flat_maximum_shapes = nest.flatten(
[_flatten_and_filter_composite(x, y)
for x, y in zip(nest.flatten(inputs[0]),
nest.flatten(maximum_shapes))])
flat_maximum_shapes = [
tensor_shape.TensorShape(s) if s is not None else None
for s in flat_maximum_shapes
]
nest.assert_same_structure(flat_inputs[0], flat_maximum_shapes,
check_types=False)
unpadded_inputs = flat_inputs
flat_inputs, padding_maps = _pad_all_input(unpadded_inputs,
flat_maximum_shapes,
padding_spec)
if padding_maps:
dynamic_shape_inputs = True
logging.info("TPU has inputs with dynamic shapes: %s", unpadded_inputs[0])
metadata_kwargs["step_marker_location"] = getattr(
computation, "step_marker_location", "STEP_MARK_AT_ENTRY")
metadata_kwargs["use_spmd_for_xla_partitioning"] = \
xla_options.use_spmd_for_xla_partitioning
graph = ops.get_default_graph()
# Fan-in: Builds a TPUReplicatedInput node for each input.
flat_replicated_inputs = []
for i in range(0, len(flat_inputs[0])):
replicas = [flat_inputs[replica][i] for replica in xrange(num_replicas)]
flat_replicated_inputs.append(
tpu_ops.tpu_replicated_input(
replicas, name="input{}".format(i), index=i))
if isinstance(graph, func_graph.FuncGraph):
    # When we are in a Tensorflow 2.0 function, 'graph' will be a FuncGraph
    # object. If both the outside graph and this function have a TPU cluster,
    # they will have the same cluster name, causing problems (because we lower
    # functional ops in Tensorflow 2.0). Append the function name to
    # 'cluster_name' to avoid a cluster name collision.
cluster_name = graph.unique_name("cluster_" + graph.name)
else:
cluster_name = graph.unique_name("cluster")
pivot = control_flow_ops.no_op(name=cluster_name + "/pivot")
pivot._set_attr(_PIVOT_FOR_CLUSTER, # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name)))
context = TPUReplicateContext(
name=cluster_name, num_replicas=num_replicas, pivot=pivot)
try:
context.Enter()
metadata = tpu_ops.tpu_replicate_metadata(
num_replicas=num_replicas, use_tpu=use_tpu, **metadata_kwargs)
with tpu_function.tpu_shard_context(
num_replicas), ops.control_dependencies([metadata]):
if dynamic_shape_inputs and xla_options.enable_xla_dynamic_padder:
for padding_map in padding_maps:
input_shape = flat_replicated_inputs[padding_map.arg_index].shape
flat_replicated_inputs[
padding_map.arg_index] = tf2xla.set_dynamic_dimension_size(
flat_replicated_inputs[padding_map.arg_index],
padding_map.shape_index,
flat_replicated_inputs[padding_map.padding_arg_index])
flat_replicated_inputs[padding_map.arg_index].set_shape(input_shape)
# Add identity ops so even unused inputs are "consumed" by the
# computation. This is to avoid orphaned TPUReplicatedInput nodes.
# TODO(phawkins): consider instead pruning unused TPUReplicatedInput
# and eliding trivial TPUReplicatedInput/TPUReplicatedOutput pairs.
flat_replicated_inputs = [
array_ops.identity(x, name="replicated_input_{}".format(i))
for i, x in enumerate(flat_replicated_inputs)
]
for i, composite in zip(flat_replicated_inputs, is_composite):
# pylint: disable=protected-access
        # Add an attribute to the identity node so that it can be removed in
        # the encapsulate TPU computation pass if unused. However, we don't
        # remove inputs when dynamic padding is enabled.
# TODO(rxsang): Use other ways except argument index in padding_map so
# outside compilation can work with dynamic padding correctly.
if not dynamic_shape_inputs or composite:
i.op._set_attr("_tpu_input_identity",
attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
# Clobber replicated placeholders with Nones.
computation_inputs = [
None if inp is None else replicated for replicated, inp in zip(
flat_replicated_inputs, flat_inputs_with_nones[0])
]
# Unflatten the computation inputs to match original input structure.
computation_inputs = nest.pack_sequence_as(
structure=inputs[0],
flat_sequence=computation_inputs[:flat_input_arity],
expand_composites=True)
# If there is an infeed queue, adds the dequeued values to the
# computation's inputs.
if infeed_queue is not None:
infeed_queue.set_number_of_shards(num_replicas)
for t in infeed_queue.generate_dequeue_op():
computation_inputs.append(t)
# Only resource variables work inside a TPU computation, so turn on
# resource variables for the computation.
# TODO(phawkins): consider removing this code. It will
# be less confusing to clients if they knowingly choose to use resource
# variables.
# Partitioned variables is not supported (b/112311320).
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
saved_custom_getter = vscope.custom_getter
def custom_getter(getter, name, *args, **kwargs):
"""Variables on TPU have a few restrictions."""
partitioner = kwargs.get("partitioner", None)
if partitioner is not None:
kwargs["partitioner"] = None
logging.warning(
"Partitioned variables are not supported on TPU. Got "
"`partitioner` that is %s for variable %s. "
"Setting `partitioner` to `None`.", partitioner, name)
if saved_custom_getter is None:
return getter(name, *args, **kwargs)
else:
return saved_custom_getter(getter, name, *args, **kwargs)
vscope.set_use_resource(True)
vscope.set_custom_getter(custom_getter)
outputs = computation(*computation_inputs)
vscope.set_use_resource(saved_use_resource)
vscope.set_custom_getter(saved_custom_getter)
outputs_is_flat = xla.is_flat(outputs)
if outputs_is_flat:
output_tensors, control_deps, pack_template = _postprocess_flat_outputs(
outputs)
else:
output_tensors, control_deps, pack_template = (
_postprocess_non_flat_outputs(outputs))
# tensor_tracer imports tpu.py. Local import to tensor_tracer to avoid
# import-cycle
if typing.TYPE_CHECKING:
tensor_tracer = Any
else:
# pylint: disable=g-import-not-at-top
from tensorflow.python.tpu import tensor_tracer
# pylint: enable=g-import-not-at-top
if tensor_tracer.TensorTracer.is_enabled():
tt = tensor_tracer.TensorTracer()
output_tensors = tt.trace_tpu(ops.get_default_graph(),
output_tensors, control_deps,
num_replicas)
context.ExitResult(output_tensors)
finally:
context.report_unsupported_operations()
context.Exit()
host_compute_core = context.HostComputeCore()
if host_compute_core:
attr_value = attr_value_pb2.AttrValue()
attr_value.list.s.extend(compat.as_bytes(x) for x in host_compute_core)
metadata._set_attr("host_compute_core", attr_value) # pylint: disable=protected-access
with ops.control_dependencies([metadata]):
if use_tpu:
compile_status = tpu_ops.tpu_compilation_result()
op = compile_status.op
attr_value = attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name))
op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value) # pylint: disable=protected-access
else:
compile_status = control_flow_ops.no_op(name="compilation_status")
if not output_tensors:
# Returns a list of NoOps dependent on the replication Op, indexed by
# [replica_num].
return [
compile_status,
[
control_flow_ops.group(control_deps, name="shard_%d" % i)
for i in range(num_replicas)
]
]
# Fan-out: Builds a TPUReplicatedOutput node for each output.
replicated_outputs = [[] for i in range(num_replicas)]
for i, t in enumerate(output_tensors):
    # None values returned by the computation can't be sent to
    # tpu_ops.tpu_replicated_output(), so we handle them specially here. We can
    # avoid the placeholder-0 routine required on the inputs since outputs are
    # replicated per-tensor, not per-replica, so we can skip replication.
if t is None:
for replica in range(num_replicas):
replicated_outputs[replica].append(None)
continue
ys = tpu_ops.tpu_replicated_output(
t, num_replicas, name="output{}".format(i))
# Wraps the outputs in identity operators so the names of any possible
# `fetch` nodes are preserved by the replication rewrite.
with ops.control_dependencies(control_deps):
for replica in range(num_replicas):
replicated_outputs[replica].append(
array_ops.identity(
ys[replica], name="output_%d_shard_%d" % (i, replica)))
replicated_outputs = [
nest.pack_sequence_as(pack_template, replica_outs, expand_composites=True)
for replica_outs in replicated_outputs
]
return [compile_status, replicated_outputs]
def _postprocess_flat_outputs(
outputs: Any
) -> Tuple[List[Optional[core_types.Tensor]], List[ops.Operation], List[Any]]:
"""Validates non-flat outputs, add backs device assignments and other attrs.
Args:
outputs: Output from `computation` inside `tpu.rewrite`.
Returns:
- Tensors extracted from outputs.
- Operations extracted from outputs.
- A pack template for use with nest.pack_sequence_as to pack the tensors.
"""
# Following code segment is to preserve legacy behavior. Previously we only
# supported flat outputs and thus for consistency it was nice to convert even
# single element into a tuple. But now that we support arbitrary output
# structure, this is no longer necessary.
# TODO(b/121383831): Migrate all legacy use cases and delete this special
# case.
# If the computation returns `None`, make it an empty tuple.
if outputs is None:
outputs = tuple()
# For legacy / backwards compatibility reasons we return a list for "flat"
# output values (even if the user's flat return value was a different type or
# even just a scalar value) so use nest.flatten to compute a flat list pack
# template.
pack_template = nest.flatten(outputs, expand_composites=False)
# Even though outputs is already "flat", we flatten any composites so their
# component tensors can be tagged and replicated. The pack_template will be
# used by the caller to repack the composite tensors.
outputs = nest.flatten(outputs, expand_composites=True)
# Append `no_op` here so that fetching any return value of this function
# will trigger TPUExecute node.
outputs += (control_flow_ops.no_op(),)
maybe_convert = lambda x: None if x is None else ops.convert_to_tensor(x)
try:
with ops.device(core(0)):
outputs = [
o if isinstance(o, ops.Operation) else maybe_convert(o)
for o in outputs
]
except Exception as e:
raise ValueError(
"TPU function return values must all either be Operations or "
"convertible to Tensors. Got '%s'" % str(e))
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU functions must return zero-or more Tensor values followed by "
"zero or more Operations.")
# Trim operations off the end of the pack template. output_operations has 1
# extra element due to the no-op that is added.
if len(output_operations) > 1:
pack_template = pack_template[:1 - len(output_operations)]
# Wraps outputs in Identity ops. Otherwise a replicated input copied
# straight to an output would bypass the replicate(). This would be bad
# because the TPUReplicatedInput/TPUReplicatedOutput operator would not
# be rewritten away, leading to a runtime error.
# TODO(phawkins): extend the rewrite to elide these nodes instead.
new_output_tensors = []
for t in output_tensors:
if t is None:
      new_output_tensors.append(None)
      # Skip to the next output; `None` has no device to wrap with identity.
      continue
with ops.device(t.device if t.device else core(0)):
o = array_ops.identity(t)
# pylint: disable=protected-access
o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
new_output_tensors.append(o)
return new_output_tensors, output_operations, pack_template
def _postprocess_non_flat_outputs(
outputs: Any
) -> Tuple[List[Optional[core_types.Tensor]], List[ops.Operation], List[Any]]:
"""Validates non-flat outputs, add backs device assignments and other attrs.
Args:
outputs: Output from `computation` inside `tpu.rewrite`.
Returns:
- Tensors extracted from outputs.
- An empty Operations list because Operations are not allowed in non-flat
outputs.
- A pack template for use with nest.pack_sequence_as to pack the tensors.
"""
# Flatten output items.
flat_outputs = nest.flatten(outputs, expand_composites=True)
# Convert all non-None non-Operation outputs to Tensors.
for i, o in enumerate(flat_outputs):
if o is None:
flat_outputs[i] = None
continue
if isinstance(o, ops.Operation):
raise ValueError(
"tpu.rewrite does not support Operation as return value in non-flat "
"output structure. You can set returned Operations as control "
"dependencies of returned Tensors so Operations are triggered when "
'Tensors are evaluated. Operation found: "%s"' % o.name)
try:
o = ops.convert_to_tensor(o)
except Exception as e:
raise ValueError(
"TPU function return values must all either be Operations or "
'convertible to Tensors. Got error: "%s"' % str(e))
# Wraps outputs in Identity ops. Otherwise a replicated input copied
# straight to an output would bypass the replicate(). This would be bad
# because the TPUReplicatedInput/TPUReplicatedOutput operator would not
# be rewritten away, leading to a runtime error.
# TODO(phawkins): extend the rewrite to elide these nodes instead.
with ops.device(o.device if o.device else core(0)):
o = array_ops.identity(o)
# pylint: disable=protected-access
o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
flat_outputs[i] = array_ops.identity(o)
# All flat_outputs are Tensors, and no Operations.
return flat_outputs, [], outputs
def split_compile_and_shard(
computation: Callable[..., Any],
inputs: Optional[List[List[Optional[core_types.Tensor]]]] = None,
num_shards: int = 1,
input_shard_axes: Optional[List[int]] = None,
outputs_from_all_shards: Union[bool, List[bool]] = True,
output_shard_axes: Optional[List[int]] = None,
infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
name: Optional[Text] = None,
xla_options: Optional[XLAOptions] = None,
) -> Tuple[ops.Operation, List[core_types.Tensor]]:
"""Shards `computation` for parallel execution.
`inputs` must be a list of Tensors or None (equivalent to an empty list), each
of which has a corresponding split axis (from `input_shard_axes`). Each input
is split into `num_shards` pieces along the corresponding axis, and
computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
If `outputs_from_all_shards` is true, the outputs from all shards of
`computation` are concatenated back together along their `output_shard_axes`.
Otherwise, each output is taken from an arbitrary shard.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty list). Each
      input tensor has a corresponding shard axis, given by `input_shard_axes`,
      along which its size must be divisible by `num_shards`.
num_shards: The number of shards.
input_shard_axes: A list of dimensions along which to shard `inputs`, or
`None`. `None` means "shard all inputs along dimension 0". If not `None`,
there must be one dimension per input.
outputs_from_all_shards: Boolean or list of boolean. For each output, if
`True`, outputs from all shards are concatenated along the corresponding
`output_shard_axes` entry. Otherwise, each output is taken
from an arbitrary shard. If the argument is a boolean, the argument's
value is used for each output.
output_shard_axes: A list of dimensions along which to concatenate the
outputs of `computation`, or `None`. `None` means "concatenate all outputs
along dimension 0". If not `None`, there must be one dimension per output.
Ignored if `outputs_from_all_shards` is False.
infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
of `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
xla_options: An instance of `tpu.XLAOptions` which indicates the options
passed to XLA compiler. Use `None` for default options.
Returns:
A tuple of (compile op, [output tensors]).
Raises:
ValueError: If num_shards <= 0
ValueError: If len(input_shard_axes) != len(inputs)
ValueError: If len(output_shard_axes) != len(outputs from `computation`)
"""
# TODO(phawkins): consider adding support for broadcasting Tensors passed as
# inputs.
if num_shards <= 0:
raise ValueError("num_shards must be a positive integer.")
inputs = [] if inputs is None else inputs
if not isinstance(inputs, list):
raise TypeError("tpu.shard()'s inputs must be a list of Tensors or None.")
# Converts inputs to Tensors.
inputs = [ops.convert_to_tensor(x) for x in inputs]
if input_shard_axes is None:
input_shard_axes = [0] * len(inputs)
if len(inputs) != len(input_shard_axes):
raise ValueError("Length of input_shard_axes must be equal to the number "
"of inputs.")
if inputs:
# Splits the `inputs` along the corresponding `input_shard_axes`, giving
# lists with layout [input][shard]
split_inputs = [
array_ops.split(x, num_shards, axis=axis)
for (axis, x) in zip(input_shard_axes, inputs)]
# Transposes the input lists to have layout [shard][input]
transposed_inputs = [list(i) for i in zip(*split_inputs)]
else:
transposed_inputs = [[]] * num_shards
compile_op, outputs = split_compile_and_replicate(
computation,
transposed_inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name,
xla_options=xla_options)
# There must be at least one shard since num_shards > 0.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
if isinstance(outputs[0], ops.Operation):
# pylint: enable=indexing-exception
# There were no outputs from the computation and replicate returned a list
# of NoOps with control dependencies on the computation. Return the first
# one so it can be used as a control dependency or fetch node.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return compile_op, [outputs[0]]
# pylint: enable=indexing-exception
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
num_outputs = len(outputs[0])
# pylint: enable=indexing-exception
if output_shard_axes is None:
output_shard_axes = [0] * num_outputs
if num_outputs != len(output_shard_axes):
raise ValueError("Length of output_shard_axes must be equal to the number "
"of outputs.")
if isinstance(outputs_from_all_shards, bool):
outputs_from_all_shards = [outputs_from_all_shards] * num_outputs
if num_outputs != len(outputs_from_all_shards):
raise ValueError("Length of outputs_from_all_shards must be equal to the "
"number of outputs.")
results = []
for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
zip(*outputs)):
if all_shards:
# Concatenate all of the outputs together (use stack for scalars).
shape = x[0].shape
is_scalar = shape is not None and (shape.ndims == 0)
results.append((array_ops.stack(list(x)) if is_scalar
else array_ops.concat(list(x), axis=axis)))
else:
# TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
results.append(x[0])
return compile_op, results
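# Hedged usage sketch (an addition; building the ops requires only a TF1
# graph, while running them assumes an initialized TPU system). Never called
# here, so module behavior is unchanged.
def _split_compile_and_shard_example():
  def computation(x):
    return math_ops.reduce_sum(x, axis=0)
  compile_op, outputs = split_compile_and_shard(
      computation, inputs=[array_ops.ones([8, 4])], num_shards=2)
  # Running `compile_op` first surfaces compilation errors before execution.
  return compile_op, outputs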
@tf_export(v1=["tpu.shard"])
def shard(
computation: Callable[..., Any],
inputs: Optional[List[core_types.Tensor]] = None,
num_shards: int = 1,
input_shard_axes: Optional[List[int]] = None,
outputs_from_all_shards: Union[bool, List[bool]] = True,
output_shard_axes: Optional[List[int]] = None,
infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
name: Optional[Text] = None,
xla_options: Optional[XLAOptions] = None) -> List[core_types.Tensor]:
"""Shards `computation` for parallel execution.
`inputs` must be a list of Tensors or None (equivalent to an empty list), each
of which has a corresponding split axis (from `input_shard_axes`). Each input
is split into `num_shards` pieces along the corresponding axis, and
computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
TODO(phawkins): consider adding support for broadcasting Tensors passed
as inputs.
If `outputs_from_all_shards` is true, the outputs from all shards of
`computation` are concatenated back together along their `output_shard_axes`.
Otherwise, each output is taken from an arbitrary shard.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty list). Each
      input tensor has a corresponding shard axis, given by `input_shard_axes`,
      along which its size must be divisible by `num_shards`.
num_shards: The number of shards.
input_shard_axes: A list of dimensions along which to shard `inputs`, or
`None`. `None` means "shard all inputs along dimension 0". If not `None`,
there must be one dimension per input.
outputs_from_all_shards: Boolean or list of boolean. For each output, if
`True`, outputs from all shards are concatenated along the corresponding
`output_shard_axes` entry. Otherwise, each output is taken
from an arbitrary shard. If the argument is a boolean, the argument's
value is used for each output.
output_shard_axes: A list of dimensions along which to concatenate the
outputs of `computation`, or `None`. `None` means "concatenate all outputs
along dimension 0". If not `None`, there must be one dimension per output.
Ignored if `outputs_from_all_shards` is False.
infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
of `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
xla_options: An instance of `tpu.XLAOptions` which indicates the options
passed to XLA compiler. Use `None` for default options.
Returns:
A list of output tensors.
Raises:
ValueError: If num_shards <= 0
ValueError: If len(input_shard_axes) != len(inputs)
ValueError: If len(output_shard_axes) != len(outputs from `computation`)
"""
return split_compile_and_shard(
computation,
inputs=inputs,
num_shards=num_shards,
input_shard_axes=input_shard_axes,
outputs_from_all_shards=outputs_from_all_shards,
output_shard_axes=output_shard_axes,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name,
xla_options=xla_options)[1]
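# Hedged usage sketch (an addition): each of the 2 shards receives a [4, 4]
# slice of the [8, 4] input, and the per-shard outputs are concatenated back
# along dimension 0. Assumes an initialized TPU system at run time.
def _shard_example():
  def computation(x):
    return x * 2.0
  return shard(computation, inputs=[array_ops.ones([8, 4])], num_shards=2)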
@tf_export(v1=["tpu.batch_parallel"])
def batch_parallel(
computation: Callable[..., Any],
inputs: Optional[List[List[Optional[core_types.Tensor]]]] = None,
num_shards: int = 1,
infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
name: Optional[Text] = None,
xla_options: Optional[XLAOptions] = None):
"""Shards `computation` along the batch dimension for parallel execution.
Convenience wrapper around shard().
`inputs` must be a list of Tensors or None (equivalent to an empty list).
Each input is split into `num_shards` pieces along the 0-th dimension, and
computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
The outputs from all shards are concatenated back together along their 0-th
dimension.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: A Python function that builds a computation to apply to each
shard of the input.
inputs: A list of input tensors or None (equivalent to an empty list). The
0-th dimension of each Tensor must have size divisible by `num_shards`.
num_shards: The number of shards.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each shard of the computation uses
only one core, and there is either only one shard, or the number of shards
is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
xla_options: An instance of `tpu.XLAOptions` which indicates the options
passed to XLA compiler. Use `None` for default options.
Returns:
A list of output tensors.
Raises:
ValueError: If `num_shards <= 0`
"""
return shard(
computation,
inputs,
num_shards=num_shards,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name,
xla_options=xla_options)
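# Hedged usage sketch (an addition): batch_parallel is shard() with every
# shard/concat axis fixed at the batch (0-th) dimension.
def _batch_parallel_example():
  def computation(x):
    return x + 1.0
  return batch_parallel(
      computation, inputs=[array_ops.ones([8, 4])], num_shards=2)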
@tf_export(v1=["tpu.rewrite"])
def rewrite(
computation: Callable[..., Any],
inputs: Optional[List[List[Optional[core_types.Tensor]]]] = None,
infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
name: Optional[Text] = None,
xla_options: Optional[XLAOptions] = None) -> Any:
"""Rewrites `computation` for execution on a TPU system.
Args:
computation: A Python function that builds a computation to apply to the
input. If the function takes n inputs, 'inputs' should be a list of n
tensors.
`computation` may return a list of operations and tensors. Tensors must
come before operations in the returned list. The return value of
`rewrite` is a list of tensors corresponding to the tensors from the
output of `computation`.
All `Operation`s constructed during `computation` will be executed when
evaluating any of the returned output tensors, not just the ones returned.
inputs: A list of input tensors or `None` (equivalent to an empty list).
Each input can be a nested structure containing values that are
convertible to tensors. Note that passing an N-dimension list of
      compatible values will result in an N-dimension list of scalar tensors
      rather than a single rank-N tensor. If you need different behavior,
convert part of inputs to tensors with `tf.convert_to_tensor`.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: if not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. May be omitted for a single-core computation, in which
case the core attached to task 0, TPU device 0 is used.
name: (Deprecated) Does nothing.
xla_options: An instance of `tpu.XLAOptions` which indicates the options
passed to XLA compiler. Use `None` for default options.
Returns:
    Same data structure as if computation(*inputs) were called directly, with
    some exceptions for correctness. Exceptions include:
1) None output: a NoOp would be returned which control-depends on
computation.
2) Single value output: A tuple containing the value would be returned.
3) Operation-only outputs: a NoOp would be returned which
control-depends on computation.
TODO(b/121383831): Investigate into removing these special cases.
"""
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return replicate(
computation,
None if inputs is None else [inputs],
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name,
xla_options=xla_options)[0]
# pylint: enable=indexing-exception
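# Hedged usage sketch (an addition): rewrite() is the single-replica entry
# point. Note that, per the docstring above, a single-value output comes back
# as a 1-tuple. Never called here, so module behavior is unchanged.
def _rewrite_example():
  def computation(x):
    return math_ops.reduce_mean(x)
  return rewrite(computation, inputs=[array_ops.ones([4])])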
# Operations that indicate some error in the user's inference graph.
_DENYLISTED_INFERENCE_OPS = set([
"ReadVariableOp",
"AssignVariableOp",
"AssignAddVariableOp",
"AssignSubVariableOp",
"VarHandleOp",
"Variable",
"VariableV2",
])
def under_tpu_inference_context() -> bool:
"""Check if it is currently under `_TPUInferenceContext`."""
graph = ops.get_default_graph()
while graph:
context = graph._get_control_flow_context() # pylint: disable=protected-access
while context:
if isinstance(context, _TPUInferenceContext):
return True
context = context.outer_context
if isinstance(graph, function._FuncGraph): # pylint: disable=protected-access
graph = graph._outer_graph # pylint: disable=protected-access
elif isinstance(graph, func_graph.FuncGraph):
graph = graph.outer_graph
else:
return False
class _TPUInferenceContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside a TPU inference computation.
The primary role of `_TPUInferenceContext` is to indicate the mode of
operation and possibly sanity check operators inside a
tpu.rewrite_for_inference() computation.
"""
def __init__(self, name: Text, check_ops: bool = True):
super(_TPUInferenceContext, self).__init__()
self._name = name
self._check_ops = check_ops
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
# pylint: disable=protected-access
if self._check_ops and op.type in _DENYLISTED_INFERENCE_OPS:
raise NotImplementedError(
"Operation of type %s (%s) is not supported on the TPU for inference."
" Execution will fail if this op is used in the graph. Make sure your"
" variables are using variable_scope." % (op.type, op.name))
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
result = val
if self._outer_context:
result = self._outer_context.AddValue(val)
return result
def AddInnerOp(self, op):
self._AddOpInternal(op)
@property
def grad_state(self):
return None
def validate_inference_rewrite_for_variables(graph: ops.Graph):
"""Validates whether rewrite_for_inference() 'worked' for variables.
The rewrite_for_inference() method is supposed to append GuaranteeConstOps
after ReadVariableOps, but this mechanism works only if you are using
tf.compat.v1.get_variable() to create and access variables in your tpu
computation. This validation method can be called immediately after calling
  tpu.rewrite_for_inference() to check whether GuaranteeConstOps were added
to the graph.
Typical usages:
tpu.validate_inference_rewrite_for_variables(
tf.compat.v1.get_default_graph())
tpu.validate_inference_rewrite_for_variables(sess.graph)
Args:
graph: The graph which needs to be validated.
Raises:
RuntimeError: if validation failed.
"""
if not any(x.type == "GuaranteeConst" for x in graph.get_operations()):
raise RuntimeError(
"No GuaranteeConst ops found in the graph after running "
"tpu.rewrite_for_inference(...). Please check that you are using "
"tf.get_variable() to create and access variables in your tpu "
"computation.")
def rewrite_for_inference(
computation: Callable[..., Any],
inputs: Optional[List[core_types.Tensor]] = None,
infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
name: Optional[Text] = None) -> List[core_types.Tensor]:
"""Rewrites `computation` for inference on a TPU system.
  In addition to 'rewriting' the computation to run on a TPU, if the
  computation uses variables, this moves the ReadVariableOps outside the TPU
  computation and adds GuaranteeConst ops just after the ReadVariableOps.
This mechanism works only if you are using tf.compat.v1.get_variable() to
create and access variables in your tpu computation. You can validate
  whether this worked by calling the validate_inference_rewrite_for_variables()
  method immediately after this method, to check whether GuaranteeConstOps
  were added to the graph.
Args:
computation: A Python function that builds a computation to apply to the
input. If the function takes n inputs, 'inputs' should be a list of n
tensors. If the function returns m outputs, rewrite will return a list of
m tensors.
inputs: A list of input tensors or `None` (equivalent to an empty list).
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to `computation`.
device_assignment: if not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. May be omitted for a single-core computation, in which
case the core attached to task 0, TPU device 0 is used.
name: The name of the operator.
Returns:
A list of output tensors.
"""
def guarantee_const_getter(getter, name, *args, **kwargs):
with ops.control_dependencies(None):
return array_ops.guarantee_const(
getter(name, *args, **kwargs), name=name + "/GuaranteeConst")
def wrapped_computation(*args, **kwargs):
"""Execute computation under `_TPUInferenceContext`."""
context = _TPUInferenceContext(
name=ops.get_default_graph().unique_name("rewrite_for_inference"))
try:
context.Enter()
vscope = variable_scope.get_variable_scope()
prev_custom_getter = vscope.custom_getter
prev_caching_device = vscope.caching_device
vscope.set_custom_getter(guarantee_const_getter)
vscope.set_caching_device(lambda op: op.device)
result = computation(*args, **kwargs)
vscope.set_custom_getter(prev_custom_getter)
vscope.set_caching_device(prev_caching_device)
finally:
context.Exit()
return result
# pylint: disable=undefined-variable
return rewrite(
wrapped_computation,
inputs=inputs,
infeed_queue=infeed_queue,
device_assignment=device_assignment,
name=name)
# pylint: enable=undefined-variable
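# Hedged usage sketch (an addition): rewrite a computation for inference and
# then validate that GuaranteeConst ops were inserted. `inference_fn` and
# `features` are hypothetical stand-ins for the user's model and inputs.
def _rewrite_for_inference_example(inference_fn, features):
  outputs = rewrite_for_inference(inference_fn, inputs=[features])
  validate_inference_rewrite_for_variables(ops.get_default_graph())
  return outputs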
def prune_unconnected_ops_from_xla(prune_graph: ops.Graph):
"""Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE.
Args:
prune_graph: A tensorflow graph from which we wish to prune unconnected ops
as listed in _UNCONNECTED_OPS_TO_PRUNE. In general, these ops should have
no inputs and no consumers. These can often be left behind due to graph
construction rewiring (for instance TF-Hub). While they never execute,
they will cause XLA compile to fail so we strip them from XLA compile by
removing the tpu_replicate attribute.
"""
# Scan over the top level graph and all function graphs.
for graph in [prune_graph] + [
f for f in prune_graph._functions.values() # pylint: disable=protected-access
]:
if not isinstance(graph, ops.Graph):
continue
for op in graph.get_operations():
if op.type not in _UNCONNECTED_OPS_TO_PRUNE:
continue
outputs_consumed = False
for output in op.outputs:
if output.consumers():
outputs_consumed = True
break
if not outputs_consumed:
logging.info(
"Pruning OP %s of type %s from XLA Compile due to "
"it being disconnected.", op.name, op.type)
op._clear_attr(_TPU_REPLICATE_ATTR) # pylint: disable=protected-access
|
{
"content_hash": "4db6212cd12d933a611e60d0c594e93f",
"timestamp": "",
"source": "github",
"line_count": 2269,
"max_line_length": 104,
"avg_line_length": 42.3415601586602,
"alnum_prop": 0.6878519459161263,
"repo_name": "sarvex/tensorflow",
"id": "ad40555ceeaca3b64465e6d082cc99ba25c0dac2",
"size": "96723",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow/python/tpu/tpu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
}
|
"""Tests for certbot.error_handler."""
import signal
import sys
import unittest
import mock
class ErrorHandlerTest(unittest.TestCase):
"""Tests for certbot.error_handler."""
def setUp(self):
from certbot import error_handler
self.init_func = mock.MagicMock()
self.init_args = set((42,))
self.init_kwargs = {'foo': 'bar'}
self.handler = error_handler.ErrorHandler(self.init_func,
*self.init_args,
**self.init_kwargs)
# pylint: disable=protected-access
self.signals = error_handler._SIGNALS
def test_context_manager(self):
try:
with self.handler:
raise ValueError
except ValueError:
pass
self.init_func.assert_called_once_with(*self.init_args,
**self.init_kwargs)
@mock.patch('certbot.error_handler.os')
@mock.patch('certbot.error_handler.signal')
def test_signal_handler(self, mock_signal, mock_os):
# pylint: disable=protected-access
mock_signal.getsignal.return_value = signal.SIG_DFL
self.handler.set_signal_handlers()
signal_handler = self.handler._signal_handler
for signum in self.signals:
mock_signal.signal.assert_any_call(signum, signal_handler)
signum = self.signals[0]
signal_handler(signum, None)
self.init_func.assert_called_once_with(*self.init_args,
**self.init_kwargs)
mock_os.kill.assert_called_once_with(mock_os.getpid(), signum)
self.handler.reset_signal_handlers()
for signum in self.signals:
mock_signal.signal.assert_any_call(signum, signal.SIG_DFL)
def test_bad_recovery(self):
bad_func = mock.MagicMock(side_effect=[ValueError])
self.handler.register(bad_func)
self.handler.call_registered()
self.init_func.assert_called_once_with(*self.init_args,
**self.init_kwargs)
bad_func.assert_called_once_with()
def test_sysexit_ignored(self):
try:
with self.handler:
sys.exit(0)
except SystemExit:
pass
self.assertFalse(self.init_func.called)
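# Hedged usage sketch (an addition, not a test): the pattern these tests
# exercise. ErrorHandler calls the registered recovery function only when the
# block exits via an exception or a handled signal; all parameters are
# hypothetical stand-ins supplied by the caller.
def _error_handler_usage_sketch(recovery_func, path, risky_work):
    from certbot import error_handler
    with error_handler.ErrorHandler(recovery_func, path):
        risky_work()  # recovery_func(path) runs only if this raises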
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
{
"content_hash": "cd11d7e41e2c81cd2d507aaf43471b9c",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 70,
"avg_line_length": 34.46478873239437,
"alnum_prop": 0.5713118103800572,
"repo_name": "wteiken/letsencrypt",
"id": "5434b36be8f4b217239377b62cc9e0246068bf1c",
"size": "2447",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "certbot/tests/error_handler_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "50680"
},
{
"name": "Augeas",
"bytes": "5062"
},
{
"name": "Batchfile",
"bytes": "35005"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37245"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1384952"
},
{
"name": "Shell",
"bytes": "123322"
}
],
"symlink_target": ""
}
|
"""
Base class for all nodes created and used by the binary expression
BooleanExpressionTree. It defines the interface used for evaluating the tree.
"""
class Node(object):
def evaluate(self, evaluation_param):
raise NotImplementedError("evaluate not implemented")
def add_child(self, node):
raise NotImplementedError("add_child not implemented")
def __str__(self):
raise NotImplementedError("__str__ not implemented in base EvaluationEngine")
"""
ExpressionNodes contain the tokens of an expression to be evaluated by
the EvaluationEngine.
"""
class ExpressionNode(Node):
def __init__(self, expression_tokens, evaluation_engine):
self.expression_tokens = expression_tokens
self.evaluation_engine = evaluation_engine
def evaluate(self, evaluation_param):
return self.evaluation_engine.evaluate(self.expression_tokens, evaluation_param)
def __str__(self):
return ' '.join(self.expression_tokens)
"""
UnaryOperatorNodes evaluate boolean operations on a single operand node
"""
class UnaryOperatorNode(Node):
OPERATORS = frozenset(tuple("!"))
def __init__(self, operator, operand_node=None):
self.operator = operator
self.operand_node = operand_node
def evaluate(self, evaluation_param):
if self.operator == "!":
return not self.operand_node.evaluate(evaluation_param)
else:
raise Exception("Unary operator %s not defined" % self.operator)
def add_child(self, node):
if self.operand_node is not None:
node.add_child(self.operand_node)
self.operand_node = node
def __str__(self):
operand_str = str(self.operand_node)
if operand_str.startswith('('):
return self.operator + operand_str
else:
return self.operator + '(' + operand_str + ')'
"""
BinaryOperatorNodes evaluate boolean operations on two operand nodes
"""
class BinaryOperatorNode(Node):
OPERATORS = frozenset(("and", "&&", "or", "||"))
def __init__(self, operator, operand_node1=None, operand_node2=None):
self.operator = operator.lower()
self.operand_node1 = operand_node1
self.operand_node2 = operand_node2
def evaluate(self, evaluation_param):
if self.operator == "and" or self.operator == "&&":
return self.operand_node1.evaluate(evaluation_param) and self.operand_node2.evaluate(evaluation_param)
elif self.operator == "or" or self.operator == "||":
return self.operand_node1.evaluate(evaluation_param) or self.operand_node2.evaluate(evaluation_param)
else:
raise Exception("Binary operator %s not defined" % self.operator)
def add_child(self, node):
if self.operand_node1 is None:
self.operand_node1 = node
else:
if self.operand_node2 is not None:
node.add_child(self.operand_node2)
self.operand_node2 = node
def is_or_op(self):
return self.operator in ("or", "||")
def is_and_op(self):
return self.operator in ("and", "&&")
def insert_child(self):
raise NotImplementedError("evaluate not implemented")
def __str__(self):
return "(" + str(self.operand_node1) + ' ' + self.operator + ' ' + str(self.operand_node2) + ")"
|
{
"content_hash": "57465d98270568821295aa16d7573cde",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 114,
"avg_line_length": 30.770642201834864,
"alnum_prop": 0.6437090041741205,
"repo_name": "bheni/sql4json",
"id": "6aae120bf2f7bf70d248d03a663cac318fcd1815",
"size": "3354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sql4json/boolean_expressions/tree_nodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "74486"
},
{
"name": "Shell",
"bytes": "56"
}
],
"symlink_target": ""
}
|
from rulengine.core import (import_class, get_date, ConditionOperator,
ExecutableCondition)
def execute_condition(cond):
"""
Get a rule instance for given operator and
return condition lambda func
"""
condition_method = 'rulengine.conditions.c_{0}_{1}'.format(
cond.data_type, cond.operator)
try:
func = import_class(condition_method)
except AttributeError:
condition_method = 'rulengine.conditions.c_{0}'.format(
cond.data_type)
func = import_class(condition_method)
executable_cond = convert_condition_to_executable(cond)
return func(executable_cond)
def convert_condition_to_executable(cond):
try:
func = ConditionOperator.FUNC_MAPPING[cond.operator]
except KeyError:
raise ValueError('Invalid condition operator.')
executable_cond = ExecutableCondition(
value=cond.value, func=func, comparison_value=cond.comparison_value)
return executable_cond
def c_int(cond):
return cond.func(int(cond.value), int(cond.comparison_value))
def c_int_in(cond):
val = int(cond.value)
return cond.func(val, [int(_) for _ in cond.comparison_value.split(',')])
def c_int_contains(cond):
raise ValueError('Wrong usage')
def c_float(cond):
return cond.func(float(cond.value), float(cond.comparison_value))
def c_float_in(cond):
val = float(cond.value)
return cond.func(val, [float(_) for _ in cond.comparison_value.split(',')])
def c_float_contains(cond):
raise ValueError('Wrong usage')
def c_str(cond):
return cond.func(str(cond.value), str(cond.comparison_value))
def c_str_in(cond):
val = str(cond.value)
return cond.func(val, [str(_) for _ in cond.comparison_value.split(',')])
def c_str_contains(cond):
return c_str(cond)
def c_date(cond):
return cond.func(get_date(cond.value), get_date(cond.comparison_value))
def c_date_in(cond):
val = get_date(cond.value)
return cond.func(
val, [get_date(_) for _ in cond.comparison_value.split(',')])
def c_date_contains(cond):
raise ValueError('Wrong usage')
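# --- Hedged usage sketch (not part of the original module) ---
# _SimpleCond is a hypothetical stand-in exposing only the attributes the
# c_* evaluators read (value, func, comparison_value); in the real package
# these come from ExecutableCondition via convert_condition_to_executable.
import operator as _operator
class _SimpleCond(object):
    def __init__(self, value, func, comparison_value):
        self.value = value
        self.func = func
        self.comparison_value = comparison_value
if __name__ == '__main__':
    # c_int casts both operands to int before applying the operator func
    print(c_int(_SimpleCond('3', _operator.gt, '2')))  # True
    # the *_in variants test membership in a comma separated list
    print(c_int_in(_SimpleCond('3', lambda v, seq: v in seq, '1,2,3')))  # True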
|
{
"content_hash": "773418b9fc5be14e2a6d2091356d3128",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 25.152941176470588,
"alnum_prop": 0.6641721234798877,
"repo_name": "baranbartu/rulengine",
"id": "3146d2562d43818617c7a33761f5f7aaa46d4e6d",
"size": "2138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rulengine/conditions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5428"
}
],
"symlink_target": ""
}
|
import functools
import time
from keystoneauth1 import exceptions as ks_exc
from keystoneauth1 import loading as keystone
from keystoneauth1 import session
from oslo_log import log as logging
from nova.compute import utils as compute_utils
import nova.conf
from nova.i18n import _LE, _LI, _LW
from nova import objects
from nova.objects import fields
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
VCPU = fields.ResourceClass.VCPU
MEMORY_MB = fields.ResourceClass.MEMORY_MB
DISK_GB = fields.ResourceClass.DISK_GB
def safe_connect(f):
@functools.wraps(f)
def wrapper(self, *a, **k):
try:
            # We've failed in a non-recoverable way; fully give up.
if self._disabled:
return
return f(self, *a, **k)
except ks_exc.EndpointNotFound:
msg = _LW("The placement API endpoint not found. Optional use of "
"placement API for reporting is now disabled.")
LOG.warning(msg)
self._disabled = True
except ks_exc.MissingAuthPlugin:
msg = _LW("No authentication information found for placement API. "
"Optional use of placement API for reporting is now "
"disabled.")
LOG.warning(msg)
self._disabled = True
except ks_exc.ConnectFailure:
msg = _LW('Placement API service is not responding.')
LOG.warning(msg)
return wrapper
class SchedulerReportClient(object):
"""Client class for updating the scheduler."""
ks_filter = {'service_type': 'placement',
'region_name': CONF.placement.os_region_name}
def __init__(self):
# A dict, keyed by the resource provider UUID, of ResourceProvider
# objects that will have their inventories and allocations tracked by
# the placement API for the compute host
self._resource_providers = {}
auth_plugin = keystone.load_auth_from_conf_options(
CONF, 'placement')
self._client = session.Session(auth=auth_plugin)
# TODO(sdague): use this to disable fully when we don't find
# the endpoint.
self._disabled = False
def get(self, url):
return self._client.get(
url,
endpoint_filter=self.ks_filter, raise_exc=False)
def post(self, url, data):
# NOTE(sdague): using json= instead of data= sets the
# media type to application/json for us. Placement API is
# more sensitive to this than other APIs in the OpenStack
# ecosystem.
return self._client.post(
url, json=data,
endpoint_filter=self.ks_filter, raise_exc=False)
def put(self, url, data):
# NOTE(sdague): using json= instead of data= sets the
# media type to application/json for us. Placement API is
# more sensitive to this than other APIs in the OpenStack
# ecosystem.
return self._client.put(
url, json=data,
endpoint_filter=self.ks_filter, raise_exc=False)
def delete(self, url):
return self._client.delete(
url,
endpoint_filter=self.ks_filter, raise_exc=False)
@safe_connect
def _get_resource_provider(self, uuid):
"""Queries the placement API for a resource provider record with the
supplied UUID.
Returns an `objects.ResourceProvider` object if found or None if no
such resource provider could be found.
:param uuid: UUID identifier for the resource provider to look up
"""
resp = self.get("/resource_providers/%s" % uuid)
if resp.status_code == 200:
data = resp.json()
return objects.ResourceProvider(
uuid=uuid,
name=data['name'],
generation=data['generation'],
)
elif resp.status_code == 404:
return None
else:
msg = _LE("Failed to retrieve resource provider record from "
"placement API for UUID %(uuid)s. "
"Got %(status_code)d: %(err_text)s.")
args = {
'uuid': uuid,
'status_code': resp.status_code,
'err_text': resp.text,
}
LOG.error(msg, args)
@safe_connect
def _create_resource_provider(self, uuid, name):
"""Calls the placement API to create a new resource provider record.
Returns an `objects.ResourceProvider` object representing the
newly-created resource provider object.
:param uuid: UUID of the new resource provider
:param name: Name of the resource provider
"""
url = "/resource_providers"
payload = {
'uuid': uuid,
'name': name,
}
resp = self.post(url, payload)
if resp.status_code == 201:
msg = _LI("Created resource provider record via placement API "
"for resource provider with UUID {0} and name {1}.")
msg = msg.format(uuid, name)
LOG.info(msg)
return objects.ResourceProvider(
uuid=uuid,
name=name,
generation=1,
)
elif resp.status_code == 409:
# Another thread concurrently created a resource provider with the
# same UUID. Log a warning and then just return the resource
# provider object from _get_resource_provider()
msg = _LI("Another thread already created a resource provider "
"with the UUID {0}. Grabbing that record from "
"the placement API.")
msg = msg.format(uuid)
LOG.info(msg)
return self._get_resource_provider(uuid)
else:
msg = _LE("Failed to create resource provider record in "
"placement API for UUID %(uuid)s. "
"Got %(status_code)d: %(err_text)s.")
args = {
'uuid': uuid,
'status_code': resp.status_code,
'err_text': resp.text,
}
LOG.error(msg, args)
def _ensure_resource_provider(self, uuid, name=None):
"""Ensures that the placement API has a record of a resource provider
with the supplied UUID. If not, creates the resource provider record in
the placement API for the supplied UUID, optionally passing in a name
for the resource provider.
The found or created resource provider object is returned from this
method. If the resource provider object for the supplied uuid was not
found and the resource provider record could not be created in the
placement API, we return None.
:param uuid: UUID identifier for the resource provider to ensure exists
:param name: Optional name for the resource provider if the record
does not exist. If empty, the name is set to the UUID
value
"""
if uuid in self._resource_providers:
return self._resource_providers[uuid]
rp = self._get_resource_provider(uuid)
if rp is None:
name = name or uuid
rp = self._create_resource_provider(uuid, name)
if rp is None:
return
self._resource_providers[uuid] = rp
return rp
def _compute_node_inventory(self, compute_node):
inventories = {
'VCPU': {
'total': compute_node.vcpus,
'reserved': 0,
'min_unit': 1,
'max_unit': 1,
'step_size': 1,
'allocation_ratio': compute_node.cpu_allocation_ratio,
},
'MEMORY_MB': {
'total': compute_node.memory_mb,
'reserved': CONF.reserved_host_memory_mb,
'min_unit': 1,
'max_unit': 1,
'step_size': 1,
'allocation_ratio': compute_node.ram_allocation_ratio,
},
'DISK_GB': {
'total': compute_node.local_gb,
'reserved': CONF.reserved_host_disk_mb * 1024,
'min_unit': 1,
'max_unit': 1,
'step_size': 1,
'allocation_ratio': compute_node.disk_allocation_ratio,
},
}
data = {
'inventories': inventories,
}
return data
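    # Hedged illustration (not in the original file): for a hypothetical
    # compute node with vcpus=8 and cpu_allocation_ratio=16.0, the VCPU
    # entry of the payload built above would be:
    #
    #     'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 1,
    #              'step_size': 1, 'allocation_ratio': 16.0},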
def _get_inventory(self, compute_node):
url = '/resource_providers/%s/inventories' % compute_node.uuid
result = self.get(url)
if not result:
return {'inventories': {}}
return result.json()
def _update_inventory_attempt(self, compute_node):
"""Update the inventory for this compute node if needed.
:param compute_node: The objects.ComputeNode for the operation
:returns: True if the inventory was updated (or did not need to be),
False otherwise.
"""
data = self._compute_node_inventory(compute_node)
curr = self._get_inventory(compute_node)
# Update our generation immediately, if possible. Even if there
# are no inventories we should always have a generation but let's
# be careful.
server_gen = curr.get('resource_provider_generation')
if server_gen:
my_rp = self._resource_providers[compute_node.uuid]
if server_gen != my_rp.generation:
LOG.debug('Updating our resource provider generation '
'from %(old)i to %(new)i',
{'old': my_rp.generation,
'new': server_gen})
my_rp.generation = server_gen
# Check to see if we need to update placement's view
if data['inventories'] == curr.get('inventories', {}):
return True
data['resource_provider_generation'] = (
self._resource_providers[compute_node.uuid].generation)
url = '/resource_providers/%s/inventories' % compute_node.uuid
result = self.put(url, data)
if result.status_code == 409:
LOG.info(_LI('Inventory update conflict for %s'),
compute_node.uuid)
# Invalidate our cache and re-fetch the resource provider
# to be sure to get the latest generation.
del self._resource_providers[compute_node.uuid]
self._ensure_resource_provider(compute_node.uuid,
compute_node.hypervisor_hostname)
return False
elif not result:
LOG.warning(_LW('Failed to update inventory for '
'%(uuid)s: %(status)i %(text)s'),
{'uuid': compute_node.uuid,
'status': result.status_code,
'text': result.text})
return False
if result.status_code != 200:
LOG.info(
_LI('Received unexpected response code %(code)i while '
'trying to update inventory for compute node %(uuid)s'
': %(text)s'),
{'uuid': compute_node.uuid,
'code': result.status_code,
'text': result.text})
return False
# Update our view of the generation for next time
updated_inventories_result = result.json()
new_gen = updated_inventories_result['resource_provider_generation']
self._resource_providers[compute_node.uuid].generation = new_gen
LOG.debug('Updated inventory for %s at generation %i' % (
compute_node.uuid, new_gen))
return True
@safe_connect
def _update_inventory(self, compute_node):
for attempt in (1, 2, 3):
if compute_node.uuid not in self._resource_providers:
# NOTE(danms): Either we failed to fetch/create the RP
# on our first attempt, or a previous attempt had to
# invalidate the cache, and we were unable to refresh
# it. Bail and try again next time.
LOG.warning(_LW(
'Unable to refresh my resource provider record'))
return False
if self._update_inventory_attempt(compute_node):
return True
time.sleep(1)
return False
def update_resource_stats(self, compute_node):
"""Creates or updates stats for the supplied compute node.
:param compute_node: updated nova.objects.ComputeNode to report
"""
compute_node.save()
self._ensure_resource_provider(compute_node.uuid,
compute_node.hypervisor_hostname)
self._update_inventory(compute_node)
def _allocations(self, instance):
# NOTE(danms): Boot-from-volume instances consume no local disk
is_bfv = compute_utils.is_volume_backed_instance(instance._context,
instance)
disk = ((0 if is_bfv else instance.flavor.root_gb) +
instance.flavor.swap +
instance.flavor.ephemeral_gb)
return {
MEMORY_MB: instance.flavor.memory_mb,
VCPU: instance.flavor.vcpus,
DISK_GB: disk,
}
def _get_allocations_for_instance(self, compute_node, instance):
url = '/allocations/%s' % instance.uuid
resp = self.get(url)
if not resp:
return {}
else:
# NOTE(cdent): This trims to just the allocations being
# used on this compute node. In the future when there
# are shared resources there might be other providers.
return resp.json()['allocations'].get(
compute_node.uuid, {}).get('resources', {})
@safe_connect
def _allocate_for_instance(self, compute_node, instance):
url = '/allocations/%s' % instance.uuid
my_allocations = self._allocations(instance)
current_allocations = self._get_allocations_for_instance(compute_node,
instance)
if current_allocations == my_allocations:
allocstr = ','.join(['%s=%s' % (k, v)
for k, v in my_allocations.items()])
LOG.debug('Instance %(uuid)s allocations are unchanged: %(alloc)s',
{'uuid': instance.uuid, 'alloc': allocstr})
return
allocations = {
'allocations': [
{
'resource_provider': {
'uuid': compute_node.uuid,
},
'resources': my_allocations,
},
],
}
LOG.debug('Sending allocation for instance %s',
allocations,
instance=instance)
r = self.put(url, allocations)
if r:
LOG.info(_LI('Submitted allocation for instance'),
instance=instance)
else:
LOG.warning(
_LW('Unable to submit allocation for instance '
'%(uuid)s (%(code)i %(text)s)'),
{'uuid': instance.uuid,
'code': r.status_code,
'text': r.text})
@safe_connect
def _delete_allocation_for_instance(self, uuid):
url = '/allocations/%s' % uuid
r = self.delete(url)
if r:
LOG.info(_LI('Deleted allocation for instance %s'),
uuid)
else:
LOG.warning(
_LW('Unable to delete allocation for instance '
'%(uuid)s: (%(code)i %(text)s)'),
{'uuid': uuid,
'code': r.status_code,
'text': r.text})
def update_instance_allocation(self, compute_node, instance, sign):
if sign > 0:
self._allocate_for_instance(compute_node, instance)
else:
self._delete_allocation_for_instance(instance.uuid)
@safe_connect
def _get_allocations(self, compute_node):
url = '/resource_providers/%s/allocations' % compute_node.uuid
resp = self.get(url)
if not resp:
return {}
else:
return resp.json()['allocations']
def remove_deleted_instances(self, compute_node, instance_uuids):
allocations = self._get_allocations(compute_node)
if allocations is None:
allocations = {}
instance_dict = {instance['uuid']: instance
for instance in instance_uuids}
removed_instances = set(allocations.keys()) - set(instance_dict.keys())
for uuid in removed_instances:
LOG.warning(_LW('Deleting stale allocation for instance %s'),
uuid)
self._delete_allocation_for_instance(uuid)
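# --- Hedged sketch (not part of the original module) ---
# SchedulerReportClient needs a live keystone/placement deployment, so no
# runnable demo is possible here; the snippet below only mirrors the
# resource arithmetic that _allocations() performs for a hypothetical
# non-boot-from-volume flavor (root_gb=20, swap=1, ephemeral_gb=10).
if __name__ == '__main__':
    is_bfv = False
    disk = (0 if is_bfv else 20) + 1 + 10
    print({MEMORY_MB: 8192, VCPU: 4, DISK_GB: disk})  # DISK_GB == 31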
|
{
"content_hash": "13d84507da43510a490d9b64e78dc2b3",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 79,
"avg_line_length": 39.13729977116705,
"alnum_prop": 0.5490264865813015,
"repo_name": "xuweiliang/Codelibrary",
"id": "fa5041e664b52a50acbb37a7fa1cf090531878bf",
"size": "17736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/scheduler/client/report.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "134284"
},
{
"name": "HTML",
"bytes": "830844"
},
{
"name": "JavaScript",
"bytes": "2421484"
},
{
"name": "Makefile",
"bytes": "4934"
},
{
"name": "Python",
"bytes": "17185807"
},
{
"name": "Shell",
"bytes": "9144"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from django.core.management.base import BaseCommand
from apps.django_crontab.crontab import Crontab
class Command(BaseCommand):
args = '<add|show|remove>'
help = 'run this command to add, show or remove the jobs defined in CRONJOBS setting from/to crontab'
crontab_lines = []
def handle(self, *args, **options):
"""
Dispatches by given subcommand
"""
if len(args) > 0:
if args[0] == 'add':
with Crontab(**options) as crontab:
crontab.remove_jobs()
crontab.add_jobs()
return
elif args[0] == 'show':
with Crontab(readonly=True, **options) as crontab:
crontab.show_jobs()
return
elif args[0] == 'remove':
with Crontab(**options) as crontab:
crontab.remove_jobs()
return
elif args[0] == 'run':
Crontab().run_job(args[1])
return
        print(self.help)
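# Hedged usage note (not part of the original module): the subcommands map
# to shell invocations like the following, assuming CRONJOBS is defined in
# the Django settings:
#
#     python manage.py crontab add     # (re)install jobs from CRONJOBS
#     python manage.py crontab show    # list currently installed jobs
#     python manage.py crontab remove  # uninstall jobs from the crontab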
|
{
"content_hash": "12312f24bd823f0aaff1e00642039d64",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 105,
"avg_line_length": 33.03030303030303,
"alnum_prop": 0.5174311926605505,
"repo_name": "SnailJin/house",
"id": "d044111a36ce9e90981d7891b47affa22a713136",
"size": "1090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/django_crontab/management/commands/crontab.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "41783"
},
{
"name": "HTML",
"bytes": "29875"
},
{
"name": "JavaScript",
"bytes": "80698"
},
{
"name": "Python",
"bytes": "55152"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
}
|
import django.core.validators
import django_inet.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("django_peeringdb", "0017_facility_fields"),
]
operations = [
migrations.AddField(
model_name="facility",
name="region_continent",
field=models.CharField(
blank=True,
choices=[
("North America", "North America"),
("Asia Pacific", "Asia Pacific"),
("Europe", "Europe"),
("South America", "South America"),
("Africa", "Africa"),
("Australia", "Australia"),
("Middle East", "Middle East"),
],
max_length=255,
null=True,
verbose_name="Continental Region",
),
),
]
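# Hedged note (not part of the original migration): once generated, the
# migration is applied with the standard Django workflow, e.g.:
#
#     python manage.py migrate django_peeringdb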
|
{
"content_hash": "b0d890010bc9c0b49fdf13338696a90c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 55,
"avg_line_length": 29.4375,
"alnum_prop": 0.46709129511677283,
"repo_name": "peeringdb/django-peeringdb",
"id": "90672c711e1ebc19ff4efe55ec6281d2fe461ade",
"size": "991",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/django_peeringdb/migrations/0018_add_region_continent_field.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "122972"
}
],
"symlink_target": ""
}
|
"""
Exceptions raised by the Horizon code and the machinery for handling them.
"""
import logging
import os
import sys
import six
from django.core.management import color_style # noqa
from django.http import HttpRequest # noqa
from django.utils import encoding
from django.utils.translation import ugettext_lazy as _
from django.views.debug import CLEANSED_SUBSTITUTE # noqa
from django.views.debug import SafeExceptionReporterFilter # noqa
from horizon.conf import HORIZON_CONFIG # noqa
from horizon import messages
LOG = logging.getLogger(__name__)
class HorizonReporterFilter(SafeExceptionReporterFilter):
"""Error report filter that's always active, even in DEBUG mode."""
def is_active(self, request):
return True
# TODO(gabriel): This bugfix is cribbed from Django's code. When 1.4.1
# is available we can remove this code.
def get_traceback_frame_variables(self, request, tb_frame):
"""Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper'
in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper,
'sensitive_variables',
None)
break
current_frame = current_frame.f_back
cleansed = []
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed.append((name, CLEANSED_SUBSTITUTE))
return cleansed
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
elif isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed.append((name, value))
return cleansed
else:
# Potentially cleanse only the request if it's one of the
# frame variables.
for name, value in tb_frame.f_locals.items():
if isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed.append((name, value))
return cleansed
class HorizonException(Exception):
"""Base exception class for distinguishing our own exception classes."""
pass
class Http302(HorizonException):
"""Error class which can be raised from within a handler to cause an
early bailout and redirect at the middleware level.
"""
status_code = 302
def __init__(self, location, message=None):
self.location = location
self.message = message
class NotAuthorized(HorizonException):
"""Raised whenever a user attempts to access a resource which they do not
have permission-based access to (such as when failing the
:func:`~horizon.decorators.require_perms` decorator).
The included :class:`~horizon.middleware.HorizonMiddleware` catches
``NotAuthorized`` and handles it gracefully by displaying an error
message and redirecting the user to a login page.
"""
status_code = 401
class NotAuthenticated(HorizonException):
"""Raised when a user is trying to make requests and they are not logged
in.
The included :class:`~horizon.middleware.HorizonMiddleware` catches
``NotAuthenticated`` and handles it gracefully by displaying an error
message and redirecting the user to a login page.
"""
status_code = 403
class NotFound(HorizonException):
"""Generic error to replace all "Not Found"-type API errors."""
status_code = 404
class Conflict(HorizonException):
"""Generic error to replace all "Conflict"-type API errors."""
status_code = 409
class RecoverableError(HorizonException):
"""Generic error to replace any "Recoverable"-type API errors."""
status_code = 100 # HTTP status code "Continue"
class ServiceCatalogException(HorizonException):
"""Raised when a requested service is not available in the
``ServiceCatalog`` returned by Keystone.
"""
def __init__(self, service_name):
message = 'Invalid service catalog service: %s' % service_name
super(ServiceCatalogException, self).__init__(message)
class AlreadyExists(HorizonException):
"""Exception to be raised when trying to create an API resource which
already exists.
"""
def __init__(self, name, resource_type):
self.attrs = {"name": name, "resource": resource_type}
self.msg = _('A %(resource)s with the name "%(name)s" already exists.')
def __repr__(self):
return self.msg % self.attrs
def __str__(self):
return self.msg % self.attrs
def __unicode__(self):
return self.msg % self.attrs
class NotAvailable(HorizonException):
"""Exception to be raised when something is not available."""
pass
class WorkflowError(HorizonException):
"""Exception to be raised when something goes wrong in a workflow."""
pass
class WorkflowValidationError(HorizonException):
"""Exception raised during workflow validation if required data is missing,
or existing data is not valid.
"""
pass
class HandledException(HorizonException):
"""Used internally to track exceptions that have gone through
:func:`horizon.exceptions.handle` more than once.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
UNAUTHORIZED = tuple(HORIZON_CONFIG['exceptions']['unauthorized'])
NOT_FOUND = tuple(HORIZON_CONFIG['exceptions']['not_found'])
RECOVERABLE = (AlreadyExists, Conflict, NotAvailable, ServiceCatalogException)
RECOVERABLE += tuple(HORIZON_CONFIG['exceptions']['recoverable'])
def error_color(msg):
return color_style().ERROR_OUTPUT(msg)
def check_message(keywords, message):
"""Checks an exception for given keywords and raises a new ``ActionError``
with the desired message if the keywords are found. This allows selective
control over API error messages.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
if set(str(exc_value).split(" ")).issuperset(set(keywords)):
exc_value._safe_message = message
raise
def handle(request, message=None, redirect=None, ignore=False,
escalate=False, log_level=None, force_log=None):
"""Centralized error handling for Horizon.
Because Horizon consumes so many different APIs with completely
different ``Exception`` types, it's necessary to have a centralized
place for handling exceptions which may be raised.
Exceptions are roughly divided into 3 types:
#. ``UNAUTHORIZED``: Errors resulting from authentication or authorization
problems. These result in being logged out and sent to the login screen.
#. ``NOT_FOUND``: Errors resulting from objects which could not be
located via the API. These generally result in a user-facing error
message, but are otherwise returned to the normal code flow. Optionally
a redirect value may be passed to the error handler so users are
returned to a different view than the one requested in addition to the
error message.
    #. ``RECOVERABLE``: Generic API errors which generate a user-facing
       message but drop directly back to the regular code flow.
All other exceptions bubble the stack as normal unless the ``ignore``
argument is passed in as ``True``, in which case only unrecognized
errors are bubbled.
If the exception is not re-raised, an appropriate wrapper exception
class indicating the type of exception that was encountered will be
returned.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
log_method = getattr(LOG, log_level or "exception")
force_log = force_log or os.environ.get("HORIZON_TEST_RUN", False)
force_silence = getattr(exc_value, "silence_logging", False)
# Because the same exception may travel through this method more than
# once (if it's re-raised) we may want to treat it differently
# the second time (e.g. no user messages/logging).
handled = issubclass(exc_type, HandledException)
wrap = False
# Restore our original exception information, but re-wrap it at the end
if handled:
exc_type, exc_value, exc_traceback = exc_value.wrapped
wrap = True
log_entry = encoding.force_text(exc_value)
# We trust messages from our own exceptions
if issubclass(exc_type, HorizonException):
message = exc_value
# Check for an override message
elif getattr(exc_value, "_safe_message", None):
message = exc_value._safe_message
# If the message has a placeholder for the exception, fill it in
elif message and "%(exc)s" in message:
message = encoding.force_text(message) % {"exc": log_entry}
if message:
message = encoding.force_text(message)
if issubclass(exc_type, UNAUTHORIZED):
if ignore:
return NotAuthorized
if not force_silence and not handled:
log_method(error_color("Unauthorized: %s" % log_entry))
if not handled:
if message:
message = _("Unauthorized: %s") % message
# We get some pretty useless error messages back from
# some clients, so let's define our own fallback.
fallback = _("Unauthorized. Please try logging in again.")
messages.error(request, message or fallback)
# Escalation means logging the user out and raising NotAuthorized
# so the middleware will redirect them appropriately.
if escalate:
# Prevents creation of circular import. django.contrib.auth
# requires openstack_dashboard.settings to be loaded (by trying to
            # access settings.CACHES in django.core.caches) while
# openstack_dashboard.settings requires django.contrib.auth to be
# loaded while importing openstack_auth.utils
from django.contrib.auth import logout # noqa
logout(request)
raise NotAuthorized
# Otherwise continue and present our "unauthorized" error message.
return NotAuthorized
if issubclass(exc_type, NOT_FOUND):
wrap = True
if not force_silence and not handled and (not ignore or force_log):
log_method(error_color("Not Found: %s" % log_entry))
if not ignore and not handled:
messages.error(request, message or log_entry)
if redirect:
raise Http302(redirect)
if not escalate:
return NotFound # return to normal code flow
if issubclass(exc_type, RECOVERABLE):
wrap = True
if not force_silence and not handled and (not ignore or force_log):
# Default recoverable error to WARN log level
log_method = getattr(LOG, log_level or "warning")
log_method(error_color("Recoverable error: %s" % log_entry))
if not ignore and not handled:
messages.error(request, message or log_entry)
if redirect:
raise Http302(redirect)
if not escalate:
return RecoverableError # return to normal code flow
# If we've gotten here, time to wrap and/or raise our exception.
if wrap:
raise HandledException([exc_type, exc_value, exc_traceback])
six.reraise(exc_type, exc_value, exc_traceback)
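# --- Hedged usage sketch (not part of the original module) ---
# A typical caller, assuming a hypothetical api wrapper and url name; on
# failure the user sees a message and is redirected instead of a traceback:
#
#     from horizon import exceptions
#
#     def get_instance(request, instance_id):
#         try:
#             return api.nova.server_get(request, instance_id)
#         except Exception:
#             exceptions.handle(request,
#                               _("Unable to retrieve instance."),
#                               redirect=reverse("horizon:project:instances:index"))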
|
{
"content_hash": "e748f25463cc6a717061cf5bb0c43cfb",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 79,
"avg_line_length": 38.45,
"alnum_prop": 0.6568595578673602,
"repo_name": "394954369/horizon",
"id": "a282764c472484f609feaf31acffd724ded6692e",
"size": "12909",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "horizon/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from heapq import heappush, heappop
import unittest
class Heap:
def __init__(self, max_heap=False):
self.max_heap = max_heap
self._heap = []
def push(self, val):
if self.max_heap:
val *= -1
heappush(self._heap, val)
def pop(self):
ret = heappop(self._heap)
if self.max_heap:
ret *= -1
return ret
def __len__(self):
return len(self._heap)
def __getitem__(self, key):
ret = self._heap[key]
if self.max_heap:
ret *= -1
return ret
def __bool__(self):
return bool(self._heap)
def __iter__(self):
self._i = -1
return self
    def __next__(self):
        if self._i + 1 < len(self._heap):
            self._i += 1
            # go through __getitem__ so max-heap values are un-negated
            return self[self._i]
        else:
            raise StopIteration
def __str__(self):
to_str = '['
for val in self._heap:
if self.max_heap:
val *= -1
to_str += ' ' + str(val)
to_str += ' ]'
return to_str
class RunningMedianContainer:
def __init__(self):
self.max_heap = Heap(max_heap=True)
self.min_heap = Heap()
def add_element(self, val):
max_heap = self.max_heap
min_heap = self.min_heap
if not min_heap or min_heap[0] < val:
min_heap.push(val)
else:
max_heap.push(val)
self._rebalance()
def _rebalance(self):
large_heap = self.min_heap if len(self.min_heap) > len(self.max_heap) else self.max_heap
small_heap = self.max_heap if len(self.max_heap) < len(self.min_heap) else self.min_heap
if len(large_heap) - len(small_heap) > 1:
small_heap.push(large_heap.pop())
def get_median(self):
large_heap = self.min_heap if len(self.min_heap) > len(self.max_heap) else self.max_heap
small_heap = self.max_heap if len(self.max_heap) < len(self.min_heap) else self.min_heap
if len(small_heap) == len(large_heap):
return (small_heap[0]+large_heap[0])/2
else:
return large_heap[0]
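# Hedged worked example (not in the original file): after add_element(1),
# add_element(2), add_element(3) the container holds max_heap=[1] and
# min_heap=[2, 3]; the heaps differ in size, so get_median() returns the
# larger heap's root, 2. With only 1 and 2 inserted the heaps are balanced
# and get_median() averages the two roots: (1 + 2) / 2 == 1.5.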
class MyTestCases(unittest.TestCase):
def test_running_median(self):
medianContainer = RunningMedianContainer()
medianContainer.add_element(1)
self.assertEqual(1, medianContainer.get_median())
medianContainer.add_element(2)
self.assertEqual(1.5, medianContainer.get_median())
medianContainer.add_element(3)
self.assertEqual(2, medianContainer.get_median())
medianContainer.add_element(4)
self.assertEqual(2.5, medianContainer.get_median())
if __name__ == '__main__':
n = int(input().strip())
a = []
runningMedianContainer = RunningMedianContainer()
for _ in range(n):
runningMedianContainer.add_element(int(input().strip()))
print(runningMedianContainer.get_median())
|
{
"content_hash": "d7cd79fa109030dcbdf3470d9d93a35a",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 96,
"avg_line_length": 28.21153846153846,
"alnum_prop": 0.5589638718473074,
"repo_name": "MFry/pyAlgoDataStructures",
"id": "497fe12ab3ae81e490843ec99215cb8634ea7cd2",
"size": "3065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hacker_rank/Cracking the coding interview challenge/heaps_find_the_running_median.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "243997"
}
],
"symlink_target": ""
}
|
"""
warning for 'import queue' in 2.7 from the future
Problem appears to be that pyinstaller cannot have two modules of the same
name that differ only by lower/upper case. The from the future 'queue' simply
imports all of the 'Queue' module. So by my reading, since 'queue' and 'Queue'
can not coexist in a frozen app, and since 'queue' requires 'Queue', there is
no way to use 'queue' in a frozen 2.7 app.
"""
from PyInstaller.compat import is_py2
from PyInstaller.utils.hooks import logger
def pre_find_module_path(api):
if not is_py2:
return
# maybe the 'import queue' was not really needed, so just make sure it
# is not found, otherwise it will crowd out the potential future
# import of 'Queue'
api.search_dirs = []
logger.warning("import queue (lowercase), not supported")
|
{
"content_hash": "49aa4b29f60cea1df74a497e29cc3659",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 35.608695652173914,
"alnum_prop": 0.717948717948718,
"repo_name": "ivandeex/dz",
"id": "240c4e1533df521ec1eb1d27fb033ee6ef0bf107",
"size": "1230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inst/hooks/pre_find_module_path/hook-queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "216611"
},
{
"name": "HTML",
"bytes": "34476"
},
{
"name": "JavaScript",
"bytes": "10185"
},
{
"name": "PowerShell",
"bytes": "305"
},
{
"name": "Python",
"bytes": "174484"
},
{
"name": "Shell",
"bytes": "2755"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
# Author: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import os
import os.path as op
import warnings
from nose.tools import assert_true, assert_raises, assert_equal
from copy import deepcopy
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose)
from scipy import stats
from itertools import product
from mne import io, Epochs, read_events, pick_types
from mne.cov import read_cov
from mne.preprocessing import (ICA, ica_find_ecg_events, ica_find_eog_events,
read_ica, run_ica)
from mne.preprocessing.ica import get_score_funcs
from mne.io.meas_info import Info
from mne.utils import (set_log_file, _TempDir, requires_sklearn, slow_test,
run_tests_if_main)
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
evoked_nf_name = op.join(data_dir, 'test-nf-ave.fif')
test_cov_name = op.join(data_dir, 'test-cov.fif')
event_id, tmin, tmax = 1, -0.2, 0.2
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 6
score_funcs_unsuited = ['pointbiserialr', 'ansari']
try:
from sklearn.utils.validation import NonBLASDotWarning
warnings.simplefilter('error', NonBLASDotWarning)
except:
pass
@requires_sklearn
def test_ica_full_data_recovery():
"""Test recovery of full data when no source is rejected"""
# Most basic recovery
raw = io.Raw(raw_fname).crop(0.5, stop, False)
raw.preload_data()
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
evoked = epochs.average()
n_channels = 5
data = raw._data[:n_channels].copy()
data_epochs = epochs.get_data()
data_evoked = evoked.data
for method in ['fastica']:
stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
for n_components, n_pca_components, ok in stuff:
ica = ICA(n_components=n_components,
max_pca_components=n_pca_components,
n_pca_components=n_pca_components,
method=method, max_iter=1)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=list(range(n_channels)))
raw2 = ica.apply(raw, exclude=[], copy=True)
if ok:
assert_allclose(data[:n_channels], raw2._data[:n_channels],
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
assert_true(np.max(diff) > 1e-14)
ica = ICA(n_components=n_components,
max_pca_components=n_pca_components,
n_pca_components=n_pca_components)
with warnings.catch_warnings(record=True):
ica.fit(epochs, picks=list(range(n_channels)))
epochs2 = ica.apply(epochs, exclude=[], copy=True)
data2 = epochs2.get_data()[:, :n_channels]
if ok:
assert_allclose(data_epochs[:, :n_channels], data2,
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(data_epochs[:, :n_channels] - data2)
assert_true(np.max(diff) > 1e-14)
evoked2 = ica.apply(evoked, exclude=[], copy=True)
data2 = evoked2.data[:n_channels]
if ok:
assert_allclose(data_evoked[:n_channels], data2,
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(evoked.data[:n_channels] - data2)
assert_true(np.max(diff) > 1e-14)
assert_raises(ValueError, ICA, method='pizza-decomposision')
@requires_sklearn
def test_ica_rank_reduction():
"""Test recovery of full data when no source is rejected"""
# Most basic recovery
raw = io.Raw(raw_fname).crop(0.5, stop, False)
raw.preload_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
n_components = 5
max_pca_components = len(picks)
for n_pca_components in [6, 10]:
with warnings.catch_warnings(record=True): # non-convergence
warnings.simplefilter('always')
ica = ICA(n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=n_pca_components,
method='fastica', max_iter=1).fit(raw, picks=picks)
rank_before = raw.estimate_rank(picks=picks)
assert_equal(rank_before, len(picks))
raw_clean = ica.apply(raw, copy=True)
rank_after = raw_clean.estimate_rank(picks=picks)
        # interaction between ICA rejection and PCA components is
        # difficult to predict. rank_after often seems to be 1 higher
        # than n_pca_components
assert_true(n_components < n_pca_components <= rank_after <=
rank_before)
@requires_sklearn
def test_ica_reset():
"""Test ICA resetting"""
raw = io.Raw(raw_fname).crop(0.5, stop, False)
raw.preload_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
run_time_attrs = (
'_pre_whitener',
'unmixing_matrix_',
'mixing_matrix_',
'n_components_',
'n_samples_',
'pca_components_',
'pca_explained_variance_',
'pca_mean_'
)
with warnings.catch_warnings(record=True):
ica = ICA(
n_components=3, max_pca_components=3, n_pca_components=3,
method='fastica', max_iter=1).fit(raw, picks=picks)
assert_true(all(hasattr(ica, attr) for attr in run_time_attrs))
ica._reset()
assert_true(not any(hasattr(ica, attr) for attr in run_time_attrs))
@requires_sklearn
def test_ica_core():
"""Test ICA on raw and epochs"""
raw = io.Raw(raw_fname).crop(1.5, stop, False)
raw.preload_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
    # XXX. The None cases helped reveal bugs but are time consuming.
test_cov = read_cov(test_cov_name)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
noise_cov = [None, test_cov]
# removed None cases to speed up...
n_components = [2, 1.0] # for future dbg add cases
max_pca_components = [3]
picks_ = [picks]
methods = ['fastica']
iter_ica_params = product(noise_cov, n_components, max_pca_components,
picks_, methods)
    # test init catchers
assert_raises(ValueError, ICA, n_components=3, max_pca_components=2)
assert_raises(ValueError, ICA, n_components=2.3, max_pca_components=2)
# test essential core functionality
for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
# Test ICA raw
ica = ICA(noise_cov=n_cov, n_components=n_comp,
max_pca_components=max_n, n_pca_components=max_n,
random_state=0, method=method, max_iter=1)
assert_raises(ValueError, ica.__contains__, 'mag')
print(ica) # to test repr
# test fit checker
assert_raises(RuntimeError, ica.get_sources, raw)
assert_raises(RuntimeError, ica.get_sources, epochs)
# test decomposition
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=pcks, start=start, stop=stop)
repr(ica) # to test repr
assert_true('mag' in ica) # should now work without error
# test re-fit
unmixing1 = ica.unmixing_matrix_
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=pcks, start=start, stop=stop)
assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)
sources = ica.get_sources(raw)[:, :][0]
assert_true(sources.shape[0] == ica.n_components_)
# test preload filter
raw3 = raw.copy()
raw3.preload = False
assert_raises(ValueError, ica.apply, raw3,
include=[1, 2])
#######################################################################
# test epochs decomposition
ica = ICA(noise_cov=n_cov, n_components=n_comp,
max_pca_components=max_n, n_pca_components=max_n,
random_state=0)
with warnings.catch_warnings(record=True):
ica.fit(epochs, picks=picks)
data = epochs.get_data()[:, 0, :]
n_samples = np.prod(data.shape)
assert_equal(ica.n_samples_, n_samples)
print(ica) # to test repr
sources = ica.get_sources(epochs).get_data()
assert_true(sources.shape[1] == ica.n_components_)
assert_raises(ValueError, ica.score_sources, epochs,
target=np.arange(1))
# test preload filter
epochs3 = epochs.copy()
epochs3.preload = False
assert_raises(ValueError, ica.apply, epochs3,
include=[1, 2])
# test for bug with whitener updating
_pre_whitener = ica._pre_whitener.copy()
epochs._data[:, 0, 10:15] *= 1e12
ica.apply(epochs, copy=True)
assert_array_equal(_pre_whitener, ica._pre_whitener)
# test expl. var threshold leading to empty sel
ica.n_components = 0.1
assert_raises(RuntimeError, ica.fit, epochs)
offender = 1, 2, 3,
assert_raises(ValueError, ica.get_sources, offender)
assert_raises(ValueError, ica.fit, offender)
assert_raises(ValueError, ica.apply, offender)
@slow_test
@requires_sklearn
def test_ica_additional():
"""Test additional ICA functionality"""
tempdir = _TempDir()
stop2 = 500
raw = io.Raw(raw_fname).crop(1.5, stop, False)
raw.preload_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
test_cov = read_cov(test_cov_name)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
# test if n_components=None works
with warnings.catch_warnings(record=True):
ica = ICA(n_components=None,
max_pca_components=None,
n_pca_components=None, random_state=0)
ica.fit(epochs, picks=picks, decim=3)
# for testing eog functionality
picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=True, exclude='bads')
epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
baseline=(None, 0), preload=True)
test_cov2 = deepcopy(test_cov)
ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
n_pca_components=4)
assert_true(ica.info is None)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks[:5])
assert_true(isinstance(ica.info, Info))
assert_true(ica.n_components_ < 5)
ica = ICA(n_components=3, max_pca_components=4,
n_pca_components=4)
assert_raises(RuntimeError, ica.save, '')
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=None, start=start, stop=stop2)
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
ica.save(ica_badname)
read_ica(ica_badname)
assert_true(len(w) == 2)
# test decim
ica = ICA(n_components=3, max_pca_components=4,
n_pca_components=4)
raw_ = raw.copy()
for _ in range(3):
raw_.append(raw_)
n_samples = raw_._data.shape[1]
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=None, decim=3)
assert_true(raw_._data.shape[1], n_samples)
# test expl var
ica = ICA(n_components=1.0, max_pca_components=4,
n_pca_components=4)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=None, decim=3)
assert_true(ica.n_components_ == 4)
# epochs extraction from raw fit
assert_raises(RuntimeError, ica.get_sources, epochs)
# test reading and writing
test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
for cov in (None, test_cov):
ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
n_pca_components=4)
with warnings.catch_warnings(record=True): # ICA does not converge
ica.fit(raw, picks=picks, start=start, stop=stop2)
sources = ica.get_sources(epochs).get_data()
assert_true(ica.mixing_matrix_.shape == (2, 2))
assert_true(ica.unmixing_matrix_.shape == (2, 2))
assert_true(ica.pca_components_.shape == (4, len(picks)))
assert_true(sources.shape[1] == ica.n_components_)
for exclude in [[], [0]]:
ica.exclude = [0]
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
assert_true(ica.exclude == ica_read.exclude)
ica.exclude = []
ica.apply(raw, exclude=[1])
assert_true(ica.exclude == [])
ica.exclude = [0, 1]
ica.apply(raw, exclude=[1])
assert_true(ica.exclude == [0, 1])
ica_raw = ica.get_sources(raw)
assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
ica_raw.info['bads']])
# test filtering
d1 = ica_raw._data[0].copy()
with warnings.catch_warnings(record=True): # dB warning
ica_raw.filter(4, 20)
assert_true((d1 != ica_raw._data[0]).any())
d1 = ica_raw._data[0].copy()
with warnings.catch_warnings(record=True): # dB warning
ica_raw.notch_filter([10])
assert_true((d1 != ica_raw._data[0]).any())
ica.n_pca_components = 2
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
assert_true(ica.n_pca_components == ica_read.n_pca_components)
# check type consistency
attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
'pca_explained_variance_ _pre_whitener')
def f(x, y):
return getattr(x, y).dtype
for attr in attrs.split():
assert_equal(f(ica_read, attr), f(ica, attr))
ica.n_pca_components = 4
ica_read.n_pca_components = 4
ica.exclude = []
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
'pca_mean_', 'pca_explained_variance_',
'_pre_whitener']:
assert_array_almost_equal(getattr(ica, attr),
getattr(ica_read, attr))
assert_true(ica.ch_names == ica_read.ch_names)
assert_true(isinstance(ica_read.info, Info))
sources = ica.get_sources(raw)[:, :][0]
sources2 = ica_read.get_sources(raw)[:, :][0]
assert_array_almost_equal(sources, sources2)
_raw1 = ica.apply(raw, exclude=[1])
_raw2 = ica_read.apply(raw, exclude=[1])
assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])
os.remove(test_ica_fname)
    # check score funcs
for name, func in get_score_funcs().items():
if name in score_funcs_unsuited:
continue
scores = ica.score_sources(raw, target='EOG 061', score_func=func,
start=0, stop=10)
assert_true(ica.n_components_ == len(scores))
# check univariate stats
scores = ica.score_sources(raw, score_func=stats.skew)
# check exception handling
assert_raises(ValueError, ica.score_sources, raw,
target=np.arange(1))
params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
params += [(None, 'MEG 1531')] # ECG / EOG channel params
for idx, ch_name in product(*params):
ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
eog_ch=ch_name, skew_criterion=idx,
var_criterion=idx, kurt_criterion=idx)
with warnings.catch_warnings(record=True):
idx, scores = ica.find_bads_ecg(raw, method='ctps')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(raw, method='correlation')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(epochs, method='ctps')
assert_equal(len(scores), ica.n_components_)
assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
method='ctps')
assert_raises(ValueError, ica.find_bads_ecg, raw,
method='crazy-coupling')
idx, scores = ica.find_bads_eog(raw)
assert_equal(len(scores), ica.n_components_)
raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
idx, scores = ica.find_bads_eog(raw)
assert_true(isinstance(scores, list))
assert_equal(len(scores[0]), ica.n_components_)
# check score funcs
for name, func in get_score_funcs().items():
if name in score_funcs_unsuited:
continue
scores = ica.score_sources(epochs_eog, target='EOG 061',
score_func=func)
assert_true(ica.n_components_ == len(scores))
# check univariate stats
scores = ica.score_sources(epochs, score_func=stats.skew)
# check exception handling
assert_raises(ValueError, ica.score_sources, epochs,
target=np.arange(1))
# ecg functionality
ecg_scores = ica.score_sources(raw, target='MEG 1531',
score_func='pearsonr')
with warnings.catch_warnings(record=True): # filter attenuation warning
ecg_events = ica_find_ecg_events(raw,
sources[np.abs(ecg_scores).argmax()])
assert_true(ecg_events.ndim == 2)
# eog functionality
eog_scores = ica.score_sources(raw, target='EOG 061',
score_func='pearsonr')
with warnings.catch_warnings(record=True): # filter attenuation warning
eog_events = ica_find_eog_events(raw,
sources[np.abs(eog_scores).argmax()])
assert_true(eog_events.ndim == 2)
# Test ica fiff export
ica_raw = ica.get_sources(raw, start=0, stop=100)
assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
assert_true(len(ica_raw._filenames) == 0) # API consistency
ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
assert_true(ica.n_components_ == len(ica_chans))
test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
ica.n_components = np.int32(ica.n_components)
ica_raw.save(test_ica_fname, overwrite=True)
ica_raw2 = io.Raw(test_ica_fname, preload=True)
assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
ica_raw2.close()
os.remove(test_ica_fname)
# Test ica epochs export
ica_epochs = ica.get_sources(epochs)
assert_true(ica_epochs.events.shape == epochs.events.shape)
ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
assert_true(ica.n_components_ == len(ica_chans))
assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
assert_true(ica_epochs._raw is None)
assert_true(ica_epochs.preload is True)
# test float n pca components
ica.pca_explained_variance_ = np.array([0.2] * 5)
ica.n_components_ = 0
for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
ncomps_ = ica._check_n_pca_components(ncomps)
assert_true(ncomps_ == expected)
@requires_sklearn
def test_run_ica():
"""Test run_ica function"""
raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx
params += [(None, 'MEG 1531')] # ECG / EOG channel params
for idx, ch_name in product(*params):
warnings.simplefilter('always')
with warnings.catch_warnings(record=True):
run_ica(raw, n_components=2, start=0, stop=6, start_find=0,
stop_find=5, ecg_ch=ch_name, eog_ch=ch_name,
skew_criterion=idx, var_criterion=idx, kurt_criterion=idx)
@requires_sklearn
def test_ica_reject_buffer():
"""Test ICA data raw buffer rejection"""
tempdir = _TempDir()
raw = io.Raw(raw_fname).crop(1.5, stop, False)
raw.preload_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
raw._data[2, 1000:1005] = 5e-12
drop_log = op.join(op.dirname(tempdir), 'ica_drop.log')
set_log_file(drop_log, overwrite=True)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks[:5], reject=dict(mag=2.5e-12), decim=2,
tstep=0.01, verbose=True)
assert_true(raw._data[:5, ::2].shape[1] - 4 == ica.n_samples_)
with open(drop_log) as fid:
log = [l for l in fid if 'detected' in l]
assert_equal(len(log), 1)
@requires_sklearn
def test_ica_twice():
"""Test running ICA twice"""
raw = io.Raw(raw_fname).crop(1.5, stop, False)
raw.preload_data()
picks = pick_types(raw.info, meg='grad', exclude='bads')
n_components = 0.9
max_pca_components = None
n_pca_components = 1.1
with warnings.catch_warnings(record=True):
ica1 = ICA(n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=n_pca_components, random_state=0)
ica1.fit(raw, picks=picks, decim=3)
raw_new = ica1.apply(raw, n_pca_components=n_pca_components)
ica2 = ICA(n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=1.0, random_state=0)
ica2.fit(raw_new, picks=picks, decim=3)
assert_equal(ica1.n_components_, ica2.n_components_)
run_tests_if_main()
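# --- Hedged usage sketch (not part of the original test module) ---
# The workflow these tests exercise, in application code, is roughly:
#
#     raw = io.Raw(raw_fname, preload=True)
#     ica = ICA(n_components=0.95, method='fastica')
#     ica.fit(raw, picks=pick_types(raw.info, meg=True, exclude='bads'))
#     bad_idx, scores = ica.find_bads_eog(raw)
#     ica.exclude = bad_idx
#     raw_clean = ica.apply(raw, copy=True)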
|
{
"content_hash": "a4617c6bf2c559594aec68f9efed29e5",
"timestamp": "",
"source": "github",
"line_count": 579,
"max_line_length": 79,
"avg_line_length": 39.9153713298791,
"alnum_prop": 0.5915797672104193,
"repo_name": "lorenzo-desantis/mne-python",
"id": "36664fd92c5ddc67c126066ed01d4eb4b901486a",
"size": "23111",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mne/preprocessing/tests/test_ica.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3117"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4322690"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
import unittest
from pychbase._pychbase import _connection, _table
from pychbase import Connection, Table, Batch
from StringIO import StringIO
from config import ZOOKEEPERS, TABLE_NAME
from datetime import datetime
class TestCConnection(unittest.TestCase):
def test_bad_cldbs(self):
# Cloudera struggles with this test
connection = _connection('abc')
self.assertFalse(connection.is_open())
self.assertRaises(ValueError, connection.open)
self.assertFalse(connection.is_open())
connection.close()
def test_good_cldbs(self):
connection = _connection(ZOOKEEPERS)
self.assertFalse(connection.is_open())
connection.open()
self.assertTrue(connection.is_open())
connection.close()
self.assertFalse(connection.is_open())
connection.close()
def test_enable_table(self):
connection = _connection(ZOOKEEPERS)
self.assertRaises(ValueError, connection.is_table_enabled, TABLE_NAME)
self.assertRaises(ValueError, connection.enable_table, TABLE_NAME)
self.assertRaises(ValueError, connection.disable_table, TABLE_NAME)
connection.create_table(TABLE_NAME, {'f': {}})
self.assertEquals(connection.is_table_enabled(TABLE_NAME), True)
connection.disable_table(TABLE_NAME)
self.assertEquals(connection.is_table_enabled(TABLE_NAME), False)
        # This won't throw an error in MapR, maybe it would in Cloudera
connection.disable_table(TABLE_NAME)
connection.enable_table(TABLE_NAME)
self.assertEquals(connection.is_table_enabled(TABLE_NAME), True)
connection.delete_table(TABLE_NAME)
connection.close()
class TestCConnectionManageTable(unittest.TestCase):
def setUp(self):
connection = _connection(ZOOKEEPERS)
try:
connection.delete_table(TABLE_NAME)
except ValueError:
pass
connection.close()
def tearDown(self):
connection = _connection(ZOOKEEPERS)
try:
connection.delete_table(TABLE_NAME)
except ValueError:
pass
connection.close()
def test_good(self):
connection = _connection(ZOOKEEPERS)
connection.create_table(TABLE_NAME, {'f': {}})
connection.delete_table(TABLE_NAME)
def test_already_created(self):
connection = _connection(ZOOKEEPERS)
connection.create_table(TABLE_NAME, {'f': {}})
self.assertRaises(ValueError, connection.create_table, TABLE_NAME, {'f': {}})
connection.delete_table(TABLE_NAME)
def test_already_deleted(self):
connection = _connection(ZOOKEEPERS)
connection.create_table(TABLE_NAME, {'f': {}})
connection.delete_table(TABLE_NAME)
self.assertRaises(ValueError, connection.delete_table, TABLE_NAME)
#
def test_large_qualifier(self):
connection = _connection(ZOOKEEPERS)
connection.create_table(TABLE_NAME, {''.join(['a' for _ in range(1000)]): {}})
connection.delete_table(TABLE_NAME)
def test_too_large_qualifier(self):
connection = _connection(ZOOKEEPERS)
self.assertRaises(ValueError, connection.create_table, TABLE_NAME, {''.join(['a' for _ in range(10000)]): {}})
        # Verify that the table was not fake-created (MapR bug)
self.assertRaises(ValueError, connection.delete_table, TABLE_NAME)
def test_really_big_table_name(self):
        ## I think the MapR C API segfaults with a table name longer than 10000 characters
connection = _connection(ZOOKEEPERS)
self.assertRaises(ValueError, connection.create_table, TABLE_NAME + ''.join(['a' for _ in range(10000)]), {'f': {}})
self.assertRaises(ValueError, connection.delete_table, TABLE_NAME + ''.join(['a' for _ in range(10000)]))
#
def test_pretty_big_table_name(self):
        ## I think the MapR C API does not segfault with a table name of ~1000 characters
connection = _connection(ZOOKEEPERS)
self.assertRaises(ValueError, connection.create_table, TABLE_NAME + ''.join(['a' for _ in range(1000)]), {'f': {}})
self.assertRaises(ValueError, connection.delete_table, TABLE_NAME + ''.join(['a' for _ in range(1000)]))
def test_delete_really_big_table_name(self):
connection = _connection(ZOOKEEPERS)
self.assertRaises(ValueError, connection.delete_table, TABLE_NAME + ''.join(['a' for _ in range(10000)]))
def test_delete_pretty_big_table_name(self):
connection = _connection(ZOOKEEPERS)
self.assertRaises(ValueError, connection.delete_table, TABLE_NAME + ''.join(['a' for _ in range(1000)]))
def test_max_versions_happy(self):
connection = _connection(ZOOKEEPERS)
connection.create_table(TABLE_NAME, {'f': {
'max_versions': 1,
'min_versions': 1,
'time_to_live': 0,
'in_memory': 0,
}})
connection.delete_table(TABLE_NAME)
def test_attrs_bad_key(self):
connection = _connection(ZOOKEEPERS)
cfs = {'f': {
'max_versions': 1,
'min_versions': 1,
'time_to_live': 0,
'not a valid key': 0,
}}
self.assertRaises(ValueError, connection.create_table, TABLE_NAME, cfs)
def test_attrs_bad_value(self):
connection = _connection(ZOOKEEPERS)
cfs = {'f': {
'max_versions': 1,
'min_versions': 1,
'time_to_live': 0,
'in_memory': 'not an int',
}}
self.assertRaises(TypeError, connection.create_table, TABLE_NAME, cfs)
def test_bad_key(self):
connection = _connection(ZOOKEEPERS)
cfs = {1: {
'max_versions': 1,
'min_versions': 1,
'time_to_live': 0,
'in_memory': 0,
}}
self.assertRaises(TypeError, connection.create_table, TABLE_NAME, cfs)
def test_bad_value(self):
connection = _connection(ZOOKEEPERS)
cfs = {'f': "not a dict"}
self.assertRaises(TypeError, connection.create_table, TABLE_NAME, cfs)
def test_accept_unicode(self):
connection = _connection(ZOOKEEPERS)
connection.create_table(TABLE_NAME, {u'f': {u'max_versions': 1}})
class TestCTableInit(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
try:
self.connection.delete_table(TABLE_NAME)
except ValueError:
pass
self.connection.create_table(TABLE_NAME, {'f': {}})
def tearDown(self):
try:
self.connection.delete_table(TABLE_NAME)
except ValueError:
pass
self.connection.close()
def test_bad_table(self):
self.connection.open()
self.connection.delete_table(TABLE_NAME)
self.assertRaises(ValueError, _table, self.connection, TABLE_NAME)
self.connection.close() # This segfaulted if I set self->connection before raising the exception for some reason
def test_unopened_connection(self):
self.connection = _connection(ZOOKEEPERS)
table = _table(self.connection, TABLE_NAME)
self.connection.close()
class TestCTableRow(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.table.put("foo", {"f:bar": "baz"})
def tearDown(self):
try:
self.connection.delete_table(TABLE_NAME)
except ValueError:
pass
self.connection.close()
def test_happy(self):
row = self.table.row('foo')
self.assertEquals(row, {'f:bar': "baz"})
def test_read_only_table_name(self):
self.assertRaises(TypeError, setattr, self.table, 'table_name', 'foo')
def test_timestamp_type(self):
row = self.table.row('foo', None, None)
row = self.table.row('foo', None, 1)
self.assertRaises(TypeError, self.table.row, 'foo', None, 'invalid')
def test_include_timestamp_type(self):
row = self.table.row('foo', None, None, None)
row = self.table.row('foo', None, None, True)
row = self.table.row('foo', None, None, False)
self.assertRaises(TypeError, self.table.row, 'foo', None, None, 'invalid')
class TestCTableRowColumns(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
def tearDown(self):
try:
self.connection.delete_table(TABLE_NAME)
except ValueError:
pass
self.connection.close()
def test_happy(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.table.put("foo", {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
row = self.table.row('foo')
self.assertEquals(row, {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
row = self.table.row('foo', ('f',))
self.assertEquals(row, {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
row = self.table.row('foo', ('f:',))
self.assertEquals(row, {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
row = self.table.row('foo', ('f:a',))
self.assertEquals(row, {"f:a": "foo"})
row = self.table.row('foo', ('f:a', 'f:ab'))
self.assertEquals(row, {"f:a": "foo", 'f:ab': 'bar'})
row = self.table.row('foo', ('f:a', 'f:ab', 'f:abc'))
self.assertEquals(row, {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
row = self.table.row('foo', ('f:nope',))
self.assertEquals(row, {})
# Hm, should I return an empty string if 'f:nope' doesn't exist?
row = self.table.row('foo', ('f:a', 'f:nope'))
self.assertEquals(row, {"f:a": "foo"})
def test_type(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.assertRaises(TypeError, self.table.row, 'foo', 'bar')
self.assertRaises(TypeError, self.table.row, 'foo', {'set', 'should', 'fail'})
self.assertRaises(TypeError, self.table.row, 'foo', {'dict': 'should', 'also': 'fail'})
def test_bad_column_family(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.assertRaises(ValueError, self.table.row, 'foo', ('b',))
class TestCTablePut(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
def tearDown(self):
self.connection.delete_table(TABLE_NAME)
self.connection.close()
def test_happy(self):
self.table.put("foo", {"f:bar": "baz"})
row = self.table.row('foo')
for _ in range(100):
# Loop to check for buffer overflow error
self.assertEquals(row, {'f:bar': "baz"})
def test_invalid_key(self):
self.assertRaises(TypeError, self.table.put, "foo", {10: "baz"})
def test_invalid_value(self):
self.assertRaises(TypeError, self.table.put, "foo", {"bar": 10})
def test_empty_put(self):
self.assertRaises(ValueError, self.table.put, 'foo', {})
def test_bad_column_family_no_colon(self):
        # All keys in the put dict must contain a colon separating the family from the qualifier
self.assertRaises(ValueError, self.table.put, 'foo', {'bar': 'baz'})
def test_bad_colon_no_family(self):
self.assertRaises(ValueError, self.table.put, 'foo', {":bar": "baz", 'invalid:foo': 'bar'})
row = self.table.row('foo')
self.assertEquals(row, {})
def test_bad_colon_no_qualifier(self):
        # LOL Apparently this is totally fine
self.table.put('foo', {"f:": "baz"})
row = self.table.row('foo')
self.assertEquals(row, {"f:": "baz"})
def test_invalid_column_family(self):
self.assertRaises(ValueError, self.table.put, 'foo', {"f:bar": "baz", 'invalid:foo': 'bar'})
row = self.table.row('foo')
self.assertEquals(row, {})
def test_set(self):
self.assertRaises(TypeError, self.table.put, 'foo', {"f:bar", "baz"})
row = self.table.row('foo')
self.assertEquals(row, {})
def test_empty_value(self):
self.table.put("foo", {"f:bar": ""})
row = self.table.row('foo')
self.assertEquals(row, {'f:bar': ""})
def test_unicode(self):
self.table.put(u"foo", {u"f:bar": u"baz"})
row = self.table.row('foo')
self.assertEquals(row, {'f:bar': "baz"})
def test_big_value(self):
        ## Greater than 1024
self.table.put('foo', {'f:bar': ''.join(['a' for _ in range(10000)])})
row = self.table.row('foo')
self.assertEquals(row, {'f:bar': ''.join(['a' for _ in range(10000)])})
def test_big_qualifier(self):
## Greater than 1024
self.table.put('foo', {'f:' + ''.join(['a' for _ in range(10000)]): 'baz'})
row = self.table.row('foo')
self.assertEquals(row, {'f:' + ''.join(['a' for _ in range(10000)]): 'baz'})
def test_big_row_key(self):
## Greater than 1024
self.table.put(''.join(['a' for _ in range(10000)]), {'f:bar': 'baz'})
row = self.table.row(''.join(['a' for _ in range(10000)]))
self.assertEquals(row, {'f:bar': 'baz'})
def test_big_column_family(self):
self.connection.delete_table(TABLE_NAME)
self.connection.create_table(TABLE_NAME, {''.join(['a' for _ in range(1000)]): {}})
self.table.put('foo', {''.join(['a' for _ in range(1000)]) + ':bar': 'baz'})
row = self.table.row('foo')
self.assertEquals(row, {''.join(['a' for _ in range(1000)]) + ':bar': 'baz'})
def test_type_timestamp(self):
self.table.put("foo", {'f:foo': 'bar'}, None)
self.assertEquals(self.table.row("foo"), {'f:foo': 'bar'})
def test_type_timestamp_1(self):
self.table.put("foo", {'f:foo': 'bar'}, 10)
self.assertEquals(self.table.row("foo"), {'f:foo': 'bar'})
def test_type_timestamp_2(self):
self.assertRaises(TypeError, self.table.put, 'foo', {'f:foo': 'bar'}, 'invalid')
def test_type_is_wall(self):
self.table.put("foo", {"f:foo": "bar"}, None, None)
self.table.put("foo", {"f:foo": "bar"}, None, True)
self.table.put("foo", {"f:foo": "bar"}, None, False)
self.assertRaises(TypeError, self.table.put, "foo", {"f:foo": "bar"}, None, 'invalid')
class TestCTableTimestamp(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
try:
self.connection.create_table(TABLE_NAME, {'f': {}})
except ValueError:
pass
self.table = _table(self.connection, TABLE_NAME)
def tearDown(self):
self.connection.delete_table(TABLE_NAME)
self.connection.close()
def test_happy(self):
self.table.put('foo', {'f:foo': 'bar'}, 10)
row = self.table.row('foo', None, None, True)
self.assertEquals(row, {'f:foo': ('bar', 10)})
def test_happy_version(self):
self.table.put('foo', {'f:foo': 'bar'}, 10)
row = self.table.row('foo', None, 10, True)
self.assertEquals(row, {'f:foo': ('bar', 10)})
row = self.table.row('foo', None, 11, True)
self.assertEquals(row, {'f:foo': ('bar', 10)})
row = self.table.row('foo', None, 9, True)
self.assertEquals(row, {})
def test_happy_update(self):
self.table.put('foo', {'f:foo': 'bar'}, 5)
self.table.put('foo', {'f:foo': 'bar'}, 10)
row = self.table.row('foo', None, None, True)
self.assertEquals(row, {'f:foo': ('bar', 10)})
row = self.table.row('foo', None, 5, True)
self.assertEquals(row, {'f:foo': ('bar', 5)})
def test_row_version_too_low(self):
self.table.put('foo', {'f:foo': 'bar'}, 10)
row = self.table.row('foo', None, 5)
self.assertEquals(row, {})
def test_happy_delete(self):
self.table.put('foo', {'f:foo': 'bar'}, 10)
self.table.delete('foo', None, 10)
row = self.table.row('foo')
self.assertEquals(row, {})
def test_happy_delete_update_1(self):
self.table.put('foo', {'f:foo': 'foo'}, 5)
self.table.put('foo', {'f:foo': 'bar'}, 10)
# This deletes everything up to the timestamp
self.table.delete('foo', None, 10)
row = self.table.row('foo', None, None, True)
self.assertEquals(row, {})
row = self.table.row('foo', None, 5)
self.assertEquals(row, {})
def test_happy_delete_update_2(self):
self.table.put('foo', {'f:foo': 'foo'}, 5)
self.table.put('foo', {'f:foo': 'bar'}, 10)
self.table.delete('foo', None, 5)
row = self.table.row('foo', None, None, True)
self.assertEquals(row, {'f:foo': ('bar', 10)})
row = self.table.row('foo', None, 5, True)
self.assertEquals(row, {})
def test_happy_delete_update_3(self):
self.table.put('foo', {'f:foo': 'foo'}, 5)
self.table.put('foo', {'f:foo': 'bar'}, 10)
self.table.put('foo', {'f:foo': 'baz'}, 15)
self.table.delete('foo', None, 10)
row = self.table.row('foo', None, 5, True)
self.assertEquals(row, {})
row = self.table.row('foo', None, 10, True)
self.assertEquals(row, {})
row = self.table.row('foo', None, None, True)
self.assertEquals(row, {'f:foo': ('baz', 15)})
def test_scan_happy(self):
for i in range(0, 10):
self.table.put('foo%i' % i, {'f:foo': 'foo'}, 5)
self.table.put('foo%i' % i, {'f:foo': 'bar'}, 10)
for row, data in self.table.scan('', '', None, None, 5):
self.assertEquals(data, {'f:foo': 'foo'})
for row, data in self.table.scan('', '', None, None, 10):
self.assertEquals(data, {'f:foo': 'bar'})
    def test_scan_too_low(self):
for i in range(0, 10):
self.table.put('foo%i' % i, {'f:foo': 'foo'}, 5)
i = 0
for row, data in self.table.scan('', '', None, None, 4):
i += 1
self.assertEquals(i, 0)
def test_batch_put_happy(self):
self.table.batch([('put', 'foo%i' % i, {'f:foo': 'foo%i' % i}, 5, None) for i in range(10)])
i = 0
for row, data in self.table.scan('','', None, None, None, True):
self.assertEquals(row, 'foo%i' % i)
self.assertEquals(data, {'f:foo': ('foo%i' % i, 5)})
i += 1
self.table.batch([('put', 'foo%i' % i, {'f:foo': 'foo%i' % i}, 10, None) for i in range(10)])
i = 0
for row, data in self.table.scan('', '', None, None, None, True):
self.assertEquals(row, 'foo%i' % i)
self.assertEquals(data, {'f:foo': ('foo%i' % i, 10)})
i += 1
def test_batch_delete_happy(self):
self.table.batch([('put', 'foo%i' % i, {'f:foo': 'foo%i' % i}, 5, None) for i in range(10)])
self.table.batch([('delete', 'foo%i' % i, None, 5, None) for i in range(10)])
i = 0
for row, data in self.table.scan():
i += 1
self.assertEquals(i, 0)
def test_batch_delete_happy_1(self):
self.table.batch([('put', 'foo%i' % i, {'f:foo': 'foo%i' % i}, 5, None) for i in range(10)])
self.table.batch([('put', 'foo%i' % i, {'f:foo': 'foo%i' % i}, 10, None) for i in range(10)])
self.table.batch([('delete', 'foo%i' % i, None, 5, None) for i in range(10)])
i = 0
for row, data in self.table.scan('', '', None, None, 5, True):
i += 1
self.assertEquals(i, 0)
for row, data in self.table.scan('', '', None, None, 10, True):
i += 1
self.assertEquals(i, 10)
def test_batch_delete_happy_2(self):
self.table.batch([('put', 'foo%i' % i, {'f:foo': 'foo%i' % i}, 5, None) for i in range(10)])
self.table.batch([('put', 'foo%i' % i, {'f:foo': 'foo%i' % i}, 10, None) for i in range(10)])
self.table.batch([('delete', 'foo%i' % i, None, 10, None) for i in range(10)])
i = 0
for row, data in self.table.scan('', '', None, None, 10, True):
i += 1
self.assertEquals(i, 0)
for row, data in self.table.scan('', '', None, None, 5, True):
i += 1
self.assertEquals(i, 0)
def test_batch_delete_happy_3(self):
self.table.batch([('put', 'foo%i' % i, {'f:foo': 'foo%i' % i}, 5, None) for i in range(10)])
self.table.batch([('put', 'foo%i' % i, {'f:foo': 'foo%i' % i}, 10, None) for i in range(10)])
self.table.batch([('put', 'foo%i' % i, {'f:foo': 'foo%i' % i}, 15, None) for i in range(10)])
self.table.batch([('delete', 'foo%i' % i, None, 10, None) for i in range(10)])
i = 0
for row, data in self.table.scan('', '', None, None, 5, True):
i += 1
self.assertEquals(i, 0)
i = 0
for row, data in self.table.scan('', '', None, None, 10, True):
i += 1
self.assertEquals(i, 0)
for row, data in self.table.scan('', '', None, None, 15, True):
i += 1
self.assertEquals(i, 10)
def test_negative_timestamp(self):
self.assertRaises(ValueError, self.table.put, 'foo', {'f:foo': 'bar'}, -1)
class TestCTablePutNull(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
def tearDown(self):
self.connection.delete_table(TABLE_NAME)
self.connection.close()
def test_input_with_null_as_final(self):
self.table.put("foo", {"f:bar\\0": "baz\\0"})
row = self.table.row('foo')
self.assertEquals(row, {"f:bar\\0": "baz\\0"})
self.table.put("bar", {"f:bar\0": "baz\0"})
row = self.table.row('bar')
self.assertEquals(row, {"f:bar": "baz"})
def test_input_with_null_in_middle(self):
self.table.put("foo", {"f:bar\\0baz": "baz\\0foo"})
row = self.table.row('foo')
self.assertEquals(row, {"f:bar\\0baz": "baz\\0foo"})
self.table.put("bar", {"f:bar\0baz": "baz\0foo"})
row = self.table.row('bar')
self.assertEquals(row, {"f:bar": "baz"})
def test_input_with_null_as_final_rowkey(self):
self.table.put("bar\\0", {"f:bar\\0baz": "baz\\0foo"})
row = self.table.row('bar\\0')
self.assertEquals(row, {"f:bar\\0baz": "baz\\0foo"})
self.assertRaises(TypeError, self.table.put, "bar\0", {"f:foo": "bar"})
def test_input_with_null_in_middle_rowkey(self):
self.table.put("bar\\0foo", {"f:bar\\0baz": "baz\\0foo"})
row = self.table.row('bar\\0foo')
self.assertEquals(row, {"f:bar\\0baz": "baz\\0foo"})
self.assertRaises(TypeError, self.table.put, "bar\0foo", {"f:foo": "bar"})
def test_input_with_xnull_as_final(self):
self.table.put("foo", {"f:bar\\x00": "baz\\x00"})
row = self.table.row('foo')
self.assertEquals(row, {"f:bar\\x00": "baz\\x00"})
self.table.put("bar", {"f:bar\x00": "baz\x00"})
row = self.table.row('bar')
self.assertEquals(row, {"f:bar": "baz"})
def test_input_with_xnull_in_middle(self):
self.table.put("foo", {"f:bar\\x00baz": "baz\\x00foo"})
row = self.table.row('foo')
self.assertEquals(row, {"f:bar\\x00baz": "baz\\x00foo"})
self.table.put("bar", {"f:bar\x00baz": "baz\x00foo"})
row = self.table.row('bar')
self.assertEquals(row, {"f:bar": "baz"})
def test_input_with_xnull_rowkey(self):
self.table.put("foo\\x00", {"f:bar\\x00": "baz\\x00"})
row = self.table.row('foo\\x00')
self.assertEquals(row, {"f:bar\\x00": "baz\\x00"})
self.assertRaises(TypeError, self.table.put, "bar\x00", {"f:foo": "bar"})
class TestCTablePutSplit(unittest.TestCase):
    # Purpose of this is to test the C split function
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
def tearDown(self):
try:
self.connection.delete_table(TABLE_NAME)
except ValueError:
pass
self.connection.close()
def test_first(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.table.put("a", {"f:{cq}".format(cq='f' * i): str(i) for i in range(1, 1000)})
row = self.table.row("a")
self.assertEquals(row, {"f:{cq}".format(cq='f' * i): str(i) for i in range(1, 1000)})
def test_second(self):
self.connection.create_table(TABLE_NAME, {'ff': {}})
self.table = _table(self.connection, TABLE_NAME)
self.table.put("a", {"ff:{cq}".format(cq='f' * i): str(i) for i in range(1, 1000)})
row = self.table.row("a")
self.assertEquals(row, {"ff:{cq}".format(cq='f' * i): str(i) for i in range(1, 1000)})
def test_third(self):
self.connection.create_table(TABLE_NAME, {'fff': {}})
self.table = _table(self.connection, TABLE_NAME)
self.table.put("a", {"fff:{cq}".format(cq='f' * i): str(i) for i in range(1, 1000)})
row = self.table.row("a")
self.assertEquals(row, {"fff:{cq}".format(cq='f' * i): str(i) for i in range(1, 1000)})
class TestCTableDelete(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
def tearDown(self):
self.connection.delete_table(TABLE_NAME)
self.connection.close()
def test_happy(self):
self.table.put("foo", {"f:bar": "baz"})
row = self.table.row('foo')
self.assertEquals(row, {'f:bar': "baz"})
self.table.delete('foo')
row = self.table.row('foo')
self.assertEquals(row, {})
for i in range(100):
self.table.delete('foo')
def test_type_rowkey(self):
self.assertRaises(TypeError, self.table.delete, 1)
self.assertRaises(TypeError, self.table.delete, {'foo': 'bar'})
def test_type_timestamp(self):
self.table.put("foo", {"f:bar": "baz"}, 10)
self.table.delete("foo", None, None)
self.assertEquals(self.table.row("foo"), {})
self.table.put("foo", {"f:bar": "baz"}, 10)
self.table.delete("foo", None, 10)
self.assertEquals(self.table.row("foo"), {})
self.table.put("foo", {"f:bar": "baz"}, 10)
self.assertRaises(TypeError, self.table.delete, "foo", None, "invalid")
def test_type_is_wall(self):
self.table.put("foo", {"f:bar": "baz"})
self.table.delete("foo", None, None, None)
self.assertEquals(self.table.row("foo"), {})
self.table.put("foo", {"f:bar": "baz"})
self.table.delete("foo", None, None, True)
self.assertEquals(self.table.row("foo"), {})
self.table.put("foo", {"f:bar": "baz"})
self.table.delete("foo", None, None, False)
self.assertEquals(self.table.row("foo"), {})
self.assertRaises(TypeError, self.table.delete, 'foo', None, None, 'invalid')
def test_empty_row_key(self):
self.assertRaises(ValueError, self.table.delete, '')
class TestCTableDeleteColumns(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
def tearDown(self):
try:
self.connection.delete_table(TABLE_NAME)
except ValueError:
pass
self.connection.close()
def test_happy(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.table.put("foo", {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
row = self.table.row('foo')
self.assertEquals(row, {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
self.table.delete('foo', ('f',))
row = self.table.row('foo')
self.assertEquals(row, {})
self.table.put("foo", {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
self.table.delete('foo', ('f:',))
row = self.table.row('foo')
self.assertEquals(row, {})
self.table.put("foo", {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
self.table.delete('foo', ('f:a',))
row = self.table.row('foo')
self.assertEquals(row, {'f:ab': 'bar', 'f:abc': 'baz'})
self.table.put("foo", {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
self.table.delete('foo', ('f:a', 'f:ab'))
row = self.table.row('foo')
self.assertEquals(row, {'f:abc': 'baz'})
self.table.put("foo", {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
self.table.delete('foo', ('f:a', 'f:ab', 'f:abc'))
row = self.table.row('foo')
self.assertEquals(row, {})
self.table.put("foo", {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
self.table.delete('foo', ('f:nope',))
row = self.table.row('foo')
self.assertEquals(row, {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
self.table.put("foo", {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'})
self.table.delete('foo', ('f:a', 'f:nope'))
row = self.table.row('foo')
self.assertEquals(row, {'f:ab': 'bar', 'f:abc': 'baz'})
def test_batch_delete_columns_happy(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
for i in range(10):
self.table.put('foo%i' % i, {'f:a': 'foo%i' % i, 'f:ab': 'bar%i' % i, 'f:abc': 'baz%i' % i})
self.table.batch([('delete', 'foo%i' % i, ('f',)) for i in range(10)])
i = 0
for row, data in self.table.scan():
i += 1
self.assertEquals(i, 0)
for i in range(10):
self.table.put('foo%i' % i, {'f:a': 'foo%i' % i, 'f:ab': 'bar%i' % i, 'f:abc': 'baz%i' % i})
self.table.batch([('delete', 'foo%i' % i, ('f:',)) for i in range(10)])
i = 0
for row, data in self.table.scan():
i += 1
self.assertEquals(i, 0)
for i in range(10):
self.table.put('foo%i' % i, {'f:a': 'foo%i' % i, 'f:ab': 'bar%i' % i, 'f:abc': 'baz%i' % i})
self.table.batch([('delete', 'foo%i' % i, ('f:a',)) for i in range(10)])
i = 0
for row, data in self.table.scan():
self.assertEquals(data, {'f:ab': 'bar%i' % i, 'f:abc': 'baz%i' % i})
i += 1
self.assertEquals(i, 10)
def test_batch_delete_columns_timestamp(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
for i in range(10):
self.table.put('foo%i' % i, {'f:a': 'foo%i' % i, 'f:ab': 'foo%i' % i, 'f:abc': 'foo%i' % i}, 5)
for i in range(10):
self.table.put('foo%i' % i, {'f:a': 'bar%i' % i, 'f:ab': 'bar%i' % i, 'f:abc': 'bar%i' % i}, 10)
for i in range(10):
self.table.put('foo%i' % i, {'f:a': 'baz%i' % i, 'f:ab': 'baz%i' % i, 'f:abc': 'baz%i' % i}, 15)
self.table.batch([('delete', 'foo%i' % i, ('f:a',), 10) for i in range(10)])
i = 0
for row, data in self.table.scan('', '', None, None, 10, True):
self.assertEquals(data, {'f:ab': ('bar%i' % i, 10), 'f:abc': ('bar%i' % i, 10)})
i += 1
self.assertEquals(i, 10)
i = 0
for row, data in self.table.scan('', '', None, None, 15, True):
self.assertEquals(data, {'f:a': ('baz%i' % i, 15), 'f:ab': ('baz%i' % i, 15), 'f:abc': ('baz%i' % i, 15)})
i += 1
self.assertEquals(i, 10)
def test_no_timestamp(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.table.put("foo", {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'}, 5)
self.table.put("foo", {"f:a": "FOO", 'f:ab': 'BAR', 'f:abc': 'BAZ'}, 10)
self.table.delete('foo', ('f',))
row = self.table.row('foo')
self.assertEquals(row, {})
row = self.table.row('foo', None, 10)
self.assertEquals(row, {})
row = self.table.row('foo', None, 5)
self.assertEquals(row, {})
def test_with_timestamp(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.table.put("foo", {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'}, 5)
self.table.put("foo", {"f:a": "FOO", 'f:ab': 'BAR', 'f:abc': 'BAZ'}, 10)
self.table.delete('foo', ('f',), 10)
row = self.table.row('foo')
self.assertEquals(row, {})
row = self.table.row('foo', None, 10)
self.assertEquals(row, {})
row = self.table.row('foo', None, 5)
self.assertEquals(row, {})
def test_with_timestamp_1(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.table.put("foo", {"f:a": "foo", 'f:ab': 'bar', 'f:abc': 'baz'}, 5)
self.table.put("foo", {"f:a": "FOO", 'f:ab': 'BAR', 'f:abc': 'BAZ'}, 10)
self.table.delete('foo', ('f',), 5)
row = self.table.row('foo')
self.assertEquals(row, {"f:a": "FOO", 'f:ab': 'BAR', 'f:abc': 'BAZ'})
row = self.table.row('foo', None, 10)
self.assertEquals(row, {"f:a": "FOO", 'f:ab': 'BAR', 'f:abc': 'BAZ'})
row = self.table.row('foo', None, 5)
self.assertEquals(row, {})
def test_type(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.assertRaises(TypeError, self.table.delete, 'foo', 'bar')
self.assertRaises(TypeError, self.table.delete, 'foo', {'set', 'should', 'fail'})
self.assertRaises(TypeError, self.table.delete, 'foo', {'dict': 'should', 'also': 'fail'})
def test_bad_column_family(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.assertRaises(ValueError, self.table.delete, 'foo', ('b',))
class TestCTableScanStartStop(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
for i in range(1, 10):
self.table.put("foo{i}".format(i=i), {"f:bar{i}".format(i=i): 'baz{i}'.format(i=i)})
for i in range(1, 10):
self.table.put("aaa{i}".format(i=i), {"f:aaa{i}".format(i=i): 'aaa{i}'.format(i=i)})
for i in range(1, 10):
self.table.put("zzz{i}".format(i=i), {"f:zzz{i}".format(i=i): 'zzz{i}'.format(i=i)})
def tearDown(self):
self.connection.delete_table(TABLE_NAME)
self.connection.close()
def test_happy(self):
i = 0
for row_key, obj in self.table.scan():
i += 1
if i <= 9:
self.assertEquals(row_key, "aaa{i}".format(i=i))
self.assertEquals(obj, {"f:aaa{i}".format(i=i): 'aaa{i}'.format(i=i)})
elif i <= 18:
self.assertEquals(row_key, "foo{i}".format(i= i - 9))
self.assertEquals(obj, {"f:bar{i}".format(i=i-9): 'baz{i}'.format(i=i-9)})
else:
self.assertEquals(row_key, "zzz{i}".format(i=i-18))
self.assertEquals(obj, {"f:zzz{i}".format(i=i-18): 'zzz{i}'.format(i=i-18)})
self.assertEquals(i, 27)
def test_happy_start(self):
i = 0
for row_key, obj in self.table.scan('zzz'):
i += 1
self.assertEquals(row_key, "zzz{i}".format(i=i))
self.assertEquals(obj, {"f:zzz{i}".format(i=i): 'zzz{i}'.format(i=i)})
self.assertEquals(i, 9)
def test_happy_stop(self):
i = 0
for row_key, obj in self.table.scan('', 'aaa9~'):
i += 1
self.assertEquals(row_key, "aaa{i}".format(i=i))
self.assertEquals(obj, {"f:aaa{i}".format(i=i): 'aaa{i}'.format(i=i)})
self.assertEquals(i, 9)
def test_happy_start_stop(self):
i = 0
for row_key, obj in self.table.scan('foo1', 'foo9~'):
i += 1
self.assertEquals(row_key, "foo{i}".format(i=i))
self.assertEquals(obj, {"f:bar{i}".format(i=i): 'baz{i}'.format(i=i)})
self.assertEquals(i, 9)
def test_no_rows(self):
i = 0
for row_key, obj in self.table.scan('fake', 'fake~'):
i += 1
self.assertEquals(i, 0)
class TestCTableScan(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
try:
self.connection.create_table(TABLE_NAME, {'f': {}})
except ValueError:
pass
self.table = _table(self.connection, TABLE_NAME)
def tearDown(self):
try:
self.connection.delete_table(TABLE_NAME)
except ValueError:
pass
self.connection.close()
def test_timestamp_type(self):
for i in range(0, 10):
self.table.put('foo%i' % i, {'f:foo': 'foo'}, 5)
for row, data in self.table.scan('', '', None, None, None):
self.assertEquals(data, {'f:foo': 'foo'})
for row, data in self.table.scan('', '', None, None, 5):
self.assertEquals(data, {'f:foo': 'foo'})
# 0 shouldn't throw an error
for row, data in self.table.scan('', '', None, None, 0):
pass
# -1 should throw an error
self.assertRaises(ValueError, self.table.scan, timestamp=-1)
self.assertRaises(TypeError, self.table.scan, '', '', None, None, 'invalid')
def test_include_timestamp_type(self):
for i in range(0, 10):
self.table.put('foo%i' % i, {'f:foo': 'foo'}, 5)
for row, data in self.table.scan('', '', None, None, 5, None):
self.assertEquals(data, {'f:foo': 'foo'})
for row, data in self.table.scan('', '', None, None, 5, False):
self.assertEquals(data, {'f:foo': 'foo'})
for row, data in self.table.scan('', '', None, None, 5, True):
self.assertEquals(data, {'f:foo': ('foo', 5)})
self.assertRaises(TypeError, self.table.scan, '', '', None, None, 5, 'invalid')
def test_batch_size_type(self):
for i in range(0, 10):
self.table.put('foo%i' % i, {'f:foo': 'foo'}, 5)
for row, data in self.table.scan(batch_size=None):
self.assertEquals(data, {'f:foo': 'foo'})
for row, data in self.table.scan(batch_size=10):
self.assertEquals(data, {'f:foo': 'foo'})
# 0 and -1 should throw an error
self.assertRaises(ValueError, self.table.scan, batch_size=0)
self.assertRaises(ValueError, self.table.scan, batch_size=-1)
self.assertRaises(TypeError, self.table.scan, batch_size='invalid')
def test_filter_type(self):
for row, data in self.table.scan(filter='FirstKeyOnlyFilter()'):
pass
for row, data in self.table.scan(filter=None):
pass
self.assertRaises(ValueError, self.table.scan, filter="")
self.assertRaises(ValueError, self.table.scan, filter='invalid filter syntax')
self.assertRaises(TypeError, self.table.scan, filter={'foo': 'bar'})
def test_limit_type(self):
for row, data in self.table.scan(limit=None):
pass
for row, data in self.table.scan(limit=1):
pass
self.assertRaises(ValueError, self.table.scan, limit=0)
self.assertRaises(TypeError, self.table.scan, limit='invalid')
def test_limit(self):
for i in range(10):
self.table.put("foo%i" % i, {"f:foo": "bar"})
i = 0
for row, data in self.table.scan(limit=5):
self.assertEquals(row, 'foo%i' % i)
i += 1
self.assertEquals(i, 5)
def test_limit_with_batch_size(self):
for i in range(10):
self.table.put("foo%i" % i, {"f:foo": "bar"})
i = 0
for row, data in self.table.scan(limit=5, batch_size=4):
self.assertEquals(row, 'foo%i' % i)
i += 1
self.assertEquals(i, 5)
i = 0
for row, data in self.table.scan(limit=5, batch_size=5):
self.assertEquals(row, 'foo%i' % i)
i += 1
self.assertEquals(i, 5)
i = 0
for row, data in self.table.scan(limit=5, batch_size=6):
self.assertEquals(row, 'foo%i' % i)
i += 1
self.assertEquals(i, 5)
def test_only_rowkeys(self):
for i in range(10):
self.table.put("foo%i" % i, {"f:foo": "bar"})
i = 0
for row in self.table.scan(only_rowkeys=True):
self.assertEquals(row, 'foo%i' % i)
i += 1
self.assertEquals(i, 10)
def test_only_rowkey_type(self):
self.assertRaises(TypeError, self.table.scan, only_rowkeys='invalid')
self.assertRaises(TypeError, self.table.scan, only_rowkeys=1)
class TestCTableScanFilter(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
try:
self.connection.create_table(TABLE_NAME, {'f': {}})
except ValueError:
pass
self.table = _table(self.connection, TABLE_NAME)
def tearDown(self):
try:
self.connection.delete_table(TABLE_NAME)
except ValueError:
pass
self.connection.close()
def test_key_only_filter(self):
self.table.put("foo", {"f:foo": "bar"})
i = 0
for key, data in self.table.scan(filter='KeyOnlyFilter()'):
i += 1
self.assertEquals(key, 'foo')
self.assertEquals(data, {"f:foo": ""})
self.assertEquals(i, 1)
def test_first_key_only_filter(self):
self.table.put("foo", {"f:foo": "bar", "f:baz": "foobar"})
i = 0
for key, data in self.table.scan(filter='FirstKeyOnlyFilter()'):
i += 1
self.assertEquals(key, 'foo')
self.assertEquals(len(data), 1)
self.assertEquals(i, 1)
def test_prefix_filter(self):
self.table.put("foo", {"f:foo": "foo"})
self.table.put("bar1", {"f:bar1": "bar1"})
self.table.put("bar2", {"f:bar2": "bar2"})
self.table.put("bar3", {"f:bar3": "bar3"})
self.table.put("baz", {"f:baz": "baz"})
i = 1
for key, data in self.table.scan(filter="PrefixFilter('bar')"):
self.assertEquals(key, 'bar%i' % i)
self.assertEquals(data, {"f:bar%i" % i: "bar%i" % i})
i += 1
self.assertEquals(i, 4)
def test_column_prefix_filter(self):
self.table.put("foo", {"f:foo": "foo", "f:bar1": "bar1", "f:bar2": "bar2", "f:bar3": "bar3", "f:baz": "baz"})
i = 0
for key, data in self.table.scan(filter="ColumnPrefixFilter('bar')"):
i += 1
self.assertEquals(data, {"f:bar1": "bar1", "f:bar2": "bar2", "f:bar3": "bar3"})
self.assertEquals(i, 1)
def test_multiple_column_prefix_filter(self):
self.table.put("foo", {"f:foo1": "foo1", "f:foo2": "foo2", "f:foo3": "foo3", "f:bar1": "bar1", "f:bar2": "bar2", "f:bar3": "bar3", "f:baz": "baz"})
i = 0
for key, data in self.table.scan(filter="MultipleColumnPrefixFilter('foo', 'bar')"):
i += 1
self.assertEquals(data, {"f:foo1": "foo1", "f:foo2": "foo2", "f:foo3": "foo3", "f:bar1": "bar1", "f:bar2": "bar2", "f:bar3": "bar3"})
self.assertEquals(i, 1)
def test_column_count_filter(self):
self.table.put("foo", {"f:foo": "foo", "f:bar1": "bar1", "f:bar2": "bar2", "f:bar3": "bar3", "f:baz": "baz"})
i = 0
for key, data in self.table.scan(filter="ColumnCountGetFilter(3)"):
self.assertEquals(len(data), 3)
i += 1
self.assertEquals(i, 1)
def test_page_filter(self):
for i in range(10):
self.table.put("foo%i" % i, {"f:foo": "bar"})
i = 0
for key, data in self.table.scan(filter='PageFilter(4)'):
self.assertEquals(key, 'foo%i' % i)
i += 1
self.assertEquals(i, 4)
def test_column_pagination_filter(self):
self.table.put("foo", {"f:foo": "foo", "f:bar1": "bar1", "f:bar2": "bar2", "f:bar3": "bar3", "f:baz": "baz"})
# Column Pagination requires both (limit, offset)
self.assertRaises(ValueError, self.table.scan, filter='ColumnPaginationFilter(4)')
for key, data in self.table.scan(filter='ColumnPaginationFilter(4, 0)'):
self.assertEquals(len(data), 4)
for key, data in self.table.scan(filter='ColumnPaginationFilter(4, 4)'):
self.assertEquals(len(data), 1)
def test_inclusive_stop_filter(self):
self.table.put("bar1", {"f:bar1": "bar1"})
self.table.put("bar2", {"f:bar2": "bar2"})
self.table.put("bar3", {"f:bar3": "bar3"})
i = 1
for key, data in self.table.scan(filter="InclusiveStopFilter('bar3')"):
self.assertEquals(key, 'bar%i' % i)
i += 1
self.assertEquals(i, 4)
self.assertEquals(key, 'bar3')
def test_timestamp_filter(self):
self.table.put('foo', {'f:foo': 'foo'}, 10)
self.table.put('bar', {'f:foo': 'foo'})
i = 0
# TODO why doesn't this work?
for key, data in self.table.scan(filter="TimestampsFilter (10)"):
self.assertEquals(key, 'foo')
i += 1
self.assertEquals(i, 1)
def test_row_filter(self):
self.table.put("bar1", {"f:bar1": "bar1"})
self.table.put("bar2", {"f:bar2": "bar2"})
self.table.put("bar3", {"f:bar3": "bar3"})
i = 1
keys = []
for key, data in self.table.scan(filter="RowFilter(<= 'binary:bar2')"):
keys.append(key)
self.assertEquals(key, 'bar%i' % i)
i += 1
self.assertEquals(i, 3)
def test_family_filter(self):
self.connection.delete_table(TABLE_NAME)
self.connection.create_table(TABLE_NAME, {'f': {}, 'z': {}})
self.table = _table(self.connection, TABLE_NAME)
self.table.put("foo", {'f:foo': 'foo'})
self.table.put("bar", {'z:bar': 'bar', 'f:foo': 'baz'})
i = 0
# Requires binary:
for key, data in self.table.scan(filter="FamilyFilter(=, 'binary:z')"):
self.assertEquals(key, 'bar')
self.assertEquals(data, {'z:bar': 'bar'})
i += 1
self.assertEquals(i, 1)
def test_qualifier_filter(self):
self.table.put("foo", {'f:foo': 'foo'})
self.table.put("bar", {'f:bar': 'foo', 'f:baz': 'lol'})
i = 0
for key, data in self.table.scan(filter="QualifierFilter(=, 'binary:bar')"):
self.assertEquals(key, 'bar')
self.assertEquals(data, {'f:bar': 'foo'})
i += 1
self.assertEquals(i, 1)
def test_value_filter(self):
self.table.put("foo", {'f:foo': 'foo', 'f:bar': 'foo', 'f:baz': 'lol'})
self.table.put("bar", {'f:foo': 'lol', 'f:bar': 'lol', 'f:baz': 'lol'})
i = 0
for row, data in self.table.scan(filter="ValueFilter(=, 'binary:foo')"):
self.assertEquals(row, 'foo')
self.assertEquals(data, {'f:foo': 'foo', 'f:bar': 'foo'})
i += 1
self.assertEquals(i, 1)
def test_dependent_column_filter(self):
self.table.put("foo", {'f:a': 'a', 'f:b': 'b', 'f:c': 'c', 'f:d': 'd'})
self.table.put("bar", {'f:z': 'a', 'f:zz': 'b', 'f:zzz': 'c', 'f:zzzz': 'd'})
i = 0
for row, data in self.table.scan(filter="DependentColumnFilter('f', 'a')"):
self.assertEquals(row, 'foo')
self.assertEquals(data, {'f:a': 'a', 'f:b': 'b', 'f:c': 'c', 'f:d': 'd'})
i += 1
self.assertEquals(i, 1)
def test_single_column_value_filter(self):
self.table.put("foo", {'f:foo': 'foo', 'f:bar': 'bar', 'f:baz': 'baz'})
self.table.put("bar", {'f:foo': 'bar', 'f:bar': 'bar', 'f:baz': 'bar'})
# MapR not showing the expected behavior
i = 0
for row, data in self.table.scan(filter="SingleColumnValueFilter('f', 'foo', =, 'binary:foo')"):
self.assertEquals(row, 'foo')
self.assertEquals(data, {'f:foo': 'foo', 'f:bar': 'bar', 'f:baz': 'baz'})
i += 1
self.assertEquals(i, 1)
def test_single_column_value_exclude_filter(self):
self.table.put("foo", {'f:foo': 'foo', 'f:bar': 'bar', 'f:baz': 'baz'})
self.table.put("bar", {'f:foo': 'bar', 'f:bar': 'bar', 'f:baz': 'bar'})
i = 0
# MapR not showing the expected behavior
for row, data in self.table.scan(filter="SingleColumnValueExcludeFilter('f', 'foo', =, 'binary:foo')"):
self.assertEquals(row, 'foo')
self.assertEquals(data, {'f:bar': 'bar', 'f:baz': 'baz'})
i += 1
self.assertEquals(i, 1)
def test_column_range_filter(self):
self.table.put("foo", {'f:a': 'a', 'f:b': 'b', 'f:c': 'c', 'f:d': 'd'})
self.table.put("bar", {'f:z': 'a', 'f:zz': 'b', 'f:zzz': 'c', 'f:zzzz': 'd'})
i = 0
# MapR doesn't appear to honor the max bool? Also min and max bools seem backwards.
for row, data in self.table.scan(filter="ColumnRangeFilter('a', true, 'd', true)"):
self.assertEquals(row, 'foo')
self.assertEquals(data, {'f:a': 'a', 'f:b': 'b', 'f:c': 'c'})
i += 1
self.assertEquals(i, 1)
class TestCTableScanColumns(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
def tearDown(self):
try:
self.connection.delete_table(TABLE_NAME)
except ValueError:
pass
self.connection.close()
def test_scan_columns_happy(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
for i in range(0, 10):
self.table.put('foo%i' % i, {'f:a': 'foo%i' % i, 'f:ab': 'bar%i' % i, 'f:abc': 'baz%i' % i})
i = 0
for row, data in self.table.scan('', '', ('f',)):
self.assertEquals(data, {'f:a': 'foo%i' % i, 'f:ab': 'bar%i' % i, 'f:abc': 'baz%i' % i})
i += 1
self.assertEquals(i, 10)
i = 0
for row, data in self.table.scan('', '', ('f:',)):
self.assertEquals(data, {'f:a': 'foo%i' % i, 'f:ab': 'bar%i' % i, 'f:abc': 'baz%i' % i})
i += 1
self.assertEquals(i, 10)
i = 0
for row, data in self.table.scan('', '', ('f:a',)):
self.assertEquals(data, {'f:a': 'foo%i' % i})
i += 1
self.assertEquals(i, 10)
i = 0
for row, data in self.table.scan('', '', ('f:a', 'f:ab')):
self.assertEquals(data, {'f:a': 'foo%i' % i, 'f:ab': 'bar%i' % i})
i += 1
self.assertEquals(i, 10)
i = 0
for row, data in self.table.scan('', '', ('f:a', 'f:ab', 'f:abc',)):
self.assertEquals(data, {'f:a': 'foo%i' % i, 'f:ab': 'bar%i' % i, 'f:abc': 'baz%i' % i})
i += 1
self.assertEquals(i, 10)
i = 0
for row, data in self.table.scan('', '', ('f:nope',)):
self.assertEquals(data, {})
i += 1
self.assertEquals(i, 0)
i = 0
for row, data in self.table.scan('', '', ('f:a', 'f:nope')):
self.assertEquals(data, {'f:a': 'foo%i' % i})
i += 1
self.assertEquals(i, 10)
def test_columns_type(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.assertRaises(TypeError, self.table.scan, '', '', 'invalid')
self.assertRaises(TypeError, self.table.scan, '', '', {'no', 'sets'})
self.assertRaises(TypeError, self.table.scan, '', '', {'no': 'dicts'})
def test_bad_column_family(self):
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
self.assertRaises(ValueError, self.table.scan, '', '', ('bad',))
self.assertRaises(ValueError, self.table.scan, '', '', ('bad:',))
self.assertRaises(ValueError, self.table.scan, '', '', ('bad:bad',))
self.assertRaises(ValueError, self.table.scan, '', '', ('f:good', 'bad:bad'))
# TODO Need to add more columns to tests
class TestCTableBatch(unittest.TestCase):
def setUp(self):
self.connection = _connection(ZOOKEEPERS)
self.connection.create_table(TABLE_NAME, {'f': {}})
self.table = _table(self.connection, TABLE_NAME)
def tearDown(self):
self.connection.delete_table(TABLE_NAME)
self.connection.close()
def test_happy(self):
self.table.batch([('put', 'foo{}'.format(i), {"f:bar{i}".format(i=i): 'baz{i}'.format(i=i)}) for i in range(1, 1001)])
rows = sorted(self.table.scan(), key=lambda x: int(x[0][3:]))
i = 1
for row_key, obj in rows:
self.assertEquals(row_key, "foo{i}".format(i=i))
self.assertEquals(obj, {"f:bar{i}".format(i=i): 'baz{i}'.format(i=i)})
i += 1
self.assertEquals(i, 1001)
self.table.batch([('delete', 'foo{}'.format(i)) for i in range(1, 1001)])
i = 0
for row_key, obj in self.table.scan():
i += 1
self.assertEquals(i, 0)
def test_happy_big_column(self):
self.table.batch([('put', 'foo%i' % i, {"f:bar%o" % o: "baz%o" % o for o in range(100)}) for i in range(1, 1001)])
def test_mixed_errors_put(self):
actions = [
('put', 'a', {'f:foo': 'bar'}),
('put', 'b', {'f': 'bar'}),
            ('put', 'c', {'f:': 'bar'}),  # legal: an empty qualifier after the colon is accepted
('put', 'd', {':foo': 'bar'}),
('put', 'e', {'invalid:foo': 'bar'}),
('put', 'f', 'invalid data type'),
('put', 'g', {'f:foo', 'bar'}),
(1, 'h', {'f:foo': 'bar'}),
('put', 2, {'f:foo': 'bar'}),
('put', 'j', 3),
'not a tuple',
('invalid', 'k', {'f:foo': 'bar'}),
('put', 'z', {'f:foo': 'bar'}),
]
errors, results = self.table.batch(actions)
self.assertEquals(errors, len(actions) - 3)
# TODO scan for the good rows
i = 0
for row_key, obj in self.table.scan():
i += 1
self.assertEquals(i, 3)
def test_mixed_errors_delete(self):
actions = [
('delete', 'a'),
('delete', 1),
('delete', {"foo":"bar"}),
(1, 'a'),
('invalid', 'b'),
'not a tuple'
]
errors, results = self.table.batch(actions)
self.assertEquals(errors, len(actions) - 1)
def test_empty_actions(self):
errors, results = self.table.batch([])
self.assertEquals(errors, 0)
def test_put_timestamp_type(self):
errors, results = self.table.batch([('put', 'a', {'f:foo': 'bar'}, None)])
self.assertEquals(errors, 0)
errors, results = self.table.batch([('put', 'a', {'f:foo': 'bar'}, 5)])
self.assertEquals(errors, 0)
errors, results = self.table.batch([('put', 'a', {'f:foo': 'bar'}, 'invalid')])
self.assertEquals(errors, 1)
def test_delete_timestamp_type(self):
errors, results = self.table.batch([('delete', 'a', None, None)])
self.assertEquals(errors, 0)
errors, results = self.table.batch([('delete', 'a', None, 5)])
self.assertEquals(errors, 0)
errors, results = self.table.batch([('delete', 'a', None, 'invalid')])
self.assertEquals(errors, 1)
def test_put_is_wal_type(self):
errors, results = self.table.batch([('put', 'a', {'f:foo': 'bar'}, None, None)])
self.assertEquals(errors, 0)
errors, results = self.table.batch([('put', 'a', {'f:foo': 'bar'}, None, True)])
self.assertEquals(errors, 0)
errors, results = self.table.batch([('put', 'a', {'f:foo': 'bar'}, None, False)])
self.assertEquals(errors, 0)
errors, results = self.table.batch([('put', 'a', {'f:foo': 'bar'}, None, 'invalid')])
self.assertEquals(errors, 1)
def test_delete_is_wal_type(self):
errors, results = self.table.batch([('delete', 'a', None, None, None)])
self.assertEquals(errors, 0)
errors, results = self.table.batch([('delete', 'a', None, None, True)])
self.assertEquals(errors, 0)
errors, results = self.table.batch([('delete', 'a', None, None, False)])
self.assertEquals(errors, 0)
errors, results = self.table.batch([('delete', 'a', None, None, 'invalid')])
self.assertEquals(errors, 1)
def test_delete_columns_type(self):
errors, results = self.table.batch([('delete', 'a', None)])
self.assertEquals(errors, 0)
errors, results = self.table.batch([('delete', 'a', ('f',))])
self.assertEquals(errors, 0)
errors, results = self.table.batch([('delete', 'a', 'invalid')])
self.assertEquals(errors, 1)
errors, results = self.table.batch([('delete', 'a', {'no', 'sets'})])
self.assertEquals(errors, 1)
errors, results = self.table.batch([('delete', 'a', {'no': 'dicts'})])
self.assertEquals(errors, 1)
class TestPython(unittest.TestCase):
def setUp(self):
# TODO Configure this for non-mapr users
self.connection = Connection()
try:
self.connection.create_table(TABLE_NAME, {'f': {}})
except ValueError:
pass
self.table = self.connection.table(TABLE_NAME)
def tearDown(self):
try:
self.connection.delete_table(TABLE_NAME, disable=True)
except ValueError:
pass
self.connection.close()
def test_rows(self):
for i in range(5):
self.table.put("foo%i" % i, {'f:foo': 'bar%i' % i})
rows = self.table.rows(['foo%i' % i for i in range(1, 4)])
self.assertEquals(rows, [{'f:foo': 'bar%i' % i} for i in range(1, 4)])
def test_delete_table_no_disable(self):
self.assertRaises(ValueError, self.connection.delete_table, TABLE_NAME)
self.connection.disable_table(TABLE_NAME)
self.connection.delete_table(TABLE_NAME)
def test_delete_table_yes_disable(self):
self.connection.delete_table(TABLE_NAME, disable=True)
class TestPythonHappy(unittest.TestCase):
def setUp(self):
connection = Connection()
try:
connection.delete_table(TABLE_NAME, disable=True)
except ValueError:
pass
connection.close()
def tearDown(self):
connection = Connection()
try:
connection.delete_table(TABLE_NAME, disable=True)
except ValueError:
pass
connection.close()
def test_extract_zookeeper(self):
mapr_clusters_conf = StringIO("hadoopDev secure=true myhost:7222 myhost2:7222 myhost3:7222\n")
cldbs = Connection._extract_mapr_cldbs(mapr_clusters_conf)
self.assertEquals(cldbs, 'myhost:7222,myhost2:7222,myhost3:7222')
mapr_clusters_conf = StringIO("Mapr5.1 hadoopDev secure=true myhost:7222 myhost2:7222 myhost3:7222\n")
cldbs = Connection._extract_mapr_cldbs(mapr_clusters_conf)
self.assertEquals(cldbs, 'myhost:7222,myhost2:7222,myhost3:7222')
def test_happy(self):
connection = Connection()
connection.create_table(TABLE_NAME, {'f': {}})
table = connection.table(TABLE_NAME)
for i in range(0, 10):
table.put("a{}".format(i), {"f:foo{}".format(i): "bar{}".format(i)})
self.assertEquals(table.row("a0"), {"f:foo0": "bar0"})
self.assertEquals(table.row("a4"), {"f:foo4": "bar4"})
self.assertEquals(table.row("a9"), {"f:foo9": "bar9"})
i = 0
for row_key, obj in table.scan():
self.assertEquals(row_key, 'a{}'.format(i))
self.assertEquals(obj, {"f:foo{}".format(i): "bar{}".format(i)})
i += 1
self.assertEquals(i, 10)
i = 1
for row_key, obj in table.scan(start="a1"):
self.assertEquals(row_key, 'a{}'.format(i))
self.assertEquals(obj, {"f:foo{}".format(i): "bar{}".format(i)})
i += 1
self.assertEquals(i, 10)
i = 0
for row_key, obj in table.scan(stop="a8~"):
self.assertEquals(row_key, 'a{}'.format(i))
self.assertEquals(obj, {"f:foo{}".format(i): "bar{}".format(i)})
i += 1
self.assertEquals(i, 9)
i = 1
for row_key, obj in table.scan(start="a1", stop="a8~"):
self.assertEquals(row_key, 'a{}'.format(i))
self.assertEquals(obj, {"f:foo{}".format(i): "bar{}".format(i)})
i += 1
self.assertEquals(i, 9)
table.delete("a0")
table.delete("a9")
self.assertEquals(table.row("a0"), {})
self.assertEquals(table.row("a9"), {})
batch = table.batch()
for i in range(1000):
batch.put("test{}".format(i), {"f:foo{}".format(i): "bar{}".format(i)})
i = 0
for row_key, obj in table.scan(start='test', stop='test~'):
i += 1
self.assertEquals(i, 0)
errors = batch.send()
self.assertEquals(errors, 0)
self.assertEquals(len(batch._actions), 0)
i = 0
for row_key, obj in table.scan(start='test', stop='test~'):
i += 1
self.assertEquals(i, 1000)
table.delete_prefix("test")
i = 0
for row_key, obj in table.scan(start='test', stop='test~'):
i += 1
self.assertEquals(i, 0)
class TestPythonTableCount(unittest.TestCase):
def setUp(self):
# TODO Configure this for non-mapr users
self.connection = Connection()
try:
self.connection.create_table(TABLE_NAME, {'f': {}})
except ValueError:
pass
self.table = self.connection.table(TABLE_NAME)
batch = self.table.batch()
for i in range(10):
batch.put("a%i" % i, {"f:foo": "f:foo%i" % i})
for i in range(10):
batch.put("b%i" % i, {"f:bar": "f:bar%i" % i})
for i in range(10):
batch.put("c%i" % i, {"f:baz": "f:baz%i" % i})
batch.send()
def tearDown(self):
try:
self.connection.delete_table(TABLE_NAME, disable=True)
except ValueError:
pass
self.connection.close()
def test_full(self):
count = self.table.count()
self.assertEquals(count, 30)
def test_prefix(self):
count = self.table.count(row_prefix='b')
self.assertEquals(count, 10)
def test_row_regex_filter(self):
count = self.table.count(filter="RowFilter(=, 'regexstring:^b')")
self.assertEquals(count, 10)
class TestPythonRowPrefix(unittest.TestCase):
def setUp(self):
# TODO Configure this for non-mapr users
self.connection = Connection()
try:
self.connection.create_table(TABLE_NAME, {'f': {}})
except ValueError:
pass
self.table = self.connection.table(TABLE_NAME)
batch = self.table.batch()
batch.put('a', {'f:foo': 'foo'})
for i in range(10):
batch.put('b%i' % i, {'f:foo': 'bar'})
batch.put('c', {'f:foo': 'baz'})
batch.send()
def tearDown(self):
try:
self.connection.delete_table(TABLE_NAME, disable=True)
except ValueError:
pass
self.connection.close()
def test_row_prefix_happy(self):
i = 0
for row, data in self.table.scan(row_prefix='b'):
self.assertEquals(row, 'b%i' % i)
self.assertEquals(data, {'f:foo': 'bar'})
i += 1
self.assertEquals(i, 10)
def test_start_happy(self):
i = 0
for row, data in self.table.scan(start='b'):
i += 1
self.assertEquals(row, 'c')
self.assertEquals(data, {'f:foo': 'baz'})
self.assertEquals(i, 11)
def test_stop_happy(self):
i = 0
for row, data in self.table.scan(stop='b9~'):
i += 1
self.assertEquals(row, 'b9')
self.assertEquals(data, {'f:foo': 'bar'})
self.assertEquals(i, 11)
def test_start_stop_happy(self):
i = 2
for row, data in self.table.scan(start='b2', stop='b5~'):
self.assertEquals(row, 'b%i' % i)
self.assertEquals(data, {'f:foo': 'bar'})
i += 1
self.assertEquals(i, 6)
def test_mix(self):
        self.assertRaises(TypeError, self.table.scan, start='foo', stop='bar', row_prefix='foobar')
        self.assertRaises(TypeError, self.table.scan, start='foo', row_prefix='foobar')
        self.assertRaises(TypeError, self.table.scan, stop='bar', row_prefix='foobar')
if __name__ == '__main__':
s = datetime.now()
    unittest.main(exit=False)  # exit=False so the timing line below is reached
e = datetime.now()
print("Tests took %s" % (e - s))
|
{
"content_hash": "fc9e7d900aa188dc383b8b9b41c5bca1",
"timestamp": "",
"source": "github",
"line_count": 1835,
"max_line_length": 155,
"avg_line_length": 35.8708446866485,
"alnum_prop": 0.5455236011728423,
"repo_name": "mkmoisen/pychbase",
"id": "a98859da1ac8d2bb30d7030755a9f37cc154be48",
"size": "65823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1570"
},
{
"name": "C++",
"bytes": "157545"
},
{
"name": "Python",
"bytes": "82353"
},
{
"name": "Shell",
"bytes": "523"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem357.py
#
# Prime generating integers
# =========================
# Published on Saturday, 5th November 2011, 04:00 pm
#
# Consider the divisors of 30: 1,2,3,5,6,10,15,30. It can be seen that for
# every divisor d of 30, d+30/d is prime. Find the sum of all positive
# integers n not exceeding 100 000 000 such that for every divisor d of n,
# d+n/d is prime.
import projecteuler as pe
def main():
pass
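# --- Illustrative sketch (not from the original file) -----------------------
# A self-contained brute-force check of the property above; the projecteuler
# helper module's API is unknown here, so nothing from it is used. This is
# only practical for limits far below 100 000 000 - the full problem needs
# number-theoretic pruning (for d = 1 alone, n + 1 must already be prime).
def _brute_force_sum(limit=10000):
    # Sieve of Eratosthenes up to limit + 1, the largest possible d + n/d
    is_prime = [True] * (limit + 2)
    is_prime[0] = is_prime[1] = False
    for i in range(2, int((limit + 1) ** 0.5) + 1):
        if is_prime[i]:
            is_prime[i * i::i] = [False] * len(is_prime[i * i::i])
    total = 0
    for n in range(1, limit + 1):
        d = 1
        while d * d <= n:
            # d and n // d form a divisor pair; both yield the same sum
            if n % d == 0 and not is_prime[d + n // d]:
                break
            d += 1
        else:
            total += n
    return total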
if __name__ == "__main__":
main()
|
{
"content_hash": "f6441647510a6d344decbd82ac1d97fd",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 26.842105263157894,
"alnum_prop": 0.6333333333333333,
"repo_name": "olduvaihand/ProjectEuler",
"id": "c78c37bb1957fcdeb6b39feec60594a6339fe96c",
"size": "512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/problem357.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "0"
},
{
"name": "Python",
"bytes": "422751"
}
],
"symlink_target": ""
}
|
from users.alltests.users_models_tests import *
from users.alltests.users_forms_tests import *
from users.alltests.users_views_tests import *
|
{
"content_hash": "b6d72c6fb148acebc7e693e7ac789182",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 47,
"avg_line_length": 47,
"alnum_prop": 0.8156028368794326,
"repo_name": "none-da/zeshare",
"id": "bb411db073861aad8dabec19aa3581bf8c270ced",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "48335"
},
{
"name": "Python",
"bytes": "347229"
},
{
"name": "Shell",
"bytes": "321"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
import unittest
from flypy import jit, typeof
from flypy.runtime.obj import exceptions
class TestExceptionObjs(unittest.TestCase):
def test_typeof(self):
self.assertEqual(typeof(StopIteration()), exceptions.StopIteration.type)
if __name__ == '__main__':
#TestExceptionObjs('test_typeof').debug()
unittest.main()
|
{
"content_hash": "35ebf256b5b4bfd5f8809f06f0ce0241",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 26.733333333333334,
"alnum_prop": 0.7331670822942643,
"repo_name": "flypy/flypy",
"id": "03228101a9e6cc5ad57bd9e9d1294530dcd6e902",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flypy/runtime/obj/tests/test_exceptions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "540626"
},
{
"name": "Shell",
"bytes": "5103"
}
],
"symlink_target": ""
}
|
import json
import sys
import csv
row=[]
csvPath = sys.argv[1]  # Input path to the csv file
with open(csvPath,"r") as f:
lines = csv.reader(f.read().splitlines(), delimiter=' ')
for line in lines:
row.append(line)
data={}
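# First pass over the rows: re-split each line on commas, skip the header,
# and create an empty bucket per cluster key (the third field).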
for i in range(len(row)):
if "x-coordinate" in row[i][0].split(","):
continue
else:
column = row[i][0].split(",")
data[column[2]]=[]
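# Second pass: build a {name, size} node per row and append it to its cluster's bucket.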
for i in range(len(row)):
if "x-coordinate" in row[i][0].split(","):
continue
else:
column = row[i][0].split(",")
second={}
second["name"]=column[1]+" "+column[2]
second["size"]=column[2]
data[column[2]].append(second)
clusterList = []
i=0
for elem in data.keys():
first={}
first["name"]="cluster "+str(i)
first["children"]=data[elem]
clusterList.append(first)
i+=1
print json.dumps(clusterList, sort_keys=True, indent=4, separators=(',', ': '))
clusterStruct = {"name":"clusters", "children":clusterList}
with open("circle.json", "w") as f: #Pass the json file as input to circle-packing.html
f.write(json.dumps(clusterStruct, sort_keys=True, indent=4, separators=(',', ': ')))
|
{
"content_hash": "e275a273be6e756082b6d977d015a5d0",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 99,
"avg_line_length": 26.886363636363637,
"alnum_prop": 0.5841081994928149,
"repo_name": "RashmiNalwad/tika-similarity",
"id": "8e7978bde4ab24fedac95570d6f82dba058ccec0",
"size": "1996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edit-cosine-circle-packing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "41895"
},
{
"name": "Python",
"bytes": "24449"
}
],
"symlink_target": ""
}
|
import pickle
import numpy as np
# import pycuda.autoinit
from chainer import cuda, Function, FunctionSet, gradient_check, Variable, optimizers
import chainer.functions as F
from SuperClass import SuperClass
from dA import DenoisingAutoencoder
class StackedDenoisingAutoencoder(SuperClass):
def __init__(self, n_in, n_hiddens, n_epoch=20, batchsize=100, use_cuda=False):
super().__init__(n_epoch, batchsize, use_cuda)
self.SdA = []
self.n_nodes = (n_in,) + n_hiddens
self.num_layer = len(n_hiddens)
for i in range(self.num_layer):
dA = DenoisingAutoencoder(self.n_nodes[i], self.n_nodes[i+1], n_epoch, batchsize, use_cuda=use_cuda)
self.SdA.append(dA)
# self.registModel()
def predict(self, x_data, bAllLayer=False):
x_data = self.procInput(x_data)
x_eachlayer = []
for i in range(self.num_layer):
x_data = self.SdA[i].predict(x_data)
x_eachlayer.append(self.procOutput(x_data))
p = x_data
if bAllLayer:
return x_eachlayer
else:
return p
# def cost(self, x_data):
# return F.mean_squared_error(y, t)
def train(self, x_data):
for i_layer in range(self.num_layer):
self.SdA[i_layer].train(x_data)
x_data = self.SdA[i_layer].predict(x_data)
def save(self, filedir, n_hiddens, n_epoch, batchsize):
name = "SdA_"+ "layer"+str(n_hiddens) + "_epoch"+str(n_epoch)
params = []
for i in range(len(self.SdA)):
dic = {}
dic['W'] = self.SdA[i].model.encode.parameters[0]
dic['b'] = self.SdA[i].model.encode.parameters[1]
params.append(dic)
pickle.dump(params, open(filedir+'/'+name+'.pkl', 'wb'), pickle.HIGHEST_PROTOCOL)
return
def load(self, filename):
if filename.find('.pkl')==-1:
filename = filename + '.pkl'
params = pickle.load(open(filename, 'rb'))
for i in range(len(self.SdA)):
dic = params[i]
self.SdA[i].model.encode.parameters = (dic['W'], dic['b'])
return
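# Usage sketch (shapes, epoch counts and data below are illustrative, not
# taken from the repository):
#   sda = StackedDenoisingAutoencoder(n_in=784, n_hiddens=(500, 250))
#   sda.train(x_train)            # x_train: float32 numpy array, shape (N, 784)
#   codes = sda.predict(x_train)  # activations of the top hidden layer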
|
{
"content_hash": "a96348eeac476b7d2e453e2f555cad37",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 112,
"avg_line_length": 32.940298507462686,
"alnum_prop": 0.5690983235160851,
"repo_name": "TakuTsuzuki/Hackathon2015",
"id": "972dba7caf09a4bc3c7d1c9ac1281790e8b4adef",
"size": "2207",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Hiroshiba/NeuralNetwork/SdA.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36349"
},
{
"name": "HTML",
"bytes": "59"
},
{
"name": "JavaScript",
"bytes": "30754"
},
{
"name": "Jupyter Notebook",
"bytes": "39079"
},
{
"name": "PHP",
"bytes": "17783"
},
{
"name": "Python",
"bytes": "536916"
},
{
"name": "Shell",
"bytes": "220"
}
],
"symlink_target": ""
}
|
"""Look ahead mechanism in regular expressions.
Positive look-ahead uses the (?=...) syntax.
"""
import re
pattern = re.compile(r'(?=fox)')
result = pattern.search("The quick brown fox")
print result
"""
Match any word followed by a comma.
The example below is not the same as re.compile(r"\w+,")
For this will result in ['me,', 'myself,']
"""
patt = re.compile(r"\w+(?=,)")
res = patt.findall("Me, myself, and I")
print res
"""Use alternation (|) to match
any word followed by a comma or a dot.
"""
p = re.compile(r'\w+(?=,|\.)')
results = p.findall("Here they are: Tom, Dick, and Harry.")
print results
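"""A related sketch (added example): negative look-ahead (?!...) asserts
that a pattern does NOT follow. The \b is needed so the engine cannot
backtrack to a shorter prefix of the word.
"""
q = re.compile(r'\w+\b(?!,)')
res2 = q.findall("Me, myself, and I")
print res2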
|
{
"content_hash": "754ffe47cc12504ce9bd57165928e752",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 59,
"avg_line_length": 22.84,
"alnum_prop": 0.6532399299474606,
"repo_name": "andela-ggikera/regex",
"id": "023bd79e4e592c3e1e3751639efeee018d762a78",
"size": "571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "look_ahead.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5990"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.test import TestCase
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.es.tests.utils import es_test, populate_user_index
from corehq.apps.users.bulk_download import parse_web_users
from corehq.apps.users.models import Invitation, UserRole, WebUser
from corehq.pillows.mappings.user_mapping import USER_INDEX
from corehq.util.elastic import ensure_index_deleted
@es_test
class TestDownloadWebUsers(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.domain = 'old_shelf'
cls.domain_obj = create_domain(cls.domain)
cls.role = UserRole.create(domain=cls.domain, name='App Editor')
cls.qualified_role_id = cls.role.get_qualified_id()
cls.user1 = WebUser.create(
cls.domain_obj.name,
'edith@wharton.com',
'badpassword',
None,
None,
email='edith@wharton.com',
first_name='Edith',
last_name='Wharton',
role_id=cls.role.get_id,
)
cls.user2 = WebUser.create(
cls.domain_obj.name,
'george@eliot.com',
'anotherbadpassword',
None,
None,
email='george@eliot.com',
first_name='George',
last_name='Eliot',
is_admin=True,
)
cls.invited_user = Invitation.objects.create(
email='invited_to_domain@user.com',
domain=cls.domain_obj.name,
invited_by='tester@test.com',
invited_on=datetime.utcnow(),
role=cls.qualified_role_id,
)
cls.other_domain = 'new_shelf'
cls.other_domain_obj = create_domain(cls.other_domain)
cls.other_role = UserRole.create(domain=cls.domain, name='User Admin')
cls.user10 = WebUser.create(
cls.other_domain_obj.name,
'susan@choi.com',
'secret',
None,
None,
email='susan@choi.com',
first_name='Susan',
last_name='Choi',
role_id=cls.other_role.get_id,
)
cls.user11 = WebUser.create(
cls.other_domain_obj.name,
'zadie@smith.com',
'secret',
None,
None,
email='zadie@smith.com',
first_name='Zadie',
last_name='Smith',
role_id=cls.other_role.get_id,
)
cls.other_invited_user = Invitation.objects.create(
email='invited_to_other_domain@user.com',
domain=cls.other_domain_obj.name,
invited_by='tester@test.com',
invited_on=datetime.utcnow(),
role=cls.other_role.get_qualified_id()
)
populate_user_index([cls.user1, cls.user2, cls.user10, cls.user11])
@classmethod
def tearDownClass(cls):
ensure_index_deleted(USER_INDEX)
cls.user1.delete(cls.domain_obj.name, deleted_by=None)
cls.user2.delete(cls.domain_obj.name, deleted_by=None)
cls.user10.delete(cls.other_domain_obj.name, deleted_by=None)
cls.user11.delete(cls.other_domain_obj.name, deleted_by=None)
cls.invited_user.delete()
cls.other_invited_user.delete()
cls.domain_obj.delete()
cls.other_domain_obj.delete()
cls.role.delete()
cls.other_role.delete()
super().tearDownClass()
def test_download(self):
(headers, rows) = parse_web_users(self.domain_obj.name, {})
rows = list(rows)
self.assertEqual(3, len(rows))
spec = dict(zip(headers, rows[0]))
self.assertEqual('Edith', spec['first_name'])
self.assertEqual('Wharton', spec['last_name'])
self.assertEqual('edith@wharton.com', spec['username'])
self.assertEqual('edith@wharton.com', spec['email'])
self.assertEqual('App Editor', spec['role'])
self.assertEqual('Active User', spec['status'])
spec = dict(zip(headers, rows[1]))
self.assertEqual('Admin', spec['role'])
spec = dict(zip(headers, rows[2]))
self.assertEqual('invited_to_domain@user.com', spec['username'])
self.assertEqual('invited_to_domain@user.com', spec['email'])
self.assertEqual('App Editor', spec['role'])
self.assertEqual('Invited', spec['status'])
def test_search_string(self):
(headers, rows) = parse_web_users(self.domain_obj.name, {"search_string": "Edith"})
rows = list(rows)
self.assertEqual(1, len(rows))
spec = dict(zip(headers, rows[0]))
self.assertEqual('Edith', spec['first_name'])
def test_multi_domain_download(self):
(headers, rows) = parse_web_users(self.domain_obj.name, {"domains": [self.domain, self.other_domain]})
rows = list(rows)
self.assertEqual(6, len(rows))
rows = [dict(zip(headers, row)) for row in rows]
self.assertEqual({(r["username"], r["domain"]) for r in rows}, {
("edith@wharton.com", self.domain),
("george@eliot.com", self.domain),
("invited_to_domain@user.com", self.domain),
("zadie@smith.com", self.other_domain),
("susan@choi.com", self.other_domain),
("invited_to_other_domain@user.com", self.other_domain),
})
|
{
"content_hash": "9d0833c0b012af506358dd8269f96e3e",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 110,
"avg_line_length": 34.21656050955414,
"alnum_prop": 0.5819061801935964,
"repo_name": "dimagi/commcare-hq",
"id": "eefe76a2d29f265054c5830051fee578eabea4ee",
"size": "5372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/users/tests/test_web_user_download.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
"""
.. package:: patmat
:synopsis: Functional programming style pattern matching in Python.
"""
__version__ = '1.0.1'
__author__ = 'Xitong Gao'
__email__ = '@'.join(['gxtfmx', 'gmail.com'])
__license__ = 'MIT'
from patmat.mimic import (
Val, ZeroFsGiven, _, Type, Attr, Seq, List, Tuple, Dict, Pred, Or, Mimic,
)
from patmat.match import Match, Switch, case
|
{
"content_hash": "2dbc8d8463d76ff9f34c4977a5ab1c59",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 77,
"avg_line_length": 26.214285714285715,
"alnum_prop": 0.6348773841961853,
"repo_name": "pitcons/patmat",
"id": "f586a13c2271270c317595a14c825c2f456f8a8c",
"size": "367",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "patmat/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16793"
}
],
"symlink_target": ""
}
|
try:
    from setuptools import setup
except ImportError:
from distutils.core import setup
PACKAGE = 'django_exceptional_middleware'
VERSION = '1.2.1'
package_data = {
'exceptional_middleware': [ 'templates/http_responses/*.html' ],
}
setup(
name=PACKAGE, version=VERSION,
description="Django middleware to allow generating arbitrary HTTP status codes via exceptions.",
packages=[ 'exceptional_middleware' ],
package_data=package_data,
license='MIT',
author='James Aylett',
url = 'http://tartarus.org/james/computers/django/',
)
|
{
"content_hash": "2cf6a31451cee674660790e2d7f2dc7d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 100,
"avg_line_length": 27.476190476190474,
"alnum_prop": 0.7036395147313691,
"repo_name": "jaylett/django_exceptional_middleware",
"id": "a780acc76ced37b0682fbc0a9441dce4c40d4a3f",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11020"
}
],
"symlink_target": ""
}
|
import collections
import collections.abc
import os
import yaml
class Config(collections.UserDict):
"""
    Instantiate this object with a dictionary, or by parsing a YAML file with
    the from_yaml method. Once instantiated, use to_env to dump all the data
    into environment variables, or use key assignment to export specific values.
"""
def __setitem__(self, key, value):
"""
Update both the object and the environment variable.
"""
self.data[key] = value
self.to_env(data={key: value})
def __call__(self, value, default=''):
return os.environ.get(value, default)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, str(self.data))
@classmethod
def from_yaml(cls, path='.config.yml'):
"""
Parses YAML config file and creates a Config instance with resulting dictionary.
:param path: (str) YAML config path
:return: (Config)
"""
        try:
            with open(path, 'r') as fp:
                yaml_dict = yaml.safe_load(fp)
        except IOError:
            raise IOError('YAML config file not found: {}'.format(path))
        else:
            return cls(yaml_dict)
def to_env(self, data=None, parent_key=''):
"""
Exports config data as environment variables by iterating through dictionary data.
If value is a sequence, export it as whitespace separated string.
If value is a mapping, continue recursively.
If value is literal, cast it as str and export directly.
:param data:
:param parent_key:
:return:
"""
env = os.environ
if data is None:
data = self.data
for key, value in data.items():
            if isinstance(value, collections.abc.MutableMapping):
                self.to_env(data=value, parent_key=key)
            elif isinstance(value, collections.abc.MutableSequence):
                value = ' '.join(str(item) for item in value)
if parent_key:
env['{}_{}'.format(parent_key, key).upper()] = value
else:
env[key.upper()] = value
else:
if parent_key:
env['{}_{}'.format(parent_key, key).upper()] = str(value)
else:
env[key.upper()] = str(value)
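# Usage sketch (the YAML contents below are illustrative):
#   # .config.yml
#   #   db:
#   #     host: localhost
#   #     port: 5432
#   config = Config.from_yaml('.config.yml')
#   config.to_env()           # exports DB_HOST=localhost and DB_PORT=5432
#   config['DEBUG'] = 'true'  # __setitem__ also exports DEBUG=true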
|
{
"content_hash": "3bca6299a6da8eb65b8817a3f6dc569b",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 94,
"avg_line_length": 31.426666666666666,
"alnum_prop": 0.5570640644887569,
"repo_name": "bvujicic/yml-to-env",
"id": "3f8f73f4ac818221125d75817172596fbf9859e3",
"size": "2357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yml_config/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
"""
Code taken from SmartMeshSDK. The original code was modified.
Copyright (c) 2012, Dust Networks
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Dust Networks nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DUST NETWORKS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import struct
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('HrParser')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
HR_ID_DEVICE = 0x80
HR_ID_NEIGHBORS = 0x81
HR_ID_DISCOVERED = 0x82
HR_ID_EXTENDED = 0x91
HR_ID_ALL = [
HR_ID_DEVICE,
HR_ID_NEIGHBORS,
HR_ID_DISCOVERED,
HR_ID_EXTENDED,
]
HR_ID_EXTENDED_RSSI = 1
HR_DESC_DEVICE = [
('charge', 'I'),
('queueOcc', 'B'),
('temperature', 'b'),
('batteryVoltage', 'H'),
('numTxOk', 'H'),
('numTxFail', 'H'),
('numRxOk', 'H'),
('numRxLost', 'H'),
('numMacDropped', 'B'),
('numTxBad', 'B'),
('badLinkFrameId', 'B'),
('badLinkSlot', 'I'),
('badLinkOffset', 'B'),
('numNetMicErr', 'B'),
('numMacMicErr', 'B'),
('numMacCrcErr', 'B'),
]
HR_DESC_NEIGHBORS = [
('numItems', 'B'),
]
HR_DESC_NEIGHBOR_DATA = [
('neighborId', 'H'),
('neighborFlag', 'B'),
('rssi', 'b'),
('numTxPackets', 'H'),
('numTxFailures', 'H'),
('numRxPackets', 'H'),
]
HR_DESC_DISCOVERED = [
('numJoinParents', 'B'),
('numItems', 'B'),
]
HR_DESC_DISCOVERED_DATA = [
('neighborId', 'H'),
('rssi', 'b'),
('numRx', 'B'),
]
HR_DESC_EXTENDED = [
('extType', 'B'),
('extLength', 'B'),
]
HR_DESC_EXTENDED_RSSI_DATA = [
('idleRssi', 'b'),
('txUnicastAttempts', 'H'),
('txUnicastFailures', 'H'),
]
# ======================= public ==============================================
def parseHr(hr):
"""
Parse a byte list representing a received HR.
:returns: The parsed HR, of the following format:
{
'Device': {
<fieldName>: <fieldVal>,
...
}
'Neighbors': {
<fieldName>: <fieldVal>,
...,
'neighbors': [
{
<fieldName>: <fieldVal>,
...
}
]
}
'Discovered': {
<fieldName>: <fieldVal>,
...,
'discoveredNeighbors': [
{
<fieldName>: <fieldVal>,
...
}
]
}
}
"""
returnVal = {}
while hr:
if len(hr) < 2:
raise ValueError("Less than 2 bytes in HR")
hr_id = hr[0]
length = hr[1]
payload = hr[2:2+length]
# parse current HR
if hr_id == HR_ID_DEVICE:
returnVal['Device'] = _parseDevice(payload)
elif hr_id == HR_ID_NEIGHBORS:
returnVal['Neighbors'] = _parseNeighbors(payload)
elif hr_id == HR_ID_DISCOVERED:
returnVal['Discovered'] = _parseDiscovered(payload)
elif hr_id == HR_ID_EXTENDED:
returnVal['Extended'] = _parseExtended(payload)
else:
raise ValueError("unknown HR id {0}".format(hr_id))
# remove current HR
hr = hr[2+length:]
return returnVal
def formatHr(hr):
return _formatHr_recursive(hr, 0)
# ======================= private =============================================
def _formatHr_recursive(e, lvl):
output = []
indent = ' '*(4*lvl)
if type(e) in [str, int]:
output += [str(e)]
elif type(e) == dict:
for k in sorted(e.keys()):
if type(e[k]) in [dict, list]:
formatString = '{0}- {1}:\n{2}'
else:
formatString = '{0}- {1:<20}: {2}'
output += [formatString.format(indent, k, _formatHr_recursive(e[k], lvl+1))]
elif type(e) == list:
for idx, v in enumerate(e):
if type(v) in [dict, list]:
output += ['{0}-item {1}\n{2}'.format(
indent,
idx,
_formatHr_recursive(v, lvl+1)
)
]
else:
output += ['{0}- {1}'.format(
indent,
_formatHr_recursive(v, lvl+1)
)
]
else:
raise SystemError("unexpected type {0}".format(type(e)))
output = '\n'.join(output)
return output
def _parseDevice(payload):
(remainder, fields) = _parseAs(
desc = HR_DESC_DEVICE,
payload = payload,
)
assert not remainder
return fields
def _parseNeighbors(payload):
# parse the header
(payload, fields) = _parseAs(
desc = HR_DESC_NEIGHBORS,
payload = payload,
)
# parse the neighbors
fields['neighbors'] = []
for _ in range(fields['numItems']):
(payload, newItem) = _parseAs(
desc = HR_DESC_NEIGHBOR_DATA,
payload = payload,
)
fields['neighbors'] += [newItem]
return fields
def _parseDiscovered(payload):
# parse the header
(payload, fields) = _parseAs(
desc = HR_DESC_DISCOVERED,
payload = payload,
)
# parse the discoveredNeighbors
fields['discoveredNeighbors'] = []
for _ in range(fields['numItems']):
(payload, newItem) = _parseAs(
desc = HR_DESC_DISCOVERED_DATA,
payload = payload,
)
fields['discoveredNeighbors'] += [newItem]
return fields
def _parseExtended(payload):
# parse the header
(payload, fields) = _parseAs(
desc = HR_DESC_EXTENDED,
payload = payload,
)
if fields['extLength'] != len(payload):
raise ValueError("extLength={0} while len(extended HR payload)={1}"
.format(fields['extLength'], len(payload)))
returnVal = {}
if fields['extType'] == HR_ID_EXTENDED_RSSI:
returnVal['RSSI'] = _parseExtendedRSSI(payload)
else:
raise ValueError("unknown extended HR extType {0}".format(fields['extType']))
return returnVal
def _parseExtendedRSSI(payload):
if len(payload) != 75:
raise ValueError("RSSI HR should be of length 75, not {0}".format(len(payload)))
returnVal = []
while payload:
(payload, fields) = _parseAs(
desc = HR_DESC_EXTENDED_RSSI_DATA,
payload = payload,
)
returnVal += [fields]
return returnVal
# ======================= helpers =============================================
def _parseAs(desc, payload):
returnVal = {}
# assemble the format string
fmt = '>'
numFields = 0
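    # Grow the format string one field at a time until it would consume the
    # whole payload, or until every field of the descriptor has been added.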
while True:
fmt += desc[numFields][1]
numBytes = struct.calcsize(fmt)
if numBytes == len(payload):
break
numFields += 1
if len(desc) == numFields:
break
# verify enough bytes
if len(payload) < numBytes:
raise ValueError("not enough bytes for HR")
# separate string to parse from remainder
hrstring = ''.join([chr(b) for b in payload[:numBytes]])
remainder = payload[numBytes:]
# apply the format string
fields = struct.unpack(fmt, hrstring)
for (d, v) in zip(desc, fields):
returnVal[d[0]] = v
return remainder, returnVal
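# Usage sketch (the bytes below are illustrative, not a captured health report):
#   hr = [HR_ID_DISCOVERED, 6,   # record id and payload length
#         1, 1,                  # numJoinParents, numItems
#         0x00, 0x22, 0xd8, 3]   # neighborId=0x0022, rssi=-40, numRx=3
#   print formatHr(parseHr(hr))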
|
{
"content_hash": "9112e5ef208eeb7948d6ffc28854ea41",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 88,
"avg_line_length": 29.452229299363058,
"alnum_prop": 0.5140570934256056,
"repo_name": "realms-team/sol",
"id": "9227973374a26dc4433ec12517a714a16e0efbb8",
"size": "9266",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sensorobjectlibrary/hr_parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3408"
},
{
"name": "Makefile",
"bytes": "439"
},
{
"name": "Python",
"bytes": "202675"
}
],
"symlink_target": ""
}
|
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
**Related Flags**
:instances_path: Where instances are kept on disk
:base_dir_name: Where cached images are stored under instances_path
:compute_driver: Name of class that is used to handle virtualization, loaded
by :func:`nova.openstack.common.importutils.import_object`
"""
import contextlib
import functools
import socket
import sys
import time
import traceback
from eventlet import greenthread
from nova import block_device
from nova import compute
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import consoleauth
import nova.context
from nova import exception
from nova import flags
from nova.image import glance
from nova import manager
from nova import network
from nova.network import model as network_model
from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import driver
from nova import volume
compute_opts = [
cfg.StrOpt('instances_path',
default='$state_path/instances',
help='where instances are stored on disk'),
cfg.StrOpt('base_dir_name',
default='_base',
help="Where cached images are stored under $instances_path."
"This is NOT the full path - just a folder name."
"For per-compute-host cached images, set to _base_$my_ip"),
cfg.StrOpt('console_host',
default=socket.gethostname(),
help='Console proxy host to use to connect '
'to instances on this host.'),
cfg.IntOpt('live_migration_retry_count',
default=30,
help="Number of 1 second retries needed in live_migration"),
cfg.IntOpt("reboot_timeout",
default=0,
help="Automatically hard reboot an instance if it has been "
"stuck in a rebooting state longer than N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("instance_build_timeout",
default=0,
help="Amount of time in seconds an instance can be in BUILD "
"before going into ERROR status."
"Set to 0 to disable."),
cfg.IntOpt("rescue_timeout",
default=0,
help="Automatically unrescue an instance after N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("resize_confirm_window",
default=0,
help="Automatically confirm resizes after N seconds. "
"Set to 0 to disable."),
cfg.IntOpt('host_state_interval',
default=120,
help='Interval in seconds for querying the host status'),
cfg.IntOpt("running_deleted_instance_timeout",
default=0,
help="Number of seconds after being deleted when a running "
"instance should be considered eligible for cleanup."),
cfg.IntOpt("running_deleted_instance_poll_interval",
default=30,
help="Number of periodic scheduler ticks to wait between "
"runs of the cleanup task."),
cfg.StrOpt("running_deleted_instance_action",
default="log",
help="Action to take if a running deleted instance is detected."
"Valid options are 'noop', 'log' and 'reap'. "
"Set to 'noop' to disable."),
cfg.IntOpt("image_cache_manager_interval",
default=0,
help="Number of periodic scheduler ticks to wait between "
"runs of the image cache manager."),
cfg.IntOpt("heal_instance_info_cache_interval",
default=60,
help="Number of seconds between instance info_cache self "
"healing updates"),
cfg.BoolOpt('instance_usage_audit',
default=False,
help="Generate periodic compute.instance.exists notifications"),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(compute_opts)
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
def publisher_id(host=None):
return notifier.publisher_id("compute", host)
def reverts_task_state(function):
"""Decorator to revert task_state on failure"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.UnexpectedTaskStateError:
LOG.exception(_("Possibly task preempted."))
# Note(maoy): unexpected task state means the current
# task is preempted. Do not clear task state in this
# case.
raise
except Exception:
with excutils.save_and_reraise_exception():
try:
self._instance_update(context,
kwargs['instance']['uuid'],
task_state=None)
except Exception:
pass
return decorated_function
def wrap_instance_fault(function):
"""Wraps a method to catch exceptions related to instances.
This decorator wraps a method to catch any exceptions having to do with
an instance that may get thrown. It then logs an instance fault in the db.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.InstanceNotFound:
raise
except Exception, e:
with excutils.save_and_reraise_exception():
compute_utils.add_instance_fault_from_exc(context,
kwargs['instance']['uuid'], e, sys.exc_info())
return decorated_function
def _get_image_meta(context, image_ref):
image_service, image_id = glance.get_remote_image_service(context,
image_ref)
return image_service.show(context, image_id)
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
RPC_API_VERSION = '2.2'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
# TODO(vish): sync driver creation logic with the rest of the system
# and re-document the module docstring
if not compute_driver:
compute_driver = FLAGS.compute_driver
LOG.info(_("Loading compute driver '%s'") % compute_driver)
try:
self.driver = utils.check_isinstance(
importutils.import_object_ns('nova.virt', compute_driver),
driver.ComputeDriver)
except ImportError as e:
LOG.error(_("Unable to load the virtualization driver: %s") % (e))
sys.exit(1)
self.network_api = network.API()
self.volume_api = volume.API()
self.network_manager = importutils.import_object(
FLAGS.network_manager, host=kwargs.get('host', None))
self._last_host_check = 0
self._last_bw_usage_poll = 0
self._last_info_cache_heal = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI()
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
self.resource_tracker = resource_tracker.ResourceTracker(self.host,
self.driver)
def _instance_update(self, context, instance_uuid, **kwargs):
"""Update an instance in the database using kwargs as value."""
(old_ref, instance_ref) = self.db.instance_update_and_get_original(
context, instance_uuid, kwargs)
self.resource_tracker.update_usage(context, instance_ref)
notifications.send_update(context, old_ref, instance_ref)
return instance_ref
def _set_instance_error_state(self, context, instance_uuid):
try:
self._instance_update(context, instance_uuid,
vm_state=vm_states.ERROR)
except exception.InstanceNotFound:
LOG.debug(_('Instance has been destroyed from under us while '
'trying to set it to ERROR'),
instance_uuid=instance_uuid)
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
instances = self.db.instance_get_all_by_host(context, self.host)
if FLAGS.defer_iptables_apply:
self.driver.filter_defer_apply_on()
try:
for instance in instances:
db_state = instance['power_state']
drv_state = self._get_power_state(context, instance)
closing_vm_states = (vm_states.DELETED,
vm_states.SOFT_DELETED)
# instance was supposed to shut down - don't attempt
# recovery in any case
if instance['vm_state'] in closing_vm_states:
continue
expect_running = (db_state == power_state.RUNNING and
drv_state != db_state)
LOG.debug(_('Current state is %(drv_state)s, state in DB is '
'%(db_state)s.'), locals(), instance=instance)
net_info = compute_utils.get_nw_info_for_instance(instance)
# We're calling plug_vifs to ensure bridge and iptables
# rules exist. This needs to be called for each instance.
legacy_net_info = self._legacy_nw_info(net_info)
self.driver.plug_vifs(instance, legacy_net_info)
if ((expect_running and FLAGS.resume_guests_state_on_host_boot)
or FLAGS.start_guests_on_host_boot):
LOG.info(
_('Rebooting instance after nova-compute restart.'),
locals(), instance=instance)
block_device_info = \
self._get_instance_volume_block_device_info(
context, instance['uuid'])
try:
self.driver.resume_state_on_host_boot(
context,
instance,
self._legacy_nw_info(net_info),
block_device_info)
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'resume guests'), instance=instance)
except Exception:
# NOTE(vish): The instance failed to resume, so we
# set the instance to error and attempt
# to continue.
LOG.warning(_('Failed to resume instance'),
instance=instance)
self._set_instance_error_state(context,
instance['uuid'])
elif drv_state == power_state.RUNNING:
# VMWareAPI drivers will raise an exception
try:
self.driver.ensure_filtering_rules_for_instance(
instance,
self._legacy_nw_info(net_info))
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'firewall rules'), instance=instance)
finally:
if FLAGS.defer_iptables_apply:
self.driver.filter_defer_apply_off()
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug(_('Checking state'), instance=instance)
try:
return self.driver.get_info(instance)["state"]
except exception.NotFound:
return power_state.NOSTATE
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
#TODO(mdragon): perhaps make this variable by console_type?
return rpc.queue_get_for(context,
FLAGS.console_topic,
FLAGS.console_host)
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_security_group_rules(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group rules.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_rules(security_group_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_security_group_members(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group members.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_members(security_group_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_instance_security_rules(self, context, instance):
"""Tell the virtualization driver to refresh security rules for
an instance.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_instance_security_rules(instance)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_provider_fw_rules(self, context):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance."""
# get the network info from network
network_info = self.network_api.get_instance_nw_info(context,
instance)
return network_info
def _legacy_nw_info(self, network_info):
"""Converts the model nw_info object to legacy style"""
if self.driver.legacy_nwinfo():
network_info = network_info.legacy()
return network_info
def _setup_block_device_mapping(self, context, instance):
"""setup volumes for block device mapping"""
block_device_mapping = []
swap = None
ephemerals = []
for bdm in self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid']):
LOG.debug(_('Setting up bdm %s'), bdm, instance=instance)
if bdm['no_device']:
continue
if bdm['virtual_name']:
virtual_name = bdm['virtual_name']
device_name = bdm['device_name']
assert block_device.is_swap_or_ephemeral(virtual_name)
if virtual_name == 'swap':
swap = {'device_name': device_name,
'swap_size': bdm['volume_size']}
elif block_device.is_ephemeral(virtual_name):
eph = {'num': block_device.ephemeral_num(virtual_name),
'virtual_name': virtual_name,
'device_name': device_name,
'size': bdm['volume_size']}
ephemerals.append(eph)
continue
if ((bdm['snapshot_id'] is not None) and
(bdm['volume_id'] is None)):
# TODO(yamahata): default name and description
snapshot = self.volume_api.get_snapshot(context,
bdm['snapshot_id'])
vol = self.volume_api.create(context, bdm['volume_size'],
'', '', snapshot)
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
# TODO(yamahata): eliminate dumb polling
while True:
volume = self.volume_api.get(context, vol['id'])
if volume['status'] != 'creating':
break
greenthread.sleep(1)
self.db.block_device_mapping_update(
context, bdm['id'], {'volume_id': vol['id']})
bdm['volume_id'] = vol['id']
if bdm['volume_id'] is not None:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.check_attach(context, volume)
cinfo = self._attach_volume_boot(context,
instance,
volume,
bdm['device_name'])
self.db.block_device_mapping_update(
context, bdm['id'],
{'connection_info': jsonutils.dumps(cinfo)})
bdmap = {'connection_info': cinfo,
'mount_device': bdm['device_name'],
'delete_on_termination': bdm['delete_on_termination']}
block_device_mapping.append(bdmap)
return {
'root_device_name': instance['root_device_name'],
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping
}
def _run_instance(self, context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, instance):
"""Launch a new instance with specified options."""
context = context.elevated()
try:
self._check_instance_not_already_created(context, instance)
image_meta = self._check_image_size(context, instance)
extra_usage_info = {"image_name": image_meta['name']}
self._start_building(context, instance)
self._notify_about_instance_usage(
context, instance, "create.start",
extra_usage_info=extra_usage_info)
network_info = None
try:
limits = filter_properties.get('limits', {})
with self.resource_tracker.resource_claim(context, instance,
limits):
network_info = self._allocate_network(context, instance,
requested_networks)
block_device_info = self._prep_block_device(context,
instance)
instance = self._spawn(context, instance, image_meta,
network_info, block_device_info,
injected_files, admin_password)
except exception.InstanceNotFound:
# the instance got deleted during the spawn
try:
self._deallocate_network(context, instance)
except Exception:
msg = _('Failed to dealloc network for deleted instance')
LOG.exception(msg, instance=instance)
raise
except Exception:
# try to re-schedule instance:
self._reschedule_or_reraise(context, instance,
requested_networks, admin_password, injected_files,
is_first_time, request_spec, filter_properties)
else:
# Spawn success:
if (is_first_time and not instance['access_ip_v4']
and not instance['access_ip_v6']):
self._update_access_ip(context, instance, network_info)
self._notify_about_instance_usage(context, instance,
"create.end", network_info=network_info,
extra_usage_info=extra_usage_info)
except Exception:
with excutils.save_and_reraise_exception():
self._set_instance_error_state(context, instance['uuid'])
def _reschedule_or_reraise(self, context, instance, requested_networks,
admin_password, injected_files, is_first_time,
request_spec, filter_properties):
"""Try to re-schedule the build or re-raise the original build error to
error out the instance.
"""
type_, value, tb = sys.exc_info() # save original exception
rescheduled = False
instance_uuid = instance['uuid']
def _log_original_error():
LOG.error(_('Build error: %s') %
traceback.format_exception(type_, value, tb),
instance_uuid=instance_uuid)
try:
self._deallocate_network(context, instance)
except Exception:
# do not attempt retry if network de-allocation failed:
_log_original_error()
raise
try:
rescheduled = self._reschedule(context, instance_uuid,
requested_networks, admin_password, injected_files,
is_first_time, request_spec, filter_properties)
except Exception:
rescheduled = False
LOG.exception(_("Error trying to reschedule"),
instance_uuid=instance_uuid)
if rescheduled:
# log the original build error
_log_original_error()
else:
# not re-scheduling
raise type_, value, tb
def _reschedule(self, context, instance_uuid, requested_networks,
admin_password, injected_files, is_first_time, request_spec,
filter_properties):
retry = filter_properties.get('retry', None)
if not retry:
# no retry information, do not reschedule.
LOG.debug(_("Retry info not present, will not reschedule"),
instance_uuid=instance_uuid)
return
if not request_spec:
LOG.debug(_("No request spec, will not reschedule"),
instance_uuid=instance_uuid)
return
request_spec['instance_uuids'] = [instance_uuid]
LOG.debug(_("Re-scheduling instance: attempt %d"),
retry['num_attempts'], instance_uuid=instance_uuid)
# reset the task state:
self._instance_update(context, instance_uuid,
task_state=task_states.SCHEDULING)
self.scheduler_rpcapi.run_instance(context,
request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties)
return True
@manager.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
timeout = FLAGS.instance_build_timeout
if timeout == 0:
return
filters = {'vm_state': vm_states.BUILDING}
building_insts = self.db.instance_get_all_by_filters(context, filters)
for instance in building_insts:
if timeutils.is_older_than(instance['created_at'], timeout):
self._set_instance_error_state(context, instance['uuid'])
LOG.warn(_("Instance build timed out. Set to error state."),
instance=instance)
def _update_access_ip(self, context, instance, nw_info):
"""Update the access ip values for a given instance.
If FLAGS.default_access_ip_network_name is set, this method will
grab the corresponding network and set the access ip values
accordingly. Note that when there are multiple ips to choose from,
an arbitrary one will be chosen.
"""
network_name = FLAGS.default_access_ip_network_name
if not network_name:
return
update_info = {}
for vif in nw_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if ip['version'] == 4:
update_info['access_ip_v4'] = ip['address']
if ip['version'] == 6:
update_info['access_ip_v6'] = ip['address']
if update_info:
self.db.instance_update(context, instance.uuid, update_info)
notifications.send_update(context, instance, instance)
def _check_instance_not_already_created(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance['name']):
_msg = _("Instance has already been created")
raise exception.Invalid(_msg)
def _check_image_size(self, context, instance):
"""Ensure image is smaller than the maximum size allowed by the
instance_type.
The image stored in Glance is potentially compressed, so we use two
checks to ensure that the size isn't exceeded:
1) This one - checks compressed size, this a quick check to
eliminate any images which are obviously too large
2) Check uncompressed size in nova.virt.xenapi.vm_utils. This
is a slower check since it requires uncompressing the entire
image, but is accurate because it reflects the image's
actual size.
"""
image_meta = _get_image_meta(context, instance['image_ref'])
try:
size_bytes = image_meta['size']
except KeyError:
# Size is not a required field in the image service (yet), so
# we are unable to rely on it being there even though it's in
# glance.
# TODO(jk0): Should size be required in the image service?
return image_meta
instance_type_id = instance['instance_type_id']
instance_type = instance_types.get_instance_type(instance_type_id)
allowed_size_gb = instance_type['root_gb']
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
if not allowed_size_gb:
return image_meta
allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
image_id = image_meta['id']
LOG.debug(_("image_id=%(image_id)s, image_size_bytes="
"%(size_bytes)d, allowed_size_bytes="
"%(allowed_size_bytes)d") % locals(),
instance=instance)
if size_bytes > allowed_size_bytes:
LOG.info(_("Image '%(image_id)s' size %(size_bytes)d exceeded"
" instance_type allowed size "
"%(allowed_size_bytes)d")
% locals(), instance=instance)
raise exception.ImageTooLarge()
return image_meta
def _start_building(self, context, instance):
"""Save the host and launched_on fields and log appropriately."""
LOG.audit(_('Starting instance...'), context=context,
instance=instance)
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=None,
expected_task_state=(task_states.SCHEDULING,
None))
def _allocate_network(self, context, instance, requested_networks):
"""Allocate networks for an instance and return the network info"""
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.NETWORKING,
expected_task_state=None)
is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
try:
# allocate and get network info
network_info = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks)
except Exception:
LOG.exception(_('Instance failed network setup'),
instance=instance)
raise
LOG.debug(_('Instance network_info: |%s|'), network_info,
instance=instance)
return network_info
def _prep_block_device(self, context, instance):
"""Set up the block device for an instance with error logging"""
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
return self._setup_block_device_mapping(context, instance)
except Exception:
LOG.exception(_('Instance failed block device setup'),
instance=instance)
raise
def _spawn(self, context, instance, image_meta, network_info,
block_device_info, injected_files, admin_password):
"""Spawn an instance with error logging and update its power state"""
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.SPAWNING,
expected_task_state=task_states.
BLOCK_DEVICE_MAPPING)
try:
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
self._legacy_nw_info(network_info),
block_device_info)
except Exception:
LOG.exception(_('Instance failed to spawn'), instance=instance)
raise
current_power_state = self._get_power_state(context, instance)
return self._instance_update(context, instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=task_states.SPAWNING,
launched_at=timeutils.utcnow())
def _notify_about_instance_usage(self, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None):
# NOTE(sirp): The only thing this wrapper function does extra is handle
# the passing in of `self.host`. Ordinarily this will just be
# `FLAGS.host`, but `Manager`'s gets a chance to override this in its
# `__init__`.
compute_utils.notify_about_instance_usage(
context, instance, event_suffix, network_info=network_info,
system_metadata=system_metadata,
extra_usage_info=extra_usage_info, host=self.host)
def _deallocate_network(self, context, instance):
LOG.debug(_('Deallocating network for instance'), instance=instance)
self.network_api.deallocate_for_instance(context, instance)
def _get_instance_volume_bdms(self, context, instance_uuid):
bdms = self.db.block_device_mapping_get_all_by_instance(context,
instance_uuid)
return [bdm for bdm in bdms if bdm['volume_id']]
def _get_instance_volume_bdm(self, context, instance_uuid, volume_id):
bdms = self._get_instance_volume_bdms(context, instance_uuid)
for bdm in bdms:
# NOTE(vish): Comparing as strings because the os_api doesn't
# convert to integer and we may wish to support uuids
# in the future.
if str(bdm['volume_id']) == str(volume_id):
return bdm
def _get_instance_volume_block_device_info(self, context, instance_uuid,
bdms=None):
if bdms is None:
bdms = self._get_instance_volume_bdms(context, instance_uuid)
block_device_mapping = []
for bdm in bdms:
try:
cinfo = jsonutils.loads(bdm['connection_info'])
if cinfo and 'serial' not in cinfo:
cinfo['serial'] = bdm['volume_id']
bdmap = {'connection_info': cinfo,
'mount_device': bdm['device_name'],
'delete_on_termination': bdm['delete_on_termination']}
block_device_mapping.append(bdmap)
except TypeError:
# if the block_device_mapping has no value in connection_info
# (returned as None), don't include in the mapping
pass
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
return {'block_device_mapping': block_device_mapping}
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def run_instance(self, context, instance, request_spec=None,
filter_properties=None, requested_networks=None,
injected_files=None, admin_password=None,
is_first_time=False):
if filter_properties is None:
filter_properties = {}
if injected_files is None:
injected_files = []
@utils.synchronized(instance['uuid'])
def do_run_instance():
self._run_instance(context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, instance)
do_run_instance()
def _shutdown_instance(self, context, instance):
"""Shutdown an instance on this host."""
context = context.elevated()
LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'},
context=context, instance=instance)
self._notify_about_instance_usage(context, instance, "shutdown.start")
# get network info before tearing down
try:
network_info = self._get_instance_nw_info(context, instance)
except exception.NetworkNotFound:
network_info = network_model.NetworkInfo()
# tear down allocated network structure
self._deallocate_network(context, instance)
# NOTE(vish) get bdms before destroying the instance
bdms = self._get_instance_volume_bdms(context, instance['uuid'])
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'], bdms=bdms)
self.driver.destroy(instance, self._legacy_nw_info(network_info),
block_device_info)
for bdm in bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
# just tell nova-volume that we are done with it.
volume = self.volume_api.get(context, bdm['volume_id'])
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
volume,
connector)
self.volume_api.detach(context, volume)
except exception.DiskNotFound as exc:
LOG.warn(_('Ignoring DiskNotFound: %s') % exc,
instance=instance)
except exception.VolumeNotFound as exc:
LOG.warn(_('Ignoring VolumeNotFound: %s') % exc,
instance=instance)
self._notify_about_instance_usage(context, instance, "shutdown.end")
def _cleanup_volumes(self, context, instance_uuid):
bdms = self.db.block_device_mapping_get_all_by_instance(context,
instance_uuid)
for bdm in bdms:
LOG.debug(_("terminating bdm %s") % bdm,
instance_uuid=instance_uuid)
if bdm['volume_id'] and bdm['delete_on_termination']:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.delete(context, volume)
# NOTE(vish): bdms will be deleted on instance destroy
def _delete_instance(self, context, instance):
"""Delete an instance on this host."""
instance_uuid = instance['uuid']
self.db.instance_info_cache_delete(context, instance_uuid)
self._notify_about_instance_usage(context, instance, "delete.start")
self._shutdown_instance(context, instance)
# NOTE(vish): We have already deleted the instance, so we have
# to ignore problems cleaning up the volumes. It would
# be nice to let the user know somehow that the volume
# deletion failed, but it is not acceptable to have an
# instance that can not be deleted. Perhaps this could
# be reworked in the future to set an instance fault
# the first time and to only ignore the failure if the
# instance is already in ERROR.
try:
self._cleanup_volumes(context, instance_uuid)
except Exception as exc:
LOG.warn(_("Ignoring volume cleanup failure due to %s") % exc,
instance_uuid=instance_uuid)
# if a delete task succeed, always update vm state and task state
# without expecting task state to be DELETING
instance = self._instance_update(context,
instance_uuid,
vm_state=vm_states.DELETED,
task_state=None,
terminated_at=timeutils.utcnow())
self.db.instance_destroy(context, instance_uuid)
system_meta = self.db.instance_system_metadata_get(context,
instance_uuid)
# ensure block device mappings are not leaked
for bdm in self._get_instance_volume_bdms(context, instance_uuid):
self.db.block_device_mapping_destroy(context, bdm['id'])
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
if FLAGS.vnc_enabled:
self.consoleauth_rpcapi.delete_tokens_for_instance(context,
instance["uuid"])
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def terminate_instance(self, context, instance):
"""Terminate an instance on this host. """
# Note(eglynn): we do not decorate this action with reverts_task_state
# because a failure during termination should leave the task state as
# DELETING, as a signal to the API layer that a subsequent deletion
# attempt should not result in a further decrement of the quota_usages
# in_use count (see bug 1046236).
elevated = context.elevated()
@utils.synchronized(instance['uuid'])
def do_terminate_instance(instance):
try:
self._delete_instance(context, instance)
except exception.InstanceTerminationFailure as error:
msg = _('%s. Setting instance vm_state to ERROR')
LOG.error(msg % error, instance=instance)
self._set_instance_error_state(context, instance['uuid'])
except exception.InstanceNotFound as e:
LOG.warn(e, instance=instance)
do_terminate_instance(instance)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def stop_instance(self, context, instance):
"""Stopping an instance on this host.
Alias for power_off_instance for compatibility"""
self.power_off_instance(context, instance,
final_state=vm_states.STOPPED)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def start_instance(self, context, instance):
"""Starting an instance on this host.
Alias for power_on_instance for compatibility"""
self.power_on_instance(context, instance)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def power_off_instance(self, context, instance,
final_state=vm_states.SOFT_DELETED):
"""Power off an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_off.start")
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=final_state,
expected_task_state=(task_states.POWERING_OFF,
task_states.STOPPING),
task_state=None)
self._notify_about_instance_usage(context, instance, "power_off.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def power_on_instance(self, context, instance):
"""Power on an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_on.start")
self.driver.power_on(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=(task_states.POWERING_ON,
task_states.STARTING))
self._notify_about_instance_usage(context, instance, "power_on.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata=None):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
remakes the VM with given 'metadata' and 'personalities'.
:param context: `nova.RequestContext` object
:param instance: Instance dict
:param orig_image_ref: Original image_ref before rebuild
:param image_ref: New image_ref for rebuild
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
"""
context = context.elevated()
with self._error_out_instance_on_exception(context, instance['uuid']):
LOG.audit(_("Rebuilding instance"), context=context,
instance=instance)
image_meta = _get_image_meta(context, image_ref)
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
orig_image_ref_url = utils.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
compute_utils.notify_usage_exists(context, instance,
current_period=True, system_metadata=orig_sys_metadata,
extra_usage_info=extra_usage_info)
# This message should contain the new image_ref
extra_usage_info = {'image_name': image_meta['name']}
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
task_state=task_states.REBUILDING,
expected_task_state=task_states.REBUILDING)
network_info = self._get_instance_nw_info(context, instance)
self.driver.destroy(instance, self._legacy_nw_info(network_info))
instance = self._instance_update(context,
instance['uuid'],
task_state=task_states.
REBUILD_BLOCK_DEVICE_MAPPING,
expected_task_state=task_states.REBUILDING)
instance.injected_files = injected_files
network_info = self.network_api.get_instance_nw_info(context,
instance)
device_info = self._setup_block_device_mapping(context, instance)
instance = self._instance_update(context,
instance['uuid'],
task_state=task_states.
REBUILD_SPAWNING,
expected_task_state=task_states.
REBUILD_BLOCK_DEVICE_MAPPING)
# pull in new password here since the original password isn't in
# the db
admin_password = new_pass
self.driver.spawn(context, instance, image_meta,
[], admin_password,
self._legacy_nw_info(network_info),
device_info)
current_power_state = self._get_power_state(context, instance)
instance = self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=task_states.
REBUILD_SPAWNING,
launched_at=timeutils.utcnow())
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
extra_usage_info=extra_usage_info)
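    # Sketch of the flow above (added for clarity): rebuild walks the
    # task_state machine in guarded steps,
    #   REBUILDING -> REBUILD_BLOCK_DEVICE_MAPPING -> REBUILD_SPAWNING -> None
    # with each _instance_update() naming the previous state as
    # expected_task_state, so a concurrent state change (e.g. a delete)
    # aborts the rebuild instead of being silently overwritten.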
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def reboot_instance(self, context, instance, reboot_type="SOFT"):
"""Reboot an instance on this host."""
context = context.elevated()
LOG.audit(_("Rebooting instance"), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
current_power_state = self._get_power_state(context, instance)
self._instance_update(context, instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE)
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to reboot a non-running '
'instance: (state: %(state)s '
'expected: %(running)s)') % locals(),
context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'])
try:
self.driver.reboot(instance, self._legacy_nw_info(network_info),
reboot_type, block_device_info)
except Exception, exc:
LOG.error(_('Cannot reboot instance: %(exc)s'), locals(),
context=context, instance=instance)
compute_utils.add_instance_fault_from_exc(context,
instance['uuid'], exc, sys.exc_info())
# Fall through and reset task_state to None
current_power_state = self._get_power_state(context, instance)
self._instance_update(context, instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
self._notify_about_instance_usage(context, instance, "reboot.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def snapshot_instance(self, context, image_id, instance,
image_type='snapshot', backup_type=None,
rotation=None):
"""Snapshot an instance on this host.
:param context: security context
:param instance: an Instance dict
:param image_id: glance.db.sqlalchemy.models.Image.Id
:param image_type: snapshot | backup
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state)
LOG.audit(_('instance snapshotting'), context=context,
instance=instance)
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to snapshot a non-running '
'instance: (state: %(state)s '
'expected: %(running)s)') % locals(),
instance=instance)
self._notify_about_instance_usage(
context, instance, "snapshot.start")
self.driver.snapshot(context, instance, image_id)
if image_type == 'snapshot':
expected_task_state = task_states.IMAGE_SNAPSHOT
elif image_type == 'backup':
expected_task_state = task_states.IMAGE_BACKUP
self._instance_update(context, instance['uuid'], task_state=None,
expected_task_state=expected_task_state)
if image_type == 'snapshot' and rotation:
raise exception.ImageRotationNotAllowed()
elif image_type == 'backup' and rotation >= 0:
self._rotate_backups(context, instance, backup_type, rotation)
elif image_type == 'backup':
raise exception.RotationRequiredForBackup()
self._notify_about_instance_usage(
context, instance, "snapshot.end")
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.
:param context: security context
:param instance: Instance dict
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
# NOTE(jk0): Eventually extract this out to the ImageService?
def fetch_images():
images = []
marker = None
while True:
if marker is not None:
batch = image_service.detail(context, filters=filters,
marker=marker, sort_key='created_at',
sort_dir='desc')
else:
batch = image_service.detail(context, filters=filters,
sort_key='created_at', sort_dir='desc')
if not batch:
break
images += batch
marker = batch[-1]['id']
return images
image_service = glance.get_default_image_service()
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance['uuid']}
images = fetch_images()
num_images = len(images)
LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)")
% locals(), instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug(_("Rotating out %d backups") % excess,
instance=instance)
for i in xrange(excess):
image = images.pop()
image_id = image['id']
LOG.debug(_("Deleting image %s") % image_id,
instance=instance)
image_service.delete(context, image_id)
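    # Worked example (added for clarity): with rotation=2 and five backups
    # returned newest-first by fetch_images(), excess = 5 - 2 = 3, so the
    # three entries at the tail of the list (the oldest backups) are
    # popped and deleted, leaving the two most recent.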
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def set_admin_password(self, context, instance, new_pass=None):
"""Set the root/admin password for an instance on this host.
This is generally only called by API password resets after an
image has been built.
"""
context = context.elevated()
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password(FLAGS.password_length)
max_tries = 10
for i in xrange(max_tries):
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
self._instance_update(context, instance['uuid'],
task_state=None,
expected_task_state=task_states.
UPDATING_PASSWORD)
_msg = _('Failed to set admin password. Instance %s is not'
' running') % instance["uuid"]
raise exception.InstancePasswordSetFailed(
instance=instance['uuid'], reason=_msg)
else:
try:
self.driver.set_admin_password(instance, new_pass)
LOG.audit(_("Root password set"), instance=instance)
self._instance_update(context,
instance['uuid'],
task_state=None,
expected_task_state=task_states.
UPDATING_PASSWORD)
break
except NotImplementedError:
# NOTE(dprince): if the driver doesn't implement
# set_admin_password we break to avoid a loop
_msg = _('set_admin_password is not implemented '
'by this driver.')
LOG.warn(_msg, instance=instance)
self._instance_update(context,
instance['uuid'],
task_state=None,
expected_task_state=task_states.
UPDATING_PASSWORD)
raise exception.InstancePasswordSetFailed(
instance=instance['uuid'], reason=_msg)
except exception.UnexpectedTaskStateError:
# interrupted by another (most likely delete) task
# do not retry
raise
except Exception, e:
# Catch all here because this could be anything.
LOG.exception(_('set_admin_password failed: %s') % e,
instance=instance)
if i == max_tries - 1:
self._set_instance_error_state(context,
instance['uuid'])
# We create a new exception here so that we won't
# potentially reveal password information to the
# API caller. The real exception is logged above
_msg = _('error setting admin password')
raise exception.InstancePasswordSetFailed(
instance=instance['uuid'], reason=_msg)
time.sleep(1)
continue
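    # NOTE (added for clarity): the loop above makes up to max_tries (10)
    # attempts roughly one second apart, so a transient guest failure
    # delays the password set by at most ~10 seconds; only on the final
    # failed attempt is the instance pushed into the ERROR state.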
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def inject_file(self, context, path, file_contents, instance):
"""Write a file to the specified path in an instance on this host."""
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
            LOG.warn(_('trying to inject a file into a non-running '
                       'instance: (state: %(current_power_state)s '
'expected: %(expected_state)s)') % locals(),
instance=instance)
LOG.audit(_('injecting file to %(path)s') % locals(),
instance=instance)
self.driver.inject_file(instance, path, file_contents)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def rescue_instance(self, context, instance, rescue_password=None):
"""
Rescue an instance on this host.
:param rescue_password: password to set on rescue instance
"""
context = context.elevated()
LOG.audit(_('Rescuing'), context=context, instance=instance)
admin_password = (rescue_password if rescue_password else
utils.generate_password(FLAGS.password_length))
network_info = self._get_instance_nw_info(context, instance)
image_meta = _get_image_meta(context, instance['image_ref'])
with self._error_out_instance_on_exception(context, instance['uuid']):
self.driver.rescue(context, instance,
self._legacy_nw_info(network_info), image_meta,
admin_password)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
vm_state=vm_states.RESCUED,
task_state=None,
power_state=current_power_state,
expected_task_state=task_states.RESCUING)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def unrescue_instance(self, context, instance):
"""Rescue an instance on this host."""
context = context.elevated()
LOG.audit(_('Unrescuing'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
with self._error_out_instance_on_exception(context, instance['uuid']):
self.driver.unrescue(instance,
self._legacy_nw_info(network_info))
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=task_states.UNRESCUING,
power_state=current_power_state)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def change_instance_metadata(self, context, diff, instance):
"""Update the metadata published to the instance."""
LOG.debug(_("Changing instance metadata according to %(diff)r") %
locals(), instance=instance)
self.driver.change_instance_metadata(context, instance, diff)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def confirm_resize(self, context, migration_id, instance,
reservations=None):
"""Destroys the source instance."""
migration_ref = self.db.migration_get(context, migration_id)
self._notify_about_instance_usage(context, instance,
"resize.confirm.start")
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
migration_ref['source_compute'], teardown=True)
network_info = self._get_instance_nw_info(context, instance)
self.driver.confirm_migration(migration_ref, instance,
self._legacy_nw_info(network_info))
self._notify_about_instance_usage(
context, instance, "resize.confirm.end",
network_info=network_info)
self._quota_commit(context, reservations)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def revert_resize(self, context, instance, migration_id,
reservations=None):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
source machine.
"""
migration_ref = self.db.migration_get(context, migration_id)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
teardown=True)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'])
self.driver.destroy(instance, self._legacy_nw_info(network_info),
block_device_info)
# Terminate volume connections.
bdms = self._get_instance_volume_bdms(context, instance['uuid'])
if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.terminate_connection(context, volume,
connector)
self.compute_rpcapi.finish_revert_resize(context, instance,
migration_ref['id'], migration_ref['source_compute'],
reservations)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def finish_revert_resize(self, context, migration_id, instance,
reservations=None):
"""Finishes the second half of reverting a resize.
Power back on the source instance and revert the resized attributes
in the database.
"""
migration_ref = self.db.migration_get(context, migration_id)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
instance = self._instance_update(context,
instance['uuid'],
host=migration_ref['source_compute'])
self.network_api.setup_networks_on_host(context, instance,
migration_ref['source_compute'])
old_instance_type = migration_ref['old_instance_type_id']
instance_type = instance_types.get_instance_type(old_instance_type)
bdms = self._get_instance_volume_bdms(context, instance['uuid'])
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'])
if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.initialize_connection(context, volume,
connector)
self.driver.finish_revert_migration(instance,
self._legacy_nw_info(network_info),
block_device_info)
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
self._instance_update(context,
instance['uuid'],
memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
root_gb=instance_type['root_gb'],
ephemeral_gb=instance_type['ephemeral_gb'],
instance_type_id=instance_type['id'],
launched_at=timeutils.utcnow(),
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=task_states.
RESIZE_REVERTING)
self.db.migration_update(context, migration_id,
{'status': 'reverted'})
self._notify_about_instance_usage(
context, instance, "resize.revert.end")
self._quota_commit(context, reservations)
@staticmethod
def _quota_commit(context, reservations):
if reservations:
QUOTAS.commit(context, reservations)
@staticmethod
def _quota_rollback(context, reservations):
if reservations:
QUOTAS.rollback(context, reservations)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
reservations=None):
"""Initiates the process of moving a running instance to another host.
Possibly changes the RAM and disk size in the process.
"""
context = context.elevated()
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
compute_utils.notify_usage_exists(
context, instance, current_period=True)
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
if not instance['host']:
self._set_instance_error_state(context, instance['uuid'])
msg = _('Instance has no source host')
raise exception.MigrationError(msg)
same_host = instance['host'] == self.host
if same_host and not FLAGS.allow_resize_to_same_host:
self._set_instance_error_state(context, instance['uuid'])
msg = _('destination same as source!')
raise exception.MigrationError(msg)
# TODO(russellb): no-db-compute: Send the old instance type info
# that is needed via rpc so db access isn't required here.
old_instance_type_id = instance['instance_type_id']
old_instance_type = instance_types.get_instance_type(
old_instance_type_id)
migration_ref = self.db.migration_create(context,
{'instance_uuid': instance['uuid'],
'source_compute': instance['host'],
'dest_compute': self.host,
'dest_host': self.driver.get_host_ip_addr(),
'old_instance_type_id': old_instance_type['id'],
'new_instance_type_id': instance_type['id'],
'status': 'pre-migrating'})
LOG.audit(_('Migrating'), context=context, instance=instance)
self.compute_rpcapi.resize_instance(context, instance,
migration_ref['id'], image, reservations)
extra_usage_info = dict(
new_instance_type=instance_type['name'],
new_instance_type_id=instance_type['id'])
self._notify_about_instance_usage(
context, instance, "resize.prep.end",
extra_usage_info=extra_usage_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def resize_instance(self, context, instance,
migration_id, image, reservations=None):
"""Starts the migration of a running instance to another host."""
migration_ref = self.db.migration_get(context, migration_id)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
instance_type_ref = self.db.instance_type_get(context,
migration_ref.new_instance_type_id)
network_info = self._get_instance_nw_info(context, instance)
self.db.migration_update(context,
migration_id,
{'status': 'migrating'})
self._instance_update(context, instance['uuid'],
task_state=task_states.RESIZE_MIGRATING,
expected_task_state=task_states.RESIZE_PREP)
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'])
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration_ref['dest_host'],
instance_type_ref, self._legacy_nw_info(network_info),
block_device_info)
# Terminate volume connections.
bdms = self._get_instance_volume_bdms(context, instance['uuid'])
if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.terminate_connection(context, volume,
connector)
self.db.migration_update(context,
migration_id,
{'status': 'post-migrating'})
self._instance_update(context, instance['uuid'],
host=migration_ref['dest_compute'],
task_state=task_states.RESIZE_MIGRATED,
expected_task_state=task_states.
RESIZE_MIGRATING)
self.compute_rpcapi.finish_resize(context, instance, migration_id,
image, disk_info, migration_ref['dest_compute'], reservations)
self._notify_about_instance_usage(context, instance, "resize.end",
network_info=network_info)
def _finish_resize(self, context, instance, migration_ref, disk_info,
image):
resize_instance = False
old_instance_type_id = migration_ref['old_instance_type_id']
new_instance_type_id = migration_ref['new_instance_type_id']
if old_instance_type_id != new_instance_type_id:
instance_type = instance_types.get_instance_type(
new_instance_type_id)
instance = self._instance_update(
context,
instance['uuid'],
instance_type_id=instance_type['id'],
memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
root_gb=instance_type['root_gb'],
ephemeral_gb=instance_type['ephemeral_gb'])
resize_instance = True
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
migration_ref['dest_compute'])
network_info = self._get_instance_nw_info(context, instance)
self._instance_update(context, instance['uuid'],
task_state=task_states.RESIZE_FINISH,
expected_task_state=task_states.RESIZE_MIGRATED)
self._notify_about_instance_usage(
context, instance, "finish_resize.start",
network_info=network_info)
bdms = self._get_instance_volume_bdms(context, instance['uuid'])
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'], bdms=bdms)
if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.initialize_connection(context, volume,
connector)
self.driver.finish_migration(context, migration_ref, instance,
disk_info,
self._legacy_nw_info(network_info),
image, resize_instance,
block_device_info)
instance = self._instance_update(context,
instance['uuid'],
vm_state=vm_states.RESIZED,
launched_at=timeutils.utcnow(),
task_state=None,
expected_task_state=task_states.
RESIZE_FINISH)
self.db.migration_update(context, migration_ref.id,
{'status': 'finished'})
self._notify_about_instance_usage(
context, instance, "finish_resize.end",
network_info=network_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def finish_resize(self, context, migration_id, disk_info, image,
instance, reservations=None):
"""Completes the migration process.
Sets up the newly transferred disk and turns on the instance at its
new host machine.
"""
migration_ref = self.db.migration_get(context, migration_id)
try:
self._finish_resize(context, instance, migration_ref,
disk_info, image)
self._quota_commit(context, reservations)
except Exception as error:
with excutils.save_and_reraise_exception():
try:
self._quota_rollback(context, reservations)
except Exception as qr_error:
reason = _("Failed to rollback quota for failed "
"finish_resize: %(qr_error)s")
LOG.exception(reason % locals(), instance=instance)
LOG.error(_('%s. Setting instance vm_state to ERROR') % error,
instance=instance)
self._set_instance_error_state(context, instance['uuid'])
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def add_fixed_ip_to_instance(self, context, network_id, instance):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
self._notify_about_instance_usage(
context, instance, "create_ip.start")
self.network_api.add_fixed_ip_to_instance(context,
instance,
network_id)
network_info = self._inject_network_info(context, instance=instance)
self.reset_network(context, instance)
self._notify_about_instance_usage(
context, instance, "create_ip.end", network_info=network_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def remove_fixed_ip_from_instance(self, context, address, instance):
"""Calls network_api to remove existing fixed_ip from instance
by injecting the altered network info and resetting
instance networking.
"""
self._notify_about_instance_usage(
context, instance, "delete_ip.start")
self.network_api.remove_fixed_ip_from_instance(context,
instance,
address)
network_info = self._inject_network_info(context,
instance=instance)
self.reset_network(context, instance)
self._notify_about_instance_usage(
context, instance, "delete_ip.end", network_info=network_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def pause_instance(self, context, instance):
"""Pause an instance on this host."""
context = context.elevated()
LOG.audit(_('Pausing'), context=context, instance=instance)
self.driver.pause(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.PAUSED,
task_state=None,
expected_task_state=task_states.PAUSING)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def unpause_instance(self, context, instance):
"""Unpause a paused instance on this host."""
context = context.elevated()
LOG.audit(_('Unpausing'), context=context, instance=instance)
self.driver.unpause(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=task_states.UNPAUSING)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def host_power_action(self, context, host=None, action=None):
"""Reboots, shuts down or powers up the host."""
return self.driver.host_power_action(host, action)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def host_maintenance_mode(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
return self.driver.host_maintenance_mode(host, mode)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def set_host_enabled(self, context, host=None, enabled=None):
"""Sets the specified host's ability to accept new instances."""
return self.driver.set_host_enabled(host, enabled)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def get_host_uptime(self, context, host):
"""Returns the result of calling "uptime" on the target host."""
return self.driver.get_host_uptime(host)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.audit(_("Retrieving diagnostics"), context=context,
instance=instance)
return self.driver.get_diagnostics(instance)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def suspend_instance(self, context, instance):
"""Suspend the given instance."""
context = context.elevated()
with self._error_out_instance_on_exception(context, instance['uuid']):
self.driver.suspend(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.SUSPENDED,
task_state=None,
expected_task_state=task_states.SUSPENDING)
self._notify_about_instance_usage(context, instance, 'suspend')
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def resume_instance(self, context, instance):
"""Resume the given suspended instance."""
context = context.elevated()
LOG.audit(_('Resuming'), context=context, instance=instance)
self.driver.resume(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
self._notify_about_instance_usage(context, instance, 'resume')
@reverts_task_state
@wrap_instance_fault
def reset_network(self, context, instance):
"""Reset networking on the given instance."""
LOG.debug(_('Reset network'), context=context, instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance):
"""Inject network info for the given instance."""
LOG.debug(_('Inject network info'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
LOG.debug(_('network_info to inject: |%s|'), network_info,
instance=instance)
self.driver.inject_network_info(instance,
self._legacy_nw_info(network_info))
return network_info
@wrap_instance_fault
def inject_network_info(self, context, instance):
"""Inject network info, but don't return the info."""
self._inject_network_info(context, instance)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def get_console_output(self, context, instance, tail_length=None):
"""Send the console output for the given instance."""
context = context.elevated()
LOG.audit(_("Get console output"), context=context,
instance=instance)
output = self.driver.get_console_output(instance)
if tail_length is not None:
output = self._tail_log(output, tail_length)
return output.decode('utf-8', 'replace').encode('ascii', 'replace')
def _tail_log(self, log, length):
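        """Return the last `length` lines of `log` joined by newlines.
        Illustrative note (added for clarity): for a three-line log and
        length=2 the final two lines are returned; a zero or non-numeric
        length yields ''."""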
try:
length = int(length)
except ValueError:
length = 0
if length == 0:
return ''
else:
            return '\n'.join(log.split('\n')[-length:])
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def get_vnc_console(self, context, console_type, instance):
"""Return connection information for a vnc console."""
context = context.elevated()
LOG.debug(_("Getting vnc console"), instance=instance)
token = str(utils.gen_uuid())
if console_type == 'novnc':
# For essex, novncproxy_base_url must include the full path
# including the html file (like http://myhost/vnc_auto.html)
access_url = '%s?token=%s' % (FLAGS.novncproxy_base_url, token)
elif console_type == 'xvpvnc':
access_url = '%s?token=%s' % (FLAGS.xvpvncproxy_base_url, token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
# Retrieve connect info from driver, and then decorate with our
# access info token
connect_info = self.driver.get_vnc_console(instance)
connect_info['token'] = token
connect_info['access_url'] = access_url
return connect_info
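    # Illustrative result (hypothetical values, added for clarity): for
    # console_type 'novnc' with novncproxy_base_url set to
    # 'http://myhost/vnc_auto.html', the returned dict is the driver's
    # connect info plus:
    #   'token': '<uuid4>',
    #   'access_url': 'http://myhost/vnc_auto.html?token=<uuid4>'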
def _attach_volume_boot(self, context, instance, volume, mountpoint):
"""Attach a volume to an instance at boot time. So actual attach
is done by instance creation"""
instance_id = instance['id']
instance_uuid = instance['uuid']
volume_id = volume['id']
context = context.elevated()
LOG.audit(_('Booting with volume %(volume_id)s at %(mountpoint)s'),
locals(), context=context, instance=instance)
connector = self.driver.get_volume_connector(instance)
connection_info = self.volume_api.initialize_connection(context,
volume,
connector)
self.volume_api.attach(context, volume, instance_uuid, mountpoint)
return connection_info
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def validate_console_port(self, ctxt, instance, port, console_type):
console_info = self.driver.get_vnc_console(instance)
return console_info['port'] == port
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def reserve_block_device_name(self, context, instance, device):
@utils.synchronized(instance['uuid'])
def do_reserve():
result = compute_utils.get_device_name_for_instance(context,
instance,
device)
# NOTE(vish): create bdm here to avoid race condition
values = {'instance_uuid': instance['uuid'],
'volume_id': 'reserved',
'device_name': result}
self.db.block_device_mapping_create(context, values)
return result
return do_reserve()
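    # NOTE (added for clarity): the 'reserved' sentinel volume_id above
    # claims the generated device name (e.g. '/dev/vdc', hypothetical)
    # while still inside the per-instance lock, so two concurrent attach
    # requests cannot be handed the same device name; attach_volume()
    # later fills in the real volume details.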
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def attach_volume(self, context, volume_id, mountpoint, instance):
"""Attach a volume to an instance."""
try:
return self._attach_volume(context, volume_id,
mountpoint, instance)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance.get('uuid'), mountpoint)
def _attach_volume(self, context, volume_id, mountpoint, instance):
volume = self.volume_api.get(context, volume_id)
context = context.elevated()
LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
locals(), context=context, instance=instance)
try:
connector = self.driver.get_volume_connector(instance)
connection_info = self.volume_api.initialize_connection(context,
volume,
connector)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
msg = _("Failed to connect to volume %(volume_id)s "
"while attaching at %(mountpoint)s")
LOG.exception(msg % locals(), context=context,
instance=instance)
self.volume_api.unreserve_volume(context, volume)
if 'serial' not in connection_info:
connection_info['serial'] = volume_id
try:
self.driver.attach_volume(connection_info,
instance['name'],
mountpoint)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
msg = _("Failed to attach volume %(volume_id)s "
"at %(mountpoint)s")
LOG.exception(msg % locals(), context=context,
instance=instance)
self.volume_api.terminate_connection(context,
volume,
connector)
self.volume_api.attach(context,
volume,
instance['uuid'],
mountpoint)
values = {
'instance_uuid': instance['uuid'],
'connection_info': jsonutils.dumps(connection_info),
'device_name': mountpoint,
'delete_on_termination': False,
'virtual_name': None,
'snapshot_id': None,
'volume_id': volume_id,
'volume_size': None,
'no_device': None}
self.db.block_device_mapping_update_or_create(context, values)
def _detach_volume(self, context, instance, bdm):
"""Do the actual driver detach using block device mapping."""
mp = bdm['device_name']
volume_id = bdm['volume_id']
LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'),
locals(), context=context, instance=instance)
if instance['name'] not in self.driver.list_instances():
LOG.warn(_('Detaching volume from unknown instance'),
context=context, instance=instance)
connection_info = jsonutils.loads(bdm['connection_info'])
# NOTE(vish): We currently don't use the serial when disconnecting,
# but added for completeness in case we ever do.
if connection_info and 'serial' not in connection_info:
connection_info['serial'] = volume_id
try:
self.driver.detach_volume(connection_info,
instance['name'],
mp)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
msg = _("Faild to detach volume %(volume_id)s from %(mp)s")
LOG.exception(msg % locals(), context=context,
instance=instance)
volume = self.volume_api.get(context, volume_id)
self.volume_api.roll_detaching(context, volume)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance):
"""Detach a volume from an instance."""
bdm = self._get_instance_volume_bdm(context, instance['uuid'],
volume_id)
self._detach_volume(context, instance, bdm)
volume = self.volume_api.get(context, volume_id)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume, connector)
self.volume_api.detach(context.elevated(), volume)
self.db.block_device_mapping_destroy_by_instance_and_volume(
context, instance['uuid'], volume_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def remove_volume_connection(self, context, volume_id, instance):
"""Remove a volume connection using the volume api"""
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
try:
bdm = self._get_instance_volume_bdm(context,
instance['uuid'],
volume_id)
self._detach_volume(context, instance, bdm)
volume = self.volume_api.get(context, volume_id)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume, connector)
except exception.NotFound:
pass
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
        :param ctxt: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
Returns a mapping of values required in case of block migration
and None otherwise.
"""
dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
instance, block_migration, disk_over_commit)
try:
self.compute_rpcapi.check_can_live_migrate_source(ctxt,
instance, dest_check_data)
finally:
self.driver.check_can_live_migrate_destination_cleanup(ctxt,
dest_check_data)
if dest_check_data and 'migrate_data' in dest_check_data:
return dest_check_data['migrate_data']
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
        :param ctxt: security context
:param instance: dict of instance data
:param dest_check_data: result of check_can_live_migrate_destination
"""
self.driver.check_can_live_migrate_source(ctxt, instance,
dest_check_data)
def pre_live_migration(self, context, instance,
block_migration=False, disk=None):
"""Preparations for live migration at dest host.
:param context: security context
:param instance: dict of instance data
        :param block_migration: if true, prepare for block migration
        :param disk: disk info passed to pre_block_migration when
            block_migration is true
        """
# If any volume is mounted, prepare here.
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'])
if not block_device_info['block_device_mapping']:
LOG.info(_('Instance has no volume.'), instance=instance)
# assign the volume to host system
# needed by the lefthand volume driver and maybe others
connector = self.driver.get_volume_connector(instance)
for bdm in self._get_instance_volume_bdms(context, instance['uuid']):
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.initialize_connection(context, volume, connector)
network_info = self._get_instance_nw_info(context, instance)
        # TODO(tr3buchet): figure out how on earth this is necessary
fixed_ips = network_info.fixed_ips()
if not fixed_ips:
raise exception.FixedIpNotFoundForInstance(
instance_uuid=instance['uuid'])
self.driver.pre_live_migration(context, instance,
block_device_info,
self._legacy_nw_info(network_info))
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host)
        # Create filters for hypervisors and firewalls.
        # An example is nova-instance-instance-xxx, which is written to
        # libvirt.xml (check with "virsh nwfilter-list").
        # This nwfilter is necessary on the destination host, and this
        # method also creates the filtering rules on it.
self.driver.ensure_filtering_rules_for_instance(instance,
self._legacy_nw_info(network_info))
# Preparation for block migration
if block_migration:
self.driver.pre_block_migration(context, instance, disk)
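    # Sequencing sketch (added for clarity): the source host's
    # live_migration() below RPCs into this method on the destination
    # first, so volume connections, networks and filtering rules (and,
    # for block migration, the disk placeholders) all exist before the
    # hypervisor starts the actual migration.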
def live_migration(self, context, dest, instance,
block_migration=False, migrate_data=None):
"""Executing live migration.
:param context: security context
:param instance: instance dict
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: implementation specific params
"""
try:
if block_migration:
disk = self.driver.get_instance_disk_info(instance['name'])
else:
disk = None
self.compute_rpcapi.pre_live_migration(context, instance,
block_migration, disk, dest)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Pre live migration failed at %(dest)s'),
locals(), instance=instance)
self._rollback_live_migration(context, instance, dest,
block_migration)
# Executing live migration
        # live_migration might raise exceptions, but
        # nothing is recovered in this version.
self.driver.live_migration(context, instance, dest,
self._post_live_migration,
self._rollback_live_migration,
block_migration, migrate_data)
def _post_live_migration(self, ctxt, instance_ref,
dest, block_migration=False):
"""Post operations for live migration.
This method is called from live_migration
        and mainly updates the database record.
:param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest: destination host
:param block_migration: if true, prepare for block migration
"""
LOG.info(_('_post_live_migration() is started..'),
instance=instance_ref)
# Detaching volumes.
connector = self.driver.get_volume_connector(instance_ref)
for bdm in self._get_instance_volume_bdms(ctxt, instance_ref['uuid']):
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
# remove the volume connection without detaching from hypervisor
# because the instance is not running anymore on the current host
volume = self.volume_api.get(ctxt, bdm['volume_id'])
self.volume_api.terminate_connection(ctxt, volume, connector)
# Releasing vlan.
# (not necessary in current implementation?)
network_info = self._get_instance_nw_info(ctxt, instance_ref)
# Releasing security group ingress rule.
self.driver.unfilter_instance(instance_ref,
self._legacy_nw_info(network_info))
# Database updating.
# NOTE(jkoelker) This needs to be converted to network api calls
# if nova wants to support floating_ips in
# quantum/melange
try:
            # Do not return if floating_ip is not found; otherwise the
            # instance will never be accessible.
floating_ip = self.db.instance_get_floating_address(ctxt,
instance_ref['id'])
if not floating_ip:
LOG.info(_('No floating_ip found'), instance=instance_ref)
else:
floating_ip_ref = self.db.floating_ip_get_by_address(ctxt,
floating_ip)
self.db.floating_ip_update(ctxt,
floating_ip_ref['address'],
{'host': dest})
except exception.NotFound:
LOG.info(_('No floating_ip found.'), instance=instance_ref)
except Exception, e:
LOG.error(_('Live migration: Unexpected error: cannot inherit '
'floating ip.\n%(e)s'), locals(),
instance=instance_ref)
        # Define the domain at the destination host; without doing this,
        # pause/suspend/terminate do not work.
self.compute_rpcapi.post_live_migration_at_destination(ctxt,
instance_ref, block_migration, dest)
        # No instance is booting at the source host, but the instance dir
        # must be deleted to prepare for the next block migration
if block_migration:
self.driver.destroy(instance_ref,
self._legacy_nw_info(network_info))
else:
# self.driver.destroy() usually performs vif unplugging
# but we must do it explicitly here when block_migration
# is false, as the network devices at the source must be
# torn down
self.driver.unplug_vifs(instance_ref,
self._legacy_nw_info(network_info))
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(ctxt, instance_ref,
self.host, teardown=True)
LOG.info(_('Migrating instance to %(dest)s finished successfully.'),
locals(), instance=instance_ref)
LOG.info(_("You may see the error \"libvirt: QEMU error: "
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."),
instance=instance_ref)
def post_live_migration_at_destination(self, context, instance,
block_migration=False):
"""Post operations for live migration .
:param context: security context
:param instance: Instance dict
:param block_migration: if true, prepare for block migration
"""
LOG.info(_('Post operation of migration started'),
instance=instance)
# NOTE(tr3buchet): setup networks on destination host
# this is called a second time because
# multi_host does not create the bridge in
# plug_vifs
self.network_api.setup_networks_on_host(context, instance,
self.host)
network_info = self._get_instance_nw_info(context, instance)
self.driver.post_live_migration_at_destination(context, instance,
self._legacy_nw_info(network_info),
block_migration)
# Restore instance state
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
host=self.host,
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=task_states.MIGRATING)
# NOTE(vish): this is necessary to update dhcp
self.network_api.setup_networks_on_host(context, instance, self.host)
def _rollback_live_migration(self, context, instance_ref,
dest, block_migration):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest:
This method is called from live migration src host.
This param specifies destination host.
:param block_migration: if true, prepare for block migration
"""
host = instance_ref['host']
self._instance_update(context,
instance_ref['uuid'],
host=host,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=task_states.MIGRATING)
# NOTE(tr3buchet): setup networks on source host (really it's re-setup)
self.network_api.setup_networks_on_host(context, instance_ref,
self.host)
for bdm in self._get_instance_volume_bdms(context,
instance_ref['uuid']):
volume_id = bdm['volume_id']
volume = self.volume_api.get(context, volume_id)
self.compute_rpcapi.remove_volume_connection(context, instance_ref,
volume['id'], dest)
# Block migration needs empty image at destination host
# before migration starts, so if any failure occurs,
        # any empty images have to be deleted.
if block_migration:
self.compute_rpcapi.rollback_live_migration_at_destination(context,
instance_ref, dest)
def rollback_live_migration_at_destination(self, context, instance):
""" Cleaning up image directory that is created pre_live_migration.
:param context: security context
:param instance: an Instance dict sent over rpc
"""
network_info = self._get_instance_nw_info(context, instance)
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host, teardown=True)
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'])
self.driver.destroy(instance, self._legacy_nw_info(network_info),
block_device_info)
@manager.periodic_task
def _heal_instance_info_cache(self, context):
"""Called periodically. On every call, try to update the
info_cache's network information for another instance by
        calling the network manager.
This is implemented by keeping a cache of uuids of instances
that live on this host. On each call, we pop one off of a
list, pull the DB record, and try the call to the network API.
If anything errors, we don't care. It's possible the instance
has been deleted, etc.
"""
heal_interval = FLAGS.heal_instance_info_cache_interval
if not heal_interval:
return
curr_time = time.time()
if self._last_info_cache_heal + heal_interval > curr_time:
return
self._last_info_cache_heal = curr_time
instance_uuids = getattr(self, '_instance_uuids_to_heal', None)
instance = None
while not instance or instance['host'] != self.host:
if instance_uuids:
try:
instance = self.db.instance_get_by_uuid(context,
instance_uuids.pop(0))
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
else:
# No more in our copy of uuids. Pull from the DB.
db_instances = self.db.instance_get_all_by_host(
context, self.host)
if not db_instances:
# None.. just return.
return
instance = db_instances.pop(0)
instance_uuids = [inst['uuid'] for inst in db_instances]
self._instance_uuids_to_heal = instance_uuids
# We have an instance now and it's ours
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
self.network_api.get_instance_nw_info(context, instance)
LOG.debug(_('Updated the info_cache for instance'),
instance=instance)
except Exception:
# We don't care about any failures
pass
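    # Behavioural note (added for clarity): each pass refreshes exactly
    # one instance's info_cache, so with N instances on the host and a
    # heal_instance_info_cache_interval of 60 seconds (hypothetical
    # value), a full sweep of the host takes roughly N minutes.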
@manager.periodic_task
def _poll_rebooting_instances(self, context):
if FLAGS.reboot_timeout > 0:
self.driver.poll_rebooting_instances(FLAGS.reboot_timeout)
@manager.periodic_task
def _poll_rescued_instances(self, context):
if FLAGS.rescue_timeout > 0:
self.driver.poll_rescued_instances(FLAGS.rescue_timeout)
@manager.periodic_task
def _poll_unconfirmed_resizes(self, context):
if FLAGS.resize_confirm_window > 0:
migrations = self.db.migration_get_unconfirmed_by_dest_compute(
context, FLAGS.resize_confirm_window, self.host)
migrations_info = dict(migration_count=len(migrations),
confirm_window=FLAGS.resize_confirm_window)
if migrations_info["migration_count"] > 0:
LOG.info(_("Found %(migration_count)d unconfirmed migrations "
"older than %(confirm_window)d seconds"),
migrations_info)
def _set_migration_to_error(migration_id, reason, **kwargs):
msg = _("Setting migration %(migration_id)s to error: "
"%(reason)s") % locals()
LOG.warn(msg, **kwargs)
self.db.migration_update(context, migration_id,
{'status': 'error'})
for migration in migrations:
# NOTE(comstud): Yield to other greenthreads. Putting this
# at the top so we make sure to do it on each iteration.
greenthread.sleep(0)
migration_id = migration['id']
instance_uuid = migration['instance_uuid']
LOG.info(_("Automatically confirming migration "
"%(migration_id)s for instance %(instance_uuid)s"),
locals())
try:
instance = self.db.instance_get_by_uuid(context,
instance_uuid)
except exception.InstanceNotFound:
reason = _("Instance %(instance_uuid)s not found")
_set_migration_to_error(migration_id, reason % locals())
continue
if instance['vm_state'] == vm_states.ERROR:
reason = _("In ERROR state")
_set_migration_to_error(migration_id, reason % locals(),
instance=instance)
continue
vm_state = instance['vm_state']
task_state = instance['task_state']
if vm_state != vm_states.RESIZED or task_state is not None:
reason = _("In states %(vm_state)s/%(task_state)s, not"
"RESIZED/None")
_set_migration_to_error(migration_id, reason % locals(),
instance=instance)
continue
try:
self.compute_api.confirm_resize(context, instance)
except Exception, e:
msg = _("Error auto-confirming resize: %(e)s. "
"Will retry later.")
LOG.error(msg % locals(), instance=instance)
@manager.periodic_task
def _instance_usage_audit(self, context):
if FLAGS.instance_usage_audit:
if not compute_utils.has_audit_been_run(context, self.host):
begin, end = utils.last_completed_audit_period()
instances = self.db.instance_get_active_by_window_joined(
context,
begin,
end,
host=self.host)
num_instances = len(instances)
errors = 0
successes = 0
LOG.info(_("Running instance usage audit for"
" host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s"
" instances.") % dict(host=self.host,
begin_time=begin,
end_time=end,
number_instances=num_instances))
start_time = time.time()
compute_utils.start_instance_usage_audit(context,
begin, end,
self.host, num_instances)
for instance in instances:
try:
compute_utils.notify_usage_exists(
context, instance,
ignore_missing_network_data=False)
successes += 1
except Exception:
LOG.exception(_('Failed to generate usage '
'audit for instance '
'on host %s') % self.host,
instance=instance)
errors += 1
compute_utils.finish_instance_usage_audit(context,
begin, end,
self.host, errors,
"Instance usage audit ran "
"for host %s, %s instances "
"in %s seconds." % (
self.host,
num_instances,
time.time() - start_time))
@manager.periodic_task
def _poll_bandwidth_usage(self, context, start_time=None, stop_time=None):
if not start_time:
start_time = utils.last_completed_audit_period()[1]
curr_time = time.time()
if (curr_time - self._last_bw_usage_poll >
FLAGS.bandwidth_poll_interval):
self._last_bw_usage_poll = curr_time
LOG.info(_("Updating bandwidth usage cache"))
instances = self.db.instance_get_all_by_host(context, self.host)
try:
bw_usage = self.driver.get_all_bw_usage(instances, start_time,
stop_time)
except NotImplementedError:
# NOTE(mdragon): Not all hypervisors have bandwidth polling
# implemented yet. If they don't it doesn't break anything,
# they just don't get the info in the usage events.
return
refreshed = timeutils.utcnow()
for usage in bw_usage:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
self.db.bw_usage_update(context,
usage['uuid'],
usage['mac_address'],
start_time,
usage['bw_in'], usage['bw_out'],
last_refreshed=refreshed)
@manager.periodic_task
def _report_driver_status(self, context):
curr_time = time.time()
if curr_time - self._last_host_check > FLAGS.host_state_interval:
self._last_host_check = curr_time
LOG.info(_("Updating host status"))
# This will grab info about the host and queue it
# to be sent to the Schedulers.
capabilities = self.driver.get_host_stats(refresh=True)
capabilities['host_ip'] = FLAGS.my_ip
self.update_service_capabilities(capabilities)
@manager.periodic_task(ticks_between_runs=10)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
To sync power state data we make a DB call to get the number of
virtual machines known by the hypervisor and if the number matches the
number of virtual machines known by the database, we proceed in a lazy
loop, one database record at a time, checking if the hypervisor has the
same power state as is in the database. We call eventlet.sleep(0) after
each loop to allow the periodic task eventlet to do other work.
If the instance is not found on the hypervisor, but is in the database,
then a stop() API will be called on the instance.
"""
db_instances = self.db.instance_get_all_by_host(context, self.host)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
if num_vm_instances != num_db_instances:
LOG.warn(_("Found %(num_db_instances)s in the database and "
"%(num_vm_instances)s on the hypervisor.") % locals())
for db_instance in db_instances:
# Allow other periodic tasks to do some work...
greenthread.sleep(0)
db_power_state = db_instance['power_state']
if db_instance['task_state'] is not None:
LOG.info(_("During sync_power_state the instance has a "
"pending task. Skip."), instance=db_instance)
continue
# No pending tasks. Now try to figure out the real vm_power_state.
try:
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance['state']
except exception.InstanceNotFound:
vm_power_state = power_state.NOSTATE
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
u = self.db.instance_get_by_uuid(context,
db_instance['uuid'])
db_power_state = u["power_state"]
vm_state = u['vm_state']
if self.host != u['host']:
# on the sending end of nova-compute _sync_power_state
# may have yielded to the greenthread performing a live
# migration; this in turn has changed the resident-host
# for the VM; However, the instance is still active, it
# is just in the process of migrating to another host.
# This implies that the compute source must relinquish
# control to the compute destination.
LOG.info(_("During the sync_power process the "
"instance has moved from "
"host %(src)s to host %(dst)s") %
{'src': self.host,
'dst': u['host']},
instance=db_instance)
continue
elif u['task_state'] is not None:
# on the receiving end of nova-compute, it could happen
                # that the DB instance already reports the new resident host
                # but the actual VM has not shown up on the hypervisor
# yet. In this case, let's allow the loop to continue
# and run the state sync in a later round
LOG.info(_("During sync_power_state the instance has a "
"pending task. Skip."), instance=db_instance)
continue
if vm_power_state != db_power_state:
# power_state is always updated from hypervisor to db
self._instance_update(context,
db_instance['uuid'],
power_state=vm_power_state)
db_power_state = vm_power_state
# Note(maoy): Now resolve the discrepancy between vm_state and
# vm_power_state. We go through all possible vm_states.
if vm_state in (vm_states.BUILDING,
vm_states.RESCUED,
vm_states.RESIZED,
vm_states.SUSPENDED,
vm_states.PAUSED,
vm_states.ERROR):
# TODO(maoy): we ignore these vm_state for now.
pass
elif vm_state == vm_states.ACTIVE:
# The only rational power state should be RUNNING
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warn(_("Instance shutdown by itself. Calling "
"the stop API."), instance=db_instance)
try:
# Note(maoy): here we call the API instead of
# brutally updating the vm_state in the database
# to allow all the hooks and checks to be performed.
self.compute_api.stop(context, db_instance)
except Exception:
# Note(maoy): there is no need to propagate the error
# because the same power_state will be retrieved next
# time and retried.
# For example, there might be another task scheduled.
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.SUSPENDED:
LOG.warn(_("Instance is suspended unexpectedly. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.stop(context, db_instance)
except Exception:
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.PAUSED:
# Note(maoy): a VM may get into the paused state not only
                    # because of user requests via API calls, but also
# due to (temporary) external instrumentations.
# Before the virt layer can reliably report the reason,
# we simply ignore the state discrepancy. In many cases,
# the VM state will go back to running after the external
# instrumentation is done. See bug 1097806 for details.
LOG.warn(_("Instance is paused unexpectedly. Ignore."),
instance=db_instance)
elif vm_power_state == power_state.NOSTATE:
# Occasionally, depending on the status of the hypervisor,
# which could be restarting for example, an instance may
                    # not be found. Therefore just log the condition.
LOG.warn(_("Instance is unexpectedly not found. Ignore."),
instance=db_instance)
elif vm_state == vm_states.STOPPED:
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warn(_("Instance is not stopped. Calling "
"the stop API."), instance=db_instance)
try:
# Note(maoy): this assumes that the stop API is
# idempotent.
self.compute_api.stop(context, db_instance)
except Exception:
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state in (vm_states.SOFT_DELETED,
vm_states.DELETED):
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN):
# Note(maoy): this should be taken care of periodically in
# _cleanup_running_deleted_instances().
LOG.warn(_("Instance is not (soft-)deleted."),
instance=db_instance)

    @manager.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
interval = FLAGS.reclaim_instance_interval
if interval <= 0:
LOG.debug(_("FLAGS.reclaim_instance_interval <= 0, skipping..."))
return
instances = self.db.instance_get_all_by_host(context, self.host)
for instance in instances:
old_enough = (not instance.deleted_at or
timeutils.is_older_than(instance.deleted_at,
interval))
soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
if soft_deleted and old_enough:
LOG.info(_('Reclaiming deleted instance'), instance=instance)
self._delete_instance(context, instance)

    @manager.periodic_task
    def update_available_resource(self, context):
        """See driver.get_available_resource()

        Periodic process that keeps the compute host's understanding of
        resource availability and usage in sync with the underlying
        hypervisor.

        :param context: security context
        """
self.resource_tracker.update_available_resource(context)

    @manager.periodic_task(
        ticks_between_runs=FLAGS.running_deleted_instance_poll_interval)
    def _cleanup_running_deleted_instances(self, context):
        """Cleanup any instances which are erroneously still running after
        having been deleted.

        Valid actions to take are:

            1. noop - do nothing
            2. log - log which instances are erroneously running
            3. reap - shutdown and cleanup any erroneously running instances

        The use-case for this cleanup task is: for various reasons, it may be
        possible for the database to show an instance as deleted but for that
        instance to still be running on a host machine (see bug
        https://bugs.launchpad.net/nova/+bug/911366).

        This cleanup task is a cross-hypervisor utility for finding these
        zombied instances and either logging the discrepancy (likely what you
        should do in production), or automatically reaping the instances (more
        appropriate for dev environments).
        """
action = FLAGS.running_deleted_instance_action
if action == "noop":
return
# NOTE(sirp): admin contexts don't ordinarily return deleted records
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
if action == "log":
name = instance['name']
LOG.warning(_("Detected instance with name label "
"'%(name)s' which is marked as "
"DELETED but still present on host."),
locals(), instance=instance)
elif action == 'reap':
name = instance['name']
LOG.info(_("Destroying instance with name label "
"'%(name)s' which is marked as "
"DELETED but still present on host."),
locals(), instance=instance)
self._shutdown_instance(context, instance)
self._cleanup_volumes(context, instance['uuid'])
else:
raise Exception(_("Unrecognized value '%(action)s'"
" for FLAGS.running_deleted_"
"instance_action"), locals(),
instance=instance)

    def _running_deleted_instances(self, context):
"""Returns a list of instances nova thinks is deleted,
but the hypervisor thinks is still running. This method
should be pushed down to the virt layer for efficiency.
"""
def deleted_instance(instance):
timeout = FLAGS.running_deleted_instance_timeout
present = instance.name in present_name_labels
erroneously_running = instance.deleted and present
old_enough = (not instance.deleted_at or
timeutils.is_older_than(instance.deleted_at,
timeout))
if erroneously_running and old_enough:
return True
return False
present_name_labels = set(self.driver.list_instances())
instances = self.db.instance_get_all_by_host(context, self.host)
return [i for i in instances if deleted_instance(i)]

    @contextlib.contextmanager
def _error_out_instance_on_exception(self, context, instance_uuid,
reservations=None):
try:
yield
except Exception, error:
self._quota_rollback(context, reservations)
with excutils.save_and_reraise_exception():
msg = _('%s. Setting instance vm_state to ERROR')
LOG.error(msg % error, instance_uuid=instance_uuid)
self._set_instance_error_state(context, instance_uuid)

    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def add_aggregate_host(self, context, aggregate_id, host, slave_info=None):
"""Notify hypervisor of change (for hypervisor pools)."""
aggregate = self.db.aggregate_get(context, aggregate_id)
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
except exception.AggregateError:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(context,
self.db.aggregate_host_delete,
aggregate.id, host)

    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def remove_aggregate_host(self, context, aggregate_id,
host, slave_info=None):
"""Removes a host from a physical hypervisor pool."""
aggregate = self.db.aggregate_get(context, aggregate_id)
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
except (exception.AggregateError,
exception.InvalidAggregateAction) as e:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context, self.db.aggregate_host_add,
aggregate.id, host,
isinstance(e, exception.AggregateError))

    @manager.periodic_task(
ticks_between_runs=FLAGS.image_cache_manager_interval)
def _run_image_cache_manager_pass(self, context):
"""Run a single pass of the image cache manager."""
if FLAGS.image_cache_manager_interval == 0:
return
try:
self.driver.manage_image_cache(context)
except NotImplementedError:
pass
|
{
"content_hash": "a5b70ae8dbc2d3514108837485c04fd1",
"timestamp": "",
"source": "github",
"line_count": 2950,
"max_line_length": 79,
"avg_line_length": 46.784745762711864,
"alnum_prop": 0.5495706988370829,
"repo_name": "paulmathews/nova",
"id": "bbb71ddec74b6ddd27eb5b9d9ab36622ec839e8e",
"size": "138830",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/folsom",
"path": "nova/compute/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7293434"
},
{
"name": "Shell",
"bytes": "16910"
}
],
"symlink_target": ""
}
|
"""onnx shape inference. Shape inference is not guaranteed to be
complete.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import onnx
import onnx.onnx_cpp2py_export.shape_inference as C
from onnx import ModelProto
"""Apply shape inference to the provided ModelProto.
Inferred shapes are added to the value_info field of the graph.
If the inferred values conflict with values already provided in the
graph, that means that the provided values are invalid (or there is a
bug in shape inference), and the result is unspecified.
Arguments:
input (ModelProto): ModelProto
Return:
return (ModelProto) model with inferred shape information
"""
def infer_shapes(model): # type: (ModelProto) -> ModelProto
if not isinstance(model, ModelProto):
raise ValueError('Shape inference only accepts ModelProto, '
'incorrect type: {}'.format(type(model)))
model_str = model.SerializeToString()
inferred_model_str = C.infer_shapes(model_str)
return onnx.load_from_string(inferred_model_str)
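

# Example usage (a minimal sketch; 'model.onnx' is an illustrative
# filename, not part of this module):
#
#   import onnx
#   from onnx import shape_inference
#
#   model = onnx.load('model.onnx')
#   inferred = shape_inference.infer_shapes(model)
#   print(inferred.graph.value_info)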
|
{
"content_hash": "a85266a45821c33e1603469dabc230ff",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 69,
"avg_line_length": 30.972972972972972,
"alnum_prop": 0.7373472949389179,
"repo_name": "mlperf/training_results_v0.6",
"id": "83d2495e0b1fbc384e26047406fc43880d478daa",
"size": "1146",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/onnx-tensorrt/third_party/onnx/onnx/shape_inference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
}
|
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _redfishobject import RedfishObject
from redfish.rest.v1 import ServerDownOrUnreachableError


def ex28_set_ilo_timezone(redfishobj, olson_timezone):
sys.stdout.write("\nEXAMPLE 28: Set iLO's Timezone\n")
sys.stdout.write("\tNOTE: This only works if iLO is NOT configured to " \
"take time settings from DHCP v4 or v6\n")
if redfishobj.typepath.defs.isgen9:
hpilodatetimetype = "HpiLODateTime."
else:
hpilodatetimetype = "#HpeiLODateTime."
instances = redfishobj.search_for_type(hpilodatetimetype)
for instance in instances:
response = redfishobj.redfish_get(instance["@odata.id"])
for timezone in response.dict["TimeZoneList"]:
if timezone["Name"].startswith(olson_timezone):
body = {"TimeZone": {"Index": timezone["Index"]}}
response = redfishobj.redfish_patch(instance["@odata.id"], body)
redfishobj.error_handler(response)


if __name__ == "__main__":
# When running on the server locally use the following commented values
# iLO_https_url = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO secured (https://) address,
# iLO account name, and password to send https requests
# iLO_https_url acceptable examples:
# "https://10.0.0.100"
# "https://f250asha.americas.hpqcorp.net"
iLO_https_url = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
# Create a REDFISH object
try:
REDFISH_OBJ = RedfishObject(iLO_https_url, iLO_account, iLO_password)
except ServerDownOrUnreachableError, excp:
sys.stderr.write("ERROR: server not reachable or doesn't support " \
"RedFish.\n")
        sys.exit(1)
except Exception, excp:
raise excp
ex28_set_ilo_timezone(REDFISH_OBJ, "America/Chicago")
|
{
"content_hash": "36d65410e9f9459f2549e2380c3cf405",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 80,
"avg_line_length": 41.78125,
"alnum_prop": 0.6443530291697831,
"repo_name": "HewlettPackard/python-proliant-sdk",
"id": "89282598f68a840f50fe6a99b5b85a756ac69696",
"size": "2674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/Redfish/ex28_set_ilo_timezone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import sqlite3


def create_table():
    conn = sqlite3.connect('swipeat.db')
    c = conn.cursor()
    c.execute("CREATE TABLE IF NOT EXISTS swipeat_accounts "
              "(username text, password text)")
    conn.commit()


# Check if account exists
def account_exists(username):
    # Connection and cursor
    conn = sqlite3.connect('swipeat.db')
    c = conn.cursor()
    try:
        # Parameterized query (the original string concatenation was
        # vulnerable to SQL injection)
        c.execute("SELECT * FROM swipeat_accounts WHERE username LIKE ?",
                  (username,))
        all_rows = c.fetchall()
    except sqlite3.OperationalError:
        # missing database/table?
        create_table()
        c.execute("SELECT * FROM swipeat_accounts WHERE username LIKE ?",
                  (username,))
        all_rows = c.fetchall()
    # Check if exists
    return len(all_rows) > 0


# Create an account
def create_account(username, password):
# Connection and cursor
conn = sqlite3.connect('swipeat.db')
c = conn.cursor()
# If account exists => exit
if account_exists(username):
return False, "Account already exists"
else:
c.execute('''INSERT INTO swipeat_accounts(username, password) VALUES(:username,:password)''', {'username' : username, 'password' : password})
conn.commit()
return True, "Account created"


# Check login and password
def check_login(username, password):
    # Connection and cursor
    conn = sqlite3.connect('swipeat.db')
    c = conn.cursor()
    # No account?
    if not account_exists(username):
        return False, "Username does not exist"
    else:
        # Get info (parameterized to avoid SQL injection)
        c.execute("SELECT * FROM swipeat_accounts WHERE username LIKE ?",
                  (username,))
        row = c.fetchone()
        if row[1] == password:
            return True, "Login succeeded"
        else:
            return False, "Wrong password"
|
{
"content_hash": "58524b6a0f7b9db89bba3d59a26a1674",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 149,
"avg_line_length": 28.453125,
"alnum_prop": 0.6106534870950028,
"repo_name": "swipeat/back",
"id": "9d6160995b5009b40f2d9358a43bed90cfbf111d",
"size": "1822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db/user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "31506"
}
],
"symlink_target": ""
}
|
import re
from .base import BasePlugin


class JiwonPlugin(BasePlugin):
PATTERN = re.compile(r'jiwon\?\s*(.*)')
def process_message(self, data):
if ('text' in data) and ('subtype' not in data):
matched = re.match(self.PATTERN, data['text'])
if matched:
try:
message = ' '.join(map(str, multiplication(int(matched.group(1)))))
except ValueError:
                    message = ('Error: please enter a number '
                               'between 1 and 19.')
self.outputs.append([data['channel'], message])


def multiplication(x):
if 1 <= x <= 19:
result = []
for n in range(1, 20):
result.append(x * n)
return result
else:
raise ValueError
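

# Example (a sketch of the plugin's behavior; the channel/message data is
# illustrative):
#
#   multiplication(3)   # -> [3, 6, 9, ..., 57]
#   multiplication(20)  # -> raises ValueError
#
# A Slack message like "jiwon? 3" would therefore get the reply
# "3 6 9 12 ... 57" posted back to the same channel.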
|
{
"content_hash": "91bf36295daae82780ea663749641809",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 87,
"avg_line_length": 27.77777777777778,
"alnum_prop": 0.5266666666666666,
"repo_name": "askdjango/python-rtmbot",
"id": "cd54e45bcf290b6aaa73fa6cee88ae2a676f1063",
"size": "782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/jiwon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7030"
}
],
"symlink_target": ""
}
|
"""
BOSH OpenStack Ironic CPI
"""
# Python 2 and 3 compatibility
from __future__ import unicode_literals
import sys
from ironic_cpi.settings import CPISettings as CPISettings


def is_py3():
    """Check whether the interpreter is Python 3"""
    return sys.version_info[0] == 3


def boolean(data):
"""Check if data can be converted to boolean"""
if isinstance(data, bool):
return data
elif ((is_py3() and isinstance(data, str)) or
((not is_py3()) and isinstance(data, basestring))):
return data.lower() in CPISettings._string_booleans_true
else:
return bool(data)


def is_macaddr(value):
    """Check if value is a MAC address"""
    return CPISettings._re_macaddr.match(value) is not None


def greater_or_equal(base, value):
    """Compare booleans, strings, or numbers"""
if isinstance(base, bool):
return base == bool(value)
elif ((is_py3() and isinstance(base, str)) or
((not is_py3()) and isinstance(base, basestring))):
return base == value
else:
return float(base) >= float(value)
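

# Example usage (a sketch; assumes CPISettings' truthy-string set includes
# 'true' and that its MAC regex matches colon-separated addresses):
#
#   boolean('True')                       # -> True
#   is_macaddr('00:11:22:33:44:55')       # -> True
#   greater_or_equal(8, '4')              # -> True (numeric comparison)
#   greater_or_equal('x86_64', 'x86_64')  # -> True (string equality)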
|
{
"content_hash": "712d49524643c20eec4f4a67da45430c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 64,
"avg_line_length": 23.851063829787233,
"alnum_prop": 0.6342551293487957,
"repo_name": "jriguera/bosh-ironic-cpi-release",
"id": "412becff0eb448c77c47ecffeccdd8f20e31f0da",
"size": "1170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bosh_ironic_cpi/ironic_cpi/actions/utils/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "13005"
},
{
"name": "Lua",
"bytes": "55086"
},
{
"name": "Python",
"bytes": "111012"
},
{
"name": "Shell",
"bytes": "22086"
}
],
"symlink_target": ""
}
|
"""
@copyright Copyright (c) 2013 Submit Consulting
@author Angel Sullon (@asullom)
@package utils
Description: Classes that control information security in the cloud
"""
#from apps.utils.messages import Message
from django.utils.translation import ugettext as _, ungettext
from django.contrib import messages
import datetime
import random
import hashlib
from array import *
from django.shortcuts import redirect
#import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
from django.contrib.auth.models import User, Group, Permission
from django.db.models import Q
from django.http import HttpResponse
from django.db import models
from django.utils.encoding import force_text
from django.contrib.admin.util import NestedObjects, get_deleted_objects
from django.db import transaction, DEFAULT_DB_ALIAS, router
from django.utils.text import capfirst, get_text_list
from django.core.exceptions import ValidationError


def get_dep_objects(instance, using=DEFAULT_DB_ALIAS):
    """
    Find all protected objects related to ``instance`` that would block its
    deletion.

    Returns a tuple ``(deps, msg_del)``: the list of protected dependent
    objects and a display message describing them (an empty string when
    there are none).
    """
collector = NestedObjects(using=using)
collector.collect([instance])
def format_callback(obj):
no_edit_link = '%s: %s' % (capfirst(force_text(obj._meta.verbose_name)),
force_text(obj))
return no_edit_link
def format_callback2(obj):
no_edit_link = '%s' % (capfirst(force_text(obj._meta.verbose_name)))
return no_edit_link
#ver_objs = collector.nested(format_callback)
objects = collector.nested()
# print objects
deps = []
try:
for x in objects[1]:
if type(x) is not list:
deps.append(x)
except:
pass
    # build the message shown when deleting
msg_del = ''
if deps:
objs = []
for p in deps:
if not 'relationship' in force_text(p._meta.verbose_name):
objs.append(
_(u'<br>%(class_name)s: "%(instance)s"') % {
'class_name': capfirst(force_text(p._meta.verbose_name)),
'instance': force_text(p) + ' (' + force_text(p.pk) + ')'}
)
params = {
'class_name': capfirst(force_text(instance._meta.verbose_name)),
'instance': force_text(instance),
'related_objects': get_text_list(objs, _('and'))}
msg = _("Deleting %(class_name)s %(instance)s would require deleting the following "
"protected related objects: %(related_objects)s")
msgx = _("Deleting the %(object_name)s '%(escaped_object)s' would require deleting the "
"following protected related objects:")
#raise ValidationError(force_text(msg), code='deleting_protected', params=params)
#raise Exception(msg)
# messages.success(self.request, (', ').join(deps)# )
msg_del = force_text(msg % params)
msg_delx = msgx % {
'object_name': capfirst(force_text(instance._meta.verbose_name)),
'escaped_object': get_text_list(objs, _('and'))}
return deps, msg_del
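

# Example usage of get_dep_objects (a sketch; `obj` is assumed to be a
# Django model instance and `request` a standard HttpRequest):
#
#   deps, msg = get_dep_objects(obj)
#   if deps:
#       messages.warning(request, msg)
#   else:
#       obj.delete()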


def convert_old_style_list(list_):
    """
    Converts old style lists to the new, easier to understand format.

    The old list format looked like:
        ['Item 1', [['Item 1.1', []], ['Item 1.2', []]]]

    And it is converted to:
        ['Item 1', ['Item 1.1', 'Item 1.2']]
    """
if not isinstance(list_, (tuple, list)) or len(list_) != 2:
return list_, False
first_item, second_item = list_
if second_item == []:
return [first_item], True
try:
# see if second item is iterable
iter(second_item)
except TypeError:
return list_, False
old_style_list = True
new_second_item = []
for sublist in second_item:
item, old_style_list = convert_old_style_list(sublist)
if not old_style_list:
break
new_second_item.extend(item)
if old_style_list:
second_item = new_second_item
return [first_item, second_item], old_style_list


def log_params(request):
return {
'path': request.get_full_path(),
'ip': request.META['REMOTE_ADDR'],
'user': request.user
}


class SecurityKey:
    """
    Class for creating security keys used in URLs.
    """
TEXT_KEY = 'lyHyRajh987r.P~CFCcJ[AvFKdz|86'

    # Method to generate security keys
@staticmethod
def get_key(id, action_name):
"""
Genera una llave de seguridad válida durante todo el día %Y-%m-%d
Entrada::
id=1
action_name="user_upd"
Salida::
1.dfad09debee34f8e85fccc5adaa2dadb
"""
key = "%s%s" % (
SecurityKey.TEXT_KEY, datetime.datetime.now().strftime('%Y-%m-%d'))
m = hashlib.md5("%s%s%s" % (id, key, action_name))
key = m.hexdigest()
return u"%s.%s" % (id, key)

    # Method to check whether a key is valid
@staticmethod
def is_valid_key(request, key_value, action_name):
"""
Genera una llave de seguridad válida durante todo el día %Y-%m-%d
Entrada::
key_value=1.dfad09debee34f8e85fccc5adaa2dadb
action_name="user_upd"
Salida::
1
"""
key = key_value.split('.')
_id = key[0]
valid_key = SecurityKey.get_key(_id, action_name)
valid = (True if valid_key == key_value else False)
if not valid:
#raise Exception(("Acceso denegado. La llave de seguridad es incorrecta."))
messages.warning(
request, _('Access denied. The security key is incorrect.'))
# Message.error(
# request, ('Acceso denegado. La llave de seguridad es
# incorrecta.'))
return False
# print 'key_value(%s) = valid_key(%s)' % (key_value, valid_key)
# Message.info(request,('key_value(%s) = valid_key(%s)' % (key_value, valid_key)))
return _id


class UserToken:
    """
    Class for storing and retrieving the per-company data permissions
    requested by users.
    """
    @staticmethod
    def set_association_id(request, association_id):
        request.session['association_id'] = association_id

    @staticmethod
    def get_association_id(session):
        return session.get('association_id', False)

    @staticmethod
    def set_enterprise_id(request, enterprise_id):
        request.session['enterprise_id'] = enterprise_id

    @staticmethod
    def get_enterprise_id(session):
        return session.get('enterprise_id', False)

    @staticmethod
    def set_headquar_id(request, headquar_id):
        request.session['headquar_id'] = headquar_id

    @staticmethod
    def get_headquar_id(session):
        return session.get('headquar_id', False)

    @staticmethod
    def set_grupo_id_list(request, grupo_id_list):
        request.session['grupo_id_list'] = grupo_id_list

    @staticmethod
    def get_grupo_id_list(session):
        return session.get('grupo_id_list', False)
'''
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
'''


class xxxRedirect:
    """
    Class that redirects to a controller whether or not the request was
    made via AJAX.

    Before::

        if request.is_ajax():
            request.path = "/params/locality/index/"  # /app/controller_path/action/$params
            return locality_index(request)
        else:
            return redirect("/params/locality/index/")

    Now just use (Example)::

        return Redirect.to(request, "/sad/user/index/")
        return Redirect.to_action(request, "index")
    """
@staticmethod
def to(request, route, params=None):
"""
route_list[0] = app
route_list[1] = controller
route_list[2] = action
"""
route = route.strip("/")
route_list = route.split("/")
app_name = route_list[0]
controller_name = ""
action_name = ""
if len(route_list) > 1:
controller_name = route_list[1]
else:
raise Exception(("Route no tiene controller"))
if len(route_list) > 2:
action_name = route_list[2]
app = ("apps.%s.views") % app_name
path = "/%s/%s/" % (app_name, controller_name)
func = "%s" % (controller_name)
if action_name:
path = "/%s/%s/%s/" % (app_name, controller_name, action_name)
func = "%s_%s" % (controller_name, action_name)
if request.is_ajax():
mod = __import__(app, fromlist=[func])
methodToCall = getattr(mod, func)
# Message.error(request, "ajax %s"%path)
request.path = path # /app/controller_path/action/$params
return methodToCall(request)
else:
# Message.error(request, "noajax %s"%path)
return redirect(path)
@staticmethod
def to_action(request, action_name, params=None):
"""
route_list[0] = app
route_list[1] = controller
route_list[2] = action
"""
route = request.path
route = route.strip("/")
route_list = route.split("/")
app_name = route_list[0]
controller_name = ""
# action_name=""
if len(route_list) > 1:
controller_name = route_list[1]
else:
raise Exception(("Route no tiene controller"))
# if len(route_list) > 2:
# action_name = route_list[2]
app = ("apps.%s.views") % app_name
path = "/%s/%s/" % (app_name, controller_name)
func = "%s" % (controller_name)
if action_name:
path = "/%s/%s/%s/" % (app_name, controller_name, action_name)
func = "%s_%s" % (controller_name, action_name)
# Message.error(request, "path= %s"%path)
# Message.error(request, "func= %s"%func)
if request.is_ajax():
mod = __import__(app, fromlist=[func])
methodToCall = getattr(mod, func)
# Message.error(request, "ajax %s"%path)
request.path = path # /app/controller_path/action/$params
return methodToCall(request)
else:
# Message.error(request, "noajax %s"%path)
return redirect(path)
|
{
"content_hash": "4e4ff899348dc91f3dc1d6f890514853",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 110,
"avg_line_length": 31.254641909814325,
"alnum_prop": 0.5833828396842909,
"repo_name": "tiposaurio/venton",
"id": "576376c66d842ae0ba32c1590f67f2a8675e7c2c",
"size": "11823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/utils/security.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "199258"
},
{
"name": "JavaScript",
"bytes": "549065"
},
{
"name": "Python",
"bytes": "368009"
}
],
"symlink_target": ""
}
|
import zmq
import sys
import argparse
from datetime import datetime


def get_arguments():
ap = argparse.ArgumentParser(description="ZMQ Proxy tool")
def common_arguments(ap):
ap.add_argument(
"--front-bind",
metavar="ADDR",
action="append",
help="Binds frontend socket to specified address",
)
ap.add_argument(
"--front-connect",
metavar="ADDR",
action="append",
help="Connects frontend socket to specified address",
)
ap.add_argument(
"--back-bind",
metavar="ADDR",
action="append",
help="Binds backend socket to specified address",
)
ap.add_argument(
"--back-connect",
metavar="ADDR",
action="append",
help="Connects backend socket to specified address",
)
ap.add_argument(
"--monitor-bind",
metavar="ADDR",
action="append",
help="Creates and binds monitor socket" " to specified address",
)
ap.add_argument(
"--monitor-connect",
metavar="ADDR",
action="append",
help="Creates and connects monitor socket" " to specified address",
)
parsers = ap.add_subparsers(title="Commands", help="ZMQ Proxy tool commands")
sub = parsers.add_parser(
"queue",
help="Creates Shared Queue proxy"
" (frontend/backend sockets are ZMQ_ROUTER/ZMQ_DEALER)",
)
sub.set_defaults(sock_types=(zmq.ROUTER, zmq.DEALER), action=serve_proxy)
common_arguments(sub)
sub = parsers.add_parser(
"forwarder",
help="Creates Forwarder proxy"
" (frontend/backend sockets are ZMQ_XSUB/ZMQ_XPUB)",
)
sub.set_defaults(sock_types=(zmq.XSUB, zmq.XPUB), action=serve_proxy)
common_arguments(sub)
sub = parsers.add_parser(
"streamer",
help="Creates Streamer proxy"
" (frontend/backend sockets are ZMQ_PULL/ZMQ_PUSH)",
)
sub.set_defaults(sock_types=(zmq.PULL, zmq.PUSH), action=serve_proxy)
common_arguments(sub)
sub = parsers.add_parser(
"monitor", help="Connects/binds to monitor socket and dumps all traffic"
)
sub.set_defaults(action=monitor)
sub.add_argument("--connect", metavar="ADDR", help="Connect to monitor socket")
sub.add_argument("--bind", metavar="ADDR", help="Bind monitor socket")
return ap


def main():
ap = get_arguments()
options = ap.parse_args()
options.action(options)


def serve_proxy(options):
if not (options.front_connect or options.front_bind):
print("No frontend socket address specified!", file=sys.stderr)
sys.exit(1)
if not (options.back_connect or options.back_bind):
print("No backend socket address specified!", file=sys.stderr)
sys.exit(1)
ctx = zmq.Context.instance()
front_type, back_type = options.sock_types
front = ctx.socket(front_type)
back = ctx.socket(back_type)
if options.monitor_bind or options.monitor_connect:
monitor = ctx.socket(zmq.PUB)
bind_connect(monitor, options.monitor_bind, options.monitor_connect)
else:
monitor = None
bind_connect(front, options.front_bind, options.front_connect)
bind_connect(back, options.back_bind, options.back_connect)
try:
if monitor:
zmq.proxy(front, back, monitor)
else:
zmq.proxy(front, back)
except Exception:
return
finally:
front.close()
back.close()


def bind_connect(sock, bind=None, connect=None):
if bind:
for address in bind:
sock.bind(address)
if connect:
for address in connect:
sock.connect(address)


def monitor(options):
ctx = zmq.Context.instance()
sock = ctx.socket(zmq.SUB)
bind = [options.bind] if options.bind else []
connect = [options.connect] if options.connect else []
bind_connect(sock, bind, connect)
sock.setsockopt(zmq.SUBSCRIBE, b"")
try:
while True:
try:
data = sock.recv()
except KeyboardInterrupt:
break
except Exception as err:
print("Error receiving message: {!r}".format(err))
else:
print(datetime.now().isoformat(), "Message received: {!r}".format(data))
finally:
sock.close()
ctx.term()


if __name__ == "__main__":
main()
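

# Example invocations (a sketch; the addresses and ports are illustrative):
#
#   # Pub/sub forwarder with a monitor socket:
#   python proxy.py forwarder --front-bind tcp://*:5559 \
#       --back-bind tcp://*:5560 --monitor-bind tcp://*:5561
#
#   # Dump everything flowing through the proxy:
#   python proxy.py monitor --connect tcp://127.0.0.1:5561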
|
{
"content_hash": "5afdaa928f61c0fe1ca6ec0f7f776497",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 88,
"avg_line_length": 28.936708860759495,
"alnum_prop": 0.586832895888014,
"repo_name": "aio-libs/aiozmq",
"id": "39db9b8bf3375bbdd63d9c141049137983af5c34",
"size": "4572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiozmq/cli/proxy.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "889"
},
{
"name": "Python",
"bytes": "287673"
}
],
"symlink_target": ""
}
|