text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import unittest
from unittest.mock import ANY, MagicMock, Mock, PropertyMock, patch
import pytest
from google.api_core.gapic_v1.method import DEFAULT
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.operators.text_to_speech import CloudTextToSpeechSynthesizeOperator
PROJECT_ID = "project-id"
GCP_CONN_ID = "gcp-conn-id"
IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
INPUT = {"text": "text"}
VOICE = {"language_code": "en-US"}
AUDIO_CONFIG = {"audio_encoding": "MP3"}
TARGET_BUCKET_NAME = "target_bucket_name"
TARGET_FILENAME = "target_filename"
class TestGcpTextToSpeech(unittest.TestCase):
    """Unit tests for ``CloudTextToSpeechSynthesizeOperator``."""

    @patch("airflow.providers.google.cloud.operators.text_to_speech.GCSHook")
    @patch("airflow.providers.google.cloud.operators.text_to_speech.CloudTextToSpeechHook")
    def test_synthesize_text_green_path(self, mock_text_to_speech_hook, mock_gcp_hook):
        """Happy path: speech is synthesized and the audio uploaded to GCS."""
        synthesize_response = Mock()
        type(synthesize_response).audio_content = PropertyMock(return_value=b"audio")
        mock_text_to_speech_hook.return_value.synthesize_speech.return_value = synthesize_response
        mock_gcp_hook.return_value.upload.return_value = True

        operator = CloudTextToSpeechSynthesizeOperator(
            project_id=PROJECT_ID,
            gcp_conn_id=GCP_CONN_ID,
            input_data=INPUT,
            voice=VOICE,
            audio_config=AUDIO_CONFIG,
            target_bucket_name=TARGET_BUCKET_NAME,
            target_filename=TARGET_FILENAME,
            task_id="id",
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        operator.execute(context=MagicMock())

        # Both hooks must be constructed exactly once with the same
        # connection settings.
        for hook_mock in (mock_text_to_speech_hook, mock_gcp_hook):
            hook_mock.assert_called_once_with(
                gcp_conn_id="gcp-conn-id",
                impersonation_chain=IMPERSONATION_CHAIN,
            )
        mock_text_to_speech_hook.return_value.synthesize_speech.assert_called_once_with(
            input_data=INPUT, voice=VOICE, audio_config=AUDIO_CONFIG, retry=DEFAULT, timeout=None
        )
        # The audio content is staged in a temporary file, hence filename=ANY.
        mock_gcp_hook.return_value.upload.assert_called_once_with(
            bucket_name=TARGET_BUCKET_NAME, object_name=TARGET_FILENAME, filename=ANY
        )

    @parameterized.expand(
        [
            ("input_data", "", VOICE, AUDIO_CONFIG, TARGET_BUCKET_NAME, TARGET_FILENAME),
            ("voice", INPUT, "", AUDIO_CONFIG, TARGET_BUCKET_NAME, TARGET_FILENAME),
            ("audio_config", INPUT, VOICE, "", TARGET_BUCKET_NAME, TARGET_FILENAME),
            ("target_bucket_name", INPUT, VOICE, AUDIO_CONFIG, "", TARGET_FILENAME),
            ("target_filename", INPUT, VOICE, AUDIO_CONFIG, TARGET_BUCKET_NAME, ""),
        ]
    )
    @patch("airflow.providers.google.cloud.operators.text_to_speech.GCSHook")
    @patch("airflow.providers.google.cloud.operators.text_to_speech.CloudTextToSpeechHook")
    def test_missing_arguments(
        self,
        missing_arg,
        input_data,
        voice,
        audio_config,
        target_bucket_name,
        target_filename,
        mock_text_to_speech_hook,
        mock_gcp_hook,
    ):
        """An empty required argument raises AirflowException naming that argument."""
        with pytest.raises(AirflowException) as exc_info:
            CloudTextToSpeechSynthesizeOperator(
                project_id="project-id",
                input_data=input_data,
                voice=voice,
                audio_config=audio_config,
                target_bucket_name=target_bucket_name,
                target_filename=target_filename,
                task_id="id",
            ).execute(context=Mock())

        assert missing_arg in str(exc_info.value)
        # Validation fails before either hook is ever instantiated.
        mock_text_to_speech_hook.assert_not_called()
        mock_gcp_hook.assert_not_called()
|
{
"content_hash": "e3e5dddfe9140bd8b6069f76029a8609",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 103,
"avg_line_length": 39.95876288659794,
"alnum_prop": 0.6457688338493293,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "6acbd60d250141b3b48b1619565596d900faa5c5",
"size": "4664",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/operators/test_text_to_speech.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
import os
import sys
def _import_django_cli():
    """Import and return Django's command-line entry point.

    Raises an ImportError with a helpful hint when Django itself is not
    installed, while re-raising unrelated import failures untouched so they
    are not masked.
    """
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # Confirm the failure really is a missing Django installation before
        # blaming the environment.
        try:
            import django  # noqa: F401
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    return execute_from_command_line


if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "instapix.settings")
    _import_django_cli()(sys.argv)
|
{
"content_hash": "190a7e3182de3adf73a7418c24e004db",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.6211734693877551,
"repo_name": "peromo93/instapix",
"id": "26516d08c8c232d585fbb3287d389e2054a4a77f",
"size": "806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "59"
},
{
"name": "HTML",
"bytes": "18019"
},
{
"name": "JavaScript",
"bytes": "14643"
},
{
"name": "Python",
"bytes": "28631"
}
],
"symlink_target": ""
}
|
import pytest
from plenum.test.delayers import ppDelay, icDelay
from plenum.test.freshness.helper import has_freshness_instance_change
from plenum.test.helper import freshness
from plenum.test.stasher import delay_rules
from stp_core.loop.eventually import eventually
FRESHNESS_TIMEOUT = 5
@pytest.fixture(scope="module")
def tconf(tconf):
    """Module-scoped override of the pool config that enables freshness
    checks with a short timeout, restoring the original config afterwards.
    """
    with freshness(tconf, enabled=True, timeout=FRESHNESS_TIMEOUT):
        yield tconf
def test_freshness_instance_changes_are_sent_continuosly(looper, tconf, txnPoolNodeSet,
                                                         sdk_wallet_client, sdk_pool_handle):
    """While ordering and view change completion are stalled, every node keeps
    emitting freshness-triggered InstanceChange messages at roughly the
    freshness-timeout interval, all targeting the same next view.
    """
    current_view_no = txnPoolNodeSet[0].viewNo
    for node in txnPoolNodeSet:
        assert node.viewNo == current_view_no

    def check_instance_change_messages(count=1):
        # Every node must have sent at least `count` freshness instance changes.
        for node in txnPoolNodeSet:
            assert has_freshness_instance_change(node, count)

    stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
    # NOTE(review): delaying PrePrepares and InstanceChanges presumably keeps
    # the pool stale (no ordering) and prevents the view change from actually
    # completing, so freshness ICs keep firing — confirm against delayers docs.
    with delay_rules(stashers, ppDelay(), icDelay()):
        looper.run(eventually(check_instance_change_messages, 3, timeout=FRESHNESS_TIMEOUT * 5))
        for node in txnPoolNodeSet:
            all_instance_changes = node.master_replica.\
                _view_change_trigger_service.spylog.getAll('_send_instance_change')
            # suspicion code 43 marks freshness-triggered instance changes here;
            # other codes would indicate a different view-change trigger.
            freshness_instance_changes = [ic for ic in all_instance_changes
                                          if ic.params['suspicion'].code == 43]
            # Ensure that all instance change messages were due to freshness
            assert len(all_instance_changes) == len(freshness_instance_changes)
            # Ensure that all instance change messages are for same view
            for ic in freshness_instance_changes:
                assert ic.params['view_no'] == current_view_no + 1
            # Ensure instance change messages had sensible interval
            for ic1, ic2 in zip(freshness_instance_changes[1:], freshness_instance_changes):
                interval = ic2.starttime - ic1.starttime
                assert 0.9 * FRESHNESS_TIMEOUT < interval < 2.1 * FRESHNESS_TIMEOUT
|
{
"content_hash": "12746709c4ba7204a3b62c1bbffc0a1a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 96,
"avg_line_length": 43.416666666666664,
"alnum_prop": 0.663147792706334,
"repo_name": "evernym/zeno",
"id": "eb1b57f304c5e31048dadf29317df3cd428f5ad5",
"size": "2084",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plenum/test/freshness/test_freshness_instance_changes_are_sent_continuosly.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "531061"
}
],
"symlink_target": ""
}
|
'''
Basic processing procedures for analog signals (e.g., performing a z-score of a signal, or filtering a signal).
:copyright: Copyright 2014-2015 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
'''
from __future__ import division, print_function
import numpy as np
import scipy.signal
import quantities as pq
import neo
def zscore(signal, inplace=True):
    '''
    Apply a z-score operation to one or several AnalogSignalArray objects.

    The z-score operation subtracts the mean :math:`\\mu` of the signal, and
    divides by its standard deviation :math:`\\sigma`:

    .. math::
         Z(x(t))= \\frac{x(t)-\\mu}{\\sigma}

    If an AnalogSignalArray containing multiple signals is provided, the
    z-transform is always calculated for each signal individually.

    If a list of AnalogSignalArray objects is supplied, the mean and standard
    deviation are calculated across all objects of the list. Thus, all list
    elements are z-transformed by the same values of :math:`\\mu` and
    :math:`\\sigma`. For AnalogSignalArrays, each signal of the array is
    treated separately across list elements. Therefore, the number of signals
    must be identical for each AnalogSignalArray of the list.

    Parameters
    ----------
    signal : neo.AnalogSignalArray or list of neo.AnalogSignalArray
        Signals for which to calculate the z-score.
    inplace : bool
        If True, the contents of the input signal(s) is replaced by the
        z-transformed signal. Otherwise, a copy of the original
        AnalogSignalArray(s) is returned. Default: True

    Returns
    -------
    neo.AnalogSignalArray or list of neo.AnalogSignalArray
        The output format matches the input format: for each supplied
        AnalogSignalArray object a corresponding object is returned containing
        the z-transformed signal with the unit dimensionless.
    '''
    # Wrap a single signal in a list so both input forms share the code below.
    if not isinstance(signal, list):
        signal = [signal]

    # Pool all list elements along the time axis and compute per-channel
    # mean/std once (the original concatenated twice, doubling the work).
    pooled = np.concatenate(signal)
    m = np.mean(pooled, axis=0, keepdims=True)
    s = np.std(pooled, axis=0, keepdims=True)

    if not inplace:
        # Build new, dimensionless signal instances; dividing by sig.units
        # strips the physical unit.
        result = []
        for sig in signal:
            sig_dimless = sig.duplicate_with_new_array(
                (sig.magnitude - m.magnitude) / s.magnitude) / sig.units
            result.append(sig_dimless)
    else:
        result = []
        # Overwrite each input signal in place, then derive the dimensionless
        # view that is returned to the caller.
        for sig in signal:
            sig[:] = pq.Quantity(
                (sig.magnitude - m.magnitude) / s.magnitude,
                units=sig.units)
            sig_dimless = sig / sig.units
            result.append(sig_dimless)

    # Return a single object when a single signal was supplied, else the list.
    if len(result) == 1:
        return result[0]
    else:
        return result
def butter(signal, highpass_freq=None, lowpass_freq=None, order=4,
           filter_function='filtfilt', fs=1.0, axis=-1):
    """
    Butterworth filtering function for neo.AnalogSignalArray. Filter type is
    determined according to how values of `highpass_freq` and `lowpass_freq`
    are given (see Parameters section for details).

    Parameters
    ----------
    signal : AnalogSignalArray or Quantity array or NumPy ndarray
        Time series data to be filtered. When given as Quantity array or NumPy
        ndarray, the sampling frequency should be given through the keyword
        argument `fs`.
    highpass_freq, lowpass_freq : Quantity or float
        High-pass and low-pass cut-off frequencies, respectively. When given as
        float, the given value is taken as frequency in Hz.
        Filter type is determined depending on values of these arguments:
            * highpass_freq only (lowpass_freq = None): highpass filter
            * lowpass_freq only (highpass_freq = None): lowpass filter
            * highpass_freq < lowpass_freq: bandpass filter
            * highpass_freq > lowpass_freq: bandstop filter
    order : int
        Order of Butterworth filter. Default is 4.
    filter_function : string
        Filtering function to be used. Either 'filtfilt'
        (`scipy.signal.filtfilt()`) or 'lfilter' (`scipy.signal.lfilter()`). In
        most applications 'filtfilt' should be used, because it doesn't bring
        about phase shift due to filtering. Default is 'filtfilt'.
    fs : Quantity or float
        The sampling frequency of the input time series. When given as float,
        its value is taken as frequency in Hz. When the input is given as neo
        AnalogSignalArray, its attribute is used to specify the sampling
        frequency and this parameter is ignored. Default is 1.0.
    axis : int
        Axis along which filter is applied. Default is -1.

    Returns
    -------
    filtered_signal : AnalogSignalArray or Quantity array or NumPy ndarray
        Filtered input data. The shape and type is identical to those of the
        input.

    Raises
    ------
    ValueError
        If both cut-off frequencies are None, or `filter_function` is not
        one of 'filtfilt' / 'lfilter'.
    """
    def _design_butterworth_filter(Fs, hpfreq=None, lpfreq=None, order=4):
        # Nyquist frequency; scipy expects cut-offs normalized to it.
        Fn = Fs / 2.
        # Filter type is determined according to the values of the cut-off
        # frequencies (see the docstring table above).
        if lpfreq and hpfreq:
            if hpfreq < lpfreq:
                Wn = (hpfreq / Fn, lpfreq / Fn)
                btype = 'bandpass'
            else:
                Wn = (lpfreq / Fn, hpfreq / Fn)
                btype = 'bandstop'
        elif lpfreq:
            Wn = lpfreq / Fn
            btype = 'lowpass'
        elif hpfreq:
            Wn = hpfreq / Fn
            btype = 'highpass'
        else:
            raise ValueError(
                "Either highpass_freq or lowpass_freq must be given"
            )
        # return filter coefficients
        return scipy.signal.butter(order, Wn, btype=btype)

    # Design filter: extract plain floats (Hz) from any Quantity inputs.
    Fs = signal.sampling_rate.rescale(pq.Hz).magnitude \
        if hasattr(signal, 'sampling_rate') else fs
    Fh = highpass_freq.rescale(pq.Hz).magnitude \
        if isinstance(highpass_freq, pq.quantity.Quantity) else highpass_freq
    Fl = lowpass_freq.rescale(pq.Hz).magnitude \
        if isinstance(lowpass_freq, pq.quantity.Quantity) else lowpass_freq
    b, a = _design_butterworth_filter(Fs, Fh, Fl, order)

    # When the input is AnalogSignalArray, the axis for time index (i.e. the
    # first axis) needs to be rolled to the last
    data = np.asarray(signal)
    if isinstance(signal, neo.AnalogSignalArray):
        data = np.rollaxis(data, 0, len(data.shape))

    # Apply filter. FIX: the original compared strings with `is` (object
    # identity), which only works by accident of CPython string interning;
    # `==` is the correct equality test.
    if filter_function == 'lfilter':
        filtered_data = scipy.signal.lfilter(b, a, data, axis=axis)
    elif filter_function == 'filtfilt':
        filtered_data = scipy.signal.filtfilt(b, a, data, axis=axis)
    else:
        raise ValueError(
            "filter_function must be either 'filtfilt' or 'lfilter'"
        )

    if isinstance(signal, neo.AnalogSignalArray):
        return signal.duplicate_with_new_array(filtered_data.T)
    elif isinstance(signal, pq.quantity.Quantity):
        return filtered_data * signal.units
    else:
        return filtered_data
|
{
"content_hash": "5e5fa9921d185c894f2ff360d68f40d7",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 111,
"avg_line_length": 38.78389830508475,
"alnum_prop": 0.6317054517644488,
"repo_name": "btel/elephant",
"id": "f138cf5721ba8388cf6e918ba793b0c1f4cd725a",
"size": "9153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elephant/signal_processing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "452164"
},
{
"name": "Shell",
"bytes": "3891"
}
],
"symlink_target": ""
}
|
import argparse
import os
from stawebg.data import Project, version
if __name__ == "__main__":
    # Command-line front end for the static website generator.
    cli = argparse.ArgumentParser(description="static website generator")
    cli.add_argument("directory", nargs="?", default=os.getcwd(),
                     help='the web project root directory')
    cli.add_argument("-t", "--test", action='store_true',
                     help='write output to test directory')
    cli.add_argument("-o", "--output", metavar="output",
                     type=str, default=None,
                     help='write output to this directory')
    cli.add_argument("-v", "--version", action="version",
                     version="%(prog)s " + version)

    options = cli.parse_args()
    # Constructing the Project drives the whole generation run.
    Project(options.directory, options.test, options.output)
|
{
"content_hash": "5e8a398ab32a1ab137414b9ec0aa91ca",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 42.68421052631579,
"alnum_prop": 0.5881627620221949,
"repo_name": "svenhertle/stawebg",
"id": "9ce3e4d6c50fca87a5d883581460d0710a9e2c04",
"size": "831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stawebg.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4174"
},
{
"name": "Python",
"bytes": "44755"
},
{
"name": "Shell",
"bytes": "19"
}
],
"symlink_target": ""
}
|
""" This module provides a simple task monitoring capability to track usage
statistics for tasks.
"""
import re
import os
import datetime
try:
import simplejson as json
except ImportError:
import json
from focus.plugin import base
from focus import common, errors
MINS_IN_HOUR = 60
MINS_IN_DAY = MINS_IN_HOUR * 24
class Stats(base.Plugin):
    """ Prints usage statistics about tasks.
        """
    name = 'Stats'
    version = '0.1'
    target_version = '>=0.1'
    events = ['task_end']
    command = 'stat'

    def _sdir(self, base_dir):
        """ Return path to stats directory.

            `base_dir`
                Base directory.

            Returns string.
            """
        return os.path.join(base_dir, '.stats')

    def _setup_dir(self, base_dir):
        """ Creates stats directory for storing stat files.

            `base_dir`
                Base directory.

            Raises ``DirectorySetupFail`` when the directory can't be made.
            """
        stats_dir = self._sdir(base_dir)
        if not os.path.isdir(stats_dir):
            try:
                os.mkdir(stats_dir)
            except OSError:
                raise errors.DirectorySetupFail()

    def _log_task(self, task):
        """ Logs task record to file, splitting the duration across the
            per-day JSON stat files it spans.

            `task`
                ``Task`` instance.
            """
        if not task.duration:
            return
        self._setup_dir(task.base_dir)
        stats_dir = self._sdir(task.base_dir)

        duration = task.duration
        while duration > 0:
            # build filename for the day this slice of the task started on
            date = (datetime.datetime.now() -
                    datetime.timedelta(minutes=duration))
            date_str = date.strftime('%Y%m%d')
            filename = os.path.join(stats_dir, '{0}.json'.format(date_str))

            with open(filename, 'a+') as file_:
                # fetch any existing data; treat unreadable/corrupt files
                # as empty
                try:
                    file_.seek(0)
                    data = json.loads(file_.read())
                except (ValueError, OSError):
                    data = {}

                if task.name not in data:
                    data[task.name] = 0

                # how much total time for day, capped at a full day
                try:
                    total_time = sum(int(x) for x in data.values())
                    if total_time > MINS_IN_DAY:
                        total_time = MINS_IN_DAY
                except ValueError:
                    total_time = 0

                # constrain to single day
                amount = duration
                if amount + total_time > MINS_IN_DAY:
                    amount = MINS_IN_DAY - total_time

                # invalid or broken state, bail
                if amount <= 0:
                    break

                data[task.name] += amount
                duration -= amount

                # write file back (best-effort)
                try:
                    file_.seek(0)
                    file_.truncate(0)
                    file_.write(json.dumps(data))
                except (ValueError, OSError):
                    pass

    def _fuzzy_time_parse(self, value):
        """ Parses a fuzzy time value into a meaningful interpretation.

            `value`
                String value to parse (e.g. "today", "yesterday",
                "3 days ago", "2w").

            Returns ``datetime.date`` or ``None`` when unparseable.
            """
        value = value.lower().strip()
        today = datetime.date.today()

        if value in ('today', 't'):
            return today
        else:
            kwargs = {}
            if value in ('y', 'yesterday'):
                kwargs['days'] = -1
            elif value in ('w', 'wk', 'week', 'last week'):
                kwargs['days'] = -7
            else:
                # match "<N> days [ago]"
                match = re.match(r'(\d+)\s*(d|day|days)\s*(ago)?$', value)
                if match:
                    kwargs['days'] = -int(match.group(1))
                else:
                    # match "<N> weeks [ago]"
                    match = re.match(r'(\d+)\s*(w|wk|week|weeks)\s*(ago)?$',
                                     value)
                    if match:
                        kwargs['weeks'] = -int(match.group(1))

            if kwargs:
                return today + datetime.timedelta(**kwargs)
        return None

    def _get_stats(self, task, start_date):
        """ Fetches statistic information for given task and start range.

            Returns list of ``(date, [(task_name, minutes), ...])`` tuples,
            one per day that has a stat file, sorted descending by minutes.
            """
        stats = []
        stats_dir = self._sdir(task.base_dir)
        date = start_date
        end_date = datetime.date.today()
        delta = datetime.timedelta(days=1)

        while date <= end_date:
            date_str = date.strftime('%Y%m%d')
            filename = os.path.join(stats_dir, '{0}.json'.format(date_str))
            if os.path.exists(filename):
                try:
                    # fetch stats content
                    with open(filename, 'r') as file_:
                        data = json.loads(file_.read())

                    # sort descending by time. FIX: use items() instead of the
                    # Python-2-only iteritems().
                    stats.append((date, sorted(data.items(),
                                               key=lambda x: x[1],
                                               reverse=True)))
                # FIX: catch ValueError instead of json.JSONDecodeError —
                # JSONDecodeError subclasses ValueError in both simplejson and
                # the stdlib, and the attribute doesn't exist on older stdlib
                # json modules (would raise AttributeError here).
                except (ValueError, OSError):
                    pass
            date += delta  # next day

        return stats

    def _print_stats(self, env, stats):
        """ Prints statistic information using io stream.

            `env`
                ``Environment`` object.
            `stats`
                Tuple of task stats for each date.
            """
        def _format_time(mins):
            """ Generates formatted H:MM time string from minutes.
                """
            mins = int(mins)
            if mins < MINS_IN_HOUR:
                time_str = '0:{0:02}'.format(mins)
            else:
                hours = mins // MINS_IN_HOUR
                mins %= MINS_IN_HOUR
                if mins > 0:
                    time_str = '{0}:{1:02}'.format(hours, mins)
                else:
                    time_str = '{0}'.format(hours)
            return time_str

        if not stats:
            env.io.write('No stats found.')
            return

        for date, tasks in stats:
            env.io.write('')
            total_mins = float(sum(v[1] for v in tasks))
            env.io.write('[ {0} ]'.format(date.strftime('%Y-%m-%d')))
            env.io.write('')

            # NOTE: `tasks` is never empty in practice (_get_stats only emits
            # days with data), so `line` is always bound after the loop.
            for name, mins in tasks:
                # format time
                time_str = _format_time(mins)

                # generate stat line
                line = '   {0:>5}'.format(time_str)
                line += ' ({0:2.0f}%) - '.format(mins * 100.0 / total_mins)
                if len(name) > 55:
                    name = name[:55] + '...'
                line += name
                env.io.write(line)

            # generate total line
            env.io.write('_' * len(line))
            time_str = _format_time(total_mins)
            env.io.write('   {0:>5} (total)'.format(time_str))
            env.io.write('')

    def setup_parser(self, parser):
        """ Setup the argument parser.

            `parser`
                ``FocusArgParser`` object.
            """
        parser.add_argument('start', nargs='?',
                            help='starting period. defaults to today',
                            default='today')

    def execute(self, env, args):
        """ Prints task information.

            `env`
                Runtime ``Environment`` instance.
            `args`
                Arguments object from arg parser.
            """
        start = self._fuzzy_time_parse(args.start)
        if not start:
            raise errors.FocusError(u'Invalid start period provided')

        stats = self._get_stats(env.task, start)
        self._print_stats(env, stats)

    def on_taskend(self, task):
        """ Logs task usage stats when task ends.
            """
        self._log_task(task)
|
{
"content_hash": "b63de7a630be02abf375c5d21bc45c62",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 76,
"avg_line_length": 28.95571955719557,
"alnum_prop": 0.4515101312603543,
"repo_name": "xtrementl/focus",
"id": "254fd560cabcd35086e3ee9a0292977b3ad534cf",
"size": "7847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "focus/plugin/modules/stats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "341755"
}
],
"symlink_target": ""
}
|
import clr
import rpw
from rpw.ui.forms import Button, ComboBox, FlexForm, CheckBox, Label, TextBox, Separator
from rpw import doc
import os
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import SaveAsOptions, Transaction, IFamilyLoadOptions
def get_family_parameter_value(familydoc, name):
    """Return the current type's value string for the named family parameter.

    Returns None when no parameter with that definition name exists.
    """
    manager = familydoc.FamilyManager
    param = next(
        (p for p in manager.Parameters if p.Definition.Name == name),
        None,
    )
    if param is not None:
        return manager.CurrentType.AsValueString(param)
def set_family_parameter_value(familydoc, param, value):
    """Set `param` to `value` on the family document, inside a transaction."""
    with rpw.db.Transaction('Set parameter', doc=familydoc):
        familydoc.FamilyManager.Set(param, value)
def get_family_parameter(familydoc, name):
    """Return the family parameter object with the given definition name.

    Returns None when no match is found.
    """
    return next(
        (p for p in familydoc.FamilyManager.Parameters
         if p.Definition.Name == name),
        None,
    )
def associate_family_parameter(doc, nested_instance, param_name):
    """Link a nested instance's parameter to the host family's parameter
    of the same name, inside a transaction.

    `doc`
        Host family document.
    `nested_instance`
        Family instance placed inside the host family.
    `param_name`
        Name of the parameter on both the instance and the host family.

    Raises LookupError when the host family has no such parameter (the
    original code hit an unhelpful NameError on unbound `host_param`).
    """
    nested_param = nested_instance.LookupParameter(param_name)
    host_param = None
    for p in doc.FamilyManager.GetParameters():
        if p.Definition.Name == param_name:
            host_param = p
            break
    if host_param is None:
        raise LookupError(
            "No family parameter named {0!r} in host document".format(param_name))
    with rpw.db.Transaction('Associate parameter {}'.format(param_name), doc=doc):
        doc.FamilyManager.AssociateElementParameterToFamilyParameter(nested_param, host_param)
def main():
    """Build a pile-cap foundation family with a grid of nested pile instances.

    Reads the user's FlexForm values (`ff`), loads the chosen pile family into
    the chosen foundation family, places piles on a grid, wires up shared
    parameters, sizes the cap, then saves and opens the new .rfa file.
    All linear inputs are millimetres; divisions by 304.8 convert to feet
    (Revit's internal length unit).
    """
    pile_width = int(ff.values['pile_width'])
    piles_along_length = int(ff.values['piles_along_length'])
    piles_along_width = int(ff.values['piles_along_width'])
    pile_spacing_along_length = float(ff.values['pile_spacing_along_length'])
    pile_spacing_along_width = float(ff.values['pile_spacing_along_width'])
    length_cover = float(ff.values['length_cover'])
    width_cover = float(ff.values['width_cover'])
    thickness = float(ff.values['thickness'])
    pile_cutoff = float(ff.values['pile_cutoff'])
    foundation = rpw.db.Element.from_id(ff.values['foundation_id'])
    pile = rpw.db.Element.from_id(ff.values['pile_id'])
    foundation_famdoc = doc.EditFamily(foundation.unwrap())
    pile_famdoc = doc.EditFamily(pile.unwrap())
    # Save the pile family to disk so it can be loaded into the foundation.
    saveas_path = os.path.expanduser('~')
    save_options = SaveAsOptions()
    save_options.OverwriteExistingFile = True
    pile_famdoc.SaveAs(os.path.join(saveas_path, '{}'.format(pile_famdoc.Title)), save_options)
    with rpw.db.Transaction('Load pile into foundation', doc=foundation_famdoc):
        foundation_famdoc.LoadFamily(pile_famdoc.PathName)
    pile_famdoc.Close(False)
    with rpw.db.Transaction('Activate pile family symbol', doc=foundation_famdoc):
        pile_family_symbol = rpw.db.Collector(of_class='FamilySymbol',
                                              doc=foundation_famdoc,
                                              where=lambda x: x.FamilyName==pile.Name)[0]
        # http://thebuildingcoder.typepad.com/blog/2014/08/activate-your-family-symbol-before-using-it.html
        if not pile_family_symbol.IsActive:
            pile_family_symbol.Activate()
    ### Place piles on a grid centred on the cap origin
    first_x = -pile_spacing_along_length*(piles_along_length-1)/2
    first_y = -pile_spacing_along_width*(piles_along_width-1)/2
    z = pile_cutoff - thickness
    pile_locations = []
    for i in range(piles_along_length):
        for j in range(piles_along_width):
            x = first_x + i * pile_spacing_along_length
            y = first_y + j * pile_spacing_along_width
            pile_locations.append((x, y, z))
    with rpw.db.Transaction('Insert pile instances', doc=foundation_famdoc):
        for x, y, z in pile_locations:
            # mm -> feet (Revit internal units)
            x = x / 304.8
            y = y / 304.8
            z = z / 304.8
            foundation_famdoc.FamilyCreate.NewFamilyInstance(rpw.DB.XYZ(x,y,z),
                                                             pile_family_symbol,
                                                             rpw.DB.Structure.StructuralType.NonStructural)
    created_piles = list(rpw.db.Collector(of_category='OST_StructuralFoundation',
                                          of_class='FamilyInstance',
                                          doc=foundation_famdoc))
    # Drive each nested pile's dimensions from host-family parameters.
    parameters = ['Number of pile segments', 'D', 'L1', 'L2', 'L3', 'L4', 'L5', 'L6', 'L7']
    for pile in created_piles:
        for param_name in parameters:
            associate_family_parameter(foundation_famdoc, pile, param_name)
    length_param = get_family_parameter(foundation_famdoc, 'Length')
    width_param = get_family_parameter(foundation_famdoc, 'Width')
    thickness_param = get_family_parameter(foundation_famdoc, 'Foundation Thickness')
    length = pile_spacing_along_length*(piles_along_length-1) + 2 * length_cover
    # FIX: width must use the spacing ALONG WIDTH (the original used
    # pile_spacing_along_length here, inconsistent with the pile grid above).
    width = pile_spacing_along_width*(piles_along_width-1) + 2 * width_cover
    set_family_parameter_value(foundation_famdoc, length_param, length / 304.8)
    set_family_parameter_value(foundation_famdoc, width_param, width / 304.8)
    set_family_parameter_value(foundation_famdoc, thickness_param, thickness / 304.8)
    set_family_parameter_value(foundation_famdoc, get_family_parameter(foundation_famdoc, 'D'), pile_width/304.8)
    new_filename = ff.values['new_foundation_family_name'] or 'new_{}'.format(foundation_famdoc.Title)
    foundation_famdoc.SaveAs(os.path.join(saveas_path, '{}.rfa'.format(new_filename)),
                             save_options)
    foundation_rfa_path = foundation_famdoc.PathName
    foundation_famdoc.Close(False)
    rpw.ui.forms.Alert('Finished creating foundation, saved at {}'.format(foundation_rfa_path))
    os.startfile(foundation_rfa_path)
# Collect every Structural Foundations family in the project; the same list
# feeds both the pile-cap and the pile pickers below (piles are modelled as
# structural-foundation families here — TODO confirm intended).
foundations = rpw.db.Collector(of_class='Family',
                               where=lambda x: x.FamilyCategory.Name=='Structural Foundations')
# FlexForm field definitions; keys here become keys in ff.values read by main().
components = [Label('Select Pile Cap:'),
              ComboBox('foundation_id', {f.Name: f.Id for f in foundations}),
              Label('Select Rectangular Pile:'),
              ComboBox('pile_id', {f.Name: f.Id for f in foundations}),
              Label('Pile width (D):'),
              TextBox('pile_width'),
              Label('Number of piles along Length:'),
              TextBox('piles_along_length'),
              Label('Number of piles along Width:'),
              TextBox('piles_along_width'),
              Label('Pile spacing along Length:'),
              TextBox('pile_spacing_along_length'),
              Label('Pile spacing along Width:'),
              TextBox('pile_spacing_along_width'),
              Label('Length cover:'),
              TextBox('length_cover'),
              Label('Width cover:'),
              TextBox('width_cover'),
              Label('Foundation Thickness:'),
              TextBox('thickness'),
              Label('Pile Cut-off:'),
              TextBox('pile_cutoff'),
              Label('New Foundation Family Name:'),
              TextBox('new_foundation_family_name'),
              Separator(),
              Button('Create foundation')]
ff = FlexForm("Modify Structural Foundation Family", components)
ff.show()
# ff.values is populated only if the user submitted the form.
if ff.values:
    main()
|
{
"content_hash": "176000cf4bf1a753e9dc8af86fd18e10",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 113,
"avg_line_length": 43.89808917197452,
"alnum_prop": 0.6353743470690656,
"repo_name": "htlcnn/pyrevitscripts",
"id": "190d4b8cf9bd45698a4b22595df19f73043de8a7",
"size": "6916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HTL.tab/Foundation.panel/Auto Foundation.pushbutton/script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70018"
}
],
"symlink_target": ""
}
|
from scipy.special import binom


def lattice_paths(width, height=None):
    """Count monotonic lattice paths from corner to corner of a grid.

    A path through a `width` x `height` grid moving only right or down is a
    choice of which `width` of the `width + height` steps go right, so the
    count is C(width + height, width). `height` defaults to `width`
    (Project Euler 15 uses a 20x20 grid).

    Returns the count as an exact int (binom returns a float, but the values
    involved here are well below 2**53, so the conversion is exact).
    """
    if height is None:
        height = width
    return int(binom(width + height, width))


print(lattice_paths(20))
|
{
"content_hash": "8634b5faf9db1c11071240de53405911",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 31,
"avg_line_length": 31.5,
"alnum_prop": 0.7301587301587301,
"repo_name": "EndlessDex/euler",
"id": "c5d492c071aa910f88d22c32a9f572b66a059a2a",
"size": "63",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "15-latticePaths.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19341"
}
],
"symlink_target": ""
}
|
from zoo.tfpark.tfnet import TFNet
from zoo.tfpark.tf_optimizer import BigDLMetric, TFModel
from zoo.pipeline.api.keras import metrics as zmetrics
def to_bigdl_metric(metric):
    """Translate a metric-name string into a validation-method instance.

    Accepts common aliases (case-insensitive). Raises TypeError for any
    name that is not recognised.
    """
    name = metric.lower()
    if name in ("accuracy", "acc"):
        return zmetrics.Accuracy()
    if name in ("top5accuracy", "top5acc"):
        return zmetrics.Top5Accuracy()
    if name == "mae":
        # imported lazily so bigdl is only required when actually used
        from bigdl.optim.optimizer import MAE
        return MAE()
    if name == "auc":
        return zmetrics.AUC()
    if name == "treennaccuracy":
        from bigdl.optim.optimizer import TreeNNAccuracy
        return TreeNNAccuracy()
    raise TypeError("Unsupported metric: %s" % name)
def evaluate_string_metrics(*,
                            sess,
                            string_metrics,
                            dataset,
                            inputs,
                            targets=None,
                            outputs=None,
                            loss=None,
                            ):
    """Evaluate metrics named by strings (e.g. "loss", "accuracy") over a dataset.

    `sess`: live TF session holding the graph's variables.
    `string_metrics`: iterable of metric names; "loss" requires `loss`,
        anything else requires both `outputs` and `targets`.
    `dataset`: TFDataset to evaluate on.
    `inputs`: list of input placeholder/tensor(s) fed from the dataset.
    `loss`: scalar loss tensor (only needed when "loss" is requested).

    Returns a dict mapping evaluation method to its result (see
    evaluate_metrics).
    """
    metrics = {}
    for i, metric in enumerate(string_metrics):
        if metric == "loss":
            assert loss is not None, "loss tensor should not be None if one of the metrics is loss"
            metrics["loss"] = loss
        else:
            # Non-loss metrics are computed from model outputs vs targets.
            assert outputs is not None, "outputs should not be None if non loss metrics exists"
            assert targets is not None, "targets should not be None if non loss metrics exists"
            method = to_bigdl_metric(metric)
            metrics[metric] = BigDLMetric(method,
                                          outputs,
                                          targets)
    result = evaluate_metrics(inputs, sess, dataset, metrics)
    return result
def evaluate_metrics(inputs, sess, dataset, metrics):
    """Run the given metric tensors/BigDLMetrics over `dataset` via TFNet.

    Exports the session's graph as a TFNet and evaluates it distributed,
    returning a dict of {evaluation method: result}.
    """
    import tensorflow as tf
    if dataset.batch_per_thread > 0:
        # batch_per_thread is per partition; scale to a global batch size.
        batch_size = dataset.batch_per_thread * dataset.get_num_partitions()
    else:
        batch_size = dataset.batch_size

    # Actual batch size at runtime may differ (e.g. last partial batch), so
    # it is derived dynamically from the first input tensor.
    real_batch_size = tf.shape(inputs[0])[0]

    outputs, eval_methods = TFModel._process_metrics(inputs[0].graph,
                                                     metrics=metrics,
                                                     real_batch_size=real_batch_size)

    tfnet = TFNet.from_session(sess, inputs=inputs, outputs=outputs)

    results = tfnet.evaluate(dataset, batch_size, eval_methods)
    final_result = dict([(r.method, r.result) for r in results])
    return final_result
|
{
"content_hash": "0eb8115fa9c2339ef7b2d594f98c4c8b",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 99,
"avg_line_length": 37.161764705882355,
"alnum_prop": 0.5587653343886031,
"repo_name": "intel-analytics/analytics-zoo",
"id": "605338f2b0638df442a9f17dcb4a5337898fc222",
"size": "3117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzoo/zoo/tfpark/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "73165"
},
{
"name": "Groovy",
"bytes": "1613"
},
{
"name": "Java",
"bytes": "209136"
},
{
"name": "Jupyter Notebook",
"bytes": "24437284"
},
{
"name": "Makefile",
"bytes": "11724"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "4085490"
},
{
"name": "RobotFramework",
"bytes": "17467"
},
{
"name": "Scala",
"bytes": "3562801"
},
{
"name": "Shell",
"bytes": "413512"
}
],
"symlink_target": ""
}
|
class foo():
    """Minimal class whose __str__ returns a known constant, used to
    verify that str() dispatches to a user-defined __str__."""
    def __str__(self):
        return "hello"

# Fixed: single-argument print(...) is valid in both Python 2 and Python 3;
# the original `print a, b` statement form is Python 2 only.
if str(foo()) != "hello":
    print("str(foo()) should have been 'hello', not " + str(foo()))
|
{
"content_hash": "24629ca301674277027bf65b94b291e2",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 62,
"avg_line_length": 23.833333333333332,
"alnum_prop": 0.5664335664335665,
"repo_name": "sburnett/seattle",
"id": "76a134e21d9c62ab8966108ebd7bd754bd32e18e",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "repy/tests/ut_repytests_test--str--.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "85039"
},
{
"name": "CSS",
"bytes": "44140"
},
{
"name": "Java",
"bytes": "178864"
},
{
"name": "JavaScript",
"bytes": "791008"
},
{
"name": "Perl",
"bytes": "36791"
},
{
"name": "Python",
"bytes": "4683648"
},
{
"name": "Scala",
"bytes": "2587"
},
{
"name": "Shell",
"bytes": "87609"
}
],
"symlink_target": ""
}
|
"""
Date: 29 Feburary 2016
Author: James Boocock
Description: Main NGaDNAP script.
"""
import argparse
try:
import ConfigParser
except:
import configparser as ConfigParser
import logging
import os
import sys
from ngadnap.create_ngadnap_graph import CreateNGaDNAPGraph
from ngadnap.dependency_graph.job_queue import JobQueue
from ngadnap.__version__ import __VERSION__
from ngadnap.check_paths import check_paths
from ngadnap.utils.environment import set_environment
def parse_config(args):
    """ Parse config file

    Reads the config file named by args.config_file and returns its
    contents as a nested dict: {section: {option: value}}. Options that
    cannot be read (e.g. interpolation errors) are stored as None.
    """
    config = ConfigParser.ConfigParser()
    config.read(args.config_file)
    config_parsed = {}
    for section in config.sections():
        config_parsed[section] = {}
        for op in config.options(section):
            try:
                config_parsed[section][op] = config.get(section, op)
            # Fixed: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; ConfigParser.Error covers the
            # parser's own failure modes (interpolation, missing option).
            except ConfigParser.Error:
                logging.info("exception on {0}".format(op))
                config_parsed[section][op] = None
    return config_parsed
def parse_arguments():
    """
    Parse all the comandline arguments for the ancient DNA pipeline.

    Returns the parsed args namespace with an extra `level` attribute
    holding the logging level; exits with status 1 on --version or when
    the config file does not exist.
    """
    parser = argparse.ArgumentParser(description="Options for the ancient DNA pipeline")
    adapt_remove = parser.add_argument_group('AdapterRemoval')
    alignment = parser.add_argument_group('Alignment')
    sample_qc = parser.add_argument_group("Sample QC")
    ancient_filter = parser.add_argument_group('Ancient Filtering')
    variant_calling = parser.add_argument_group('Variant Calling')
    vcf_qc = parser.add_argument_group("VCF QC")  # reserved; no options yet
    parser.add_argument('-l', '--log-file', dest="log_file", default="pipeline_run.log",
                        help="Log file for the ancient DNA pipeline")
    parser.add_argument("-a", "--ancient-dna", dest="ancient_dna",
                        help="Ancient or modern DNA", action="store_true", default=True)
    parser.add_argument('--use-map-damage', dest="no_map_damage", action="store_false", default=True)
    parser.add_argument("fastq_files", nargs="*", help="Unzipped fastq files in the following format \
                        <SAMPLEID>.*.R1.fastq <SAMPLEID>.*.R2.fastq")
    parser.add_argument('-c', '--config-file', dest="config_file",
                        help="Path to configuration file", default="/etc/ancient_dna_pipeline.cfg")
    parser.add_argument('-v', '--verbose',
                        action="store_true", dest='verbose', default=False,
                        help="Verbose command output useful for debugging")
    parser.add_argument("--version", dest="version", help="Print program version",
                        action="store_true", default=False)
    parser.add_argument('-d', '--directory', dest="running_directory",
                        help="Directory where the output file should be placed")
    parser.add_argument("-t", "--temp-directory", dest="temp_directory", help="Temporary directory", default="tmp_dir")
    parser.add_argument("-b", "--library-type", dest="library_type", help="Type of Sequencing library: args are double-stranded (ds) and single-stranded (ss)")
    parser.add_argument("--imputation", dest="imputation", help="Perform BEAGLE imputation of the VCF file",
                        default=False, action="store_true")
    parser.add_argument("-m", "--use-merged_reads", help="Use the unmergable reads",
                        dest="use_unmerged_reads", action="store_true", default=True)
    adapt_remove.add_argument('--minimum-length', help="Minimum read length to process for analysis",
                              dest="adapt_min_length", default=25)
    adapt_remove.add_argument('--minimum-quality', help="Minimum base quality",
                              dest="adapt_min_qual", default=20)
    adapt_remove.add_argument('--mismatch-rate', help="Mismatch fraction (If >1 set to 1/<mismatch rate>",
                              dest="adapt_mismatch_rate", default=3)
    adapt_remove.add_argument("--min-length", help="Minimum alignment length for merging",
                              dest="adapt_alignment_length", default=11)
    alignment.add_argument("--max-edit-distance", help="Maxmimum edit distance (-n aln)",
                           dest="bwa_edit_distance", default=0.03)
    alignment.add_argument("--max-gap-opens", help="Maximum number of gap opens (-o aln)",
                           dest="bwa_gap_opens", default=2)
    alignment.add_argument("--seeding", help="Should seeding be enabled (disabled by default -l 1024 aln)",
                           dest="bwa_seeding", default=False, action="store_true")
    sample_qc.add_argument("--min-sample-coverage-percent", help="Minimum sample coverage (percentage)",
                           dest="min_coverage_percent", default=0.95)
    ancient_filter.add_argument('--downweight-number', help="Number of C->T transitions at start and G->A transitions at the end of read to downweight",
                                dest="ancient_downweight_number", default="2")
    variant_calling.add_argument('--min-depth', help="Minimum variant read-depth",
                                 dest="vcf_minimum_depth", default=2)
    variant_calling.add_argument('--min-mapping-quality', help="Minimum mapping quality",
                                 dest="vcf_min_mapping_quality", default=20)
    args = parser.parse_args()
    args.level = logging.DEBUG if args.verbose else logging.ERROR
    if args.version:
        # BUG FIX: the original referenced `__version__`, which is never
        # defined -- the module imports `__VERSION__` -- so `--version`
        # raised a NameError instead of printing the version.
        logging.info("Version: {0}".format(__VERSION__))
        sys.exit(1)
    if not os.path.isfile(args.config_file):
        logging.info("Could not find config file: {0}".format(args.config_file))
        sys.exit(1)
    return args
def main():
    """ The main function

    Runs the ancient-DNA (NGaDNAP) pipeline: parses arguments and config,
    configures logging, builds the dependency graph and executes it.
    """
    args = parse_arguments()
    config = parse_config(args)
    set_environment(config['environment'])
    logging.basicConfig(format='%(levelname)s %(asctime)s %(message)s',
                        filename=args.log_file, filemode='w',
                        level=args.level)
    check_paths(config)
    try:
        # NOTE: "cores_avaliable" (sic) is the actual key used in config
        # files -- do not "fix" the spelling without migrating configs.
        cores = int(config['system']['cores_avaliable'])
    # Fixed: was a bare `except:`; only missing keys or a non-integer
    # value are expected failure modes here.
    except (KeyError, TypeError, ValueError):
        logging.error("Could not read cores_avaliable from the config file")
        # TODO - add real error messages
        sys.exit(1)
    logging.info("Creating Job Queue")
    job_queue = JobQueue(cores)
    logging.info("Success")
    ngadnap_graph = CreateNGaDNAPGraph(args=args, config=config, job_queue=job_queue)
    job_queue.set_command_graph(ngadnap_graph.command_graph)
    ngadnap_graph.populate()
    ngadnap_graph.run()
    print("NGaDNAP ran successfully")
if __name__ == "__main__":
main()
|
{
"content_hash": "0787c80fba7af7ad9765ed13862d55cb",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 159,
"avg_line_length": 49.7007299270073,
"alnum_prop": 0.6243207519459539,
"repo_name": "theboocock/NGaDNAP",
"id": "96d22db7361128f0a9e536004b2f51dfb9ebf9fe",
"size": "6809",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ngadnap/pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60226"
}
],
"symlink_target": ""
}
|
from content_plugin import ContentPlugin
from gluon import URL, XML, CAT, I
import os
import json
class ContentPhotoset(ContentPlugin):
    """
    Photo set item

    web2py content plugin for a set of photos. Rows live in the
    plugin_photoset_content table (one per item) with a `photoset` list of
    plugin_photoset_photo row ids.
    """
    def get_icon(self):
        # Font-Awesome icon shown for this content type.
        return I(_class="fa fa-object-group")
    def get_name(self):
        # Human-readable, translatable type name.
        return self.T('Photo Set')
    def create_content(self, item):
        # Create an empty photo set for `item`, crediting the current user.
        self.db.plugin_photoset_content.insert(
            credit_line="{} {}".format(
                self.auth.user.first_name,
                self.auth.user.last_name
            ),
            item_id=item.unique_id,
            photoset=[]
        )
    def export(self, item, export_dir):
        # Dump the set to export_dir: photoset.json plus one <photo_id>/
        # subdirectory per photo containing photo.json with a download URL.
        db = self.db
        ct_table = db.plugin_photoset_content
        ct_photo = db.plugin_photoset_photo
        content = ct_table(item_id=item.unique_id)
        with open(os.path.join(export_dir, 'photoset.json'), 'w') as f:
            f.write(content.as_json())
        for p_id in content.photoset:
            pic = ct_photo(p_id)
            pic_dir = os.path.join(export_dir, str(p_id))
            # assumes export_dir exists and p_id dirs do not -- os.mkdir
            # raises if the directory is already present
            os.mkdir(pic_dir)
            with open(os.path.join(pic_dir, 'photo.json'), 'w') as f:
                f.write(json.dumps({
                    'id': pic.id,
                    'picture': URL(
                        'default','download', args=[pic.picture],
                        scheme=True, host=True)
                }))
        return
    def get_item_url(self, item):
        # Public view URL for this photo set.
        return URL('plugin_photoset', 'index.html', args=[item.unique_id])
    def get_changelog_url(self, item):
        return URL('plugin_photoset', 'changelog', args=[item.unique_id])
    def get_full_text(self, item):
        """Return full text document, mean for plugins"""
        photoset_content = self.db.plugin_photoset_content(
            item_id=item.unique_id)
        output = self.response.render(
            'plugin_photoset/full_text.txt',
            dict(photoset_content=photoset_content, item=item))
        # NOTE(review): `unicode` and .decode on the rendered output are
        # Python 2 only -- this module will not run under Python 3 as-is.
        return unicode(output.decode('utf-8'))
    def preview(self, item):
        # Let the base plugin do its bookkeeping first, then render the
        # preview template for this set.
        super(ContentPhotoset, self).preview(item)
        photoset_content = self.db.plugin_photoset_content(
            item_id=item.unique_id
        )
        return XML(
            self.response.render(
                'plugin_photoset/preview.html',
                dict(item=item, photoset_content=photoset_content))
        )
|
{
"content_hash": "41744fecb4c827839ff47c86a7378163",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 74,
"avg_line_length": 31.263157894736842,
"alnum_prop": 0.553030303030303,
"repo_name": "ybenitezf/nstock",
"id": "7b4e8721cc07f53c1fa8c6658f3c4e44f4ae905b",
"size": "2400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/plugin_photoset/content_photoset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "105585"
},
{
"name": "HTML",
"bytes": "332414"
},
{
"name": "JavaScript",
"bytes": "160918"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "2835776"
}
],
"symlink_target": ""
}
|
"""
Useful constants
"""
import logging, os, sys
from logging.handlers import RotatingFileHandler
# Basic Configuration
APPNAME = 'PKMeter'
VERSION = '0.7'
# Fixed: os.path.expanduser('~') is more robust than os.getenv('HOME') --
# getenv returns None when HOME is unset (crashing os.path.join) and HOME
# is not set on Windows; expanduser handles both.
CONFIGDIR = os.path.join(os.path.expanduser('~'), '.config', 'pkmeter')
CONFIGPATH = os.path.join(CONFIGDIR, 'config.json')
STATUSFILE = os.path.join(CONFIGDIR, 'status.json')
WORKDIR = os.path.dirname(os.path.dirname(__file__))
PLUGINDIR = os.path.join(WORKDIR, 'pkm', 'plugins')
SHAREDIR = os.path.join(WORKDIR, 'share')
THEMEDIR = os.path.join(SHAREDIR, 'themes')
HOMEPAGE = 'http://pushingkarma.com'

# Logging Configuration: send records both to a rotating log file
# (3 backups of up to 512KB each) and to stdout.
log = logging.getLogger('pkm')
logfile = os.path.join(CONFIGDIR, 'pkmeter.log')
logformat = logging.Formatter('%(asctime)s %(module)12s:%(lineno)-4s %(levelname)-9s %(message)s')
logdir = os.path.dirname(logfile)
os.makedirs(logdir, exist_ok=True)
filehandler = RotatingFileHandler(logfile, 'a', 512000, 3)
filehandler.setFormatter(logformat)
streamhandler = logging.StreamHandler(sys.stdout)
streamhandler.setFormatter(logformat)
log.addHandler(filehandler)
log.addHandler(streamhandler)
log.setLevel(logging.INFO)
|
{
"content_hash": "2e7cc4a6a0e9f52a0c036d07e1fa7578",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 98,
"avg_line_length": 33.27272727272727,
"alnum_prop": 0.7531876138433515,
"repo_name": "mjs7231/pkmeter",
"id": "1e263b0e46f4e9a3c2d924a50ae5f6208b32af19",
"size": "1122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pkm/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6371"
},
{
"name": "HTML",
"bytes": "35689"
},
{
"name": "Python",
"bytes": "123311"
}
],
"symlink_target": ""
}
|
import json
import os
import sys
import getopt
from os import listdir
from os.path import join, isfile, exists
# Module flag toggled by the -v/--verbose option in main().
_verbose = False
# Usage text shown for -h/--help and on argument errors.
_helpMessage = '''
Usage: khooshe_output.py [-d <dir of JSON files with Tika locations>] [-o <output directory for sample_pts.csv>]
Operation:
-o --outdir
Output directory for sample_pts.csv to feed into Khooshe.
-d --dir
The directory of JSON files with Tika Locations.
-v --verbose
Work verbosely.
'''
def generateGeoCsv(dirpath, outdirpath):
    """Read Tika-location JSON docs from dirpath and write a Khooshe
    lng,lat CSV (sample_points.csv) into outdirpath.

    NOTE(review): geo_lat/geo_lng are deliberately swapped below (the
    source data has them flipped, per the original comments) -- confirm
    before changing. Values are assumed to be strings.
    """
    outfile = "sample_points.csv"
    onlyfiles = [f for f in listdir(dirpath) if isfile(join(dirpath, f))]
    # Fixed: single-argument print(...) works in both Python 2 and 3,
    # unlike the original print-statement form.
    print("[INFO] Read " + str(len(onlyfiles)) + " json files from [" + dirpath + "]")
    if not os.path.exists(outdirpath):
        print("[INFO] Creating [" + outdirpath + "]")
        os.makedirs(outdirpath)
    outfilename = os.path.join(outdirpath, outfile)
    print("[INFO] Creating [" + outfilename + "]")
    numPoints = 0
    with open(outfilename, 'w') as of:
        for f in onlyfiles:
            filename = os.path.join(dirpath, f)
            # Fixed: removed the pointless reassignment of outfilename
            # inside the loop (it never changed).
            with open(filename, 'r') as fd:
                jsonDoc = json.load(fd)
            if "tika_location" in jsonDoc:
                tikaLoc = jsonDoc["tika_location"]
                geoLat = None
                geoLng = None
                if "geo_lat" in tikaLoc:
                    geoLng = tikaLoc["geo_lat"]  # workaround, flipped
                if "geo_lng" in tikaLoc:
                    geoLat = tikaLoc["geo_lng"]  # workaround, flipped
                if geoLat is not None and geoLng is not None:
                    verboseLog("Writing pt: (" + geoLng + "," + geoLat + ") from JSON: [" + filename + "]")
                    of.write(geoLng + "," + geoLat + "\n")
                    numPoints += 1
    print("[INFO] Khooshe Output complete: generated " + str(numPoints) + " points.")
def verboseLog(message):
    """Write message to stderr when the module-level _verbose flag is set."""
    if _verbose:
        # Fixed: sys.stderr.write works in both Python 2 and 3; the
        # original `print >>sys.stderr, message` chevron form is py2-only.
        sys.stderr.write("%s\n" % (message,))
class _Usage(Exception):
'''An error for problems with arguments on the command line.'''
def __init__(self, msg):
self.msg = msg
def main(argv=None):
    """Parse command-line options and run the CSV generation.

    Returns 2 (after printing usage to stderr) on argument problems;
    returns None on success.
    """
    if argv is None:
        argv = sys.argv
    try:
        try:
            opts, args = getopt.getopt(argv[1:], 'hvd:o:', ['help', 'verbose', 'dir=', 'outdir='])
        # Fixed: `except X as e` is valid in py2.6+ AND py3; the original
        # `except getopt.error, msg` comma form is a SyntaxError in py3.
        except getopt.error as msg:
            raise _Usage(msg)
        if len(opts) == 0:
            raise _Usage(_helpMessage)
        dir = None
        outdir = None
        for option, value in opts:
            if option in ('-h', '--help'):
                raise _Usage(_helpMessage)
            elif option in ('-v', '--verbose'):
                global _verbose
                _verbose = True
            elif option in ('-o', '--outdir'):
                outdir = value
            elif option in ('-d', '--dir'):
                dir = value
        if dir is None or outdir is None:
            raise _Usage(_helpMessage)
        generateGeoCsv(dir, outdir)
    except _Usage as err:
        # Fixed: py2-only `print >>sys.stderr` replaced with a plain write.
        sys.stderr.write(sys.argv[0].split('/')[-1] + ': ' + str(err.msg) + '\n')
        return 2
|
{
"content_hash": "fe375a2708770ba796191ddb09e167a2",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 112,
"avg_line_length": 30.066666666666666,
"alnum_prop": 0.5432372505543237,
"repo_name": "chrismattmann/memex-autonomy",
"id": "ddc8f78d5b217edd54019e179b36cdb11725e862",
"size": "3969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tika-geo/khooshe_output.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18476"
},
{
"name": "Shell",
"bytes": "2827"
}
],
"symlink_target": ""
}
|
"""Decorator service for the media_player.play_media service."""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_PLAYER_PLAY_MEDIA_SCHEMA)
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE,
DOMAIN as MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Configuration keys for the media_extractor component.
CONF_CUSTOMIZE_ENTITIES = 'customize'
CONF_DEFAULT_STREAM_QUERY = 'default_query'
# youtube-dl format selector used when no per-entity query is configured.
DEFAULT_STREAM_QUERY = 'best'
DOMAIN = 'media_extractor'
# Component schema: an optional default stream query plus optional
# per-entity {media_content_type: format_query} overrides.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(CONF_DEFAULT_STREAM_QUERY): cv.string,
        vol.Optional(CONF_CUSTOMIZE_ENTITIES):
            vol.Schema({cv.entity_id: vol.Schema({cv.string: cv.string})}),
    }),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Register the media_extractor override of media_player.play_media."""

    def _handle_play_media(call):
        """Resolve the media URL into a stream and dispatch playback."""
        # config[DOMAIN] is looked up lazily, per call, as in the original.
        extractor = MediaExtractor(hass, config[DOMAIN], call.data)
        extractor.extract_and_send()

    hass.services.register(
        DOMAIN, SERVICE_PLAY_MEDIA, _handle_play_media,
        schema=MEDIA_PLAYER_PLAY_MEDIA_SCHEMA)
    return True
class MEDownloadException(Exception):
    """Media extractor download exception."""
    # Removed the redundant `pass`: the docstring already forms the body.
class MEQueryException(Exception):
    """Media extractor query exception."""
    # Removed the redundant `pass`: the docstring already forms the body.
class MediaExtractor:
    """Class which encapsulates all extraction logic.

    Given a play_media service call, resolves the media URL through
    youtube-dl into a direct stream URL and re-dispatches the call to
    media_player.play_media for each target entity.
    """
    def __init__(self, hass, component_config, call_data):
        """Initialize media extractor."""
        self.hass = hass
        self.config = component_config
        self.call_data = call_data
    def get_media_url(self):
        """Return media content url."""
        return self.call_data.get(ATTR_MEDIA_CONTENT_ID)
    def get_entities(self):
        """Return list of entities."""
        return self.call_data.get(ATTR_ENTITY_ID, [])
    def extract_and_send(self):
        """Extract exact stream format for each entity_id and play it."""
        try:
            stream_selector = self.get_stream_selector()
        except MEDownloadException:
            _LOGGER.error("Could not retrieve data for the URL: %s",
                          self.get_media_url())
        else:
            entities = self.get_entities()
            # No explicit entities means "all players": dispatch once
            # without an entity_id filter.
            if not entities:
                self.call_media_player_service(stream_selector, None)
            for entity_id in entities:
                self.call_media_player_service(stream_selector, entity_id)
    def get_stream_selector(self):
        """Return format selector for the media URL.

        Raises MEDownloadException when youtube-dl cannot retrieve the
        media info or the playlist is empty.
        """
        from youtube_dl import YoutubeDL
        from youtube_dl.utils import DownloadError, ExtractorError
        ydl = YoutubeDL({'quiet': True, 'logger': _LOGGER})
        try:
            # process=False: only fetch metadata here; format resolution
            # happens later, per query, in the returned selector.
            all_media = ydl.extract_info(self.get_media_url(), process=False)
        except DownloadError:
            # This exception will be logged by youtube-dl itself
            raise MEDownloadException()
        if 'entries' in all_media:
            _LOGGER.warning(
                "Playlists are not supported, looking for the first video")
            entries = list(all_media['entries'])
            if entries:
                selected_media = entries[0]
            else:
                _LOGGER.error("Playlist is empty")
                raise MEDownloadException()
        else:
            selected_media = all_media
        def stream_selector(query):
            """Find stream URL that matches query."""
            try:
                ydl.params['format'] = query
                requested_stream = ydl.process_ie_result(
                    selected_media, download=False)
            except (ExtractorError, DownloadError):
                _LOGGER.error(
                    "Could not extract stream for the query: %s", query)
                raise MEQueryException()
            return requested_stream['url']
        return stream_selector
    def call_media_player_service(self, stream_selector, entity_id):
        """Call Media player play_media service.

        Replaces the original content id with the resolved stream URL and
        forwards all other service-call data unchanged.
        """
        stream_query = self.get_stream_query_for_entity(entity_id)
        try:
            stream_url = stream_selector(stream_query)
        except MEQueryException:
            _LOGGER.error("Wrong query format: %s", stream_query)
            return
        else:
            data = {k: v for k, v in self.call_data.items()
                    if k != ATTR_ENTITY_ID}
            data[ATTR_MEDIA_CONTENT_ID] = stream_url
            if entity_id:
                data[ATTR_ENTITY_ID] = entity_id
            self.hass.async_create_task(
                self.hass.services.async_call(
                    MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, data)
            )
    def get_stream_query_for_entity(self, entity_id):
        """Get stream format query for entity.

        Per-entity `customize` overrides (keyed by media content type)
        take precedence over the component-wide default query.
        """
        default_stream_query = self.config.get(
            CONF_DEFAULT_STREAM_QUERY, DEFAULT_STREAM_QUERY)
        if entity_id:
            media_content_type = self.call_data.get(ATTR_MEDIA_CONTENT_TYPE)
            return self.config \
                .get(CONF_CUSTOMIZE_ENTITIES, {}) \
                .get(entity_id, {}) \
                .get(media_content_type, default_stream_query)
        return default_stream_query
|
{
"content_hash": "eecbb2646d667679df64ccd9e3ba98bc",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 77,
"avg_line_length": 33.17791411042945,
"alnum_prop": 0.6037352071005917,
"repo_name": "DavidLP/home-assistant",
"id": "e6456401ba4db0c1559561e5ee4ed410cece8d2a",
"size": "5408",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_extractor/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15309293"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pyOCD.target.target import Target
import logging
from struct import unpack
from time import time
from binascii import crc32
# Number of bytes in a page to read to quickly determine if the page has the same data
PAGE_ESTIMATE_SIZE = 32
PAGE_READ_WEIGHT = 0.3
DATA_TRANSFER_B_PER_S = 40 * 1000 # ~40KB/s, depends on clock speed, theoretical limit for HID is 56,000 B/s
class ProgrammingInfo(object):
    """Records what kind of programming/analysis ran and how long it took."""

    def __init__(self):
        # FLASH_PAGE_ERASE or FLASH_CHIP_ERASE once programming has run
        self.program_type = None
        # Total programming time in seconds
        self.program_time = None
        # FLASH_ANALYSIS_CRC32 or FLASH_ANALYSIS_PARTIAL_PAGE_READ
        self.analyze_type = None
        # Time spent analyzing existing flash contents
        self.analyze_time = None
def _same(d1, d2):
assert len(d1) == len(d2)
for i in range(len(d1)):
if d1[i] != d2[i]:
return False
return True
def _erased(d):
for i in range(len(d)):
if d[i] != 0xFF:
return False
return True
def _stub_progress(percent):
    # No-op progress callback used when the caller supplies none.
    pass
class flash_page(object):
    """A single flash page: its address, size, pending data and the
    relative time weights used to pick a programming strategy."""

    def __init__(self, addr, size, data, erase_weight, program_weight):
        self.addr = addr
        self.size = size
        self.data = data
        self.erase_weight = erase_weight
        self.program_weight = program_weight
        self.erased = None  # True/False after analysis, None = unknown
        self.same = None    # True/False after comparison, None = unknown

    def getProgramWeight(self):
        """
        Get time to program a page including the data transfer
        """
        transfer_time = float(len(self.data)) / float(DATA_TRANSFER_B_PER_S)
        return self.program_weight + transfer_time

    def getEraseProgramWeight(self):
        """
        Get time to erase and program a page including data transfer time
        """
        transfer_time = float(len(self.data)) / float(DATA_TRANSFER_B_PER_S)
        return self.erase_weight + self.program_weight + transfer_time

    def getVerifyWeight(self):
        """
        Get time to verify a page
        """
        return float(self.size) / float(DATA_TRANSFER_B_PER_S)
class flash_operation(object):
    """A contiguous block of data queued for programming at `addr`."""

    def __init__(self, addr, data):
        self.addr = addr
        self.data = data
class FlashBuilder(object):
# Type of flash operation
FLASH_PAGE_ERASE = 1
FLASH_CHIP_ERASE = 2
# Type of flash analysis
FLASH_ANALYSIS_CRC32 = "CRC32"
FLASH_ANALYSIS_PARTIAL_PAGE_READ = "PAGE_READ"
    def __init__(self, flash, base_addr=0):
        # Flash driver used for page info, CRC computation and programming.
        self.flash = flash
        # Base address of the flash region; addData rejects anything below.
        self.flash_start = base_addr
        # Pending flash_operation records, kept sorted by address.
        self.flash_operation_list = []
        # flash_page list built by program() from the queued operations.
        self.page_list = []
        # Performance/record of the last program() run.
        self.perf = ProgrammingInfo()
        self.enable_double_buffering = True
        self.max_errors = 10
    def enableDoubleBuffer(self, enable):
        # Toggle double-buffered programming (overlap data transfer with
        # flash writes when the target algorithm supports it).
        self.enable_double_buffering = enable
    def setMaxErrors(self, count):
        # Maximum number of page program errors tolerated before aborting.
        self.max_errors = count
def addData(self, addr, data):
"""
Add a block of data to be programmed
Note - programming does not start until the method
program is called.
"""
# Sanity check
if addr < self.flash_start:
raise Exception("Invalid flash address 0x%x is before flash start 0x%x" % (addr, self.flash_start))
# Add operation to list
self.flash_operation_list.append(flash_operation(addr, data))
# Keep list sorted
self.flash_operation_list = sorted(self.flash_operation_list, key=lambda operation: operation.addr)
# Verify this does not overlap
prev_flash_operation = None
for operation in self.flash_operation_list:
if prev_flash_operation != None:
if prev_flash_operation.addr + len(prev_flash_operation.data) > operation.addr:
raise ValueError("Error adding data - Data at 0x%x..0x%x overlaps with 0x%x..0x%x"
% (prev_flash_operation.addr, prev_flash_operation.addr + len(prev_flash_operation.data),
operation.addr, operation.addr + len(operation.data)))
prev_flash_operation = operation
    def program(self, chip_erase=None, progress_cb=None, smart_flash=True, fast_verify=False):
        """
        Determine fastest method of flashing and then run flash programming.
        Data must have already been added with addData.

        chip_erase -- True forces a chip erase, False forces page erase,
                      None picks whichever is estimated to be faster.
        progress_cb -- optional callable(percent); defaults to a no-op.
        smart_flash -- when False, skip analysis and reprogram every page.
        fast_verify -- passed to the CRC32 analysis; trusts CRC matches
                       without a confirming read-back.

        Returns the ProgrammingInfo perf record, or None if there was
        nothing to program.
        """
        # Assumptions
        # 1. Page erases must be on page boundaries ( page_erase_addr % page_size == 0 )
        # 2. Page erase can have a different size depending on location
        # 3. It is safe to program a page with less than a page of data
        # Examples
        # - lpc4330 -Non 0 base address
        # - nRF51 -UICR location far from flash (address 0x10001000)
        # - LPC1768 -Different sized pages
        program_start = time()
        if progress_cb is None:
            progress_cb = _stub_progress
        # There must be at least 1 flash operation
        if len(self.flash_operation_list) == 0:
            logging.warning("No pages were programmed")
            return
        # Convert the list of flash operations into flash pages
        flash_addr = self.flash_operation_list[0].addr
        info = self.flash.getPageInfo(flash_addr)
        page_addr = flash_addr - (flash_addr % info.size)
        current_page = flash_page(page_addr, info.size, [], info.erase_weight, info.program_weight)
        self.page_list.append(current_page)
        for flash_operation in self.flash_operation_list:
            pos = 0
            while pos < len(flash_operation.data):
                # Check if operation is in next page
                flash_addr = flash_operation.addr + pos
                if flash_addr >= current_page.addr + current_page.size:
                    info = self.flash.getPageInfo(flash_addr)
                    page_addr = flash_addr - (flash_addr % info.size)
                    current_page = flash_page(page_addr, info.size, [], info.erase_weight, info.program_weight)
                    self.page_list.append(current_page)
                # Fill the page gap if there is one
                # (read existing flash so partially-covered pages keep
                # their current contents)
                page_data_end = current_page.addr + len(current_page.data)
                if flash_addr != page_data_end:
                    old_data = self.flash.target.readBlockMemoryUnaligned8(page_data_end, flash_addr - page_data_end)
                    current_page.data.extend(old_data)
                # Copy data to page and increment pos
                space_left_in_page = info.size - len(current_page.data)
                space_left_in_data = len(flash_operation.data) - pos
                amount = min(space_left_in_page, space_left_in_data)
                current_page.data.extend(flash_operation.data[pos:pos + amount])
                #increment position
                pos += amount
        # If smart flash was set to false then mark all pages
        # as requiring programming
        if not smart_flash:
            self._mark_all_pages_for_programming()
        # If the first page being programmed is not the first page
        # in ROM then don't use a chip erase
        if self.page_list[0].addr > self.flash_start:
            if chip_erase is None:
                chip_erase = False
            elif chip_erase is True:
                logging.warning('Chip erase used when flash address 0x%x is not the same as flash start 0x%x', self.page_list[0].addr, self.flash_start)
        self.flash.init()
        chip_erase_count, chip_erase_program_time = self._compute_chip_erase_pages_and_weight()
        page_erase_min_program_time = self._compute_page_erase_pages_weight_min()
        # If chip_erase hasn't been specified determine if chip erase is faster
        # than page erase regardless of contents
        if (chip_erase is None) and (chip_erase_program_time < page_erase_min_program_time):
            chip_erase = True
        # If chip erase isn't True then analyze the flash
        if chip_erase != True:
            analyze_start = time()
            if self.flash.getFlashInfo().crc_supported:
                sector_erase_count, page_program_time = self._compute_page_erase_pages_and_weight_crc32(fast_verify)
                self.perf.analyze_type = FlashBuilder.FLASH_ANALYSIS_CRC32
            else:
                sector_erase_count, page_program_time = self._compute_page_erase_pages_and_weight_sector_read()
                self.perf.analyze_type = FlashBuilder.FLASH_ANALYSIS_PARTIAL_PAGE_READ
            analyze_finish = time()
            self.perf.analyze_time = analyze_finish - analyze_start
            logging.debug("Analyze time: %f" % (analyze_finish - analyze_start))
        # If chip erase hasn't been set then determine fastest method to program
        if chip_erase is None:
            logging.debug("Chip erase count %i, Page erase est count %i" % (chip_erase_count, sector_erase_count))
            logging.debug("Chip erase weight %f, Page erase weight %f" % (chip_erase_program_time, page_program_time))
            chip_erase = chip_erase_program_time < page_program_time
        if chip_erase:
            if self.flash.isDoubleBufferingSupported() and self.enable_double_buffering:
                logging.debug("Using double buffer chip erase program")
                flash_operation = self._chip_erase_program_double_buffer(progress_cb)
            else:
                flash_operation = self._chip_erase_program(progress_cb)
        else:
            if self.flash.isDoubleBufferingSupported() and self.enable_double_buffering:
                logging.debug("Using double buffer page erase program")
                flash_operation = self._page_erase_program_double_buffer(progress_cb)
            else:
                flash_operation = self._page_erase_program(progress_cb)
        self.flash.target.resetStopOnReset()
        program_finish = time()
        self.perf.program_time = program_finish - program_start
        self.perf.program_type = flash_operation
        return self.perf
    def getPerformance(self):
        # Return the ProgrammingInfo populated by the last program() call.
        return self.perf
def _mark_all_pages_for_programming(self):
for page in self.page_list:
page.erased = False
page.same = False
    def _compute_chip_erase_pages_and_weight(self):
        """
        Compute the number of erased pages.
        Determine how many pages in the new data are already erased.

        Returns (count of pages needing programming, total estimated time
        for the chip-erase strategy) and caches both on self.
        """
        chip_erase_count = 0
        chip_erase_weight = 0
        # A chip erase always pays the full-device erase cost up front.
        chip_erase_weight += self.flash.getFlashInfo().erase_weight
        for page in self.page_list:
            # Lazily classify pages whose data is all 0xFF (already erased).
            if page.erased is None:
                page.erased = _erased(page.data)
            if not page.erased:
                chip_erase_count += 1
                chip_erase_weight += page.getProgramWeight()
        self.chip_erase_count = chip_erase_count
        self.chip_erase_weight = chip_erase_weight
        return chip_erase_count, chip_erase_weight
def _compute_page_erase_pages_weight_min(self):
page_erase_min_weight = 0
for page in self.page_list:
page_erase_min_weight += page.getVerifyWeight()
return page_erase_min_weight
    def _compute_page_erase_pages_and_weight_sector_read(self):
        """
        Estimate how many pages are the same.
        Quickly estimate how many pages are the same. These estimates are used
        by page_erase_program so it is recommended to call this before beginning programming
        This is done automatically by smart_program.

        Only the first PAGE_ESTIMATE_SIZE bytes of each page are compared,
        so a page can be marked different (same=False) but never confirmed
        identical here -- unproven pages keep same=None and are charged a
        verify read instead of a program.
        """
        # Quickly estimate how many pages are the same
        page_erase_count = 0
        page_erase_weight = 0
        for page in self.page_list:
            # Analyze pages that haven't been analyzed yet
            if page.same is None:
                size = min(PAGE_ESTIMATE_SIZE, len(page.data))
                data = self.flash.target.readBlockMemoryUnaligned8(page.addr, size)
                page_same = _same(data, page.data[0:size])
                if page_same is False:
                    page.same = False
        # Put together page and time estimate
        for page in self.page_list:
            if page.same is False:
                page_erase_count += 1
                page_erase_weight += page.getEraseProgramWeight()
            elif page.same is None:
                # Page is probably the same but must be read to confirm
                page_erase_weight += page.getVerifyWeight()
            elif page.same is True:
                # Page is confirmed to be the same so no programming weight
                pass
        self.page_erase_count = page_erase_count
        self.page_erase_weight = page_erase_weight
        return page_erase_count, page_erase_weight
    def _compute_page_erase_pages_and_weight_crc32(self, assume_estimate_correct=False):
        """
        Estimate how many pages are the same using CRC32 checksums.

        Quickly estimate how many pages are the same. These estimates are used
        by page_erase_program so it is recommended to call this before beginning programming
        This is done automatically by smart_program.

        If assume_estimate_correct is set to True, then pages with matching CRCs
        will be marked as the same. There is a small chance that the CRCs match even though the
        data is different, but the odds of this happing are low: ~1/(2^32) = ~2.33*10^-8%.
        """
        # Build list of all the pages that need to be analyzed
        sector_list = []
        page_list = []
        for page in self.page_list:
            if page.same is None:
                # Add sector to computeCrcs
                sector_list.append((page.addr, page.size))
                page_list.append(page)
                # Compute CRC of data (Padded with 0xFF)
                data = list(page.data)
                pad_size = page.size - len(page.data)
                if pad_size > 0:
                    data.extend([0xFF] * pad_size)
                # Mask to an unsigned 32-bit value (crc32 can return a signed
                # result on Python 2).
                page.crc = crc32(bytearray(data)) & 0xFFFFFFFF
        # Analyze pages
        page_erase_count = 0
        page_erase_weight = 0
        if len(page_list) > 0:
            # Ask the target to CRC the same sectors so both sides hash
            # identical ranges.
            crc_list = self.flash.computeCrcs(sector_list)
            for page, crc in zip(page_list, crc_list):
                page_same = page.crc == crc
                if assume_estimate_correct:
                    page.same = page_same
                elif page_same is False:
                    # Only definite mismatches are recorded; a CRC match still
                    # needs a verify read unless assume_estimate_correct is set.
                    page.same = False
        # Put together page and time estimate
        for page in self.page_list:
            if page.same is False:
                page_erase_count += 1
                page_erase_weight += page.getEraseProgramWeight()
            elif page.same is None:
                # Page is probably the same but must be read to confirm
                page_erase_weight += page.getVerifyWeight()
            elif page.same is True:
                # Page is confirmed to be the same so no programming weight
                pass
        self.page_erase_count = page_erase_count
        self.page_erase_weight = page_erase_weight
        return page_erase_count, page_erase_weight
def _chip_erase_program(self, progress_cb=_stub_progress):
"""
Program by first performing a chip erase.
"""
logging.debug("Smart chip erase")
logging.debug("%i of %i pages already erased", len(self.page_list) - self.chip_erase_count, len(self.page_list))
progress_cb(0.0)
progress = 0
self.flash.eraseAll()
progress += self.flash.getFlashInfo().erase_weight
for page in self.page_list:
if not page.erased:
self.flash.programPage(page.addr, page.data)
progress += page.getProgramWeight()
progress_cb(float(progress) / float(self.chip_erase_weight))
progress_cb(1.0)
return FlashBuilder.FLASH_CHIP_ERASE
def _next_unerased_page(self, i):
if i >= len(self.page_list):
return None, i
page = self.page_list[i]
while page.erased:
i += 1
if i >= len(self.page_list):
return None, i
page = self.page_list[i]
return page, i + 1
    def _chip_erase_program_double_buffer(self, progress_cb=_stub_progress):
        """
        Program by first performing a chip erase.

        Double-buffered variant: while one page buffer is being programmed,
        the next page's data is loaded into the other buffer, so the target
        never sits idle waiting on data transfer.
        """
        logging.debug("Smart chip erase")
        logging.debug("%i of %i pages already erased", len(self.page_list) - self.chip_erase_count, len(self.page_list))
        progress_cb(0.0)
        progress = 0
        self.flash.eraseAll()
        progress += self.flash.getFlashInfo().erase_weight

        # Set up page and buffer info.
        error_count = 0
        current_buf = 0
        next_buf = 1
        page, i = self._next_unerased_page(0)
        # Callers only take this path when at least one page needs programming.
        assert page is not None

        # Load first page buffer
        self.flash.loadPageBuffer(current_buf, page.addr, page.data)

        while page is not None:
            # Kick off this page program.
            current_addr = page.addr
            current_weight = page.getProgramWeight()
            self.flash.startProgramPageWithBuffer(current_buf, current_addr)

            # Get next page and load it.
            page, i = self._next_unerased_page(i)
            if page is not None:
                # Overlap the data transfer with the in-flight program operation.
                self.flash.loadPageBuffer(next_buf, page.addr, page.data)

            # Wait for the program to complete.
            result = self.flash.waitForCompletion()

            # check the return code
            if result != 0:
                logging.error('programPage(0x%x) error: %i', current_addr, result)
                error_count += 1
                if error_count > self.max_errors:
                    logging.error("Too many page programming errors, aborting program operation")
                    break

            # Swap buffers.
            temp = current_buf
            current_buf = next_buf
            next_buf = temp

            # Update progress.
            progress += current_weight
            progress_cb(float(progress) / float(self.chip_erase_weight))

        progress_cb(1.0)
        return FlashBuilder.FLASH_CHIP_ERASE
    def _page_erase_program(self, progress_cb=_stub_progress):
        """
        Program by performing sector erases.

        Only pages whose contents differ from the current flash contents are
        erased and reprogrammed.  Pages not yet classified are verified first
        by reading back the flash.
        """
        actual_page_erase_count = 0
        actual_page_erase_weight = 0
        progress = 0

        progress_cb(0.0)

        for page in self.page_list:

            # If the page is not the same
            # (pre-account the erase+program cost for pages already known to differ)
            if page.same is False:
                progress += page.getEraseProgramWeight()

            # Read page data if unknown - after this page.same will be True or False
            if page.same is None:
                data = self.flash.target.readBlockMemoryUnaligned8(page.addr, len(page.data))
                page.same = _same(page.data, data)
                progress += page.getVerifyWeight()

            # Program page if not the same
            if page.same is False:
                self.flash.erasePage(page.addr)
                self.flash.programPage(page.addr, page.data)
                actual_page_erase_count += 1
                actual_page_erase_weight += page.getEraseProgramWeight()

            # Update progress (guard against a zero estimated weight)
            if self.page_erase_weight > 0:
                progress_cb(float(progress) / float(self.page_erase_weight))

        progress_cb(1.0)

        logging.debug("Estimated page erase count: %i", self.page_erase_count)
        logging.debug("Actual page erase count: %i", actual_page_erase_count)

        return FlashBuilder.FLASH_PAGE_ERASE
def _scan_pages_for_same(self, progress_cb=_stub_progress):
"""
Program by performing sector erases.
"""
progress = 0
count = 0
same_count = 0
for page in self.page_list:
# Read page data if unknown - after this page.same will be True or False
if page.same is None:
data = self.flash.target.readBlockMemoryUnaligned8(page.addr, len(page.data))
page.same = _same(page.data, data)
progress += page.getVerifyWeight()
count += 1
if page.same:
same_count += 1
# Update progress
progress_cb(float(progress) / float(self.page_erase_weight))
return progress
def _next_nonsame_page(self, i):
if i >= len(self.page_list):
return None, i
page = self.page_list[i]
while page.same:
i += 1
if i >= len(self.page_list):
return None, i
page = self.page_list[i]
return page, i + 1
    def _page_erase_program_double_buffer(self, progress_cb=_stub_progress):
        """
        Program by performing sector erases.

        Double-buffered variant: the next differing page's data is loaded
        into the second buffer while the current page is being programmed.
        All pages are classified up front (via _scan_pages_for_same) since
        flash cannot be read back while it is being programmed.
        """
        actual_page_erase_count = 0
        actual_page_erase_weight = 0
        progress = 0

        progress_cb(0.0)

        # Fill in same flag for all pages. This is done up front so we're not trying
        # to read from flash while simultaneously programming it.
        progress = self._scan_pages_for_same(progress_cb)

        # Set up page and buffer info.
        error_count = 0
        current_buf = 0
        next_buf = 1
        page, i = self._next_nonsame_page(0)

        # Make sure there are actually pages to program differently from current flash contents.
        if page is not None:
            # Load first page buffer
            self.flash.loadPageBuffer(current_buf, page.addr, page.data)

            while page is not None:
                # After the up-front scan every page must be classified.
                assert page.same is not None

                # Kick off this page program.
                current_addr = page.addr
                current_weight = page.getEraseProgramWeight()
                self.flash.erasePage(current_addr)
                self.flash.startProgramPageWithBuffer(current_buf, current_addr) #, erase_page=True)
                actual_page_erase_count += 1
                actual_page_erase_weight += page.getEraseProgramWeight()

                # Get next page and load it.
                page, i = self._next_nonsame_page(i)
                if page is not None:
                    # Overlap the data transfer with the in-flight program operation.
                    self.flash.loadPageBuffer(next_buf, page.addr, page.data)

                # Wait for the program to complete.
                result = self.flash.waitForCompletion()

                # check the return code
                if result != 0:
                    logging.error('programPage(0x%x) error: %i', current_addr, result)
                    error_count += 1
                    if error_count > self.max_errors:
                        logging.error("Too many page programming errors, aborting program operation")
                        break

                # Swap buffers.
                temp = current_buf
                current_buf = next_buf
                next_buf = temp

                # Update progress
                progress += current_weight
                if self.page_erase_weight > 0:
                    progress_cb(float(progress) / float(self.page_erase_weight))

        progress_cb(1.0)

        logging.debug("Estimated page erase count: %i", self.page_erase_count)
        logging.debug("Actual page erase count: %i", actual_page_erase_count)

        return FlashBuilder.FLASH_PAGE_ERASE
|
{
"content_hash": "56eb9fa182a787ac2c74f99ed4f3ec8b",
"timestamp": "",
"source": "github",
"line_count": 608,
"max_line_length": 152,
"avg_line_length": 39.370065789473685,
"alnum_prop": 0.5883778251242846,
"repo_name": "bridadan/pyOCD",
"id": "50b9f915f40a3a53c5dab7ac20373fb9d2376277",
"size": "23937",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyOCD/flash/flash_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "849"
},
{
"name": "C",
"bytes": "3924"
},
{
"name": "Python",
"bytes": "550255"
},
{
"name": "Shell",
"bytes": "407"
}
],
"symlink_target": ""
}
|
import ioc
import os
class Extension(ioc.component.Extension):
    """IoC container extension that wires up the wkhtmltopdf plugin."""

    def load(self, config, container_builder):
        # Resolve this package's directory so the bundled YAML config is found
        # regardless of the working directory.
        here = os.path.dirname(os.path.abspath(__file__))
        ioc.loader.YamlLoader().load("%s/resources/config/wkhtmltopdf.yml" % here, container_builder)

        container_builder.parameters.set(
            'element.plugins.wkhtmltopdf.data_path',
            config.get_all('data_path', '%project.root_folder%/data/wkhtmltopdf'))
|
{
"content_hash": "61982b3e9f2c68449edc47cec46f6304",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 152,
"avg_line_length": 40.90909090909091,
"alnum_prop": 0.7066666666666667,
"repo_name": "rande/python-element",
"id": "42ae716feadf7b1142da5c8a6668aaff27fdb8e1",
"size": "1052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "element/plugins/wkhtmltopdf/di.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "120475"
},
{
"name": "HTML",
"bytes": "93511"
},
{
"name": "JavaScript",
"bytes": "2830"
},
{
"name": "Makefile",
"bytes": "789"
},
{
"name": "Nginx",
"bytes": "410"
},
{
"name": "Perl",
"bytes": "2987"
},
{
"name": "Python",
"bytes": "303084"
}
],
"symlink_target": ""
}
|
from PyQt5 import QtCore, QtWidgets
class TableFitMixin(object):
    """Mixin that makes a Qt table view report a size hint exactly fitting
    its contents (headers plus reserved scrollbar space)."""

    DEFAULT_SCROLLBAR_EXTRA_SPACE = (16, 16)

    def __init__(self, size_hint_only=False, scrollbar_extra_space=None):
        defaults = TableFitMixin.DEFAULT_SCROLLBAR_EXTRA_SPACE
        if scrollbar_extra_space is None:
            self._scrollbar_extra_space = defaults
        else:
            # Negative entries fall back to the corresponding default value.
            self._scrollbar_extra_space = tuple(
                given if given >= 0 else fallback
                for given, fallback in zip(scrollbar_extra_space, defaults)
            )
        if not size_hint_only:
            # Let the layout honor the computed hint and hide the scrollbars.
            self.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
            self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
            self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)

    def sizeHint(self):
        # Content extent plus opposite header size plus reserved extra space.
        h_header = self.horizontalHeader()
        v_header = self.verticalHeader()
        extra_w, extra_h = self._scrollbar_extra_space
        width = h_header.length() + v_header.width() + extra_w
        height = v_header.length() + h_header.height() + extra_h
        return QtCore.QSize(width, height)
|
{
"content_hash": "e8aa1c153a69bd92cb66f72bb1a7b69d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 118,
"avg_line_length": 50.074074074074076,
"alnum_prop": 0.6826923076923077,
"repo_name": "sciapp/pyMolDyn",
"id": "a4c9f2879575d6a9c56e1396ed0ea125f57a82de",
"size": "1377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gui/util/table_fit_mixin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "1998"
},
{
"name": "C",
"bytes": "32038"
},
{
"name": "CSS",
"bytes": "911"
},
{
"name": "HTML",
"bytes": "21678"
},
{
"name": "Makefile",
"bytes": "7526"
},
{
"name": "Python",
"bytes": "664457"
},
{
"name": "Ruby",
"bytes": "516"
},
{
"name": "Shell",
"bytes": "21156"
}
],
"symlink_target": ""
}
|
import sys
from plantcv.plantcv import _version
from plantcv.plantcv import params
def deprecation_warning(warning):
    """Print out deprecation warning

    Inputs:
    warning = warning message text

    :param warning: str
    """
    info = _version.get_versions()
    message = (f"DeprecationWarning: {warning} "
               f"Current PlantCV version: {info['version']} released on {info['date']}")
    # Only emit when verbose output is enabled; goes to stderr like a warning.
    if params.verbose is True:
        print(message, file=sys.stderr)
|
{
"content_hash": "8aba3595273e91bd878da11a75e89a73",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 126,
"avg_line_length": 29.75,
"alnum_prop": 0.7058823529411765,
"repo_name": "danforthcenter/plantcv",
"id": "2186ae2592b38a999e3cdcdbe01ade5111e94122",
"size": "500",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "plantcv/plantcv/deprecation_warning.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1112"
},
{
"name": "Python",
"bytes": "898011"
},
{
"name": "R",
"bytes": "1327"
},
{
"name": "Shell",
"bytes": "3348"
}
],
"symlink_target": ""
}
|
import unittest
from numpy.testing import assert_almost_equal
from tick.prox import ProxElasticNet, ProxL1, ProxL2Sq
from tick.prox.tests.prox import TestProx
class ProxElasticNetTest(object):
    def test_ProxElasticNet(self):
        """...Test of ProxElasticNet

        Checks that the elastic-net prox equals the composition of an L1 prox
        (strength ratio * l_enet) and an L2-squared prox
        (strength (1 - ratio) * l_enet), both for value() and call(),
        with and without the positivity constraint.
        """
        coeffs = self.coeffs.copy().astype(self.dtype)
        l_enet = 3e-2
        ratio = .3
        t = 1.7
        prox_enet = ProxElasticNet(l_enet, ratio=ratio).astype(self.dtype)
        prox_l1 = ProxL1(ratio * l_enet).astype(self.dtype)
        prox_l2 = ProxL2Sq((1 - ratio) * l_enet).astype(self.dtype)
        # Penalty value should decompose as L1 part + L2Sq part.
        self.assertAlmostEqual(
            prox_enet.value(coeffs),
            prox_l1.value(coeffs) + prox_l2.value(coeffs), delta=self.delta)
        # The prox operator should match applying L1 then L2Sq in place.
        out = coeffs.copy()
        prox_l1.call(out, t, out)
        prox_l2.call(out, t, out)
        assert_almost_equal(prox_enet.call(coeffs, step=t), out, decimal=10)
        # Same checks with the positive-projection variant.
        prox_enet = ProxElasticNet(l_enet, ratio=ratio,
                                   positive=True).astype(self.dtype)
        prox_l1 = ProxL1(ratio * l_enet, positive=True).astype(self.dtype)
        prox_l2 = ProxL2Sq((1 - ratio) * l_enet,
                           positive=True).astype(self.dtype)
        self.assertAlmostEqual(
            prox_enet.value(coeffs),
            prox_l1.value(coeffs) + prox_l2.value(coeffs), delta=self.delta)
        out = coeffs.copy()
        prox_l1.call(out, t, out)
        prox_l2.call(out, t, out)
        assert_almost_equal(prox_enet.call(coeffs, step=t), out, decimal=10)
class ProxElasticNetTestFloat32(TestProx, ProxElasticNetTest):
    def __init__(self, *args, **kwargs):
        # Run the shared ProxElasticNet checks in single precision.
        TestProx.__init__(self, *args, dtype="float32", **kwargs)


class ProxElasticNetTestFloat64(TestProx, ProxElasticNetTest):
    def __init__(self, *args, **kwargs):
        # Run the shared ProxElasticNet checks in double precision.
        TestProx.__init__(self, *args, dtype="float64", **kwargs)


if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "b00bdb98092b4415e9f2fb882397ace4",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 76,
"avg_line_length": 33.5,
"alnum_prop": 0.6073082861554298,
"repo_name": "Dekken/tick",
"id": "963f493dcd8892da8ba46fca01ab20166a155d5d",
"size": "1968",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tick/prox/tests/prox_elasticnet_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6660"
},
{
"name": "C++",
"bytes": "1181742"
},
{
"name": "CMake",
"bytes": "22073"
},
{
"name": "Dockerfile",
"bytes": "2017"
},
{
"name": "Python",
"bytes": "1450866"
},
{
"name": "Shell",
"bytes": "33446"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete ``domain_old`` column from the site model."""

    dependencies = [
        ('sites', '0015_auto_20170429_1525'),
    ]

    operations = [
        migrations.RemoveField(model_name='site', name='domain_old'),
    ]
|
{
"content_hash": "9b32706113cfb76b5b2f632e5c09f950",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 45,
"avg_line_length": 18.823529411764707,
"alnum_prop": 0.58125,
"repo_name": "tjcsl/director",
"id": "c0b01c507ecaab5a119844d1e9e919ebb143e03c",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web3/apps/sites/migrations/0016_remove_site_domain_old.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "139994"
},
{
"name": "HTML",
"bytes": "129326"
},
{
"name": "JavaScript",
"bytes": "313396"
},
{
"name": "Python",
"bytes": "232989"
},
{
"name": "Shell",
"bytes": "11826"
}
],
"symlink_target": ""
}
|
# Manual smoke test for the C extension module's repeat() binding (Python 2).
import repeat

# Valid call: string first argument plus three integer arguments.
print repeat.repeat("qwerty",1,4,5)
try:
    # First argument is an int, not a string -- presumably rejected by the
    # extension; the raised exception is printed instead of propagating.
    print repeat.repeat(1,4,5,6)
except Exception as ex:
    print ex
try:
    # One argument too many -- also expected to raise.
    print repeat.repeat("qwerty",1,4,5,6)
except Exception as ex:
    print ex
|
{
"content_hash": "9b6dc8964abb8c5f39bc90ac02e31472",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 41,
"avg_line_length": 16.307692307692307,
"alnum_prop": 0.6792452830188679,
"repo_name": "AlexAsh/pycembed15",
"id": "c274be51a8b31d504524524b21f3d6a183f6a438",
"size": "212",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "testrepeat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3262"
},
{
"name": "Makefile",
"bytes": "786"
},
{
"name": "Python",
"bytes": "5986"
}
],
"symlink_target": ""
}
|
import sys
import time
from django.conf import settings
from django.utils.datastructures import DictWrapper
# The prefix to put on the default database name when creating
# the test database (e.g. 'mydb' becomes 'test_mydb'); see
# BaseDatabaseCreation._get_test_db_name().
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation(object):
"""
This class encapsulates all backend-specific differences that pertain to
database *creation*, such as the column types to use for particular Django
Fields, the SQL used to create and destroy tables, and the creation and
destruction of test databases.
"""
data_types = {}
    def __init__(self, connection):
        # Backend connection wrapper; provides .ops, .settings_dict,
        # .cursor(), .alias, etc. used throughout this class.
        self.connection = connection
    def _digest(self, *args):
        """
        Generates a 32-bit digest of a set of arguments that can be used to
        shorten identifying names.
        """
        # hash() of the args tuple folded into [0, 2**32), so the hex form is
        # at most 8 characters.  (Python 2 long literal.)
        return '%x' % (abs(hash(args)) % 4294967296L) # 2**32
    def db_type(self, field):
        # Column type for the field itself.
        return self._db_type(field, field.get_internal_type())

    def related_db_type(self, field):
        # Column type used when this field is the target of a relation.
        return self._db_type(field, field.get_related_internal_type())
    def _db_type(self, field, internal_type):
        # Expose the field's attributes (with names quoted via the qn_ prefix)
        # to the backend's data_types format strings.
        data = DictWrapper(field.__dict__, self.connection.ops.quote_name, "qn_")
        try:
            # NOTE: the KeyError handler deliberately also covers the %
            # formatting, not just the data_types lookup.
            return self.connection.creation.data_types[internal_type] % data
        except KeyError:
            # No column type for this internal type; callers treat None as
            # "not represented as a database column" (see sql_create_model).
            return None
def sql_create_model(self, model, style, known_models=set()):
"""
Returns the SQL required to create a single model, as a tuple of:
(list_of_sql, pending_references_dict)
"""
opts = model._meta
if not opts.managed or opts.proxy:
return [], {}
final_output = []
table_output = []
pending_references = {}
qn = self.connection.ops.quote_name
for f in opts.local_fields:
col_type = f.db_type(connection=self.connection)
tablespace = f.db_tablespace or opts.db_tablespace
if col_type is None:
# Skip ManyToManyFields, because they're not represented as
# database columns in this table.
continue
# Make the definition (e.g. 'foo VARCHAR(30)') for this field.
field_output = [style.SQL_FIELD(qn(f.column)),
style.SQL_COLTYPE(col_type)]
if not f.null:
field_output.append(style.SQL_KEYWORD('NOT NULL'))
if f.primary_key:
field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
elif f.unique:
field_output.append(style.SQL_KEYWORD('UNIQUE'))
if tablespace and f.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
field_output.append(self.connection.ops.tablespace_sql(tablespace, inline=True))
if f.rel:
ref_output, pending = self.sql_for_inline_foreign_key_references(f, known_models, style)
if pending:
pr = pending_references.setdefault(f.rel.to, []).append((model, f))
else:
field_output.extend(ref_output)
table_output.append(' '.join(field_output))
for field_constraints in opts.unique_together:
table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' % \
", ".join([style.SQL_FIELD(qn(opts.get_field(f).column)) for f in field_constraints]))
full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + style.SQL_TABLE(qn(opts.db_table)) + ' (']
for i, line in enumerate(table_output): # Combine and add commas.
full_statement.append(' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
full_statement.append(')')
if opts.db_tablespace:
full_statement.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
full_statement.append(';')
final_output.append('\n'.join(full_statement))
if opts.has_auto_field:
# Add any extra SQL needed to support auto-incrementing primary keys.
auto_column = opts.auto_field.db_column or opts.auto_field.name
autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table, auto_column)
if autoinc_sql:
for stmt in autoinc_sql:
final_output.append(stmt)
return final_output, pending_references
    def sql_for_inline_foreign_key_references(self, field, known_models, style):
        "Return the SQL snippet defining the foreign key reference for a field"
        # Returns (fragments, pending).  pending=True means the referenced
        # table has not been created yet, so the constraint must be added
        # later (see sql_for_pending_references).
        qn = self.connection.ops.quote_name
        if field.rel.to in known_models:
            output = [style.SQL_KEYWORD('REFERENCES') + ' ' + \
                style.SQL_TABLE(qn(field.rel.to._meta.db_table)) + ' (' + \
                style.SQL_FIELD(qn(field.rel.to._meta.get_field(field.rel.field_name).column)) + ')' +
                self.connection.ops.deferrable_sql()
            ]
            pending = False
        else:
            # We haven't yet created the table to which this field
            # is related, so save it for later.
            output = []
            pending = True

        return output, pending
    def sql_for_pending_references(self, model, style, pending_references):
        "Returns any ALTER TABLE statements to add constraints after the fact."
        from django.db.backends.util import truncate_name

        if not model._meta.managed or model._meta.proxy:
            return []
        qn = self.connection.ops.quote_name
        final_output = []
        opts = model._meta
        if model in pending_references:
            for rel_class, f in pending_references[model]:
                rel_opts = rel_class._meta
                r_table = rel_opts.db_table
                r_col = f.column
                table = opts.db_table
                col = opts.get_field(f.rel.field_name).column
                # For MySQL, r_name must be unique in the first 64 characters.
                # So we are careful with character usage here.
                r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
                final_output.append(style.SQL_KEYWORD('ALTER TABLE') + ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' % \
                    (qn(r_table), qn(truncate_name(r_name, self.connection.ops.max_name_length())),
                    qn(r_col), qn(table), qn(col),
                    self.connection.ops.deferrable_sql()))
            # Processed: remove so callers never emit these constraints twice.
            del pending_references[model]
        return final_output
    def sql_for_many_to_many(self, model, style):
        "Return the CREATE TABLE statments for all the many-to-many tables defined on a model"
        # Deprecated API kept for backwards compatibility; warns on every call.
        import warnings
        warnings.warn(
            'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
            DeprecationWarning
        )

        output = []
        for f in model._meta.local_many_to_many:
            # Only emit when at least one side of the relation is managed.
            if model._meta.managed or f.rel.to._meta.managed:
                output.extend(self.sql_for_many_to_many_field(model, f, style))
        return output
    def sql_for_many_to_many_field(self, model, f, style):
        "Return the CREATE TABLE statements for a single m2m field"
        # Deprecated API kept for backwards compatibility; warns on every call.
        import warnings
        warnings.warn(
            'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
            DeprecationWarning
        )

        from django.db import models
        from django.db.backends.util import truncate_name

        output = []
        # Only auto-created intermediate tables are generated here; explicit
        # 'through' models are created like any other model.
        if f.auto_created:
            opts = model._meta
            qn = self.connection.ops.quote_name
            tablespace = f.db_tablespace or opts.db_tablespace
            if tablespace:
                sql = self.connection.ops.tablespace_sql(tablespace, inline=True)
                if sql:
                    tablespace_sql = ' ' + sql
                else:
                    tablespace_sql = ''
            else:
                tablespace_sql = ''
            table_output = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + \
                style.SQL_TABLE(qn(f.m2m_db_table())) + ' (']
            table_output.append('    %s %s %s%s,' %
                (style.SQL_FIELD(qn('id')),
                style.SQL_COLTYPE(models.AutoField(primary_key=True).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL PRIMARY KEY'),
                tablespace_sql))

            # deferred collects (r_table, r_col, table, col) tuples for FK
            # constraints that must be added after table creation.
            deferred = []
            inline_output, deferred = self.sql_for_inline_many_to_many_references(model, f, style)
            table_output.extend(inline_output)

            table_output.append('    %s (%s, %s)%s' %
                (style.SQL_KEYWORD('UNIQUE'),
                style.SQL_FIELD(qn(f.m2m_column_name())),
                style.SQL_FIELD(qn(f.m2m_reverse_name())),
                tablespace_sql))
            table_output.append(')')
            if opts.db_tablespace:
                # f.db_tablespace is only for indices, so ignore its value here.
                table_output.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
            table_output.append(';')
            output.append('\n'.join(table_output))

            for r_table, r_col, table, col in deferred:
                r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
                output.append(style.SQL_KEYWORD('ALTER TABLE') + ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
                    (qn(r_table),
                    qn(truncate_name(r_name, self.connection.ops.max_name_length())),
                    qn(r_col), qn(table), qn(col),
                    self.connection.ops.deferrable_sql()))

            # Add any extra SQL needed to support auto-incrementing PKs
            autoinc_sql = self.connection.ops.autoinc_sql(f.m2m_db_table(), 'id')
            if autoinc_sql:
                for stmt in autoinc_sql:
                    output.append(stmt)

        return output
    def sql_for_inline_many_to_many_references(self, model, field, style):
        "Create the references to other tables required by a many-to-many table"
        # Deprecated API kept for backwards compatibility; warns on every call.
        import warnings
        warnings.warn(
            'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
            DeprecationWarning
        )

        from django.db import models
        opts = model._meta
        qn = self.connection.ops.quote_name

        # One inline FK column definition for each side of the relation.
        table_output = [
            '    %s %s %s %s (%s)%s,' %
                (style.SQL_FIELD(qn(field.m2m_column_name())),
                style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL REFERENCES'),
                style.SQL_TABLE(qn(opts.db_table)),
                style.SQL_FIELD(qn(opts.pk.column)),
                self.connection.ops.deferrable_sql()),
            '    %s %s %s %s (%s)%s,' %
                (style.SQL_FIELD(qn(field.m2m_reverse_name())),
                style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL REFERENCES'),
                style.SQL_TABLE(qn(field.rel.to._meta.db_table)),
                style.SQL_FIELD(qn(field.rel.to._meta.pk.column)),
                self.connection.ops.deferrable_sql())
        ]
        # This base implementation never defers any references.
        deferred = []

        return table_output, deferred
def sql_indexes_for_model(self, model, style):
"Returns the CREATE INDEX SQL statements for a single model"
if not model._meta.managed or model._meta.proxy:
return []
output = []
for f in model._meta.local_fields:
output.extend(self.sql_indexes_for_field(model, f, style))
return output
    def sql_indexes_for_field(self, model, f, style):
        "Return the CREATE INDEX SQL statements for a single model field"
        from django.db.backends.util import truncate_name

        # Only explicitly indexed, non-unique fields need a separate CREATE
        # INDEX; unique fields are already covered by their UNIQUE constraint.
        if f.db_index and not f.unique:
            qn = self.connection.ops.quote_name
            tablespace = f.db_tablespace or model._meta.db_tablespace
            if tablespace:
                sql = self.connection.ops.tablespace_sql(tablespace)
                if sql:
                    tablespace_sql = ' ' + sql
                else:
                    tablespace_sql = ''
            else:
                tablespace_sql = ''
            # Index name: <table>_<digest-of-column>, truncated to backend limit.
            i_name = '%s_%s' % (model._meta.db_table, self._digest(f.column))
            output = [style.SQL_KEYWORD('CREATE INDEX') + ' ' +
                style.SQL_TABLE(qn(truncate_name(i_name, self.connection.ops.max_name_length()))) + ' ' +
                style.SQL_KEYWORD('ON') + ' ' +
                style.SQL_TABLE(qn(model._meta.db_table)) + ' ' +
                "(%s)" % style.SQL_FIELD(qn(f.column)) +
                "%s;" % tablespace_sql]
        else:
            output = []
        return output
    def sql_destroy_model(self, model, references_to_delete, style):
        "Return the DROP TABLE and restraint dropping statements for a single model"
        if not model._meta.managed or model._meta.proxy:
            return []
        # Drop the table now
        qn = self.connection.ops.quote_name
        output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
                              style.SQL_TABLE(qn(model._meta.db_table)))]
        if model in references_to_delete:
            # Drop incoming FK constraints first so the table can be removed.
            output.extend(self.sql_remove_table_constraints(model, references_to_delete, style))

        if model._meta.has_auto_field:
            # Backends may need to drop an associated sequence for the PK.
            ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
            if ds:
                output.append(ds)
        return output
    def sql_remove_table_constraints(self, model, references_to_delete, style):
        # Returns ALTER TABLE statements dropping the foreign keys that point
        # at `model`, consuming its entry in references_to_delete.
        from django.db.backends.util import truncate_name

        if not model._meta.managed or model._meta.proxy:
            return []
        output = []
        qn = self.connection.ops.quote_name
        for rel_class, f in references_to_delete[model]:
            table = rel_class._meta.db_table
            col = f.column
            r_table = model._meta.db_table
            r_col = model._meta.get_field(f.rel.field_name).column
            # Reconstruct the constraint name that was generated when the
            # reference was created (see sql_for_pending_references).
            r_name = '%s_refs_%s_%s' % (col, r_col, self._digest(table, r_table))
            output.append('%s %s %s %s;' % \
                (style.SQL_KEYWORD('ALTER TABLE'),
                style.SQL_TABLE(qn(table)),
                style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
                style.SQL_FIELD(qn(truncate_name(r_name, self.connection.ops.max_name_length())))))
        del references_to_delete[model]
        return output
    def sql_destroy_many_to_many(self, model, f, style):
        "Returns the DROP TABLE statements for a single m2m field"
        # Deprecated API kept for backwards compatibility; warns on every call.
        import warnings
        warnings.warn(
            'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
            DeprecationWarning
        )

        qn = self.connection.ops.quote_name
        output = []
        # Only auto-created intermediate tables are dropped here.
        if f.auto_created:
            output.append("%s %s;" % (style.SQL_KEYWORD('DROP TABLE'),
                style.SQL_TABLE(qn(f.m2m_db_table()))))
            ds = self.connection.ops.drop_sequence_sql("%s_%s" % (model._meta.db_table, f.column))
            if ds:
                output.append(ds)
        return output
    def create_test_db(self, verbosity=1, autoclobber=False):
        """
        Creates a test database, prompting the user for confirmation if the
        database already exists. Returns the name of the test database created.
        """
        # Don't import django.core.management if it isn't needed.
        from django.core.management import call_command

        test_database_name = self._get_test_db_name()

        if verbosity >= 1:
            test_db_repr = ''
            if verbosity >= 2:
                test_db_repr = " ('%s')" % test_database_name
            print "Creating test database for alias '%s'%s..." % (self.connection.alias, test_db_repr)

        self._create_test_db(verbosity, autoclobber)

        # Reconnect against the freshly created test database.
        self.connection.close()
        self.connection.settings_dict["NAME"] = test_database_name

        # Confirm the feature set of the test database
        self.connection.features.confirm()

        # Report syncdb messages at one level lower than that requested.
        # This ensures we don't get flooded with messages during testing
        # (unless you really ask to be flooded)
        call_command('syncdb',
            verbosity=max(verbosity - 1, 0),
            interactive=False,
            database=self.connection.alias,
            load_initial_data=False)

        # We need to then do a flush to ensure that any data installed by
        # custom SQL has been removed. The only test data should come from
        # test fixtures, or autogenerated from post_syncdb triggers.
        # This has the side effect of loading initial data (which was
        # intentionally skipped in the syncdb).
        call_command('flush',
            verbosity=max(verbosity - 1, 0),
            interactive=False,
            database=self.connection.alias)

        from django.core.cache import get_cache
        from django.core.cache.backends.db import BaseDatabaseCache
        # Create cache tables for any database-backed cache aliases routed
        # to this connection.
        for cache_alias in settings.CACHES:
            cache = get_cache(cache_alias)
            if isinstance(cache, BaseDatabaseCache):
                from django.db import router
                if router.allow_syncdb(self.connection.alias, cache.cache_model_class):
                    call_command('createcachetable', cache._table, database=self.connection.alias)

        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database.
        cursor = self.connection.cursor()

        return test_database_name
def _get_test_db_name(self):
"""
Internal implementation - returns the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
or 'TEST_NAME' settings.
"""
if self.connection.settings_dict['TEST_NAME']:
return self.connection.settings_dict['TEST_NAME']
return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
    def _create_test_db(self, verbosity, autoclobber):
        "Internal implementation - creates the test db tables."
        suffix = self.sql_table_creation_suffix()
        test_database_name = self._get_test_db_name()

        qn = self.connection.ops.quote_name

        # Create the test database and connect to it. We need to autocommit
        # if the database supports it because PostgreSQL doesn't allow
        # CREATE/DROP DATABASE statements within transactions.
        cursor = self.connection.cursor()
        self.set_autocommit()
        try:
            cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
        except Exception, e:
            # Creation failed, most often because the database already exists.
            sys.stderr.write("Got an error creating the test database: %s\n" % e)
            if not autoclobber:
                confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
            # NOTE: when autoclobber is True, `confirm` is never assigned, but
            # the `or` short-circuits before reading it, so this is safe.
            if autoclobber or confirm == 'yes':
                try:
                    if verbosity >= 1:
                        print "Destroying old test database '%s'..." % self.connection.alias
                    cursor.execute("DROP DATABASE %s" % qn(test_database_name))
                    cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
                except Exception, e:
                    sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                    sys.exit(2)
            else:
                print "Tests cancelled."
                sys.exit(1)

        return test_database_name
    def destroy_test_db(self, old_database_name, verbosity=1):
        """
        Destroy the test database currently named in settings_dict['NAME'],
        restoring 'NAME' to old_database_name before dropping it.
        """
        # Close our own connection first: a database cannot be dropped while
        # we are still connected to it.
        self.connection.close()
        test_database_name = self.connection.settings_dict['NAME']
        if verbosity >= 1:
            test_db_repr = ''
            if verbosity >= 2:
                # At verbosity 2+ also show the actual database name.
                test_db_repr = " ('%s')" % test_database_name
            print "Destroying test database for alias '%s'%s..." % (self.connection.alias, test_db_repr)
        # Point the connection back at the original database so the DROP is
        # issued from outside the test database.
        self.connection.settings_dict['NAME'] = old_database_name
        self._destroy_test_db(test_database_name, verbosity)
def _destroy_test_db(self, test_database_name, verbosity):
"Internal implementation - remove the test db tables."
# Remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
cursor = self.connection.cursor()
self.set_autocommit()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
cursor.execute("DROP DATABASE %s" % self.connection.ops.quote_name(test_database_name))
self.connection.close()
def set_autocommit(self):
"Make sure a connection is in autocommit mode."
if hasattr(self.connection.connection, "autocommit"):
if callable(self.connection.connection.autocommit):
self.connection.connection.autocommit(True)
else:
self.connection.connection.autocommit = True
elif hasattr(self.connection.connection, "set_isolation_level"):
self.connection.connection.set_isolation_level(0)
def sql_table_creation_suffix(self):
"SQL to append to the end of the test table creation statements"
return ''
def test_db_signature(self):
"""
Returns a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database
accordingly to the RDBMS particularities.
"""
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME']
)
|
{
"content_hash": "679d3722cb357ea303db58696345e4e4",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 148,
"avg_line_length": 44.57905138339921,
"alnum_prop": 0.5851842000265993,
"repo_name": "spawnedc/MeCanBlog",
"id": "9adbf2a7e31cb7d624361d615c78cafe6013acbd",
"size": "22557",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/db/backends/creation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "93603"
},
{
"name": "Python",
"bytes": "4239276"
},
{
"name": "Shell",
"bytes": "5114"
}
],
"symlink_target": ""
}
|
"""
Neutron Plug-in for PLUMgrid Virtual Networking Infrastructure (VNI)
This plugin will forward authenticated REST API calls
to the PLUMgrid Network Management System called Director
"""
from plumgridlib import plumlib
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Plumlib(object):
    """
    Class PLUMgrid Python Library. This library is a third-party tool
    needed by PLUMgrid plugin to implement all core API in Neutron.
    """
    # Every public method below is a thin 1:1 delegation to the underlying
    # ``plumlib.Plumlib`` client created by director_conn().
    def __init__(self):
        LOG.info(_('Python PLUMgrid Library Started '))
    def director_conn(self, director_plumgrid, director_port, timeout,
                      director_admin, director_password):
        # Creates ``self.plumlib``; must be called before any of the
        # delegation methods below are invoked.
        self.plumlib = plumlib.Plumlib(director_plumgrid,
                                       director_port,
                                       timeout,
                                       director_admin,
                                       director_password)
    def create_network(self, tenant_id, net_db, network):
        self.plumlib.create_network(tenant_id, net_db, network)
    def update_network(self, tenant_id, net_id):
        self.plumlib.update_network(tenant_id, net_id)
    def delete_network(self, net_db, net_id):
        self.plumlib.delete_network(net_db, net_id)
    def create_subnet(self, sub_db, net_db, ipnet):
        self.plumlib.create_subnet(sub_db, net_db, ipnet)
    def update_subnet(self, orig_sub_db, new_sub_db, ipnet):
        self.plumlib.update_subnet(orig_sub_db, new_sub_db, ipnet)
    def delete_subnet(self, tenant_id, net_db, net_id):
        self.plumlib.delete_subnet(tenant_id, net_db, net_id)
    def create_port(self, port_db, router_db):
        self.plumlib.create_port(port_db, router_db)
    def update_port(self, port_db, router_db):
        self.plumlib.update_port(port_db, router_db)
    def delete_port(self, port_db, router_db):
        self.plumlib.delete_port(port_db, router_db)
    def create_router(self, tenant_id, router_db):
        self.plumlib.create_router(tenant_id, router_db)
    def update_router(self, router_db, router_id):
        self.plumlib.update_router(router_db, router_id)
    def delete_router(self, tenant_id, router_id):
        self.plumlib.delete_router(tenant_id, router_id)
    def add_router_interface(self, tenant_id, router_id, port_db, ipnet):
        self.plumlib.add_router_interface(tenant_id, router_id, port_db, ipnet)
    def remove_router_interface(self, tenant_id, net_id, router_id):
        self.plumlib.remove_router_interface(tenant_id, net_id, router_id)
    def create_floatingip(self, floating_ip):
        self.plumlib.create_floatingip(floating_ip)
    def update_floatingip(self, floating_ip_orig, floating_ip, id):
        self.plumlib.update_floatingip(floating_ip_orig, floating_ip, id)
    def delete_floatingip(self, floating_ip_orig, id):
        self.plumlib.delete_floatingip(floating_ip_orig, id)
    def disassociate_floatingips(self, floating_ip, port_id):
        self.plumlib.disassociate_floatingips(floating_ip, port_id)
|
{
"content_hash": "907a5ba5af5a301c13c855ccac490ba3",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 37.096385542168676,
"alnum_prop": 0.6576810652809354,
"repo_name": "vbannai/neutron",
"id": "8e2607e857bfb62fb25c029bca2b79d6bf5b1669",
"size": "3815",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/plugins/plumgrid/drivers/plumlib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60527"
},
{
"name": "Python",
"bytes": "9344274"
},
{
"name": "Shell",
"bytes": "9202"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
"""Helpers functions to run log-running tasks."""
import threading

from web import utils
from web import webapi as web
def background(func):
    """A function decorator to run a long-running function as a background thread."""
    def internal(*a, **kw):
        web.data()  # cache the request body before the context is forked
        tmpctx = web._context[threading.currentThread()]
        web._context[threading.currentThread()] = utils.storage(web.ctx.copy())
        def newfunc():
            # Run under the copied context, then strip everything except the
            # response fields so the polling request can read them later.
            web._context[threading.currentThread()] = tmpctx
            func(*a, **kw)
            myctx = web._context[threading.currentThread()]
            # Iterate over a snapshot of the keys: deleting from the dict
            # while iterating its live key view raises RuntimeError on
            # Python 3.
            for k in list(myctx.keys()):
                if k not in ['status', 'headers', 'output']:
                    try:
                        del myctx[k]
                    except KeyError:
                        pass
        t = threading.Thread(target=newfunc)
        background.threaddb[id(t)] = t
        t.start()
        web.ctx.headers = []
        # NOTE(review): seeother/changequery are not imported anywhere in
        # this module (in web.py they live in web.http/web.net) -- confirm
        # they are injected into this namespace before relying on this line.
        return seeother(changequery(_t=id(t)))
    return internal
background.threaddb = {}
def backgrounder(func):
    """Decorator pairing with ``background``: when the GET parameters carry
    a ``_t`` thread id, re-attach that background thread's context instead
    of calling ``func``."""
    def internal(*a, **kw):
        params = web.input(_method='get')
        if '_t' not in params:
            # Normal request: just run the wrapped handler.
            return func(*a, **kw)
        try:
            thread = background.threaddb[int(params._t)]
        except KeyError:
            return web.notfound()
        web._context[threading.currentThread()] = web._context[thread]
        return
    return internal
|
{
"content_hash": "3bc361dd888c8b66b1922e2c328c221d",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 85,
"avg_line_length": 32.372093023255815,
"alnum_prop": 0.5510057471264368,
"repo_name": "minixalpha/SourceLearning",
"id": "8dee9d227356d24f3c6ccee0f7512f5da384b72a",
"size": "1392",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "webpy/src/experimental/background.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20027393"
},
{
"name": "C++",
"bytes": "57313"
},
{
"name": "CSS",
"bytes": "28657"
},
{
"name": "DTrace",
"bytes": "964"
},
{
"name": "Emacs Lisp",
"bytes": "8358"
},
{
"name": "Groff",
"bytes": "77746"
},
{
"name": "HTML",
"bytes": "19021709"
},
{
"name": "JavaScript",
"bytes": "741"
},
{
"name": "Makefile",
"bytes": "159157"
},
{
"name": "Perl",
"bytes": "48637"
},
{
"name": "Python",
"bytes": "1265776"
},
{
"name": "Shell",
"bytes": "382380"
},
{
"name": "TeX",
"bytes": "29736"
},
{
"name": "VimL",
"bytes": "5813"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: skippy
callback_type: stdout
requirements:
- set as main display callback
short_description: Ansible screen output that ignores skipped status
version_added: "2.0"
extends_documentation_fragment:
- default_callback
description:
- This callback does the same as the default except it does not output skipped host/task/item status
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
    '''
    This is the default callback interface, which simply prints messages
    to stdout when new callback events are received.
    '''
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'skippy'
    def v2_runner_on_skipped(self, result):
        # Intentional no-op: suppress per-host "skipped" task output.
        pass
    def v2_runner_item_on_skipped(self, result):
        # Intentional no-op: suppress per-item "skipped" output.
        pass
|
{
"content_hash": "a04d706a83ebe7ce5bf7c6000bab8176",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 108,
"avg_line_length": 28.142857142857142,
"alnum_prop": 0.7025380710659899,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "92076cc146efd46c0fadb6094123eca483df7ac5",
"size": "1196",
"binary": false,
"copies": "59",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/callback/skippy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
}
|
import ctypes
import os
import time
from urllib.request import urlopen, urlretrieve
from xml.etree import ElementTree
from gi.repository import Gio
BING_MARKET = "zh-cn"  # Bing market code; None omits the &mkt= query argument
SCREEN_RESOLUTION = "UHD"  # suffix appended to urlBase when building image URLs
GALLERY_FOLDER_PATH = os.path.join(os.environ["HOME"], "Pictures/Bing Gallery")  # download target
WAITING_TIME_WHEN_SUCCESSFUL = 600  # seconds between polls after a successful sync
WAITING_TIME_WHEN_UNSUCCESSFUL = 60  # seconds before retrying after a failure
def validate_internet_connection(server_URL="https://www.bing.com"):
    """Return True if *server_URL* can be opened, False otherwise."""
    try:
        # Re-read the resolver configuration file so a network change is
        # picked up by subsequent DNS lookups.
        # http://stackoverflow.com/questions/21356781
        libc = ctypes.cdll.LoadLibrary("libc.so.6")
        libc.__res_init()  # pylint: disable=protected-access
        with urlopen(server_URL) as _:
            return True
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # still propagate; any connectivity failure reports False.
        return False
def extract_urlBase(file_name):
    """Return the urlBase component of a ``<startdate>_<urlBase>.jpg`` name.

    PEP 8: a named lambda should be a ``def``; behavior is unchanged.
    """
    return file_name.split(".")[0].split("_")[1]
def delete_redundant_files(folder_path):
    """Delete gallery files whose urlBase duplicates an earlier file.

    Files are visited in ascending numeric startdate order, so the first
    occurrence of each urlBase is kept; the kept urlBases are returned.
    """
    seen_urlBases = []
    ordered_names = sorted(os.listdir(folder_path),
                           key=lambda name: int(name.split("_")[0]))
    for file_name in ordered_names:
        urlBase = extract_urlBase(file_name)
        if urlBase in seen_urlBases:
            os.remove(os.path.join(folder_path, file_name))
        else:
            seen_urlBases.append(urlBase)
    return seen_urlBases
def retrieve_image_detail():
    """Query Bing's HPImageArchive feed and return a de-duplicated list of
    (image_name, image_URL) tuples in feed order."""
    image_detail_list = []
    # BING_MARKET of None omits the &mkt= argument entirely.
    market_argument = "" if BING_MARKET is None else "&mkt={}".format(
        BING_MARKET)
    # Two overlapping windows of the archive feed (idx=-1 and idx=100) to
    # cover as many recent images as the API exposes.
    for idx in [-1, 100]:
        # Compose the query URL
        query_URL = "https://www.bing.com/HPImageArchive.aspx?format=xml&idx={}&n=100{}".format(
            idx, market_argument)
        # Fetch the image metadata
        with urlopen(query_URL) as query_connection:
            image_metadata_list = ElementTree.parse(
                query_connection).getroot().findall("image")
        # Get the image detail
        for image_metadata in image_metadata_list:
            # Name files "<startdate>_<urlBase tail>.jpg" so they sort by date.
            image_name = "{}_{}.jpg".format(
                image_metadata.find("startdate").text,
                image_metadata.find("urlBase").text.split("/")[-1].split(
                    "_")[0].split(".")[-1])
            image_URL = "https://www.bing.com{}_{}.jpg".format(
                image_metadata.find("urlBase").text, SCREEN_RESOLUTION)
            # Append the image detail
            image_detail = (image_name, image_URL)
            if image_detail not in image_detail_list:
                image_detail_list.append(image_detail)
    return image_detail_list
def change_background(image_path):
    """Set *image_path* as the GNOME desktop wallpaper via GSettings."""
    settings = Gio.Settings.new("org.gnome.desktop.background")
    settings.set_string("picture-uri", "file://{}".format(image_path))
    settings.apply()
def run():
    """Main loop: keep the gallery synced and the wallpaper current.

    Retries quickly (WAITING_TIME_WHEN_UNSUCCESSFUL) after any failure and
    polls slowly (WAITING_TIME_WHEN_SUCCESSFUL) after a full sync.
    """
    # Create the parent directory
    if not os.path.isdir(GALLERY_FOLDER_PATH):
        os.makedirs(GALLERY_FOLDER_PATH)
    while True:
        # Default to the short retry interval so ``finally`` always has a
        # defined value even if an exception escapes before assignment.
        waiting_time = WAITING_TIME_WHEN_UNSUCCESSFUL
        try:
            # Bail out (and retry later) when offline.  Explicit raise
            # instead of ``assert`` so the check survives ``python -O``.
            if not validate_internet_connection():
                raise RuntimeError("no internet connection")
            # Delete redundant files
            previous_urlBase_list = delete_redundant_files(GALLERY_FOLDER_PATH)
            # Iterate over image detail
            for image_index, (image_name,
                              image_URL) in enumerate(retrieve_image_detail()):
                # Download the image if necessary
                urlBase = extract_urlBase(image_name)
                image_path = os.path.join(GALLERY_FOLDER_PATH, image_name)
                if urlBase not in previous_urlBase_list and not os.path.isfile(
                        image_path):
                    urlretrieve(image_URL, image_path)
                    if not os.path.isfile(image_path):
                        raise RuntimeError("download failed: " + image_path)
                # Set today's photo as the background
                if image_index == 0:
                    change_background(image_path)
            waiting_time = WAITING_TIME_WHEN_SUCCESSFUL
        except Exception:
            # Narrowed from a bare ``except`` so KeyboardInterrupt can still
            # stop the daemon; any other failure triggers a quick retry.
            waiting_time = WAITING_TIME_WHEN_UNSUCCESSFUL
        finally:
            time.sleep(waiting_time)
if __name__ == "__main__":
run()
|
{
"content_hash": "68e3c5053ccbcb3ca52c70974ebb809c",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 96,
"avg_line_length": 34.28,
"alnum_prop": 0.5918319719953326,
"repo_name": "nixingyang/Miscellaneous-Projects",
"id": "9a092c2918e0a417d982f25afd01c44f5ac65a30",
"size": "4285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Bing Photo of the Day/solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53957"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds status_particulars_id and loosens
    # status_type on the status_jiraissue model.
    dependencies = [
        ('app', '0002_auto_20160624_1922'),
    ]
    operations = [
        migrations.AddField(
            model_name='status_jiraissue',
            name='status_particulars_id',
            # default=0 backfills existing rows; preserve_default=False then
            # drops the default from the field definition (Django semantics).
            field=models.PositiveIntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='status_jiraissue',
            name='status_type',
            field=models.TextField(),
        ),
    ]
|
{
"content_hash": "827bdc40aed9f3060ae26e745dde7a9c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 57,
"avg_line_length": 24.791666666666668,
"alnum_prop": 0.5798319327731093,
"repo_name": "sunnytambi/QuickScrum",
"id": "3d1dd198ef632b5713e112fb546c7cafee28d11f",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "QuickScrum/app/migrations/0003_auto_20160627_1833.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5598"
},
{
"name": "HTML",
"bytes": "32727"
},
{
"name": "JavaScript",
"bytes": "16910"
},
{
"name": "Python",
"bytes": "265734"
}
],
"symlink_target": ""
}
|
"""Tests for Python ops defined in sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  """Zero out entries of ``x`` below ``thresh`` (in place) and return the
  result as a ``(SparseTensor, num_nonzeros)`` pair."""
  x[x < thresh] = 0
  nonzero_coords = np.where(x)
  sparse_indices = np.vstack(nonzero_coords).astype(index_dtype).T
  sparse_values = x[nonzero_coords]
  st = sparse_tensor.SparseTensor(
      indices=sparse_indices, values=sparse_values, dense_shape=x.shape)
  return st, len(sparse_values)
class SparseToIndicatorTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_to_indicator.

  Fix: ``np.bool`` (a deprecated alias for builtin ``bool``, removed in
  NumPy 1.24) is replaced with ``bool``; behavior is identical.
  """
  def _SparseTensor_5x6(self, dtype):
    # 5x6 SparseTensor with six nonzero entries.
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array([0, 10, 13, 14, 32, 33])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtype),
        constant_op.constant(shape, dtypes.int64))
  def _SparseTensor_2x3x4(self, dtype):
    # Includes two entries with the form [1, 1, x] : 150.
    ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 0],
                    [1, 1, 1], [1, 1, 2], [1, 2, 2]])
    val = np.array([1, 10, 12, 103, 150, 149, 150, 122])
    shape = np.array([2, 3, 4])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtype),
        constant_op.constant(shape, dtypes.int64))
  @test_util.run_deprecated_v1
  def testInt32(self):
    with self.session(use_gpu=False):
      sp_input = self._SparseTensor_5x6(dtypes.int32)
      output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
      expected_output = np.zeros((5, 50), dtype=bool)
      expected_trues = ((0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33))
      for expected_true in expected_trues:
        expected_output[expected_true] = True
      self.assertAllEqual(output, expected_output)
  @test_util.run_deprecated_v1
  def testInt64(self):
    with self.session(use_gpu=False):
      sp_input = self._SparseTensor_5x6(dtypes.int64)
      output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
      expected_output = np.zeros((5, 50), dtype=bool)
      expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)]
      for expected_true in expected_trues:
        expected_output[expected_true] = True
      self.assertAllEqual(output, expected_output)
  @test_util.run_deprecated_v1
  def testHigherRank(self):
    with self.session(use_gpu=False):
      sp_input = self._SparseTensor_2x3x4(dtypes.int64)
      output = sparse_ops.sparse_to_indicator(sp_input, 200).eval()
      expected_output = np.zeros((2, 3, 200), dtype=bool)
      expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12), (1, 0, 103),
                        (1, 1, 149), (1, 1, 150), (1, 2, 122)]
      for expected_true in expected_trues:
        expected_output[expected_true] = True
      self.assertAllEqual(output, expected_output)
class SparseMergeTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_merge with 1-D vocab sizes."""
  def _SparseTensorValue_3x50(self, indices_dtype, values_dtype):
    # NOTE: This input is intentionally not sorted to validate the
    # already_sorted flag below.
    ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
    # NB: these are not sorted
    indices = np.array([0, 13, 10, 33, 32, 14])
    values = np.array([-3, 4, 1, 9, 5, 1])
    shape = np.array([3, 3])
    indices = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(indices, indices_dtype), np.array(shape, np.int64))
    values = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(values, values_dtype), np.array(shape, np.int64))
    return indices, values
  def _SparseTensor_3x50(self, indices_dtype, values_dtype):
    # Same fixture as above, wrapped as SparseTensor objects.
    indices, values = self._SparseTensorValue_3x50(indices_dtype, values_dtype)
    return (sparse_tensor.SparseTensor.from_value(indices),
            sparse_tensor.SparseTensor.from_value(values))
  def _AssertResultsSorted(self, output, vocab_size):
    # Expected result when sparse_merge canonicalizes (sorts) the output.
    self.assertAllEqual(output.indices,
                        [[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
    self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
    self.assertAllEqual(output.dense_shape, [3, vocab_size])
  def _AssertResultsNotSorted(self, output, vocab_size):
    # Expected result with already_sorted=True: input order is preserved.
    self.assertAllEqual(output.indices,
                        [[0, 0], [1, 13], [1, 10], [2, 33], [2, 32], [1, 14]])
    self.assertAllEqual(output.values, [-3, 4, 1, 9, 5, 1])
    self.assertAllEqual(output.dense_shape, [3, vocab_size])
  def testInt32AndFloat32(self):
    vocab_size = 50
    indices_v, values_v = self._SparseTensorValue_3x50(np.int32, np.float32)
    with self.session(use_gpu=False) as sess:
      # Exercise every combination of SparseTensorValue / SparseTensor input.
      for indices in (indices_v,
                      sparse_tensor.SparseTensor.from_value(indices_v)):
        for values in (values_v,
                       sparse_tensor.SparseTensor.from_value(values_v)):
          sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
          output = self.evaluate(sp_output)
          self._AssertResultsSorted(output, vocab_size)
  def testInt64AndFloat32(self):
    vocab_size = 50
    with self.session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float32)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = self.evaluate(sp_output)
      self._AssertResultsSorted(output, vocab_size)
  def testInt64AndFloat64(self):
    vocab_size = 50
    with self.session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = self.evaluate(sp_output)
      self._AssertResultsSorted(output, vocab_size)
  def testInt32AndFloat32NonCanonicalOrder(self):
    vocab_size = 50
    with self.session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int32, np.float32)
      sp_output = sparse_ops.sparse_merge(
          indices, values, vocab_size, already_sorted=True)
      output = self.evaluate(sp_output)
      self._AssertResultsNotSorted(output, vocab_size)
  def testInt64AndFloat32NonCanonicalOrder(self):
    vocab_size = 50
    with self.session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float32)
      sp_output = sparse_ops.sparse_merge(
          indices, values, vocab_size, already_sorted=True)
      output = self.evaluate(sp_output)
      self._AssertResultsNotSorted(output, vocab_size)
  def testInt64AndFloat64NonCanonicalOrder(self):
    vocab_size = 50
    # vocab_size may also be passed as a Tensor.
    vocab_size_tensor = constant_op.constant(vocab_size, dtypes.int64)
    with self.session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(
          indices, values, vocab_size_tensor, already_sorted=True)
      output = self.evaluate(sp_output)
      self._AssertResultsNotSorted(output, vocab_size)
  def testShouldSetLastDimensionInDynamicShape(self):
    with ops.Graph().as_default():
      shape = constant_op.constant([2, 2], dtype=dtypes.int64)
      dynamic_shape = array_ops.placeholder_with_default(shape, shape=[2])
      ids = sparse_tensor.SparseTensor(
          indices=[[0, 0], [0, 1]],
          values=[1, 3],
          dense_shape=dynamic_shape)
      values = sparse_tensor.SparseTensor(
          indices=[[0, 0], [0, 1]],
          values=[0.4, 0.7],
          dense_shape=dynamic_shape)
      merged = sparse_ops.sparse_merge(
          sp_ids=ids, sp_values=values, vocab_size=5)
      # Even with a dynamic input shape, the vocab dimension is static.
      self.assertEqual(5, merged.get_shape()[1])
class SparseMergeHighDimTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_merge with a list of id tensors
  (multi-dimensional vocab)."""
  def _SparseTensor_3x50(self, indices_dtype, values_dtype):
    # NOTE: This input is intentionally not sorted to validate the
    # already_sorted flag below.
    ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
    # NB: these are not sorted
    indices0 = np.array([0, 13, 10, 33, 32, 14])
    indices1 = np.array([12, 4, 0, 0, 1, 30])
    values = np.array([-3, 4, 1, 9, 5, 1])
    shape = np.array([3, 3])
    indices0 = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(indices0, indices_dtype), np.array(shape, np.int64))
    indices1 = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(indices1, indices_dtype), np.array(shape, np.int64))
    values = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(values, values_dtype), np.array(shape, np.int64))
    # Two id tensors -> the merged output gains one dimension per id tensor.
    return ([sparse_tensor.SparseTensor.from_value(indices0),
             sparse_tensor.SparseTensor.from_value(indices1)],
            sparse_tensor.SparseTensor.from_value(values))
  def _AssertResultsSorted(self, output, vocab_size):
    self.assertAllEqual(
        output.indices,
        [[0, 0, 12], [1, 10, 0], [1, 13, 4], [1, 14, 30], [2, 32, 1],
         [2, 33, 0]])
    self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
    self.assertAllEqual(output.dense_shape, [3] + vocab_size)
  def testInt64AndFloat32(self):
    vocab_size = [50, 31]
    with self.session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float32)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = self.evaluate(sp_output)
      self._AssertResultsSorted(output, vocab_size)
  def testInt64AndFloat64(self):
    vocab_size = [50, 31]
    with self.session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = self.evaluate(sp_output)
      self._AssertResultsSorted(output, vocab_size)
  def testInt64AndFloat64Shape(self):
    # Same merge but with a different (still sufficient) vocab shape.
    vocab_size = [50, 30]
    with self.session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = self.evaluate(sp_output)
      self._AssertResultsSorted(output, vocab_size)
class SparseRetainTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_retain.

  Fix: ``np.bool`` (a deprecated alias for builtin ``bool``, removed in
  NumPy 1.24) is replaced with ``bool``; behavior is identical.
  """
  def _SparseTensorValue_5x6(self):
    # 5x6 SparseTensorValue with six nonzero entries.
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array([0, 10, 13, 14, 32, 33])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(val, np.int32), np.array(shape, np.int64))
  def _SparseTensor_5x6(self):
    return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())
  @test_util.run_deprecated_v1
  def testBasic(self):
    with self.session(use_gpu=False) as sess:
      # Both SparseTensorValue and SparseTensor inputs are accepted.
      for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
        to_retain = np.array([1, 0, 0, 1, 1, 0], dtype=bool)
        sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
        output = self.evaluate(sp_output)
        self.assertAllEqual(output.indices, [[0, 0], [1, 4], [3, 2]])
        self.assertAllEqual(output.values, [0, 14, 32])
        self.assertAllEqual(output.dense_shape, [5, 6])
  def testRetainNone(self):
    # Retaining nothing yields an empty SparseTensor with the same shape.
    with self.session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_5x6()
      to_retain = np.zeros((6,), dtype=bool)
      sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
      output = self.evaluate(sp_output)
      self.assertAllEqual(output.indices, np.array([]).reshape((0, 2)))
      self.assertAllEqual(output.values, [])
      self.assertAllEqual(output.dense_shape, [5, 6])
  def testMismatchedRetainShape(self):
    # to_retain must have one entry per nonzero; a short mask is rejected.
    with self.session(use_gpu=False):
      sp_input = self._SparseTensor_5x6()
      to_retain = np.array([1, 0, 0, 1, 0], dtype=bool)
      with self.assertRaises(ValueError):
        sparse_ops.sparse_retain(sp_input, to_retain)
class SparseResetShapeTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_reset_shape (explicit new shape and
  tight-bounding-box default)."""
  # Shared 2x5x6 fixture: six nonzero entries whose tight bounding box
  # is [2, 4, 5] (see testTightBoundingBox).
  _IND_2_5_6 = np.array(
      [[0, 0, 0], [0, 1, 0], [0, 1, 3], [1, 1, 4], [1, 3, 2], [1, 3, 3]],
      dtype=np.int64)
  _VAL_2_5_6 = np.array([0, 10, 13, 14, 32, 33], dtype=np.int32)
  _SHP_2_5_6 = np.array([2, 5, 6], dtype=np.int64)
  def _SparseTensor_2x5x6(self):
    return sparse_tensor.SparseTensor(
        constant_op.constant(self._IND_2_5_6, dtypes.int64),
        constant_op.constant(self._VAL_2_5_6, dtypes.int32),
        constant_op.constant(self._SHP_2_5_6, dtypes.int64))
  def _SparseTensor_2x5x6_Empty(self):
    # Same declared shape, zero nonzero entries.
    return sparse_tensor.SparseTensor(
        constant_op.constant(
            np.empty(shape=[0, 3], dtype=np.int64), dtypes.int64),
        constant_op.constant(np.empty(shape=[0], dtype=np.int32), dtypes.int32),
        constant_op.constant(self._SHP_2_5_6, dtypes.int64))
  def _SparseTensorValue_2x5x6(self):
    return sparse_tensor.SparseTensorValue(self._IND_2_5_6, self._VAL_2_5_6,
                                           self._SHP_2_5_6)
  @test_util.run_deprecated_v1
  def testStaticShapeInfoPreservedWhenNewShapeIsProvidedAndStatic(self):
    sp_input = self._SparseTensor_2x5x6()
    new_shape = np.array([3, 6, 7], dtype=np.int64)
    sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
    self.assertAllEqual([3, 6, 7], sp_output.get_shape())
  @test_util.run_deprecated_v1
  def testBasic(self):
    with self.session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x5x6()
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      output = self.evaluate(sp_output)
      # Indices and values pass through untouched; only dense_shape grows.
      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])
  @test_util.run_deprecated_v1
  def testInputUnavailableInGraphConstructionOk(self):
    # A plain SparseTensorValue (no graph tensor) works as input too.
    with self.session(use_gpu=False) as sess:
      sp_input = self._SparseTensorValue_2x5x6()
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      output = self.evaluate(sp_output)
      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])
  @test_util.run_deprecated_v1
  def testFeedInputUnavailableInGraphConstructionOk(self):
    # Input supplied via feed_dict through a sparse_placeholder.
    with self.session(use_gpu=False) as sess:
      sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      output = sess.run(sp_output,
                        feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])
  def testTightBoundingBox(self):
    # With no new_shape the result shrinks to the tight bounding box.
    with self.session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x5x6()
      sp_output = sparse_ops.sparse_reset_shape(sp_input)
      output = self.evaluate(sp_output)
      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [2, 4, 5])
  def testTightBoundingBoxEmpty(self):
    # With no entries the tight bounding box collapses to all zeros.
    with self.session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x5x6_Empty()
      sp_output = sparse_ops.sparse_reset_shape(sp_input)
      output = self.evaluate(sp_output)
      self.assertAllEqual(output.indices.shape, [0, 3])
      self.assertAllEqual(output.values.shape, [0])
      self.assertAllEqual(output.dense_shape, [0, 0, 0])
  @test_util.run_deprecated_v1
  def testInvalidRank(self):
    # A new_shape with the wrong rank is rejected at graph construction.
    with self.session(use_gpu=False):
      sp_input = self._SparseTensor_2x5x6()
      new_shape = np.array([3, 7], dtype=np.int64)
      with self.assertRaises(ValueError):
        sparse_ops.sparse_reset_shape(sp_input, new_shape)
  @test_util.run_deprecated_v1
  def testInvalidRankNewShapeUnavailableInGraphConstruction(self):
    # Rank mismatch only detectable at run time raises an op error instead.
    with self.session(use_gpu=False) as sess:
      new_shape = array_ops.placeholder(dtype=dtypes.int64)
      sp_input = self._SparseTensor_2x5x6()
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      with self.assertRaisesOpError("x == y did not hold element-wise"):
        sess.run(out, feed_dict={new_shape: np.array([3, 7], dtype=np.int64)})
  @test_util.run_deprecated_v1
  def testInvalidDimensionSizeStatic(self):
    # new_shape must cover the existing shape in every dimension.
    sp_input = self._SparseTensor_2x5x6()
    new_shape = np.array([3, 7, 5], dtype=np.int64)
    with self.assertRaisesRegexp(ValueError, "should have dimension sizes"):
      sparse_ops.sparse_reset_shape(sp_input, new_shape)
  @test_util.run_deprecated_v1
  def testInvalidDimensionSizeDynamic(self):
    with self.session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x5x6()
      new_shape = array_ops.placeholder(dtype=dtypes.int32)
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      with self.assertRaisesOpError("x <= y did not hold element-wise"):
        sess.run(out, feed_dict={new_shape: [3, 7, 5]})
  @test_util.run_deprecated_v1
  def testInvalidDimensionSizeInputUnavailableInGraphConstruction(self):
    sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
    with self.session(use_gpu=False) as sess:
      new_shape = np.array([3, 7, 5], dtype=np.int64)
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      with self.assertRaisesOpError("x <= y did not hold element-wise"):
        sess.run(out, feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_fill_empty_rows.

  The 5x6 fixtures leave rows 2 and 4 empty so the op has rows to fill;
  the 2x6 fixture has no empty rows and must pass through unchanged.
  """

  def _SparseTensorValue_5x6(self, dtype=np.int32):
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array([0, 10, 13, 14, 32, 33])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64), np.array(val, dtype), np.array(
            shape, np.int64))

  def _SparseTensor_5x6(self):
    return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())

  def _SparseTensor_String5x6(self):
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array(["a", "b", "c", "d", "e", "f"])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtypes.string),
        constant_op.constant(shape, dtypes.int64))

  def _SparseTensor_2x6(self):
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
    val = np.array([0, 10, 13, 14])
    shape = np.array([2, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtypes.int32),
        constant_op.constant(shape, dtypes.int64))

  @test_util.run_deprecated_v1
  def testFillNumber(self):
    with self.session(use_gpu=False) as sess:
      for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
        sp_output, empty_row_indicator = (
            sparse_ops.sparse_fill_empty_rows(sp_input, -1))

        output, empty_row_indicator_out = sess.run(
            [sp_output, empty_row_indicator])

        self.assertAllEqual(
            output.indices,
            [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
        self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
        self.assertAllEqual(output.dense_shape, [5, 6])
        # np.bool was an alias of the builtin bool, removed in NumPy 1.24;
        # the builtin is the documented drop-in replacement.
        self.assertAllEqual(empty_row_indicator_out,
                            np.array([0, 0, 1, 0, 1]).astype(bool))

  @test_util.run_deprecated_v1
  def testFillFloat(self):
    with self.session(use_gpu=False) as sess:
      values = constant_op.constant(
          [0.0, 10.0, 13.0, 14.0, 32.0, 33.0], dtype=dtypes.float64)
      default_value = constant_op.constant(-1.0, dtype=dtypes.float64)
      sp_input = sparse_tensor.SparseTensorValue(
          indices=np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]),
          values=values,
          dense_shape=np.array([5, 6]))
      sp_output, empty_row_indicator = (sparse_ops.sparse_fill_empty_rows(
          sp_input, default_value))
      output, empty_row_indicator_out = sess.run(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4],
                                           [2, 0], [3, 2], [3, 3], [4, 0]])
      self.assertAllClose(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
      self.assertAllEqual(output.dense_shape, [5, 6])
      self.assertAllEqual(empty_row_indicator_out,
                          np.array([0, 0, 1, 0, 1]).astype(bool))

      # Gradients w.r.t. both the values and the fill value are checked to
      # be non-zero yet numerically tiny (within 1e-8 at delta=1e-8).
      values_grad_err = gradient_checker.compute_gradient_error(
          values, values.shape.as_list(), sp_output.values, [8], delta=1e-8)
      self.assertGreater(values_grad_err, 0)
      self.assertLess(values_grad_err, 1e-8)

      default_value_grad_err = gradient_checker.compute_gradient_error(
          default_value,
          default_value.shape.as_list(),
          sp_output.values, [8],
          delta=1e-8)
      self.assertGreater(default_value_grad_err, 0)
      self.assertLess(default_value_grad_err, 1e-8)

  @test_util.run_deprecated_v1
  def testFillString(self):
    with self.session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_String5x6()
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, ""))

      output, empty_row_indicator_out = sess.run(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(
          output.indices,
          [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
      self.assertAllEqual(output.values,
                          [b"a", b"b", b"c", b"d", b"", b"e", b"f", b""])
      self.assertAllEqual(output.dense_shape, [5, 6])
      self.assertAllEqual(empty_row_indicator_out,
                          np.array([0, 0, 1, 0, 1]).astype(bool))

  @test_util.run_deprecated_v1
  def testNoEmptyRows(self):
    with self.session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x6()
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))

      output, empty_row_indicator_out = sess.run(
          [sp_output, empty_row_indicator])

      # Input is returned untouched and no row is flagged as empty.
      self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
      self.assertAllEqual(output.values, [0, 10, 13, 14])
      self.assertAllEqual(output.dense_shape, [2, 6])
      self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(bool))
class SparseAddTest(test_util.TensorFlowTestCase):
  """Checks sparse_add when the values live in a tf.Variable."""

  @test_util.run_deprecated_v1
  def testValuesInVariable(self):
    # Build a 1-D sparse tensor whose values tensor is a Variable rather
    # than a constant, then add it to itself.
    idx = constant_op.constant([[1]], dtype=dtypes.int64)
    var_vals = variables.Variable([1], trainable=False, dtype=dtypes.float32)
    dense_shape = constant_op.constant([1], dtype=dtypes.int64)

    sp_input = sparse_tensor.SparseTensor(idx, var_vals, dense_shape)
    sp_output = sparse_ops.sparse_add(sp_input, sp_input)

    with self.session(use_gpu=False):
      self.evaluate(variables.global_variables_initializer())
      result = self.evaluate(sp_output)
      # 1 + 1 at the single stored position.
      self.assertAllEqual(result.values, [2])
class SparseReduceTest(test_util.TensorFlowTestCase):
  """Compares sparse_reduce_sum/_max (dense and *_sparse outputs) to NumPy."""

  # The fixture encodes this 2x3 matrix, where ? is implicitly zero.
  # All stored values are 1:
  # [[1, ?, 1]
  #  [?, 1, ?]]
  ind = np.array([[0, 0], [0, 2], [1, 1]]).astype(np.int64)
  vals = np.array([1, 1, 1]).astype(np.int32)
  dense_shape = np.array([2, 3]).astype(np.int64)

  def _compare(self, sp_t, reduction_axes, ndims, keep_dims, do_sum):
    # Build the NumPy reference by densifying the sparse input first.
    densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()

    np_ans = densified
    if reduction_axes is None:
      if do_sum:
        np_ans = np.sum(np_ans, keepdims=keep_dims)
      else:
        np_ans = np.max(np_ans, keepdims=keep_dims)
    else:
      if not isinstance(reduction_axes, list):  # Single scalar.
        reduction_axes = [reduction_axes]
      reduction_axes = np.array(reduction_axes).astype(np.int32)
      # Handles negative axes.
      reduction_axes = (reduction_axes + ndims) % ndims
      # Loop below depends on sorted.
      reduction_axes.sort()
      # Reduce the highest axis first so lower axis indices stay valid as
      # dimensions disappear.
      for ra in reduction_axes.ravel()[::-1]:
        if do_sum:
          np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
        else:
          np_ans = np.max(np_ans, axis=ra, keepdims=keep_dims)

    with self.cached_session():
      if do_sum:
        tf_dense_ans = sparse_ops.sparse_reduce_sum(sp_t, reduction_axes,
                                                    keep_dims)
      else:
        tf_dense_ans = sparse_ops.sparse_reduce_max(sp_t, reduction_axes,
                                                    keep_dims)
      out_dense = self.evaluate(tf_dense_ans)

      if do_sum:
        tf_sparse_ans = sparse_ops.sparse_reduce_sum_sparse(sp_t,
                                                            reduction_axes,
                                                            keep_dims)
      else:
        tf_sparse_ans = sparse_ops.sparse_reduce_max_sparse(sp_t,
                                                            reduction_axes,
                                                            keep_dims)
      # Convert to dense for comparison purposes.
      out_sparse = sparse_ops.sparse_tensor_to_dense(tf_sparse_ans).eval()

      self.assertAllClose(np_ans, out_dense)
      self.assertAllClose(np_ans, out_sparse)

  def _compare_all(self, sp_t, reduction_axes, ndims):
    # Exercise every (keep_dims, sum-vs-max) combination.
    self._compare(sp_t, reduction_axes, ndims, False, False)
    self._compare(sp_t, reduction_axes, ndims, False, True)
    self._compare(sp_t, reduction_axes, ndims, True, False)
    self._compare(sp_t, reduction_axes, ndims, True, True)

  @test_util.run_deprecated_v1
  def testSimpleAndRandomInputs(self):
    if np.__version__ == "1.13.0":
      self.skipTest("numpy 1.13.0 bug")

    sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)

    # Fixed fixture: every axis combination, including negative axes.
    with self.session(use_gpu=False):
      self._compare_all(sp_t, None, ndims=2)
      self._compare_all(sp_t, 0, ndims=2)
      self._compare_all(sp_t, [1], ndims=2)
      self._compare_all(sp_t, [0, 1], ndims=2)
      self._compare_all(sp_t, [1, 0], ndims=2)
      self._compare_all(sp_t, [-1], ndims=2)
      self._compare_all(sp_t, [1, -2], ndims=2)

    np.random.seed(1618)
    test_dims = [(1618, 1, 11, 7, 1), (1,), (1, 1, 1)]
    with self.session(use_gpu=False):
      for dims in test_dims:
        sp_t, unused_nnz = _sparsify(np.random.randn(*dims))
        # reduce all using None
        self._compare_all(sp_t, None, ndims=len(dims))
        # reduce random axes from 1D to N-D
        for d in range(1, len(dims) + 1):
          axes = np.random.choice(len(dims), size=d, replace=False).tolist()
          self._compare_all(sp_t, axes, ndims=len(dims))

  def testInvalidAxes(self):
    # Axes outside [-rank, rank) must be rejected by both ops at run time.
    sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
    with self.session(use_gpu=False):
      with self.assertRaisesOpError("Invalid reduction dimension -3"):
        sparse_ops.sparse_reduce_sum(sp_t, -3).eval()
      with self.assertRaisesOpError("Invalid reduction dimension 2"):
        sparse_ops.sparse_reduce_sum(sp_t, 2).eval()
      with self.assertRaisesOpError("Invalid reduction dimension -3"):
        sparse_ops.sparse_reduce_max(sp_t, -3).eval()
      with self.assertRaisesOpError("Invalid reduction dimension 2"):
        sparse_ops.sparse_reduce_max(sp_t, 2).eval()

  @test_util.run_deprecated_v1
  def testGradient(self):
    if np.__version__ == "1.13.0":
      self.skipTest("numpy 1.13.0 bug")
    np.random.seed(8161)
    test_dims = [(11, 1, 5, 7, 1), (2, 2)]
    with self.session(use_gpu=False):
      for dims in test_dims:
        sp_t, nnz = _sparsify(np.random.randn(*dims))
        # reduce random axes from 1D to N-D
        for d in range(1, len(dims) + 1):
          axes = np.random.choice(len(dims), size=d, replace=False).tolist()
          reduced = sparse_ops.sparse_reduce_sum(sp_t, axes)

          err = gradient_checker.compute_gradient_error(
              sp_t.values, (nnz,), reduced,
              self.evaluate(reduced).shape)
          self.assertLess(err, 1e-3)

        # Tests for negative axes.
        reduced = sparse_ops.sparse_reduce_sum(sp_t, -1)
        err = gradient_checker.compute_gradient_error(
            sp_t.values, (nnz,), reduced,
            self.evaluate(reduced).shape)
        self.assertLess(err, 1e-3)

  def _testSparseReduceShape(self, sp_t, reduction_axes, ndims, keep_dims,
                             do_sum):
    # Static-shape counterpart of _compare: only the *inferred* shape of
    # the TF result is checked against the NumPy reference's shape.
    densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()

    np_op = np.sum
    tf_op = sparse_ops.sparse_reduce_sum
    if not do_sum:
      np_op = np.max
      tf_op = sparse_ops.sparse_reduce_max

    np_ans = densified
    if reduction_axes is None:
      np_ans = np_op(np_ans, keepdims=keep_dims)
    else:
      if not isinstance(reduction_axes, list):  # Single scalar.
        reduction_axes = [reduction_axes]
      reduction_axes = np.array(reduction_axes).astype(np.int32)
      # Handles negative axes.
      reduction_axes = (reduction_axes + ndims) % ndims
      # Loop below depends on sorted.
      reduction_axes.sort()

      for ra in reduction_axes.ravel()[::-1]:
        np_ans = np_op(np_ans, axis=ra, keepdims=keep_dims)

    tf_ans = tf_op(sp_t, reduction_axes, keep_dims)
    self.assertAllEqual(np_ans.shape, tf_ans.get_shape().as_list())

  def testSparseReduceSumOrMaxShape(self):
    sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)

    with self.session(use_gpu=False):
      for do_sum in [True, False]:
        for keep_dims in [True, False]:
          self._testSparseReduceShape(sp_t, None, 2, keep_dims, do_sum)
          self._testSparseReduceShape(sp_t, 0, 2, keep_dims, do_sum)
          self._testSparseReduceShape(sp_t, [1], 2, keep_dims, do_sum)
          self._testSparseReduceShape(sp_t, [0, 1], 2, keep_dims, do_sum)
          self._testSparseReduceShape(sp_t, [1, 0], 2, keep_dims, do_sum)
          self._testSparseReduceShape(sp_t, [-1], 2, keep_dims, do_sum)
          self._testSparseReduceShape(sp_t, [1, -2], 2, keep_dims, do_sum)
class SparseMathOpsTest(test_util.TensorFlowTestCase):
  """Tests sparse-dense elementwise math ops (mul, div, add) and gradients."""

  def _check(self, result_tensor, result_np, input_sp_t):
    # The result must be sparse, keep the input's sparsity pattern, and
    # densify to the NumPy reference.
    self.assertTrue(isinstance(result_tensor, sparse_tensor.SparseTensor))
    self.assertTrue(isinstance(input_sp_t, sparse_tensor.SparseTensor))
    self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
    self.assertAllEqual(input_sp_t.dense_shape.eval(),
                        result_tensor.dense_shape.eval())

    res_densified = sparse_ops.sparse_to_dense(result_tensor.indices,
                                               result_tensor.dense_shape,
                                               result_tensor.values).eval()
    self.assertAllEqual(result_np, res_densified)

  @test_util.run_deprecated_v1
  def testCwiseShapeValidation(self):
    # Test case for GitHub 24072.
    with self.session(use_gpu=False):
      a = array_ops.ones([3, 4, 1], dtype=dtypes.int32)
      b = sparse_tensor.SparseTensor([[0, 0, 1, 0], [0, 0, 3, 0]], [10, 20],
                                     [1, 1, 4, 2])
      c = a * b
      # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
      # assertRaisesRegex is the supported spelling.
      with self.assertRaisesRegex(
          errors.InvalidArgumentError,
          "broadcasts dense to sparse only; got incompatible shapes"):
        c.eval()

  @test_util.run_deprecated_v1
  def testCwiseDivAndMul(self):
    np.random.seed(1618)
    sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
    dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
    with self.session(use_gpu=False):
      for dtype in [np.float32, np.float64, np.int32, np.int64]:
        for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
          # +1 keeps all values nonzero, so division is well-defined.
          sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
          dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
          sp_t, unused_nnz = _sparsify(sp_vals_np, thresh=1.5)
          sp_t_densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()
          dense_t = constant_op.constant(dense_vals_np)

          self._check(sp_t / dense_t, sp_t_densified / dense_vals_np, sp_t)
          # Check commutative.
          self._check(sp_t * dense_t, sp_t_densified * dense_vals_np, sp_t)
          self._check(dense_t * sp_t, sp_t_densified * dense_vals_np, sp_t)

          if dtype in [np.int32, np.int64]:
            res = sp_t / dense_t  # should invoke "__truediv__"
            self.assertEqual(res.values.eval().dtype, np.float64)

  @test_util.run_deprecated_v1
  def testCwiseAdd(self):
    with self.session(use_gpu=False):
      # Identity(2) + AllOnes(2,2). Should be equal to 2 * Identity(2).
      indices = [[0, 0], [1, 1]]
      vals = [1, 1]
      shape = (2, 2)

      sp_t = sparse_tensor.SparseTensor(indices, vals, shape)
      dense_t = array_ops.ones(shape, dtype=dtypes.int32)
      self._check(
          sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
          np.identity(2) * 2, sp_t)

      # Variant of above, but broadcasts the dense side.
      dense_t = array_ops.ones([1], dtype=dtypes.int32)
      self._check(
          sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
          np.identity(2) * 2, sp_t)

  @test_util.run_deprecated_v1
  def testGradients(self):
    np.random.seed(1618)
    sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
    dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
    with self.session(use_gpu=False):
      for dtype in [np.float32, np.float64]:
        for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
          sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
          dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
          sp_t, nnz = _sparsify(sp_vals_np, thresh=1.5)
          dense_t = constant_op.constant(dense_vals_np)

          cmul = sp_t * dense_t
          err = gradient_checker.compute_gradient_error([sp_t.values, dense_t],
                                                        [(nnz,), dense_shape],
                                                        cmul.values, (nnz,))
          self.assertLess(err, 1e-4)

          cdiv = sp_t / dense_t
          err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
                                                        cdiv.values, (nnz,))
          self.assertLess(err, 1e-4)
          err = gradient_checker.compute_gradient_error(
              dense_t,
              dense_shape,
              cdiv.values, (nnz,),
              x_init_value=dense_vals_np)
          self.assertLess(err, 2e-4)
class SparseSoftmaxTest(test_util.TensorFlowTestCase):
  """Tests sparse_softmax against dense nn_ops.softmax."""

  @test_util.run_deprecated_v1
  def testEquivalentToDensified(self):
    np.random.seed(1618)
    # Random matrix size; sparsified with thresh=0 so nothing is masked and
    # the sparse op sees exactly the dense data.
    n, m = np.random.choice(20, size=2)

    for dtype in [np.float32, np.float64]:
      sp_vals_np = np.random.rand(n, m).astype(dtype)

      batched_sp_t, unused_nnz1 = _sparsify(
          sp_vals_np.reshape((1, n, m)), thresh=0.)  # No masking.

      with self.cached_session(use_gpu=False):
        densified = constant_op.constant(sp_vals_np)

        sp_result = sparse_ops.sparse_softmax(batched_sp_t).eval(
        ).values.reshape((n, m))
        dense_result = nn_ops.softmax(densified)
        self.assertAllClose(dense_result.eval(), sp_result)

  @test_util.run_deprecated_v1
  def testHigherRanks(self):
    # For the first shape:
    # First batch:
    # [?   e.]
    # [1.  ? ]
    # Second batch:
    # [e   ? ]
    # [e   e ]
    #
    # The softmax results should be:
    # [?   1.]     [1    ?]
    # [1.  ? ] and [.5  .5]
    # where ? means implicitly zero.
    #
    # The second shape: same input data, but with a higher-rank shape.
    shapes = [[2, 2, 2], [2, 1, 2, 2]]
    for shape in shapes:
      values = np.asarray(
          [0., np.e, 1., 0., np.e, 0., np.e, np.e]).reshape(shape)
      # thresh=1e-2 masks out the explicit zeros in `values`.
      sp_t, unused_nnz = _sparsify(values, thresh=1e-2)
      expected_values = [1., 1., 1., .5, .5]

      with self.cached_session(use_gpu=False):
        result = sparse_ops.sparse_softmax(sp_t).eval()

        self.assertAllEqual(expected_values, result.values)
        self.assertAllEqual(sp_t.indices.eval(), result.indices)
        self.assertAllEqual(shape, result.dense_shape)

  @test_util.run_deprecated_v1
  def testGradient(self):
    x_shape = [2, 5, 10]
    with self.cached_session(use_gpu=False):
      for dtype in [np.float32, np.float64]:
        x_np = np.random.randn(*x_shape).astype(dtype)
        x_tf, nnz = _sparsify(x_np)
        y_tf = sparse_ops.sparse_softmax(x_tf)
        err = gradient_checker.compute_gradient_error(x_tf.values, (nnz,),
                                                      y_tf.values, (nnz,))
        self.assertLess(err, 1e-4)
class SparseMinimumMaximumTest(test_util.TensorFlowTestCase):
  """Tests sparse_minimum/sparse_maximum against NumPy on dense views."""

  def _assertSparseTensorValueEqual(self, expected, actual):
    # Two SparseTensorValues are equal iff all three components match.
    for field in ("indices", "values", "dense_shape"):
      self.assertAllEqual(getattr(expected, field), getattr(actual, field))

  @test_util.run_deprecated_v1
  def testBasic(self):
    with self.cached_session(use_gpu=False):
      # 1-D, values at index 0.
      sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
      sp_one = sparse_tensor.SparseTensor([[0]], [1], [7])
      got_max = sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
      got_min = sparse_ops.sparse_minimum(sp_zero, sp_one).eval()
      self._assertSparseTensorValueEqual(sp_one.eval(), got_max)
      self._assertSparseTensorValueEqual(sp_zero.eval(), got_min)

      # Values at different indices: the result covers the union of indices.
      sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
      sp_zero_2 = sparse_tensor.SparseTensor([[1]], [0], [7])
      expected = sparse_tensor.SparseTensor([[0], [1]], [0, 0], [7])
      got_max = sparse_ops.sparse_maximum(sp_zero, sp_zero_2).eval()
      got_min = sparse_ops.sparse_minimum(sp_zero, sp_zero_2).eval()
      self._assertSparseTensorValueEqual(expected.eval(), got_max)
      self._assertSparseTensorValueEqual(expected.eval(), got_min)

  @test_util.run_deprecated_v1
  def testRandom(self):
    np.random.seed(1618)
    shapes = [(13,), (6, 8), (1, 7, 1)]
    for shape in shapes:
      for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
        lhs_np = np.random.randn(*shape).astype(dtype)
        rhs_np = np.random.randn(*shape).astype(dtype)
        sp_lhs, _ = _sparsify(lhs_np, thresh=-.5)
        sp_rhs, _ = _sparsify(rhs_np, thresh=-.5)

        with self.cached_session(use_gpu=False):
          dense_max = sparse_ops.sparse_tensor_to_dense(
              sparse_ops.sparse_maximum(sp_lhs, sp_rhs)).eval()
          dense_min = sparse_ops.sparse_tensor_to_dense(
              sparse_ops.sparse_minimum(sp_lhs, sp_rhs)).eval()
          lhs_dense = sparse_ops.sparse_tensor_to_dense(sp_lhs).eval()
          rhs_dense = sparse_ops.sparse_tensor_to_dense(sp_rhs).eval()

          # Densified results must agree with NumPy elementwise min/max.
          self.assertAllEqual(np.maximum(lhs_dense, rhs_dense), dense_max)
          self.assertAllEqual(np.minimum(lhs_dense, rhs_dense), dense_min)

  def testMismatchedShapes(self):
    with self.session(use_gpu=False):
      # Different ranks are rejected first ...
      sp_zero = sparse_tensor.SparseTensor([[0, 0]], [0], [1, 1])
      sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
      with self.assertRaisesOpError("Operands do not have the same ranks"):
        sparse_ops.sparse_maximum(sp_zero, sp_one).eval()

      # ... then equal-rank but different shapes.
      sp_zero = sparse_tensor.SparseTensor([[0]], [0], [1])
      sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
      with self.assertRaisesOpError("Operands' shapes do not match"):
        sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
class SparseTransposeTest(test.TestCase):
  """Checks sparse_transpose against dense array_ops.transpose."""

  @test_util.run_deprecated_v1
  def testTranspose(self):
    if np.__version__ == "1.13.0":
      self.skipTest("numpy 1.13.0 bug")

    with self.session(use_gpu=False):
      # NOTE: statement order below is preserved so the seeded RNG stream
      # (and thus the generated shapes/permutations) is unchanged.
      np.random.seed(1618)
      shapes = [np.random.randint(1, 10, size=rank) for rank in range(1, 6)]
      for shape in shapes:
        for dtype in [np.int32, np.int64, np.float32, np.float64]:
          dense_input = np.random.randn(*shape).astype(dtype)
          rank = array_ops.rank(dense_input).eval()
          perm = np.random.choice(rank, rank, False)
          sp_input, _ = _sparsify(dense_input)
          sp_trans = sparse_ops.sparse_transpose(sp_input, perm=perm)
          dense_from_sparse = sparse_ops.sparse_tensor_to_dense(
              sp_trans).eval()
          expected = array_ops.transpose(dense_input, perm=perm).eval()
          # Both the static shape and the values must match the dense op.
          self.assertAllEqual(expected.shape, sp_trans.get_shape())
          self.assertAllEqual(dense_from_sparse, expected)
class SparsePlaceholderTest(test.TestCase):
  """Static shape inference for array_ops.sparse_placeholder."""

  @test_util.run_deprecated_v1
  def testPlaceholder(self):
    # Fully-known shape: rank 2, so indices are (?, 2).
    sp = array_ops.sparse_placeholder(dtypes.float32, shape=(10, 47))
    self.assertAllEqual([10, 47], sp.get_shape())
    self.assertAllEqual([None, 2], sp.indices.get_shape().as_list())

  @test_util.run_deprecated_v1
  def testPartialShapePlaceholder(self):
    # Partially-known shape: dimensions stay unknown, but the rank still
    # pins the indices' second dimension to 2.
    sp = array_ops.sparse_placeholder(dtypes.float32, shape=(None, 47))
    self.assertAllEqual([None, None], sp.get_shape().as_list())
    self.assertAllEqual([None, 2], sp.indices.get_shape().as_list())

  @test_util.run_deprecated_v1
  def testNoShapePlaceholder(self):
    # Unknown shape: nothing can be inferred, not even the rank.
    sp = array_ops.sparse_placeholder(dtypes.float32, shape=None)
    self.assertAllEqual(None, sp.get_shape())
    self.assertAllEqual([None, None], sp.indices.get_shape().as_list())
# Standard TensorFlow test entry point: run every test in this module.
if __name__ == "__main__":
  googletest.main()
|
{
"content_hash": "b5fc5173e2450055d7605430400afa71",
"timestamp": "",
"source": "github",
"line_count": 1054,
"max_line_length": 80,
"avg_line_length": 40.70967741935484,
"alnum_prop": 0.6191619278456232,
"repo_name": "asimshankar/tensorflow",
"id": "7598991489ce6019352e19cb6c50819d91085b0d",
"size": "43597",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/sparse_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "490070"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "52677142"
},
{
"name": "CMake",
"bytes": "207176"
},
{
"name": "Dockerfile",
"bytes": "39454"
},
{
"name": "Go",
"bytes": "1290930"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "890529"
},
{
"name": "Jupyter Notebook",
"bytes": "2618412"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "68402"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102518"
},
{
"name": "PHP",
"bytes": "5172"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "43038983"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "497659"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
"""This code example gets all premium rates.
To create premium rates, run create_premium_rates.py.
Tags: PremiumRateService.getPremiumRatesByStatement
"""
__author__ = 'Nicholas Chen'
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Prints all premium rates, one PQL page at a time.

  Args:
    client: an initialized dfp.DfpClient used to fetch the service.
  """
  # Initialize appropriate service.
  premium_rate_service = client.GetService('PremiumRateService',
                                           version='v201411')

  # Create a filter statement.
  statement = dfp.FilterStatement('ORDER BY id ASC')

  # Get premium rates by statement, advancing the offset until a page
  # comes back with no 'results' key.
  while True:
    response = premium_rate_service.getPremiumRatesByStatement(
        statement.ToStatement())

    if 'results' in response:
      # Display results.
      for premium_rate in response['results']:
        print('Premium rate with ID \'%s\' of type \'%s\' assigned to '
              ' rate card with ID \'%s\' was found.\n' % (
                  premium_rate['id'],
                  premium_rate['premiumFeature']['PremiumFeature.Type'],
                  premium_rate['rateCardId']))
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break

  # print() as a function call (not the Python-2-only statement form used
  # before) keeps this example runnable under both Python 2 and Python 3.
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
  # Initialize client object from stored credentials, then run the example.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
|
{
"content_hash": "0c78604000d14d104a145707e95322cf",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 73,
"avg_line_length": 30.727272727272727,
"alnum_prop": 0.6420118343195266,
"repo_name": "coxmediagroup/googleads-python-lib",
"id": "f1cf5d7c67a7bc34934dd6f794eda086cbd9ad07",
"size": "1970",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201411/premium_rate_service/get_all_premium_rates.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2535137"
}
],
"symlink_target": ""
}
|
import numpy as np
import copy
from ..io.pick import pick_types
from ..viz.decoding import plot_gat_matrix, plot_gat_times
from ..parallel import parallel_func, check_n_jobs
class _DecodingTime(dict):
    """A dictionary to configure the training times that has the following keys:

    'slices' : ndarray, shape (n_clfs,)
        Array of time slices (in indices) used for each classifier.
        If not given, computed from 'start', 'stop', 'length', 'step'.
    'start' : float
        Time at which to start decoding (in seconds).
        Defaults to min(epochs.times).
    'stop' : float
        Maximal time at which to stop decoding (in seconds).
        Defaults to max(times).
    'step' : float
        Duration separating the start of subsequent classifiers (in
        seconds). Defaults to one time sample.
    'length' : float
        Duration of each classifier (in seconds). Defaults to one time sample.
    If None, empty dict. """

    def __repr__(self):
        s = ""
        if "start" in self:
            s += "start: %0.3f (s)" % (self["start"])
        if "stop" in self:
            s += ", stop: %0.3f (s)" % (self["stop"])
        if "step" in self:
            s += ", step: %0.3f (s)" % (self["step"])
        if "length" in self:
            s += ", length: %0.3f (s)" % (self["length"])
        if "slices" in self:
            # identify depth: training times only contains n_time but
            # testing_times can contain n_times or n_times * m_times
            depth = [len(ii) for ii in self["slices"]]
            if len(np.unique(depth)) == 1:  # if all slices have same depth
                if depth[0] == 1:  # if depth is one
                    s += ", n_time_windows: %s" % (len(depth))
                else:
                    s += ", n_time_windows: %s x %s" % (len(depth), depth[0])
            else:
                # BUGFIX: `depth` already holds lengths (ints), so the old
                # `min([len(ii) for ii in depth])` raised TypeError. Report
                # the min/max depth directly.
                s += (", n_time_windows: %s x [%s, %s]" %
                      (len(depth), min(depth), max(depth)))
        return "<DecodingTime | %s>" % s
class _GeneralizationAcrossTime(object):
    """ see GeneralizationAcrossTime
    """  # noqa

    def __init__(self, picks=None, cv=5, clf=None, train_times=None,
                 test_times=None, predict_mode='cross-validation', scorer=None,
                 n_jobs=1):
        # Deferred sklearn imports keep the module importable without it.
        from sklearn.preprocessing import StandardScaler
        from sklearn.linear_model import LogisticRegression
        from sklearn.pipeline import Pipeline

        # Store parameters in object
        self.cv = cv
        # Define training sliding window
        self.train_times = (_DecodingTime() if train_times is None
                            else _DecodingTime(train_times))
        # Define testing sliding window. If None, will be set in predict()
        if test_times is None:
            self.test_times = _DecodingTime()
        elif test_times == 'diagonal':
            self.test_times = 'diagonal'
        else:
            self.test_times = _DecodingTime(test_times)

        # Default classification pipeline: standardize, then logistic reg.
        if clf is None:
            scaler = StandardScaler()
            estimator = LogisticRegression()
            clf = Pipeline([('scaler', scaler), ('estimator', estimator)])
        self.clf = clf
        self.predict_mode = predict_mode
        self.scorer = scorer
        self.picks = picks
        self.n_jobs = n_jobs

    def fit(self, epochs, y=None):
        """ Train a classifier on each specified time slice.

        Note. This function sets the ``picks_``, ``ch_names``, ``cv_``,
        ``y_train``, ``train_times_`` and ``estimators_`` attributes.

        Parameters
        ----------
        epochs : instance of Epochs
            The epochs.
        y : list or ndarray of int, shape (n_samples,) or None, optional
            To-be-fitted model values. If None, y = epochs.events[:, 2].

        Returns
        -------
        self : GeneralizationAcrossTime
            Returns fitted GeneralizationAcrossTime object.

        Notes
        ------
        If X and y are not C-ordered and contiguous arrays of np.float64 and
        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.

        If X is a dense array, then the other methods will not support sparse
        matrices as input.
        """
        from sklearn.base import clone
        # NOTE(review): sklearn.cross_validation is the legacy (< 0.18) API,
        # removed in scikit-learn 0.20; kept here to match the version this
        # codebase pins — confirm before upgrading sklearn.
        from sklearn.cross_validation import check_cv, StratifiedKFold

        # clean attributes from any previous fit/predict/score
        for att in ['picks_', 'ch_names', 'y_train_', 'cv_', 'train_times_',
                    'estimators_', 'test_times_', 'y_pred_', 'y_true_',
                    'scores_', 'scorer_']:
            if hasattr(self, att):
                delattr(self, att)

        n_jobs = self.n_jobs
        # Extract data from MNE structure
        X, y, self.picks_ = _check_epochs_input(epochs, y, self.picks)
        self.ch_names = [epochs.ch_names[p] for p in self.picks_]

        cv = self.cv
        # Accept builtin ints and NumPy integer scalars as a fold count.
        # (np.int was a deprecated alias of the builtin int, removed in
        # NumPy 1.24; np.integer additionally matches np.int32/np.int64.)
        if isinstance(cv, (int, np.integer)):
            cv = StratifiedKFold(y, cv)
        cv = check_cv(cv, X, y, classifier=True)
        self.cv_ = cv  # update CV

        self.y_train_ = y

        # Cross validation scheme
        # XXX Cross validation should later be transformed into a make_cv, and
        # defined in __init__
        self.train_times_ = copy.deepcopy(self.train_times)
        if 'slices' not in self.train_times_:
            self.train_times_ = _sliding_window(epochs.times, self.train_times)

        # Parallel across training time
        parallel, p_time_gen, n_jobs = parallel_func(_fit_slices, n_jobs)
        n_chunks = min(X.shape[2], n_jobs)
        splits = np.array_split(self.train_times_['slices'], n_chunks)

        def f(x):
            # Unique time samples needed by a chunk of training windows.
            return np.unique(np.concatenate(x))

        out = parallel(p_time_gen(clone(self.clf),
                                  X[..., f(train_slices_chunk)],
                                  y, train_slices_chunk, cv)
                       for train_slices_chunk in splits)
        # Unpack estimators into time slices X folds list of lists.
        self.estimators_ = sum(out, list())
        return self

    def predict(self, epochs):
        """ Test each classifier on each specified testing time slice.

        .. note:: This function sets the ``y_pred_`` and ``test_times_``
                  attributes.

        Parameters
        ----------
        epochs : instance of Epochs
            The epochs. Can be similar to fitted epochs or not. See
            predict_mode parameter.

        Returns
        -------
        y_pred : list of lists of arrays of floats, shape (n_train_t, n_test_t, n_epochs, n_prediction_dims)
            The single-trial predictions at each training time and each testing
            time. Note that the number of testing times per training time need
            not be regular; else
            ``np.shape(y_pred_) = (n_train_time, n_test_time, n_epochs)``.
        """  # noqa
        # Check that at least one classifier has been trained
        if not hasattr(self, 'estimators_'):
            raise RuntimeError('Please fit models before trying to predict')

        # clean attributes from any previous predict/score
        for att in ['y_pred_', 'test_times_', 'scores_', 'scorer_', 'y_true_']:
            if hasattr(self, att):
                delattr(self, att)

        n_jobs = self.n_jobs

        X, y, _ = _check_epochs_input(epochs, None, self.picks_)

        # Define testing sliding window
        if self.test_times == 'diagonal':
            test_times = _DecodingTime()
            test_times['slices'] = [[s] for s in self.train_times_['slices']]
            test_times['times'] = [[s] for s in self.train_times_['times']]
        elif isinstance(self.test_times, dict):
            test_times = copy.deepcopy(self.test_times)
        else:
            raise ValueError('`test_times` must be a dict or "diagonal"')

        if 'slices' not in test_times:
            # Check that testing uses the same number of time samples as
            # training (otherwise the feature counts would not match).
            if 'length' not in test_times:
                test_times['length'] = self.train_times_['length']
            if test_times['length'] != self.train_times_['length']:
                raise ValueError('`train_times` and `test_times` must have '
                                 'identical `length` keys')
            # Make a sliding window for each training time.
            slices_list = list()
            times_list = list()
            for t in range(0, len(self.train_times_['slices'])):
                test_times_ = _sliding_window(epochs.times, test_times)
                times_list += [test_times_['times']]
                slices_list += [test_times_['slices']]
            test_times = test_times_
            test_times['slices'] = slices_list
            test_times['times'] = times_list

        # Store all testing times parameters
        self.test_times_ = test_times

        # Prepare parallel predictions
        parallel, p_time_gen, _ = parallel_func(_predict_time_loop, n_jobs)

        # Loop across estimators (i.e. training times)
        self.y_pred_ = parallel(p_time_gen(X, self.estimators_[t_train],
                                           self.cv_, slices, self.predict_mode)
                                for t_train, slices in
                                enumerate(self.test_times_['slices']))
        return self.y_pred_

    def score(self, epochs=None, y=None):
        """Score Epochs

        Estimate scores across trials by comparing the prediction estimated for
        each trial to its true value.

        Calls ``predict()`` if it has not been already.

        Note. The function updates the ``scorer_``, ``scores_``, and
        ``y_true_`` attributes.

        Parameters
        ----------
        epochs : instance of Epochs | None, optional
            The epochs. Can be similar to fitted epochs or not.
            If None, it needs to rely on the predictions ``y_pred_``
            generated with ``predict()``.
        y : list | ndarray, shape (n_epochs,) | None, optional
            True values to be compared with the predictions ``y_pred_``
            generated with ``predict()`` via ``scorer_``.
            If None and ``predict_mode``=='cross-validation' y = ``y_train_``.

        Returns
        -------
        scores : list of lists of float
            The scores estimated by ``scorer_`` at each training time and each
            testing time (e.g. mean accuracy of ``predict(X)``). Note that the
            number of testing times per training time need not be regular;
            else, np.shape(scores) = (n_train_time, n_test_time).
        """
        from sklearn.metrics import accuracy_score

        # Run predictions if not already done
        if epochs is not None:
            self.predict(epochs)
        else:
            if not hasattr(self, 'y_pred_'):
                raise RuntimeError('Please predict() epochs first or pass '
                                   'epochs to score()')

        # clean gat.score() attributes
        for att in ['scores_', 'scorer_', 'y_true_']:
            if hasattr(self, att):
                delattr(self, att)

        # Check scorer
        # XXX Need API to identify proper scorer from the clf
        self.scorer_ = accuracy_score if self.scorer is None else self.scorer

        # If no regressor is passed, use default epochs events
        if y is None:
            if self.predict_mode == 'cross-validation':
                y = self.y_train_
            else:
                if epochs is not None:
                    y = epochs.events[:, 2]
                else:
                    raise RuntimeError('y is undefined because '
                                       'predict_mode="mean-prediction" and '
                                       'epochs are missing. You need to '
                                       'explicitly specify y.')
            if not np.all(np.unique(y) == np.unique(self.y_train_)):
                raise ValueError('Classes (y) passed differ from classes used '
                                 'for training. Please explicitly pass your y '
                                 'for scoring.')
        elif isinstance(y, list):
            y = np.array(y)
        self.y_true_ = y  # to be compared with y_pred for scoring

        # Preprocessing for parallelization: never more jobs than trials.
        n_jobs = min(len(self.y_pred_[0][0]), check_n_jobs(self.n_jobs))
        parallel, p_time_gen, n_jobs = parallel_func(_score_loop, n_jobs)

        # Score each training and testing time point
        scores = parallel(p_time_gen(self.y_true_, self.y_pred_[t_train],
                                     slices, self.scorer_)
                          for t_train, slices
                          in enumerate(self.test_times_['slices']))
        self.scores_ = scores
        return scores
def _predict_time_loop(X, estimators, cv, slices, predict_mode):
    """Aux function of GeneralizationAcrossTime

    Run classifiers predictions loop across time samples.

    Parameters
    ----------
    X : ndarray, shape (n_epochs, n_features, n_times)
        To-be-predicted data.
    estimators : array-like, shape (n_times, n_folds)
        Array of scikit-learn classifiers fitted in cross-validation.
    cv : scikit-learn cross-validation generator
        The cross-validation scheme used at fit time. Must match the data
        when ``predict_mode == 'cross-validation'``.
    slices : list
        List of slices selecting data from X from which the prediction is
        generated.
    predict_mode : {'cross-validation', 'mean-prediction'}
        Indicates how predictions are achieved with regards to the cross-
        validation procedure:
        'cross-validation' : estimates a single prediction per sample based
            on the unique independent classifier fitted in the cross-
            validation.
        'mean-prediction' : estimates k predictions per sample, based on
            each of the k-fold cross-validation classifiers, and average
            these predictions into a single estimate per sample.
        Default: 'cross-validation'

    Returns
    -------
    y_pred : list of ndarray
        One prediction array of shape (n_epochs, n_prediction_dims) per
        testing slice.
    """
    n_epochs = len(X)
    # Loop across testing slices
    y_pred = [list() for _ in range(len(slices))]
    # XXX EHN: This loop should be parallelized in a similar way to fit()
    for t, indices in enumerate(slices):
        # Flatten features in case of multiple time samples
        Xtest = X[:, :, indices].reshape(
            n_epochs, np.prod(X[:, :, indices].shape[1:]))
        # Single trial predictions
        if predict_mode == 'cross-validation':
            # If predict within cross validation, only predict with
            # corresponding classifier, else predict with each fold's
            # classifier and average prediction.
            # Check that training cv and predicting cv match
            if (len(estimators) != cv.n_folds) or (cv.n != Xtest.shape[0]):
                raise ValueError(
                    'When `predict_mode = "cross-validation"`, the training '
                    'and predicting cv schemes must be identical.')
            for k, (train, test) in enumerate(cv):
                # Predict the held-out samples with this fold's classifier.
                # (The original code ran this prediction twice on the first
                # fold — once to size the array, once to fill it.)
                y_pred_ = _predict(Xtest[test, :], estimators[k:k + 1])
                if k == 0:
                    # Allocate output lazily: its width depends on the type
                    # of predictor and the number of classes.
                    y_pred[t] = np.empty((n_epochs, y_pred_.shape[1]))
                y_pred[t][test, :] = y_pred_
        elif predict_mode == 'mean-prediction':
            y_pred[t] = _predict(Xtest, estimators)
        else:
            raise ValueError('`predict_mode` must be a str, "mean-prediction"'
                             ' or "cross-validation"')
    return y_pred
def _score_loop(y_true, y_pred, slices, scorer):
n_time = len(slices)
# Loop across testing times
scores = [0] * n_time
for t, indices in enumerate(slices):
# Scores across trials
scores[t] = scorer(y_true, y_pred[t])
return scores
def _check_epochs_input(epochs, y, picks=None):
"""Aux function of GeneralizationAcrossTime
Format MNE data into scikit-learn X and y
Parameters
----------
epochs : instance of Epochs
The epochs.
y : ndarray shape (n_epochs) | list shape (n_epochs) | None
To-be-fitted model. If y is None, y == epochs.events.
picks : array-like of int | None
The channels indices to include. If None the data
channels in info, except bad channels, are used.
Returns
-------
X : ndarray, shape (n_epochs, n_selected_chans, n_times)
To-be-fitted data.
y : ndarray, shape (n_epochs,)
To-be-fitted model.
picks : array-like of int | None
The channels indices to include. If None the data
channels in info, except bad channels, are used.
"""
if y is None:
y = epochs.events[:, 2]
elif isinstance(y, list):
y = np.array(y)
# Convert MNE data into trials x features x time matrix
X = epochs.get_data()
# Pick channels
if picks is None: # just use good data channels
picks = pick_types(epochs.info, meg=True, eeg=True, seeg=True,
eog=False, ecg=False, misc=False, stim=False,
ref_meg=False, exclude='bads')
if isinstance(picks, (list, np.ndarray)):
picks = np.array(picks, dtype=np.int)
else:
raise ValueError('picks must be a list or a numpy.ndarray of int')
X = X[:, picks, :]
# Check data sets
assert X.shape[0] == y.shape[0]
return X, y, picks
def _fit_slices(clf, x_chunk, y, slices, cv):
    """Aux function of GeneralizationAcrossTime
    Fit each classifier.
    Parameters
    ----------
    clf : scikit-learn classifier
        The classifier object.
    x_chunk : ndarray, shape (n_epochs, n_features, n_times)
        To-be-fitted data.
    y : list | array, shape (n_epochs,)
        To-be-fitted model.
    slices : list | array, shape (n_training_slice,)
        List of training slices, indicating time sample relative to X
    cv : scikit-learn cross-validation generator
        A cross-validation generator to use. NOTE(review): it is iterated
        once per slice, so it must be re-iterable — confirm for custom
        generators.
    Returns
    -------
    estimators : list of lists of estimators
        List of fitted scikit-learn classifiers corresponding to each training
        slice.
    """
    from sklearn.base import clone
    # Initialize
    n_epochs = len(x_chunk)
    estimators = list()
    # Identify the time samples of X_chunck corresponding to X
    values = np.unique(np.concatenate(slices))
    indices = range(len(values))
    # Loop across time slices
    for t_slice in slices:
        # Translate absolute time samples into time sample relative to x_chunk
        # NOTE(review): this rewrites each `t_slice` *in place*, so the
        # caller's `slices` arrays are mutated; assumes each t_slice is an
        # ndarray (boolean-mask assignment) — confirm against callers.
        for ii in indices:
            t_slice[t_slice == values[ii]] = indices[ii]
        # Select slice
        X = x_chunk[..., t_slice]
        # Reshape data matrix to flatten features in case of multiple time
        # samples.
        X = X.reshape(n_epochs, np.prod(X.shape[1:]))
        # Loop across folds
        estimators_ = list()
        for fold, (train, test) in enumerate(cv):
            # Fit classifier
            # `test` is intentionally unused: only the training split is fit.
            clf_ = clone(clf)
            clf_.fit(X[train, :], y[train])
            estimators_.append(clf_)
        # Store classifier
        estimators.append(estimators_)
    return estimators
def _sliding_window(times, window_params):
    """Aux function of GeneralizationAcrossTime
    Define the slices on which to train each classifier.
    Parameters
    ----------
    times : ndarray, shape (n_times,)
        Array of times from MNE epochs.
    window_params : dict keys: ('start', 'stop', 'step', 'length')
        Either train or test times. See GAT documentation.
    Returns
    -------
    time_pick : list
        List of training slices, indicating for each classifier the time
        sample (in indices of times) to be fitted on.
    """
    window_params = _DecodingTime(window_params)
    # Sampling frequency as int
    # NOTE(review): despite the comment above, `freq` is the time *interval*
    # between samples in seconds, not a frequency; and dividing by
    # len(times) rather than (len(times) - 1) slightly underestimates it —
    # confirm before relying on exact step/length rounding.
    freq = (times[-1] - times[0]) / len(times)
    # Default values
    # NOTE(review): pre-computed 'slices' are only honored when *all* of
    # start/stop/step/length are also present — confirm this is intended.
    if ('slices' in window_params and
            all(k in window_params for k in
                ('start', 'stop', 'step', 'length'))):
        time_pick = window_params['slices']
    else:
        if 'start' not in window_params:
            window_params['start'] = times[0]
        if 'stop' not in window_params:
            window_params['stop'] = times[-1]
        if 'step' not in window_params:
            window_params['step'] = freq
        if 'length' not in window_params:
            window_params['length'] = freq
        # Validate the requested window against the available time range.
        if (window_params['start'] < times[0] or
                window_params['start'] > times[-1]):
            raise ValueError(
                '`start` (%.2f s) outside time range [%.2f, %.2f].' % (
                    window_params['start'], times[0], times[-1]))
        if (window_params['stop'] < times[0] or
                window_params['stop'] > times[-1]):
            raise ValueError(
                '`stop` (%.2f s) outside time range [%.2f, %.2f].' % (
                    window_params['stop'], times[0], times[-1]))
        if window_params['step'] < freq:
            raise ValueError('`step` must be >= 1 / sampling_frequency')
        if window_params['length'] < freq:
            raise ValueError('`length` must be >= 1 / sampling_frequency')
        if window_params['length'] > np.ptp(times):
            raise ValueError('`length` must be <= time range')
        # Convert seconds to index
        def find_time_idx(t):  # find closest time point
            return np.argmin(np.abs(np.asarray(times) - t))
        start = find_time_idx(window_params['start'])
        stop = find_time_idx(window_params['stop'])
        step = int(round(window_params['step'] / freq))
        length = int(round(window_params['length'] / freq))
        # For each training slice, give time samples to be included
        # Slide a `length`-sample window forward by `step` samples until the
        # next window would run past `stop`.
        time_pick = [range(start, start + length)]
        while (time_pick[-1][0] + step) <= (stop - length + 1):
            start = time_pick[-1][0] + step
            time_pick.append(range(start, start + length))
        window_params['slices'] = time_pick
    # Keep last training times in milliseconds
    # NOTE(review): `times` is in seconds, so these are seconds too — the
    # comment above looks stale; verify before renaming.
    t_inds_ = [t[-1] for t in window_params['slices']]
    window_params['times'] = times[t_inds_]
    return window_params
def _predict(X, estimators):
    """Aux function of GeneralizationAcrossTime
    Predict each classifier. If multiple classifiers are passed, average
    prediction across all classifiers to result in a single prediction per
    classifier.
    Parameters
    ----------
    estimators : ndarray, shape (n_folds,) | shape (1,)
        Array of scikit-learn classifiers to predict data.
    X : ndarray, shape (n_epochs, n_features, n_times)
        To-be-predicted data
    Returns
    -------
    y_pred : ndarray, shape (n_epochs, m_prediction_dimensions)
        Classifier's prediction for each trial.
    """
    from scipy import stats
    from sklearn.base import is_classifier
    # Initialize results:
    n_epochs = X.shape[0]
    n_clf = len(estimators)
    # Compute prediction for each sub-estimator (i.e. per fold)
    # if independent, estimators = all folds
    # NOTE(review): assumes at least one estimator; an empty sequence would
    # leave `fold`/`y_pred` undefined below.
    for fold, clf in enumerate(estimators):
        _y_pred = clf.predict(X)
        # initialize predict_results array
        # The output width is only known after the first prediction (it
        # depends on the predictor type and the number of classes).
        if fold == 0:
            predict_size = _y_pred.shape[1] if _y_pred.ndim > 1 else 1
            y_pred = np.ones((n_epochs, predict_size, n_clf))
        if predict_size == 1:
            y_pred[:, 0, fold] = _y_pred
        else:
            y_pred[:, :, fold] = _y_pred
    # Collapse y_pred across folds if necessary (i.e. if independent)
    # `fold` is the index of the last estimator: > 0 means several folds.
    if fold > 0:
        # XXX need API to identify how multiple predictions can be combined?
        if is_classifier(clf):
            # NOTE(review): relies on scipy.stats.mode retaining the reduced
            # axis (pre-SciPy-1.11 behavior), hence the reshape below —
            # verify against the pinned SciPy version.
            y_pred, _ = stats.mode(y_pred, axis=2)
        else:
            y_pred = np.mean(y_pred, axis=2)
    # Format shape
    y_pred = y_pred.reshape((n_epochs, predict_size))
    return y_pred
class GeneralizationAcrossTime(_GeneralizationAcrossTime):
    """Generalize across time and conditions
    Creates and estimator object used to 1) fit a series of classifiers on
    multidimensional time-resolved data, and 2) test the ability of each
    classifier to generalize across other time samples.
    Parameters
    ----------
    picks : array-like of int | None
        The channels indices to include. If None the data
        channels in info, except bad channels, are used.
    cv : int | object
        If an integer is passed, it is the number of folds.
        Specific cross-validation objects can be passed, see
        scikit-learn.cross_validation module for the list of possible objects.
        Defaults to 5.
    clf : object | None
        An estimator compliant with the scikit-learn API (fit & predict).
        If None the classifier will be a standard pipeline including
        StandardScaler and LogisticRegression with default parameters.
    train_times : dict | None
        A dictionary to configure the training times:
        ``slices`` : ndarray, shape (n_clfs,)
            Array of time slices (in indices) used for each classifier.
            If not given, computed from 'start', 'stop', 'length', 'step'.
        ``start`` : float
            Time at which to start decoding (in seconds).
            Defaults to min(epochs.times).
        ``stop`` : float
            Maximal time at which to stop decoding (in seconds).
            Defaults to max(times).
        ``step`` : float
            Duration separating the start of subsequent classifiers (in
            seconds). Defaults to one time sample.
        ``length`` : float
            Duration of each classifier (in seconds).
            Defaults to one time sample.
        If None, empty dict.
    test_times : 'diagonal' | dict | None, optional
        Configures the testing times.
        If set to 'diagonal', predictions are made at the time at which
        each classifier is trained.
        If set to None, predictions are made at all time points.
        If set to dict, the dict should contain ``slices`` or be contructed in
        a similar way to train_times::
            ``slices`` : ndarray, shape (n_clfs,)
                Array of time slices (in indices) used for each classifier.
                If not given, computed from 'start', 'stop', 'length', 'step'.
        If None, empty dict.
    predict_mode : {'cross-validation', 'mean-prediction'}
        Indicates how predictions are achieved with regards to the cross-
        validation procedure:
            ``cross-validation`` : estimates a single prediction per sample
                based on the unique independent classifier fitted in the
                cross-validation.
            ``mean-prediction`` : estimates k predictions per sample, based on
                each of the k-fold cross-validation classifiers, and average
                these predictions into a single estimate per sample.
        Default: 'cross-validation'
    scorer : object | None
        scikit-learn Scorer instance. If None, set to accuracy_score.
    n_jobs : int
        Number of jobs to run in parallel. Defaults to 1.
    Attributes
    ----------
    picks_ : array-like of int | None
        The channels indices to include.
    ch_names : list, array-like, shape (n_channels,)
        Names of the channels used for training.
    y_train_ : list | ndarray, shape (n_samples,)
        The categories used for training.
    train_times_ : dict
        A dictionary that configures the training times:
        ``slices`` : ndarray, shape (n_clfs,)
            Array of time slices (in indices) used for each classifier.
            If not given, computed from 'start', 'stop', 'length', 'step'.
        ``times`` : ndarray, shape (n_clfs,)
            The training times (in seconds).
    test_times_ : dict
        A dictionary that configures the testing times for each training time:
        ``slices`` : ndarray, shape (n_clfs, n_testing_times)
            Array of time slices (in indices) used for each classifier.
        ``times`` : ndarray, shape (n_clfs, n_testing_times)
            The testing times (in seconds) for each training time.
    cv_ : CrossValidation object
        The actual CrossValidation input depending on y.
    estimators_ : list of list of scikit-learn.base.BaseEstimator subclasses.
        The estimators for each time point and each fold.
    y_pred_ : list of lists of arrays of floats, shape (n_train_times, n_test_times, n_epochs, n_prediction_dims)
        The single-trial predictions estimated by self.predict() at each
        training time and each testing time. Note that the number of testing
        times per training time need not be regular, else
        ``np.shape(y_pred_) = (n_train_time, n_test_time, n_epochs).``
    y_true_ : list | ndarray, shape (n_samples,)
        The categories used for scoring ``y_pred_``.
    scorer_ : object
        scikit-learn Scorer instance.
    scores_ : list of lists of float
        The scores estimated by ``self.scorer_`` at each training time and each
        testing time (e.g. mean accuracy of self.predict(X)). Note that the
        number of testing times per training time need not be regular;
        else, ``np.shape(scores) = (n_train_time, n_test_time)``.
    See Also
    --------
    TimeDecoding
    Notes
    -----
    The function implements the method used in:
    Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
    and Stanislas Dehaene, "Two distinct dynamic modes subtend the
    detection of unexpected sounds", PLoS ONE, 2014
    DOI: 10.1371/journal.pone.0085791
    .. versionadded:: 0.9.0
    """ # noqa

    def __init__(self, picks=None, cv=5, clf=None, train_times=None,
                 test_times=None, predict_mode='cross-validation', scorer=None,
                 n_jobs=1):
        # All fitting/prediction logic lives in the private base class; this
        # public subclass only adds the repr and the plotting helpers.
        super(GeneralizationAcrossTime, self).__init__(
            picks=picks, cv=cv, clf=clf, train_times=train_times,
            test_times=test_times, predict_mode=predict_mode, scorer=scorer,
            n_jobs=n_jobs)

    def __repr__(self):
        # Summarize the object's lifecycle (fitted / predicted / scored) for
        # interactive use; each stage is detected via its output attribute.
        s = ''
        if hasattr(self, "estimators_"):
            s += "fitted, start : %0.3f (s), stop : %0.3f (s)" % (
                self.train_times_['start'], self.train_times_['stop'])
        else:
            s += 'no fit'
        if hasattr(self, 'y_pred_'):
            s += (", predicted %d epochs" % len(self.y_pred_[0][0]))
        else:
            s += ", no prediction"
        if hasattr(self, "estimators_") and hasattr(self, 'scores_'):
            s += ',\n '
        else:
            s += ', '
        if hasattr(self, 'scores_'):
            s += "scored"
            if callable(self.scorer_):
                s += " (%s)" % (self.scorer_.__name__)
        else:
            s += "no score"
        return "<GAT | %s>" % s

    def plot(self, title=None, vmin=None, vmax=None, tlim=None, ax=None,
             cmap='RdBu_r', show=True, colorbar=True,
             xlabel=True, ylabel=True):
        """Plotting function of GeneralizationAcrossTime object
        Plot the score of each classifier at each tested time window.
        Parameters
        ----------
        title : str | None
            Figure title.
        vmin : float | None
            Min color value for scores. If None, sets to min(gat.scores_).
        vmax : float | None
            Max color value for scores. If None, sets to max(gat.scores_).
        tlim : ndarray, (train_min, test_max) | None
            The time limits used for plotting.
        ax : object | None
            Plot pointer. If None, generate new figure.
        cmap : str | cmap object
            The color map to be used. Defaults to 'RdBu_r'.
        show : bool
            If True, the figure will be shown. Defaults to True.
        colorbar : bool
            If True, the colorbar of the figure is displayed. Defaults to True.
        xlabel : bool
            If True, the xlabel is displayed. Defaults to True.
        ylabel : bool
            If True, the ylabel is displayed. Defaults to True.
        Returns
        -------
        fig : instance of matplotlib.figure.Figure
            The figure.
        """
        # Thin wrapper: the actual rendering lives in the viz module.
        return plot_gat_matrix(self, title=title, vmin=vmin, vmax=vmax,
                               tlim=tlim, ax=ax, cmap=cmap, show=show,
                               colorbar=colorbar, xlabel=xlabel, ylabel=ylabel)

    def plot_diagonal(self, title=None, xmin=None, xmax=None, ymin=None,
                      ymax=None, ax=None, show=True, color=None,
                      xlabel=True, ylabel=True, legend=True, chance=True,
                      label='Classif. score'):
        """Plotting function of GeneralizationAcrossTime object
        Plot each classifier score trained and tested at identical time
        windows.
        Parameters
        ----------
        title : str | None
            Figure title.
        xmin : float | None, optional
            Min time value.
        xmax : float | None, optional
            Max time value.
        ymin : float | None, optional
            Min score value. If None, sets to min(scores).
        ymax : float | None, optional
            Max score value. If None, sets to max(scores).
        ax : object | None
            Instance of mataplotlib.axes.Axis. If None, generate new figure.
        show : bool
            If True, the figure will be shown. Defaults to True.
        color : str
            Score line color.
        xlabel : bool
            If True, the xlabel is displayed. Defaults to True.
        ylabel : bool
            If True, the ylabel is displayed. Defaults to True.
        legend : bool
            If True, a legend is displayed. Defaults to True.
        chance : bool | float. Defaults to None
            Plot chance level. If True, chance level is estimated from the type
            of scorer.
        label : str
            Score label used in the legend. Defaults to 'Classif. score'.
        Returns
        -------
        fig : instance of matplotlib.figure.Figure
            The figure.
        """
        # Thin wrapper around the viz helper, fixed to the diagonal
        # (train time == test time).
        return plot_gat_times(self, train_time='diagonal', title=title,
                              xmin=xmin, xmax=xmax,
                              ymin=ymin, ymax=ymax, ax=ax, show=show,
                              color=color, xlabel=xlabel, ylabel=ylabel,
                              legend=legend, chance=chance, label=label)

    def plot_times(self, train_time, title=None, xmin=None, xmax=None,
                   ymin=None, ymax=None, ax=None, show=True, color=None,
                   xlabel=True, ylabel=True, legend=True, chance=True,
                   label='Classif. score'):
        """Plotting function of GeneralizationAcrossTime object
        Plot the scores of the classifier trained at specific training time(s).
        Parameters
        ----------
        train_time : float | list or array of float
            Plots scores of the classifier trained at train_time.
        title : str | None
            Figure title.
        xmin : float | None, optional
            Min time value.
        xmax : float | None, optional
            Max time value.
        ymin : float | None, optional
            Min score value. If None, sets to min(scores).
        ymax : float | None, optional
            Max score value. If None, sets to max(scores).
        ax : object | None
            Instance of mataplotlib.axes.Axis. If None, generate new figure.
        show : bool
            If True, the figure will be shown. Defaults to True.
        color : str or list of str
            Score line color(s).
        xlabel : bool
            If True, the xlabel is displayed. Defaults to True.
        ylabel : bool
            If True, the ylabel is displayed. Defaults to True.
        legend : bool
            If True, a legend is displayed. Defaults to True.
        chance : bool | float.
            Plot chance level. If True, chance level is estimated from the type
            of scorer.
        label : str
            Score label used in the legend. Defaults to 'Classif. score'.
        Returns
        -------
        fig : instance of matplotlib.figure.Figure
            The figure.
        """
        # Validate early so the viz layer receives only floats (or an
        # array/list of floats) for train_time.
        if (not isinstance(train_time, float) and
            not (isinstance(train_time, (list, np.ndarray)) and
                 np.all([isinstance(time, float) for time in train_time]))):
            raise ValueError('train_time must be float | list or array of '
                             'floats. Got %s.' % type(train_time))
        return plot_gat_times(self, train_time=train_time, title=title,
                              xmin=xmin, xmax=xmax,
                              ymin=ymin, ymax=ymax, ax=ax, show=show,
                              color=color, xlabel=xlabel, ylabel=ylabel,
                              legend=legend, chance=chance, label=label)
class TimeDecoding(_GeneralizationAcrossTime):
    """Train and test a series of classifiers at each time point to obtain a
    score across time.
    Parameters
    ----------
    picks : array-like of int | None
        The channels indices to include. If None the data
        channels in info, except bad channels, are used.
    cv : int | object
        If an integer is passed, it is the number of folds.
        Specific cross-validation objects can be passed, see
        scikit-learn.cross_validation module for the list of possible objects.
        Defaults to 5.
    clf : object | None
        An estimator compliant with the scikit-learn API (fit & predict).
        If None the classifier will be a standard pipeline including
        StandardScaler and a Logistic Regression with default parameters.
    times : dict | None
        A dictionary to configure the training times:
        ``slices`` : ndarray, shape (n_clfs,)
            Array of time slices (in indices) used for each classifier.
            If not given, computed from 'start', 'stop', 'length', 'step'.
        ``start`` : float
            Time at which to start decoding (in seconds). By default,
            min(epochs.times).
        ``stop`` : float
            Maximal time at which to stop decoding (in seconds). By
            default, max(times).
        ``step`` : float
            Duration separating the start of subsequent classifiers (in
            seconds). By default, equals one time sample.
        ``length`` : float
            Duration of each classifier (in seconds). By default, equals
            one time sample.
        If None, empty dict.
    predict_mode : {'cross-validation', 'mean-prediction'}
        Indicates how predictions are achieved with regards to the cross-
        validation procedure:
            ``cross-validation`` : estimates a single prediction per sample
                based on the unique independent classifier fitted in the
                cross-validation.
            ``mean-prediction`` : estimates k predictions per sample, based on
                each of the k-fold cross-validation classifiers, and average
                these predictions into a single estimate per sample.
        Default: 'cross-validation'
    scorer : object | None
        scikit-learn Scorer instance. If None, set to accuracy_score.
    n_jobs : int
        Number of jobs to run in parallel. Defaults to 1.
    Attributes
    ----------
    picks_ : array-like of int | None
        The channels indices to include.
    ch_names : list, array-like, shape (n_channels,)
        Names of the channels used for training.
    y_train_ : ndarray, shape (n_samples,)
        The categories used for training.
    times_ : dict
        A dictionary that configures the training times:
        ``slices`` : ndarray, shape (n_clfs,)
            Array of time slices (in indices) used for each classifier.
            If not given, computed from 'start', 'stop', 'length', 'step'.
        ``times`` : ndarray, shape (n_clfs,)
            The training times (in seconds).
    cv_ : CrossValidation object
        The actual CrossValidation input depending on y.
    estimators_ : list of list of scikit-learn.base.BaseEstimator subclasses.
        The estimators for each time point and each fold.
    y_pred_ : ndarray, shape (n_times, n_epochs, n_prediction_dims)
        Class labels for samples in X.
    y_true_ : list | ndarray, shape (n_samples,)
        The categories used for scoring y_pred_.
    scorer_ : object
        scikit-learn Scorer instance.
    scores_ : list of float, shape (n_times,)
        The scores (mean accuracy of self.predict(X) wrt. y.).
    See Also
    --------
    GeneralizationAcrossTime
    Notes
    -----
    The function is equivalent to the diagonal of GeneralizationAcrossTime()
    .. versionadded:: 0.10
    """

    def __init__(self, picks=None, cv=5, clf=None, times=None,
                 predict_mode='cross-validation', scorer=None, n_jobs=1):
        # BUGFIX: forward the user-supplied classifier. The previous code
        # passed ``clf=None`` here, silently discarding any ``clf`` argument
        # and always falling back to the default pipeline.
        super(TimeDecoding, self).__init__(picks=picks, cv=cv, clf=clf,
                                           train_times=times,
                                           test_times='diagonal',
                                           predict_mode=predict_mode,
                                           scorer=scorer, n_jobs=n_jobs)
        self._clean_times()

    def __repr__(self):
        # Summarize the object's lifecycle (fitted / predicted / scored),
        # mirroring GeneralizationAcrossTime.__repr__.
        s = ''
        if hasattr(self, "estimators_"):
            s += "fitted, start : %0.3f (s), stop : %0.3f (s)" % (
                self.times_['start'], self.times_['stop'])
        else:
            s += 'no fit'
        if hasattr(self, 'y_pred_'):
            s += (", predicted %d epochs" % len(self.y_pred_[0]))
        else:
            s += ", no prediction"
        if hasattr(self, "estimators_") and hasattr(self, 'scores_'):
            s += ',\n '
        else:
            s += ', '
        if hasattr(self, 'scores_'):
            s += "scored"
            if callable(self.scorer_):
                s += " (%s)" % (self.scorer_.__name__)
        else:
            s += "no score"
        return "<TimeDecoding | %s>" % s

    def fit(self, epochs, y=None):
        """ Train a classifier on each specified time slice.
        Note. This function sets the ``picks_``, ``ch_names``, ``cv_``,
        ``y_train``, ``train_times_`` and ``estimators_`` attributes.
        Parameters
        ----------
        epochs : instance of Epochs
            The epochs.
        y : list or ndarray of int, shape (n_samples,) or None, optional
            To-be-fitted model values. If None, y = epochs.events[:, 2].
        Returns
        -------
        self : TimeDecoding
            Returns fitted TimeDecoding object.
        Notes
        -----
        If X and y are not C-ordered and contiguous arrays of np.float64 and
        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
        If X is a dense array, then the other methods will not support sparse
        matrices as input.
        """
        # Temporarily restore GAT-style attribute names around the base-class
        # call, then map them back to the TimeDecoding naming.
        self._prep_times()
        super(TimeDecoding, self).fit(epochs, y=y)
        self._clean_times()
        return self

    def predict(self, epochs):
        """ Test each classifier on each specified testing time slice.
        .. note:: This function sets the ``y_pred_`` and ``test_times_``
                  attributes.
        Parameters
        ----------
        epochs : instance of Epochs
            The epochs. Can be similar to fitted epochs or not. See
            predict_mode parameter.
        Returns
        -------
        y_pred : list of lists of arrays of floats, shape (n_times, n_epochs, n_prediction_dims)
            The single-trial predictions at each time sample.
        """ # noqa
        self._prep_times()
        super(TimeDecoding, self).predict(epochs)
        self._clean_times()
        return self.y_pred_

    def score(self, epochs=None, y=None):
        """Score Epochs
        Estimate scores across trials by comparing the prediction estimated for
        each trial to its true value.
        Calls ``predict()`` if it has not been already.
        Note. The function updates the ``scorer_``, ``scores_``, and
        ``y_true_`` attributes.
        Parameters
        ----------
        epochs : instance of Epochs | None, optional
            The epochs. Can be similar to fitted epochs or not.
            If None, it needs to rely on the predictions ``y_pred_``
            generated with ``predict()``.
        y : list | ndarray, shape (n_epochs,) | None, optional
            True values to be compared with the predictions ``y_pred_``
            generated with ``predict()`` via ``scorer_``.
            If None and ``predict_mode``=='cross-validation' y = ``y_train_``.
        Returns
        -------
        scores : list of float, shape (n_times,)
            The scores estimated by ``scorer_`` at each time sample (e.g. mean
            accuracy of ``predict(X)``).
        """
        if epochs is not None:
            self.predict(epochs)
        else:
            if not hasattr(self, 'y_pred_'):
                raise RuntimeError('Please predict() epochs first or pass '
                                   'epochs to score()')
        # Predictions are already available, so the base-class score() is
        # called without epochs to avoid re-predicting.
        self._prep_times()
        super(TimeDecoding, self).score(epochs=None, y=y)
        self._clean_times()
        return self.scores_

    def plot(self, title=None, xmin=None, xmax=None, ymin=None, ymax=None,
             ax=None, show=True, color=None, xlabel=True, ylabel=True,
             legend=True, chance=True, label='Classif. score'):
        """Plotting function
        Plot the scores of each classifier along the diagonal (train time ==
        test time).
        Parameters
        ----------
        title : str | None
            Figure title.
        xmin : float | None, optional,
            Min time value.
        xmax : float | None, optional,
            Max time value.
        ymin : float
            Min score value. Defaults to 0.
        ymax : float
            Max score value. Defaults to 1.
        ax : object | None
            Instance of mataplotlib.axes.Axis. If None, generate new figure.
        show : bool
            If True, the figure will be shown. Defaults to True.
        color : str
            Score line color. Defaults to 'steelblue'.
        xlabel : bool
            If True, the xlabel is displayed. Defaults to True.
        ylabel : bool
            If True, the ylabel is displayed. Defaults to True.
        legend : bool
            If True, a legend is displayed. Defaults to True.
        chance : bool | float. Defaults to None
            Plot chance level. If True, chance level is estimated from the type
            of scorer.
        label : str
            Score label used in the legend. Defaults to 'Classif. score'.
        Returns
        -------
        fig : instance of matplotlib.figure.Figure
            The figure.
        """
        # XXX JRK: need cleanup in viz
        self._prep_times()
        fig = plot_gat_times(self, train_time='diagonal', title=title,
                             xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, ax=ax,
                             show=show, color=color, xlabel=xlabel,
                             ylabel=ylabel, legend=legend, chance=chance,
                             label=label)
        self._clean_times()
        return fig

    def _prep_times(self):
        """Auxiliary function to allow compability with GAT"""
        # Rename times -> train_times and synthesize a diagonal test_times_
        # so the GAT base-class machinery can run unchanged.
        self.test_times = 'diagonal'
        if hasattr(self, 'times'):
            self.train_times = self.times
        if hasattr(self, 'times_'):
            self.train_times_ = self.times_
            self.test_times_ = _DecodingTime()
            self.test_times_['slices'] = [[slic] for slic in
                                          self.train_times_['slices']]
            self.test_times_['times'] = [[tim] for tim in
                                         self.train_times_['times']]
        # Wrap per-time results in singleton lists to match GAT's
        # (n_train_times, n_test_times, ...) layout.
        if hasattr(self, 'scores_'):
            self.scores_ = [[score] for score in self.scores_]
        if hasattr(self, 'y_pred_'):
            self.y_pred_ = [[y_pred] for y_pred in self.y_pred_]

    def _clean_times(self):
        """Auxiliary function to allow compability with GAT"""
        # Undo _prep_times(): restore TimeDecoding attribute names and
        # unwrap the singleton test-time dimension.
        if hasattr(self, 'train_times'):
            self.times = self.train_times
        if hasattr(self, 'train_times_'):
            self.times_ = self.train_times_
        for attr in ['test_times', 'train_times',
                     'test_times_', 'train_times_']:
            if hasattr(self, attr):
                delattr(self, attr)
        if hasattr(self, 'y_pred_'):
            self.y_pred_ = [y_pred[0] for y_pred in self.y_pred_]
        if hasattr(self, 'scores_'):
            self.scores_ = [score[0] for score in self.scores_]
|
{
"content_hash": "5964db9302fb68b232093f17fc4accc4",
"timestamp": "",
"source": "github",
"line_count": 1238,
"max_line_length": 113,
"avg_line_length": 39.899030694668824,
"alnum_prop": 0.5639234740358335,
"repo_name": "trachelr/mne-python",
"id": "211bb0407dc8189da534dec374371fb8787217dd",
"size": "49656",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mne/decoding/time_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3117"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4465450"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from kolibri.auth.models import Collection, FacilityUser
def get_members_or_user(collection_kind, collection_id):
    """Resolve a (kind, id) pair to a queryset of facility users.

    A kind of ``'user'`` means the id is a FacilityUser pk and a one-element
    queryset is returned; any other kind names a Collection whose members
    are returned.
    """
    if collection_kind == 'user':
        return FacilityUser.objects.filter(pk=collection_id)
    # Non-user kinds identify a collection: look it up within the given
    # kind and expand to its membership.
    collection = Collection.objects.filter(kind=collection_kind).get(pk=collection_id)
    return collection.get_members()
|
{
"content_hash": "d5a0acb29803ce072f52c83705dba085",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 98,
"avg_line_length": 45.625,
"alnum_prop": 0.736986301369863,
"repo_name": "benjaoming/kolibri",
"id": "c82c83f7269f848ff27a3fceeca2f17ef78653b1",
"size": "365",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "kolibri/plugins/coach/utils/return_users.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "864"
},
{
"name": "CSS",
"bytes": "29176"
},
{
"name": "Dockerfile",
"bytes": "1872"
},
{
"name": "HTML",
"bytes": "12616"
},
{
"name": "JavaScript",
"bytes": "799257"
},
{
"name": "Makefile",
"bytes": "8232"
},
{
"name": "Python",
"bytes": "1241333"
},
{
"name": "Shell",
"bytes": "10412"
},
{
"name": "Vue",
"bytes": "815069"
}
],
"symlink_target": ""
}
|
import inspect
import logging
from oslo_config import cfg
from oslo_utils import importutils
from oslo_messaging._drivers.zmq_driver.matchmaker import base
from oslo_messaging._drivers.zmq_driver import zmq_address
from retrying import retry
redis = importutils.try_import('redis')
redis_sentinel = importutils.try_import('redis.sentinel')
LOG = logging.getLogger(__name__)
matchmaker_redis_opts = [
cfg.StrOpt('host',
default='127.0.0.1',
help='Host to locate redis.'),
cfg.PortOpt('port',
default=6379,
help='Use this port to connect to redis host.'),
cfg.StrOpt('password',
default='',
secret=True,
help='Password for Redis server (optional).'),
cfg.ListOpt('sentinel_hosts',
default=[],
help='List of Redis Sentinel hosts (fault tolerance mode) e.g.\
[host:port, host1:port ... ]'),
cfg.StrOpt('sentinel_group_name',
default='oslo-messaging-zeromq',
help='Redis replica set name.'),
cfg.IntOpt('wait_timeout',
default=500,
help='Time in ms to wait between connection attempts.'),
cfg.IntOpt('check_timeout',
default=20000,
help='Time in ms to wait before the transaction is killed.'),
cfg.IntOpt('socket_timeout',
default=1000,
help='Timeout in ms on blocking socket operations'),
]
_PUBLISHERS_KEY = "PUBLISHERS"
_RETRY_METHODS = ("get_hosts", "get_publishers")
def retry_if_connection_error(ex):
    # Retry predicate for the ``retrying`` decorator: only transient redis
    # connection failures warrant another attempt.
    return isinstance(ex, redis.ConnectionError)
def retry_if_empty(hosts):
    """Result predicate for ``retrying``: a falsy/empty host list triggers a retry."""
    found_hosts = bool(hosts)
    return not found_hosts
def apply_retrying(obj, cfg):
    """Wrap the matchmaker's lookup methods in retry logic.

    Rebinds each method named in _RETRY_METHODS on *obj* with a version
    decorated by ``retrying.retry`` so that redis connection errors and
    empty results cause another attempt.  Timeouts come from the
    [matchmaker_redis] config section (values are in milliseconds).
    """
    for attr_name, attr in inspect.getmembers(obj):
        if not (inspect.ismethod(attr) or inspect.isfunction(attr)):
            continue
        if attr_name in _RETRY_METHODS:
            setattr(
                obj,
                attr_name,
                retry(
                    wait_fixed=cfg.matchmaker_redis.wait_timeout,
                    stop_max_delay=cfg.matchmaker_redis.check_timeout,
                    retry_on_exception=retry_if_connection_error,
                    retry_on_result=retry_if_empty
                )(attr))
class RedisMatchMaker(base.MatchMakerBase):
    """Matchmaker that stores target-to-host mappings in redis.

    Connects either to a standalone redis server or, when several hosts
    are configured, to the master of a Redis Sentinel group.
    """
    def __init__(self, conf, *args, **kwargs):
        super(RedisMatchMaker, self).__init__(conf, *args, **kwargs)
        self.conf.register_opts(matchmaker_redis_opts, "matchmaker_redis")
        self.sentinel_hosts = self._extract_sentinel_options()
        if not self.sentinel_hosts:
            # Single-server mode.
            self.standalone_redis = self._extract_standalone_redis_options()
            self._redis = redis.StrictRedis(
                host=self.standalone_redis["host"],
                port=self.standalone_redis["port"],
                password=self.standalone_redis["password"]
            )
        else:
            # Config value is in milliseconds; redis-py expects seconds.
            socket_timeout = self.conf.matchmaker_redis.socket_timeout / 1000.
            sentinel = redis.sentinel.Sentinel(
                sentinels=self.sentinel_hosts,
                socket_timeout=socket_timeout
            )
            self._redis = sentinel.master_for(
                self.conf.matchmaker_redis.sentinel_group_name,
                socket_timeout=socket_timeout
            )
        # Make the lookup methods retry on connection errors/empty results.
        apply_retrying(self, self.conf)
    def _extract_sentinel_options(self):
        # Transport URL takes precedence over config.  A single URL host
        # means standalone mode; note this returns None implicitly then.
        if self.url and self.url.hosts:
            if len(self.url.hosts) > 1:
                return [(host.hostname, host.port) for host in self.url.hosts]
        elif self.conf.matchmaker_redis.sentinel_hosts:
            s = self.conf.matchmaker_redis.sentinel_hosts
            return [tuple(i.split(":")) for i in s]
    def _extract_standalone_redis_options(self):
        # Prefer connection details from the transport URL when present.
        if self.url and self.url.hosts:
            redis_host = self.url.hosts[0]
            return {"host": redis_host.hostname,
                    "port": redis_host.port,
                    "password": redis_host.password}
        else:
            return {"host": self.conf.matchmaker_redis.host,
                    "port": self.conf.matchmaker_redis.port,
                    "password": self.conf.matchmaker_redis.password}
    def register_publisher(self, hostname):
        # ``hostname`` is presumably a (host, port) pair of strings —
        # it is flattened to "host,port" for storage; verify at call sites.
        host_str = ",".join(hostname)
        self._redis.sadd(_PUBLISHERS_KEY, host_str)
    def unregister_publisher(self, hostname):
        host_str = ",".join(hostname)
        self._redis.srem(_PUBLISHERS_KEY, host_str)
    def get_publishers(self):
        """Return every registered publisher as a tuple split on ','."""
        hosts = []
        hosts.extend([tuple(host_str.split(","))
                      for host_str in
                      self._get_hosts_by_key(_PUBLISHERS_KEY)])
        return hosts
    def _get_hosts_by_key(self, key):
        return self._redis.smembers(key)
    def register(self, target, hostname, listener_type, expire=-1):
        """Register *hostname* under the target's specific and topic-wide
        keys; a positive *expire* sets a TTL (seconds) on each key."""
        def register_key(key):
            self._redis.sadd(key, hostname)
            if expire > 0:
                self._redis.expire(key, expire)
        if target.topic and target.server:
            key = zmq_address.target_to_key(target, listener_type)
            register_key(key)
        if target.topic:
            key = zmq_address.prefix_str(target.topic, listener_type)
            register_key(key)
    def unregister(self, target, hostname, listener_type):
        key = zmq_address.target_to_key(target, listener_type)
        self._redis.srem(key, hostname)
    def get_hosts(self, target, listener_type):
        """Look up hosts for *target*, falling back from the specific
        target key to the bare topic prefix when nothing is found."""
        LOG.debug("[Redis] get_hosts for target %s", target)
        hosts = []
        key = zmq_address.target_to_key(target, listener_type)
        hosts.extend(self._get_hosts_by_key(key))
        if not hosts and target.topic and target.server:
            key = zmq_address.prefix_str(target.topic, listener_type)
            hosts.extend(self._get_hosts_by_key(key))
        return hosts
|
{
"content_hash": "e0745c4ef71f31a92582b8da830f7fd6",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 79,
"avg_line_length": 35.68674698795181,
"alnum_prop": 0.5854152599594868,
"repo_name": "dukhlov/oslo.messaging",
"id": "0d0ce8bca7cd96edd2c1dede67bb0a92d3ef2c79",
"size": "6499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo_messaging/_drivers/zmq_driver/matchmaker/matchmaker_redis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "912386"
},
{
"name": "Shell",
"bytes": "8086"
}
],
"symlink_target": ""
}
|
from .. import ConfigIncluder, ConfigIncluderFile, ConfigIncluderURL, \
ConfigIncluderClasspath
class FullIncluder(ConfigIncluder, ConfigIncluderFile, ConfigIncluderURL,
                   ConfigIncluderClasspath):
    """Marker type combining every includer interface (plain, file, URL and
    classpath); all behaviour comes from the mixed-in base classes."""
    pass
|
{
"content_hash": "fd6325b73d150b5d47e984c3bc876cb2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 73,
"avg_line_length": 31.857142857142858,
"alnum_prop": 0.7847533632286996,
"repo_name": "chris-martin/hocon-python",
"id": "94365eb11f73e802a5320652a5c7e0b3e226c311",
"size": "223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hocon/impl/FullIncluder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "313323"
},
{
"name": "Python",
"bytes": "151883"
},
{
"name": "Scala",
"bytes": "301214"
}
],
"symlink_target": ""
}
|
import argparse
import logging
import textwrap
from tripleo_common.cmd.utils import _clients as clients
from tripleo_common.cmd.utils import environment
from tripleo_common import updates
def parse_args():
    """Build and parse the command line options for the stack-update tool."""
    desc = textwrap.dedent("""
    Run stack update.
    """)
    arg_parser = argparse.ArgumentParser(
        description=desc,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    arg_parser.add_argument('-s', '--stack', dest='stack', required=True,
                            help='Name or ID of a stack to update')
    arg_parser.add_argument('-c', '--continue', dest='continue_in_update',
                            action='store_true',
                            help='Name or ID of a stack with update in progress')
    arg_parser.add_argument('-n', '--name', dest='update_name',
                            help='Name for the update stack')
    arg_parser.add_argument('-i', '--interactive', dest='interactive',
                            action='store_true',
                            help='Run update process in interactive mode')
    # Shared logging flags (-d/-v style) provided by the common helpers.
    environment._add_logging_arguments(arg_parser)
    return arg_parser.parse_args()
def main():
    """Run (or continue) a Heat stack update.

    Returns:
        0 on success, 1 on any unexpected error (which is logged).
    """
    args = parse_args()
    environment._configure_logging(args)
    try:
        environment._ensure()
        # NOTE: a redundant extra ``clients.get_heat_client()`` call that was
        # assigned to an unused local has been removed; UpdateManager gets
        # its own client below.
        update = updates.UpdateManager(
            heatclient=clients.get_heat_client(),
            tuskarclient=clients.get_tuskar_client(),
            stack_id=args.stack)
        if not args.continue_in_update:
            update.start()
        if args.interactive:
            update.do_interactive_update()
        else:
            # BUG FIX: the format string has two placeholders but only one
            # positional argument was supplied, raising IndexError at runtime.
            # Unpack the result instead.
            # NOTE(review): assumes get_status() returns a 2-tuple
            # (status, reason) — confirm against UpdateManager.
            print("status: {0} ({1})".format(*update.get_status()))
    except Exception:
        logging.exception("Unexpected error during command execution")
        return 1
    return 0
|
{
"content_hash": "6f5a63dac87e2b17ab86ba02de17d3ef",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 34.075471698113205,
"alnum_prop": 0.6079734219269103,
"repo_name": "jprovaznik/tripleo-common",
"id": "a8139ffcff4ab830ecf7cba4637b1477f983feaf",
"size": "2407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tripleo_common/cmd/stack_update.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35199"
}
],
"symlink_target": ""
}
|
import os
from fontbakery.callable import check
from fontbakery.checkrunner import ERROR, FAIL, INFO, PASS, WARN, Section
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
from .shared_conditions import is_variable_font
profile_imports = ['.shared_conditions']
profile = profile_factory(default_section=Section("Checks inherited from Microsoft Font Validator"))
@check(
  id = 'com.google.fonts/check/fontvalidator'
)
def com_google_fonts_check_fontvalidator(font):
  """Checking with Microsoft Font Validator."""
  # NOTE: the one-line docstring above doubles as the user-visible check
  # title in fontbakery reports, so it is deliberately kept short.
  # In some cases we want to override the severity level of
  # certain checks in FontValidator:
  downgrade_to_warn = [
    # There are reports that this fontval check has an out-of-date
    # understanding of valid bits in fsSelection.
    # More info at:
    # https://github.com/googlei18n/fontmake/issues/414#issuecomment-379408127
    "There are undefined bits set in fsSelection field",
    # FIX-ME: Why did we downgrade this one to WARN?
    "Misoriented contour"
  ]
  # Some other checks we want to completely disable:
  disabled_fval_checks = [
    # FontVal E4012 thinks that
    # "Versions 0x00010000 and 0x0001002 are currently
    # the only defined versions of the GDEF table."
    # but the GDEF chapter of the OpenType specification at
    # https://docs.microsoft.com/en-us/typography/opentype/spec/gdef
    # describes GDEF header version 1.3, which is not yet recognized
    # by FontVal, thus resulting in this spurious false-FAIL:
    "The version number is neither 0x00010000 nor 0x0001002",
    # These messages below are simply fontval giving user feedback
    # on the progress of running it. It has nothing to do with
    # actual issues on the font files:
    "Validating glyph with index",
    "Table Test:",
    # No software is affected by Mac strings nowadays.
    # More info at: googlei18n/fontmake#414
    "The table doesn't contain strings for Mac platform",
    "The PostScript string is not present for both required platforms",
    # Font Bakery has got a native check for the xAvgCharWidth field
    # which is: com.google.fonts/check/xavgcharwidth
    "The xAvgCharWidth field does not equal the calculated value",
    # The optimal ordering suggested by FVal check W0020 seems to only be
    # relevant to performance optimizations on old versions of Windows
    # running on old hardware. Since such performance considerations
    # are most likely negligible, we're not going to bother users with
    # this check's table ordering requirements.
    # More info at:
    # https://github.com/googlefonts/fontbakery/issues/2105
    "Tables are not in optimal order",
    # Font Bakery has its own check for required/optional tables:
    # com.google.fonts/check/required_tables
    "Recommended table is missing"
  ]
  # There are also some checks that do not make
  # sense when we're dealing with variable fonts:
  VARFONT_disabled_fval_checks = [
    # Variable fonts typically do have lots of self-intersecting
    # contours because they are used to draw each portion
    # of variable glyph features.
    "Intersecting contours",
    "Intersecting components of composite glyph",
    # DeltaFormat = 32768 (same as 0x8000) means VARIATION_INDEX,
    # according to https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2
    # The FontVal problem description for this check (E5200) only mentions
    # the other values as possible valid ones. So apparently this means FontVal
    # implementation is not up-to-date with more recent versions of the OpenType spec
    # and that's why these spurious FAILs are being emitted.
    # That's good enough reason to mute it.
    # More info at:
    # https://github.com/googlefonts/fontbakery/issues/2109
    "The device table's DeltaFormat value is invalid"
  ]
  from fontTools.ttLib import TTFont
  if is_variable_font(TTFont(font)):
    disabled_fval_checks.extend(VARFONT_disabled_fval_checks)
  try:
    import subprocess
    # FontValidator is an external (Mono/.NET) binary; it writes an XML
    # report next to the font file ("-report-in-font-dir").
    fval_cmd = [
      "FontValidator", "-file", font, "-all-tables",
      "-report-in-font-dir", "-no-raster-tests"
    ]
    subprocess.check_output(fval_cmd, stderr=subprocess.STDOUT)
  except subprocess.CalledProcessError as e:
    # A non-zero exit code still produces a usable report; surface the
    # process output (minus known-noise lines) as an INFO message.
    filtered_msgs = ""
    for line in e.output.decode().split("\n"):
      disable_it = False
      for substring in disabled_fval_checks:
        if substring in line:
          disable_it = True
      if not disable_it:
        filtered_msgs += line + "\n"
    yield INFO, ("Microsoft Font Validator returned an error code."
                 " Output follows :\n\n{}\n").format(filtered_msgs)
  except (OSError, IOError) as error:
    yield ERROR, ("Mono runtime and/or "
                  "Microsoft Font Validator are not available!")
    raise error
  def report_message(msg, details):
    # Format one grouped FontValidator report entry; long detail lists
    # are truncated to keep the output readable.
    if details:
      if isinstance(details, list) and len(details) > 1:
        # We'll print lists with one item per line for
        # improved readability.
        if None in details:
          details.remove(None)
        # A designer will likely not need the full list
        # in order to fix a problem.
        # Showing only the 10 first ones is more than enough
        # and helps avoid flooding the report.
        if len(details) > 25:
          num_similar = len(details) - 10
          details = details[:10]
          details.append(f"NOTE: {num_similar} other similar"
                          " results were hidden!")
        details = '\n\t- ' + '\n\t- '.join(details)
      return f"MS-FonVal: {msg} DETAILS: {details}"
    else:
      return f"MS-FonVal: {msg}"
  xml_report_file = f"{font}.report.xml"
  html_report_file = f"{font}.report.html"
  fval_file = os.path.join(os.path.dirname(font), 'fval.xsl')
  grouped_msgs = {}
  with open(xml_report_file, "rb") as xml_report:
    from lxml import etree
    doc = etree.fromstring(xml_report.read())
    for report in doc.iterfind('.//Report'):
      msg = report.get("Message")
      details = report.get("Details")
      disable_it = False
      for substring in disabled_fval_checks:
        if substring in msg:
          disable_it = True
      if disable_it:
        continue
      if msg not in grouped_msgs:
        grouped_msgs[msg] = {"errortype": report.get("ErrorType"),
                             "details": [details]}
      else:
        if details not in grouped_msgs[msg]["details"]:
          # avoid cluttering the output with tons of identical reports
          # yield INFO, 'grouped_msgs[msg]["details"]: {}'.format(grouped_msgs[msg]["details"])
          grouped_msgs[msg]["details"].append(details)
  # ---------------------------
  # Clean-up generated files...
  os.remove(xml_report_file)
  # FontVal internal detail: HTML report generated only on non-Windows due to
  # Mono or the used HTML renderer not being able to render XML with a
  # stylesheet directly. https://github.com/googlefonts/fontbakery/issues/1747
  if os.path.exists(html_report_file):
    os.remove(html_report_file)
  os.remove(fval_file)
  # ---------------------------
  # Here we start emitting the grouped log messages
  for msg, data in grouped_msgs.items():
    # But before printing we try to make the "details" more
    # readable. Otherwise the user would get the text terminal
    # flooded with messy data.
    # No need to print it as a list if we really only
    # got one log message of this kind:
    if len(data["details"]) == 1:
      data["details"] = data["details"][0]
    # Simplify the list of glyph indices by only displaying
    # their numerical values in a list:
    for glyph_index in ["Glyph index ", "glyph# "]:
      if data["details"] and \
         data["details"][0] and \
         glyph_index in data["details"][0]:
        try:
          data["details"] = {'Glyph index': [int(x.split(glyph_index)[1])
                                             for x in data["details"]]}
          break
        except ValueError:
          pass
    # And, finally, the log messages are emitted, mapping FontValidator's
    # ErrorType codes (P/E/W) onto fontbakery statuses:
    if data["errortype"] == "P":
      yield PASS, report_message(msg, data["details"])
    elif data["errortype"] == "E":
      status = FAIL
      for substring in downgrade_to_warn:
        if substring in msg:
          status = WARN
      yield status, report_message(msg, data["details"])
    elif data["errortype"] == "W":
      yield WARN, report_message(msg, data["details"])
    else:
      yield INFO, report_message(msg, data["details"])
profile.auto_register(globals())
|
{
"content_hash": "25ef3889372a9feed1088cbef107a517",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 105,
"avg_line_length": 42.93636363636364,
"alnum_prop": 0.6035358882066483,
"repo_name": "graphicore/fontbakery",
"id": "094f6886187fc69523c6b16c80345aea6179036e",
"size": "9446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/fontbakery/profiles/fontval.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6620"
},
{
"name": "PureBasic",
"bytes": "22329"
},
{
"name": "Python",
"bytes": "1016333"
},
{
"name": "Shell",
"bytes": "2457"
}
],
"symlink_target": ""
}
|
# Locate every black stone on a Go board image via SimpleCV template
# matching (Python 2 script; each .show() is held for 3 seconds).
from SimpleCV import Image
import time
# Get the template and image
goBoard = Image('go.png')
black = Image('go-black.png')
# Preview the template and then the board before matching.
black.show()
time.sleep(3)
goBoard.show()
time.sleep(3)
# Find the matches and draw them
matches = goBoard.findTemplate(black)
matches.draw()
# Show the board with matches and print the match count
goBoard.show()
print str(len(matches)) + " matches found."
# Should output: 9 matches found.
time.sleep(3)
|
{
"content_hash": "f607d169aba49ddfc61d7b12c4918610",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 46,
"avg_line_length": 18.52173913043478,
"alnum_prop": 0.7323943661971831,
"repo_name": "vizcacha/practicalcv",
"id": "eb49a22c12fe775d96e2d8f25bfbb4bcff74782b",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter_10/find-black-pieces.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14992"
}
],
"symlink_target": ""
}
|
# setup.py for the "sga" package: builds a SWIG-wrapped C extension
# (linked against GSL) alongside the pure-python toolbox, and registers
# two console scripts.
import numpy
from setuptools import setup, find_packages, Extension
with open('README.rst') as f:
    readme = f.read()
with open('LICENSE') as f:
    lic = f.read()
with open('requirements.txt') as f:
    required = f.read().splitlines()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = numpy.get_include()
except AttributeError:
    # Very old numpy releases only provided get_numpy_include().
    numpy_include = numpy.get_numpy_include()
# SWIG interface file first, then the C sources it wraps.
correlation_module = Extension('sga.toolbox._c_impl',
                               sources=['sga/toolbox/src/c_impl.i', 'sga/toolbox/src/correlation.c', 'sga/toolbox/src/table_norm.c', 'sga/toolbox/src/safe.c'],
                               include_dirs = [numpy_include],
                               swig_opts=['-threads', '-modern', '-outdir', 'sga/toolbox/'],
                               libraries = ['gsl', 'gslcblas','m'],
                               extra_compile_args = ["-O3"],
                               )
console_scripts = [
    'sga-similarity=sga.similarity:main',
    'sga-safe=sga.safe:main'
]
setup(
    name='sga',
    version='0.1.0',
    description='SGA Utilities',
    install_requires=required,
    long_description=readme,
    author='Matej Usaj',
    author_email='usaj.m@utoronto.ca',
    url='https://github.com/usajusaj/sga_utils',
    license=lic,
    ext_modules = [correlation_module],
    packages=find_packages(exclude=('tests', 'docs')),
    entry_points = {
        'console_scripts': console_scripts,
    }
)
|
{
"content_hash": "11163f718075d58318a8768acb36073f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 136,
"avg_line_length": 27.22,
"alnum_prop": 0.6443791329904482,
"repo_name": "usajusaj/sga_utils",
"id": "4bc330d4df3b7b9d978109c04cdece62973fc1c8",
"size": "1386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "119163"
},
{
"name": "Makefile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "34218"
}
],
"symlink_target": ""
}
|
class MerkleTree:
    """Minimal Merkle tree wrapper exposing its root node.

    BUG FIX: the original class never initialised ``_root``, so reading
    ``root`` on a fresh instance raised AttributeError.  The constructor
    now accepts an optional root node (default None), which is
    backward-compatible with ``MerkleTree()``.
    """
    def __init__(self, root=None):
        # Root MerkleNode of the tree; None for an empty tree.
        self._root = root
    @property
    def root(self):
        """The root node of the tree, or None when the tree is empty."""
        return self._root
class MerkleNode:
    """Simple node structure for the MerkleTree."""
    def __init__(self):
        # Children, offset and hash start empty and are filled in externally.
        self._children = []
        self._offset = None
        self._hash = None
    @property
    def offset(self):
        """Offset associated with this node (None until assigned)."""
        return self._offset
    @property
    def hash(self):
        """Hash value of this node (None until computed)."""
        return self._hash
    @property
    def children(self):
        """List of child nodes; empty for a leaf."""
        return self._children
    @property
    def leaf(self):
        """True when this node has no children."""
        return not self._children
|
{
"content_hash": "dd4c32c7146f228837613bb944686018",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 51,
"avg_line_length": 17.566666666666666,
"alnum_prop": 0.5673624288425048,
"repo_name": "jarretraim/pymerkle",
"id": "856cb5566739883f18bd1fe58acaafeaa225124d",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymerkle/merkle_tree.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4734"
}
],
"symlink_target": ""
}
|
import cPickle as pickle
import csv
import glob
import os
import time
import numpy as np
import theano
import theano.tensor as T
import data_loader
import postprocess
import utils
import paths
METADATAS_LOCATION = 'metadatas.pkl'
RELOAD_METADATAS = False
DO_XVAL = True
PREDICTIONS_PATH = paths.INTERMEDIATE_PREDICTIONS_PATH
SUBMISSION_PATH = paths.SUBMISSION_PATH
PRINT_EVERY = 1000 # Print itermediate results during training
LEARNING_RATE = 10 # Learning rate of gradient descend
NR_EPOCHS = 100
C = 0.0001 # L1 regularisation parameters
SELECT_TOP = 10 # Nr of models to select
def _get_all_prediction_files():
    """Return the prediction files under PREDICTIONS_PATH that are whitelisted.

    NOTE(review): ``MODELS_TO_USE`` is not defined anywhere in this module
    chunk — presumably a module-level whitelist of full file paths; verify
    it exists before this is called.
    """
    all_files = glob.glob(PREDICTIONS_PATH + '*')
    res = []
    for f in all_files:
        if f in MODELS_TO_USE:
            res.append(f)
    return res
def _construct_all_patient_ids():
    """Group every known patient id by its data set, sorted ascending
    within each set (keys come from data_loader.patient_folders)."""
    ids_by_set = {folder: [] for folder in data_loader.patient_folders}
    for patient_id in data_loader.id_to_index_map:
        patient_set, _ = data_loader.id_to_index_map[patient_id]
        ids_by_set[patient_set].append(patient_id)
    for id_list in ids_by_set.values():
        id_list.sort()
    return ids_by_set
all_patient_ids = _construct_all_patient_ids()
def load_prediction_file(path):
    """Load a pickled prediction metadata file via numpy.

    Returns the loaded object, or False when the file is missing or
    unreadable (callers test the result for truthiness).
    """
    try:
        return np.load(path)
    except Exception:
        # BUG FIX: the original bare ``except:`` also swallowed
        # SystemExit/KeyboardInterrupt; only real errors should be
        # treated as "file not loadable".
        return False
def _is_empty_prediction(patient_prediction):
return (
len(patient_prediction['systole']) == 0
and len(patient_prediction['diastole']) == 0
and 'patient' in patient_prediction)
not_predicted_sets = {}
def _register_model(pats_not_predicted):
    # Count, across all models, how often each exact set of unpredicted
    # validation+test patients occurs (module-global histogram keyed by a
    # sorted tuple of patient ids).
    not_predicted = tuple(sorted(pats_not_predicted['validation'] + pats_not_predicted['test']))
    not_predicted_sets[not_predicted] = not_predicted_sets.get(not_predicted, 0) + 1
def _compute_pats_not_predicted(metadata):
    """Split patient ids into (not predicted, predicted), each keyed by
    data set ('train'/'validation'/'test')."""
    missing = {folder: [] for folder in data_loader.patient_folders}
    present = {folder: [] for folder in data_loader.patient_folders}
    for prediction in metadata['predictions']:
        pid = prediction['patient']
        pset = data_loader.id_to_index_map[pid][0]
        bucket = missing if _is_empty_prediction(prediction) else present
        bucket[pset].append(pid)
    return missing, present
## AVERAGING
def geomav(x):
    """Geometric average of a list of CDFs; an empty list yields a zero CDF."""
    if len(x) == 0:
        return np.zeros(600)
    # Average in PDF space, then integrate back to a CDF.
    pdfs = utils.cdf_to_pdf(x)
    averaged_pdf = utils.norm_geometric_average(pdfs)
    return np.cumsum(averaged_pdf)
def normalav(x):
    """Arithmetic mean of a list of CDFs; an empty list yields a zero CDF."""
    if len(x) == 0:
        return np.zeros(600)
    return np.asarray(x).mean(axis=0)
AVERAGING_METHODS = (normalav, geomav)
def _validate_metadata(metadata, pats_predicted):
    """Compute validation score for given patients.

    Returns the mean CRPS over the systole and diastole averaged CDFs of
    every patient id in *pats_predicted*; predictions and labels are both
    indexed by ``pid - 1``.
    """
    errors = []
    for pid in pats_predicted:
        prediction_pid = metadata['predictions'][pid - 1]
        assert prediction_pid['patient'] == pid
        error_sys = utils.CRSP(prediction_pid["systole_average"], data_loader.regular_labels[pid - 1, 1])
        error_dia = utils.CRSP(prediction_pid["diastole_average"], data_loader.regular_labels[pid - 1, 2])
        errors += [error_sys, error_dia]
    return np.mean(errors)
def _compute_tta(metadata, averaging_method):
for prediction_pid in metadata['predictions']:
pid = prediction_pid['patient']
prediction_pid["systole_average"] = averaging_method(prediction_pid["systole"])
prediction_pid["diastole_average"] = averaging_method(prediction_pid["diastole"])
def _compute_nr_ttas(metadata, pats_predicted):
    # Number of TTA samples, read from the first predicted patient.
    # NOTE(review): other helpers index predictions with ``pid - 1`` (see
    # _validate_metadata); indexing with the raw pid here looks like an
    # off-by-one — confirm against the predictions ordering before fixing.
    return len(metadata['predictions'][pats_predicted[0]]["systole"])
def _make_valid_distributions(metadata):
    # Force each averaged CDF to be a monotone distribution and assert
    # validity afterwards (mutates metadata in place).
    for prediction in metadata['predictions']:
        prediction["systole_average"] = postprocess.make_monotone_distribution(prediction["systole_average"])
        postprocess.test_if_valid_distribution(prediction["systole_average"])
        prediction["diastole_average"] = postprocess.make_monotone_distribution(prediction["diastole_average"])
        postprocess.test_if_valid_distribution(prediction["diastole_average"])
def _add_tta_score_to_metadata(metadata, tta_score):
metadata['tta_score'] = tta_score
def _compute_best_tta(metadata, pats_predicted):
    """Try each AVERAGING_METHODS entry, keep the one with the lowest
    validation CRPS, recompute the averages with it and store the final
    score on the metadata."""
    print " Using %d patients, averaging over %d TTAs" % (
        len(pats_predicted), _compute_nr_ttas(metadata, pats_predicted))
    best_score = 1
    best_method = None
    for averaging_method in AVERAGING_METHODS:
        _compute_tta(metadata, averaging_method)
        err = _validate_metadata(metadata, pats_predicted)
        print " - %s: %.5f" % (averaging_method.func_name, err)
        if err < best_score:
            best_score, best_method = err, averaging_method
    # Recompute with the winner: the last loop iteration may not have
    # been the best method.
    _compute_tta(metadata, best_method)
    _make_valid_distributions(metadata)
    tta_score = _validate_metadata(metadata, pats_predicted)
    print " Choosing %s (%.5f)" % (best_method.func_name, tta_score)
    _add_tta_score_to_metadata(metadata, tta_score)
def _remove_ttas(metadata, tags_to_remove=('systole', 'diastole')):
    """Drop the raw per-TTA curves from every prediction, keeping only the
    averages, and record whether each prediction was empty beforehand."""
    for prediction in metadata['predictions']:
        # Must run before deletion: emptiness is defined on the raw tags.
        prediction['is_empty'] = _is_empty_prediction(prediction)
        for tag in tags_to_remove:
            prediction.pop(tag, None)
def _process_prediction_file(path):
    """Load one model's prediction file, pick its best TTA averaging and
    return the cleaned-up metadata (or None when the file is unusable)."""
    metadata = load_prediction_file(path)
    if not metadata:
        print " Couldn't load file"
        return
    # Compute for which patients there are no predictions
    pats_not_predicted, pats_predicted = _compute_pats_not_predicted(metadata)
    _register_model(pats_not_predicted)
    nr_val_predicted = len(pats_predicted['validation'])
    nr_test_predicted = len(pats_predicted['test'])
    print " val: %3d/%3d, test: %3d/%3d" % (
        nr_val_predicted, data_loader.NUM_VALID_PATIENTS,
        nr_test_predicted, data_loader.NUM_TEST_PATIENTS,)
    if nr_val_predicted == 0 or nr_test_predicted == 0:
        print " Skipping this model, not enough predictions."
        return
    # Compute best way of averaging
    print " Trying out different ways of doing TTA:"
    _compute_best_tta(metadata, pats_predicted['validation'])
    # Clean up the metadata file and return it
    _remove_ttas(metadata)
    return metadata
def _load_and_process_metadata_files():
    """Process every whitelisted prediction file and return the metadata
    of those that loaded and had enough predictions (best TTA applied)."""
    all_prediction_files = sorted(_get_all_prediction_files())[:]
    nr_prediction_files = len(all_prediction_files)
    print "Using the following files:"
    print
    print "\n".join(map(os.path.basename, all_prediction_files))
    useful_files = []
    for idx, path in enumerate(all_prediction_files):
        print
        print 'Processing %s (%d/%d)' % (os.path.basename(path), idx + 1, nr_prediction_files)
        m = _process_prediction_file(path)
        if m:
            useful_files.append(m)
    print
    print "Loaded %d files" % len(useful_files)
    return useful_files
## ENSEMBLING
def _create_prediction_matrix(metadatas):
    """Stack every model's averaged CDFs into dense arrays.

    Returns (mask, systole, diastole): mask is (nr_models, nr_patients)
    with 1 where a model actually predicted that patient; the prediction
    arrays are (nr_models, nr_patients, 600).  Predictions must be ordered
    by patient id (asserted below).
    """
    nr_models = len(metadatas)
    nr_patients = data_loader.NUM_PATIENTS
    res_mask = np.zeros((nr_models, nr_patients))
    res_sys = np.zeros((nr_models, nr_patients, 600))
    res_dia = np.zeros((nr_models, nr_patients, 600))
    for i, metadata in enumerate(metadatas):
        for j, prediction in enumerate(metadata['predictions']):
            res_mask[i, j] = not prediction["is_empty"]
            res_sys[i, j] = prediction["systole_average"]
            res_dia[i, j] = prediction["diastole_average"]
            assert prediction["patient"] == j + 1
    return res_mask, res_sys, res_dia
def _create_label_matrix():
    # Ground-truth CDFs (cumulative one-hot of the labelled volume) built
    # from label columns 1 (systole) and 2 (diastole).
    systole_valid_labels = np.array(
        [utils.cumulative_one_hot(v) for v in data_loader.regular_labels[:, 1].flatten()])
    diastole_valid_labels = np.array(
        [utils.cumulative_one_hot(v) for v in data_loader.regular_labels[:, 2].flatten()])
    return systole_valid_labels, diastole_valid_labels
def _get_train_val_test_ids():
    # Boolean masks over patients (ordered by id, length NUM_PATIENTS)
    # selecting the train / validation / test subsets respectively.
    sets = [data_loader.id_to_index_map[pid][0] for pid in range(1, data_loader.NUM_PATIENTS + 1)]
    return np.array(sets) == "train", np.array(sets) == "validation", np.array(sets) == "test"
def _find_weights(w_init, preds_matrix, targets_matrix, mask_matrix):
    """Fit ensemble weights by gradient descent (theano).

    Minimises the mean squared error between the mask-weighted average of
    per-model predictions and the targets, plus an L1 penalty (C) on the
    weights.  After every step the weights are clipped to [0, 100] and
    renormalised to sum to one.  Returns the final weights as a numpy array.
    """
    nr_models = len(w_init)
    nr_patients = mask_matrix.shape[1]
    learning_rate = LEARNING_RATE
    nr_epochs = NR_EPOCHS
    print " Compiling function"
    # Create theano expression
    # inputs:
    weights = theano.shared(w_init.astype('float32'))
    preds = theano.shared(preds_matrix.astype('float32'))
    targets = theano.shared(targets_matrix.astype('float32'))
    mask = theano.shared(mask_matrix.astype('float32'))
    # expression
    masked_weights = mask * weights.dimshuffle(0, 'x')
    tot_masked_weights = masked_weights.sum(axis=0)
    preds_weighted_masked = preds * masked_weights.dimshuffle(0, 1, 'x')
    av_preds = preds_weighted_masked.sum(axis=0) / tot_masked_weights.dimshuffle(0, 'x')
    # loss
    l1_loss = weights.sum()
    loss = ((av_preds - targets) ** 2).mean() + C * l1_loss
    # Update function (keeping the weights normalised)
    grad_weights = theano.grad(loss, weights)
    updated_weights = T.clip(weights - learning_rate * grad_weights, 0, 100)  # Don't allow weights smaller than 0
    updated_weights_normalised = updated_weights / updated_weights.sum()  # Renormalise
    updates = {weights: updated_weights_normalised}
    iter_train = theano.function([], loss, updates=updates)
    # Do training
    print " Training"
    for iteration in xrange(nr_epochs):
        train_err = iter_train()
        w_value = weights.eval()
        if (iteration + 1) % PRINT_EVERY == 0:
            print iteration
            print " train_error: %.4f, weights: %s" % (train_err, str((w_value * 1000).astype('int32')))
    return np.array(weights.eval())  # Convert cudaNArray to numpy if necessary
def _compute_predictions_ensemble(weights, preds, mask):
masked_weights = mask * weights[:, np.newaxis]
tot_masked_weights = masked_weights.sum(axis=0)
preds_weighted_masked = preds * masked_weights[:, :, np.newaxis]
av_preds = preds_weighted_masked.sum(axis=0) / tot_masked_weights[:, np.newaxis]
return av_preds
def _eval_weights(weights, preds, targets, mask):
    """Mean squared error of the weighted ensemble prediction vs targets."""
    ensemble_preds = _compute_predictions_ensemble(weights, preds, mask)
    squared_error = (ensemble_preds - targets) ** 2
    return squared_error.mean()
def _create_ensembles(metadatas):
    """Fit ensemble weights on the validation set and write a submission.

    Optionally estimates leaderboard scores with leave-one-patient-out
    cross-validation (DO_XVAL), fits weights on the full validation set,
    keeps only the SELECT_TOP best models, refits, and dumps the test-set
    predictions as a Kaggle submission CSV.
    """
    # aggregate predictions and targets
    mask, preds_sys, preds_dia = _create_prediction_matrix(metadatas)
    targets_sys, targets_dia = _create_label_matrix()
    print mask.mean()
    # initialise weights
    nr_models = len(metadatas)
    w_init = np.ones((nr_models,), dtype='float32') / nr_models
    # split data
    _, val_idx, test_idx = _get_train_val_test_ids()
    mask_val, mask_test = mask[:, val_idx], mask[:, test_idx]
    targets_sys_val = targets_sys[val_idx[:len(targets_sys)], :]
    targets_dia_val = targets_dia[val_idx[:len(targets_dia)], :]
    preds_sys_val, preds_sys_test = preds_sys[:, val_idx, :], preds_sys[:, test_idx, :]
    preds_dia_val, preds_dia_test = preds_dia[:, val_idx, :], preds_dia[:, test_idx, :]
    ## CREATE SYSTOLE AND DIASTOLE PREDICTION USING LOOXVAL
    if DO_XVAL:
        print "Making systole ensemble"
        print " Doing leave one patient out xval"
        nr_val_patients = len(targets_sys_val)
        train_errs_sys = []
        val_errs_sys = []
        w_iters_sys = []
        for val_pid in xrange(nr_val_patients):
            print " - fold %d" % val_pid
            # Fit on every validation patient except val_pid ...
            w_iter = _find_weights(
                w_init,
                np.hstack((preds_sys_val[:, :val_pid], preds_sys_val[:, val_pid + 1:])),
                np.vstack((targets_sys_val[:val_pid], targets_sys_val[val_pid + 1:])),
                np.hstack((mask_val[:, :val_pid], mask_val[:, val_pid + 1:])),
            )
            train_err = _eval_weights(
                w_iter,
                np.hstack((preds_sys_val[:, :val_pid], preds_sys_val[:, val_pid + 1:])),
                np.vstack((targets_sys_val[:val_pid], targets_sys_val[val_pid + 1:])),
                np.hstack((mask_val[:, :val_pid], mask_val[:, val_pid + 1:])),
            )
            # ... and evaluate on the held-out patient.
            val_err = _eval_weights(w_iter,
                preds_sys_val[:, val_pid:val_pid + 1],
                targets_sys_val[val_pid:val_pid + 1],
                mask_val[:, val_pid:val_pid + 1],
            )
            print " train_err: %.4f, val_err: %.4f" % (train_err, val_err)
            train_errs_sys.append(train_err)
            val_errs_sys.append(val_err)
            w_iters_sys.append(w_iter)
        expected_systole_loss = np.mean(val_errs_sys)
        print " average train err: %.4f" % np.mean(train_errs_sys)
        print " average valid err: %.4f" % np.mean(val_errs_sys)
        print "Making diastole ensemble"
        print " Doing leave one patient out xval"
        nr_val_patients = len(targets_dia_val)
        train_errs_dia = []
        val_errs_dia = []
        w_iters_dia = []
        for val_pid in xrange(nr_val_patients):
            print " - fold %d" % val_pid
            w_iter = _find_weights(
                w_init,
                np.hstack((preds_dia_val[:, :val_pid], preds_dia_val[:, val_pid + 1:])),
                np.vstack((targets_dia_val[:val_pid], targets_dia_val[val_pid + 1:])),
                np.hstack((mask_val[:, :val_pid], mask_val[:, val_pid + 1:])),
            )
            train_err = _eval_weights(
                w_iter,
                np.hstack((preds_dia_val[:, :val_pid], preds_dia_val[:, val_pid + 1:])),
                np.vstack((targets_dia_val[:val_pid], targets_dia_val[val_pid + 1:])),
                np.hstack((mask_val[:, :val_pid], mask_val[:, val_pid + 1:])),
            )
            val_err = _eval_weights(w_iter,
                preds_dia_val[:, val_pid:val_pid + 1],
                targets_dia_val[val_pid:val_pid + 1],
                mask_val[:, val_pid:val_pid + 1],
            )
            print " train_err: %.4f, val_err: %.4f" % (train_err, val_err)
            train_errs_dia.append(train_err)
            val_errs_dia.append(val_err)
            w_iters_dia.append(w_iter)
        expected_diastole_loss = np.mean(val_errs_dia)
        print " average train err: %.4f" % np.mean(train_errs_dia)
        print " average valid err: %.4f" % np.mean(val_errs_dia)
    ## MAKE ENSEMBLE USING THE FULL VALIDATION SET
    print "Fitting weights on the entire validation set"
    w_sys = _find_weights(w_init, preds_sys_val, targets_sys_val, mask_val)
    w_dia = _find_weights(w_init, preds_dia_val, targets_dia_val, mask_val)
    # Print the result
    print
    print "dia sys "
    sort_key = lambda x: - x[1] - x[2]
    for metadata, weight_sys, weight_dia in sorted(zip(metadatas, w_sys, w_dia), key=sort_key):
        print "%4.1f %4.1f : %s" % (weight_sys * 100, weight_dia * 100, metadata["configuration_file"])
    ## SELECT THE TOP MODELS AND RETRAIN ONCE MORE
    print
    print "Selecting the top %d models and retraining" % SELECT_TOP
    # Rank models by combined systole+diastole weight and keep the best.
    sorted_models, sorted_w_sys, sorted_w_dia = zip(*sorted(zip(metadatas, w_sys, w_dia), key=sort_key))
    top_models = sorted_models[:SELECT_TOP]
    w_init_sys_top = np.array(sorted_w_sys[:SELECT_TOP]) / np.array(sorted_w_sys[:SELECT_TOP]).sum()
    w_init_dia_top = np.array(sorted_w_dia[:SELECT_TOP]) / np.array(sorted_w_dia[:SELECT_TOP]).sum()
    mask_top, preds_sys_top, preds_dia_top = _create_prediction_matrix(top_models)
    mask_top_val, mask_top_test = mask_top[:, val_idx], mask_top[:, test_idx]
    preds_sys_top_val, preds_sys_top_test = preds_sys_top[:, val_idx, :], preds_sys_top[:, test_idx, :]
    preds_dia_top_val, preds_dia_top_test = preds_dia_top[:, val_idx, :], preds_dia_top[:, test_idx, :]
    w_sys_top = _find_weights(w_init_sys_top, preds_sys_top_val, targets_sys_val, mask_top_val)
    w_dia_top = _find_weights(w_init_dia_top, preds_dia_top_val, targets_dia_val, mask_top_val)
    sys_err = _eval_weights(w_sys_top, preds_sys_top_val, targets_sys_val, mask_top_val)
    dia_err = _eval_weights(w_dia_top, preds_dia_top_val, targets_dia_val, mask_top_val)
    print
    print "dia sys "
    sort_key = lambda x: - x[1] - x[2]
    for metadata, weight_sys, weight_dia in zip(top_models, w_sys_top, w_dia_top):
        print "%4.1f %4.1f : %s" % (weight_sys * 100, weight_dia * 100, metadata["configuration_file"])
    print "Final scores on the validation set:"
    print " systole: %.4f" % sys_err
    print " diastole: %.4f" % dia_err
    print " average: %.4f" % ((sys_err + dia_err) / 2.0)
    if DO_XVAL:
        print "Expected leaderboard scores:"
        print " systole: %.4f" % expected_systole_loss
        print " diastole: %.4f" % expected_diastole_loss
        print " average: %.4f" % ((expected_systole_loss + expected_diastole_loss) / 2.0)
    ## COMPUTE TEST PREDICTIONS
    test_ids = np.where(test_idx)[0] + 1
    preds_sys = _compute_predictions_ensemble(w_sys_top, preds_sys_top_test, mask_top_test)
    preds_dia = _compute_predictions_ensemble(w_dia_top, preds_dia_top_test, mask_top_test)
    submission_path = SUBMISSION_PATH + "final_submission-%s.csv" % time.time()
    print "dumping submission file to %s" % submission_path
    with open(submission_path, 'w') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        csvwriter.writerow(['Id'] + ['P%d' % i for i in xrange(600)])
        for pid, pat_pred_sys, pat_pred_dia in zip(test_ids, preds_sys, preds_dia):
            pat_pred_sys = postprocess.make_monotone_distribution(pat_pred_sys)
            pat_pred_dia = postprocess.make_monotone_distribution(pat_pred_dia)
            csvwriter.writerow(["%d_Diastole" % pid] + ["%.18f" % p for p in pat_pred_dia.flatten()])
            csvwriter.writerow(["%d_Systole" % pid] + ["%.18f" % p for p in pat_pred_sys.flatten()])
    print "submission file dumped"
def dump_metadatas(metadatas):
    # Persist the list of model-metadata dicts to METADATAS_LOCATION so a
    # later run can reload it (see load_metadatas) instead of re-parsing
    # every prediction file. Python 2 code: file opened in text mode 'w'.
    with open(METADATAS_LOCATION, 'w') as f:
        pickle.dump(metadatas, f, pickle.HIGHEST_PROTOCOL)
    print "metadatas file dumped"
def load_metadatas():
    # Reload the cached metadata previously written by dump_metadatas.
    # NOTE(review): dump_metadatas writes with pickle.dump but this reads
    # with np.load -- presumably relying on numpy falling back to
    # unpickling for non-.npy files; confirm the formats round-trip.
    metadatas = np.load(METADATAS_LOCATION)
    print "Loaded metadatas file"
    return metadatas
def main():
    """Build the prediction ensembles from model metadata.

    When RELOAD_METADATAS is set, reuse the cached metadata file;
    otherwise parse the metadata files from scratch and cache the result
    for subsequent runs.
    """
    if RELOAD_METADATAS:
        _create_ensembles(load_metadatas())
        return
    metadatas = _load_and_process_metadata_files()
    dump_metadatas(metadatas)
    _create_ensembles(metadatas)
if __name__ == '__main__':
    # Collect the prediction pickle files for the "meta" (patient-level)
    # models; glob returns [] for files that are absent, so missing models
    # are silently skipped.
    metamodels = sorted([]
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.meta_gauss_roi10_maxout_seqshift_96.pkl")
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.meta_gauss_roi10_big_leaky_after_seqshift.pkl")
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.meta_gauss_roi10_zoom_mask_leaky_after.pkl")
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.meta_gauss_roi10_maxout.pkl")
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.meta_gauss_roi_zoom_big.pkl")
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.meta_gauss_roi_zoom_mask_leaky_after.pkl")
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.meta_gauss_roi_zoom_mask_leaky.pkl")
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.meta_gauss_roi_zoom.pkl")
        # + glob.glob(PREDICTIONS_PATH + "je_os_fixedaggr_rellocframe.pkl")
        # + glob.glob(PREDICTIONS_PATH + "je_meta_fixedaggr_jsc80leakyconv.pkl")
        # + glob.glob(PREDICTIONS_PATH + "je_meta_fixedaggr_framemax_reg.pkl")
        # + glob.glob(PREDICTIONS_PATH + "je_os_fixedaggr_relloc_filtered.pkl")
        # + glob.glob(PREDICTIONS_PATH + "je_os_fixedaggr_relloc_filtered_discs.pkl")
    )
    # Slice-level ("ss") model predictions, assembled the same way.
    slice_models = sorted([]
        + glob.glob(PREDICTIONS_PATH + "j6_2ch_128mm_skew.pkl")
        + glob.glob(PREDICTIONS_PATH + "je_ss_jonisc64small_360.pkl")
        + glob.glob(PREDICTIONS_PATH + "j6_2ch_96mm.pkl")
        + glob.glob(PREDICTIONS_PATH + "j6_2ch_128mm_96.pkl")
        + glob.glob(PREDICTIONS_PATH + "j6_4ch_32mm_specialist.pkl")
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.ch2_zoom_leaky_after_maxout.pkl")
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.gauss_roi_zoom_mask_leaky.pkl")
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.gauss_roi_zoom_mask_leaky_after.pkl")
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.gauss_roi10_big_leaky_after_seqshift.pkl")
        + glob.glob(PREDICTIONS_PATH + "ira_configurations.gauss_roi10_maxout.pkl")
    )
    # Run the full pipeline three times, each time mutating the
    # module-level MODELS_TO_USE global that main()/_create_ensembles read.
    # meta
    MODELS_TO_USE = metamodels
    main()
    # ss
    MODELS_TO_USE = slice_models
    main()
    # mixed_models
    MODELS_TO_USE = metamodels + slice_models
    main()
|
{
"content_hash": "3a9ecb7b3228672643ba1d99cf4d7ff3",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 122,
"avg_line_length": 41.046065259117086,
"alnum_prop": 0.613514145429039,
"repo_name": "317070/kaggle-heart",
"id": "b5830ee682e5e7916b59518e8e53e03e6bc687e1",
"size": "21385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "merge_predictions_ira.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2686608"
}
],
"symlink_target": ""
}
|
"""
Reference GENI GCH Clearinghouse, for talking to the GENI Clearinghouse
via xmlrpc instead of smime (its native interface)
Run from gcf-gch.py
Will produce signed user credentials from a GID, return a
list of aggregates read from a config file, and create a new Slice Credential.
"""
from __future__ import absolute_import
import dateutil.parser
import datetime
import logging
import os
import socket
import traceback
import uuid
from .ch import SampleClearinghouseServer
from .SecureXMLRPCServer import SecureXMLRPCServer
from .util import cred_util
from .util.ch_interface import *
# FIXME: GENI CH APIs have evolved since this was last run
# Clearinghouse interface that communicates with the
# new clearinghouse services (SA, PA, MA, CS, AUTHZ, LOG, etc.)
class GENIClearinghouse(object):
    """Clearinghouse front end that forwards calls to the new CH services
    (SA, PA, MA, CS, AUTHZ, LOG, etc.) discovered via the service registry.
    Serves the calls over XML-RPC through SecureXMLRPCServer.
    """

    def __init__(self):
        self.logger = logging.getLogger('gcf-gch')

    def runserver(self, addr, keyfile=None, certfile=None,
                  ca_certs=None, config=None):
        """Run the clearinghouse server.

        :param addr: (host, port) pair to listen on.
        :param keyfile: Path to the CH private key file (required).
        :param certfile: Path to the CH certificate file (required).
        :param ca_certs: Directory of peering certificates; when omitted,
            certfile is used as the only trusted root.
        :param config: Parsed configuration, stored for later use.
        :raises Exception: If the key, cert or CA cert files are missing/empty.
        """
        self.keyfile = keyfile
        self.certfile = certfile
        self.config = config

        # Error check the keyfile, certfile all exist
        if keyfile is None or not os.path.isfile(os.path.expanduser(keyfile)) or os.path.getsize(os.path.expanduser(keyfile)) < 1:
            raise Exception("Missing CH key file %s" % keyfile)
        if certfile is None or not os.path.isfile(os.path.expanduser(certfile)) or os.path.getsize(os.path.expanduser(certfile)) < 1:
            raise Exception("Missing CH cert file %s" % certfile)

        # ca_certs is a dir of several certificates for peering.
        # If not supplied just use the certfile as the only trusted root.
        if ca_certs is None:
            ca_certs = certfile
            self.logger.info("Using only my CH cert as a trusted root cert")

        self.trusted_root_files = cred_util.CredentialVerifier(ca_certs).root_cert_files

        if not os.path.exists(os.path.expanduser(ca_certs)):
            raise Exception("Missing CA cert(s): %s" % ca_certs)

        # This is the arg to _make_server
        ca_certs_onefname = cred_util.CredentialVerifier.getCAsFileFromDir(ca_certs)

        # Set up the URL's to the CH services
        self.establish_ch_interface()

        # Create the xmlrpc server, load the rootkeys and do the ssl thing.
        self._server = self._make_server(addr, keyfile, certfile,
                                         ca_certs_onefname)
        self._server.register_instance(SampleGENIClearinghouseServer(self))
        self.logger.info('GENI CH Listening on port %d...' % (addr[1]))
        self._server.serve_forever()

    def _make_server(self, addr, keyfile=None, certfile=None,
                     ca_certs=None):
        """Create the XML RPC server (ca_certs is a file of concatenated certs)."""
        return SecureXMLRPCServer(addr, keyfile=keyfile, certfile=certfile,
                                  ca_certs=ca_certs)

    def GetVersion(self):
        """Return a dict describing the supported gcf CH API version."""
        self.logger.info("Called GetVersion")
        version = dict()
        version['gcf-ch_api'] = 2
        return version

    def CreateProject(self, project_name, lead_id, project_purpose):
        """Create a project at the project authority (PA).

        :return: The raw invokeCH result dict from the PA.
        """
        self.logger.info("Called CreateProject")
        argsdict = dict(project_name=project_name,
                        lead_id=lead_id,
                        project_purpose=project_purpose)
        result = invokeCH(self.pa_url, 'create_project', self.logger,
                          argsdict, self.certfile, self.keyfile)
        return result

    def CreateSlice(self, slice_name, project_id, owner_id):
        """Create a slice at the slice authority (SA), authenticating with a
        temporary 'inside' cert/key derived from the caller's peer cert.
        """
        self.logger.info("Called CreateSlice SN " + slice_name +
                         " PID " + str(project_id))
        project_name = 'Dummy'
        argsdict = dict(slice_name=slice_name,
                        project_id=project_id,
                        project_name=project_name,
                        owner_id=owner_id)
        key_and_cert_files = get_inside_cert_and_key(self._server.peercert,
                                                     self.ma_url,
                                                     self.logger)
        inside_keyfile = key_and_cert_files['key']
        inside_certfile = key_and_cert_files['cert']
        # Bug fix: the original passed inside_certfile as BOTH the cert and
        # the key argument; pass the inside key file as the key, matching
        # the invokeCH calls in GetSliceCredential.
        result = invokeCH(self.sa_url, 'create_slice', self.logger,
                          argsdict, inside_certfile, inside_keyfile)
        os.unlink(inside_certfile)
        os.unlink(inside_keyfile)
        # Don't understand why, but this returns a 'None' output so I need
        # to fill it in with a ''
        if result['output'] is None:
            result['output'] = ''
        return result

    def GetSliceCredential(self, slice_id, cert, slice_urn=None):
        """Return a slice credential for the given slice.

        When slice_urn is supplied, resolve it to a slice_id via the SA
        first; returns False if that lookup fails.
        """
        self.logger.info("Called GetSliceCredential (ID=%s URN=%s)",
                         slice_id, slice_urn)
        key_and_cert_files = get_inside_cert_and_key(self._server.peercert,
                                                     self.ma_url,
                                                     self.logger)
        inside_keyfile = key_and_cert_files['key']
        inside_certfile = key_and_cert_files['cert']
        if slice_urn is not None:
            argsdict = dict(slice_urn=slice_urn)
            row = invokeCH(self.sa_url, 'lookup_slice_by_urn',
                           self.logger, argsdict,
                           inside_certfile, inside_keyfile)
            if row['code'] != 0:
                # Clean up the temporary inside cert/key; the original
                # leaked both files on this early-return path.
                os.unlink(inside_certfile)
                os.unlink(inside_keyfile)
                return False
            slice_id = row['value']['slice_id']
        argsdict = dict(slice_id=slice_id, experimenter_certificate=cert)
        result = invokeCH(self.sa_url, 'get_slice_credential',
                          self.logger, argsdict, inside_certfile, inside_keyfile)
        os.unlink(inside_certfile)
        os.unlink(inside_keyfile)
        return result

    def RenewSlice(self, slice_urn, expire_str):
        """Stub: log the request and report success without renewing."""
        self.logger.info("Called RenewSlice(%s, %s)", slice_urn, expire_str)
        return True

    def DeleteSlice(self, urn_req):
        """Stub: log the request and report failure (not implemented)."""
        self.logger.info("Called DeleteSlice %r" % urn_req)
        return False

    def ListAggregates(self):
        """Stub: no aggregate list is maintained here."""
        self.logger.info("Called ListAggregates")
        return None

    def CreateUserCredential(self, user_gid):
        """Ask the SA for a user credential for the given experimenter GID.

        :return: The credential string on success, otherwise the raw
            invokeCH result dict.
        """
        argsdict = dict(experimenter_certificate=user_gid)
        result = invokeCH(self.sa_url, 'get_user_credential',
                          self.logger, argsdict, self.certfile, self.keyfile)
        if result['code'] == 0:
            result = result['value']['user_credential']
        return result

    def establish_ch_interface(self):
        """Resolve the SA/PA/MA service URLs via the local service registry."""
        self.sr_url = "https://" + socket.gethostname() + "/sr/sr_controller.php"
        self.sa_url = self.get_first_service_of_type(1)  # SERVICE_AUTHORITY
        self.pa_url = self.get_first_service_of_type(2)  # PROJECT_AUTHORITY
        self.ma_url = self.get_first_service_of_type(3)  # MEMBER_AUTHORITY

    def get_first_service_of_type(self, service_type):
        """Return the URL of the first registered service of the given type,
        or None if the registry lookup fails.
        """
        result = invokeCH(self.sr_url, 'get_services_of_type',
                          self.logger,
                          dict(service_type=service_type),
                          self.certfile, self.keyfile)
        if result['code'] != 0:
            return None
        services = result['value']
        service = services[0]
        service_url = service['service_url']
        print("Service of type " + str(service_type) + " = " + service_url)
        return service_url
class SampleGENIClearinghouseServer(object):
    """A sample clearinghouse with barebones functionality.

    Thin XML-RPC facade: each public method forwards its arguments,
    unchanged, to the wrapped delegate object.
    """

    def __init__(self, delegate):
        self._delegate = delegate

    def GetVersion(self):
        """Forward GetVersion to the delegate."""
        return self._delegate.GetVersion()

    def CreateProject(self, project_name, lead_id, project_purpose):
        """Forward CreateProject to the delegate."""
        return self._delegate.CreateProject(project_name, lead_id,
                                            project_purpose)

    def CreateSlice(self, slice_name, project_id, owner_id):
        """Forward CreateSlice to the delegate."""
        return self._delegate.CreateSlice(slice_name, project_id, owner_id)

    def GetSliceCredential(self, slice_id, cert, slice_urn=None):
        """Forward GetSliceCredential to the delegate."""
        return self._delegate.GetSliceCredential(slice_id, cert, slice_urn)

    def RenewSlice(self, urn, expire_str):
        """Forward RenewSlice; log the traceback before re-raising errors."""
        try:
            return self._delegate.RenewSlice(urn, expire_str)
        except:
            self._delegate.logger.error(traceback.format_exc())
            raise

    def DeleteSlice(self, urn):
        """Forward DeleteSlice to the delegate."""
        return self._delegate.DeleteSlice(urn)

    def ListAggregates(self):
        """Forward ListAggregates to the delegate."""
        return self._delegate.ListAggregates()

    def CreateUserCredential(self, cert):
        """Forward CreateUserCredential to the delegate."""
        return self._delegate.CreateUserCredential(cert)
|
{
"content_hash": "82014be6cb68993faffd2cec78622252",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 133,
"avg_line_length": 40.87012987012987,
"alnum_prop": 0.5861667196271582,
"repo_name": "ahelsing/geni-tools",
"id": "9dcba4517714e658454f92b668b243133b0d313d",
"size": "10658",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "src/gcf/geni/gch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Inno Setup",
"bytes": "87278"
},
{
"name": "Python",
"bytes": "2610833"
},
{
"name": "Shell",
"bytes": "12644"
},
{
"name": "Visual Basic",
"bytes": "668"
}
],
"symlink_target": ""
}
|
import os
import tempfile
from typing import List, Any, Optional
from py4j.java_gateway import JavaObject
from pyflink.common import WatermarkStrategy
from pyflink.common.execution_config import ExecutionConfig
from pyflink.common.job_client import JobClient
from pyflink.common.job_execution_result import JobExecutionResult
from pyflink.common.restart_strategy import RestartStrategies, RestartStrategyConfiguration
from pyflink.common.typeinfo import TypeInformation, Types
from pyflink.datastream import SlotSharingGroup
from pyflink.datastream.checkpoint_config import CheckpointConfig
from pyflink.datastream.checkpointing_mode import CheckpointingMode
from pyflink.datastream.connectors import Source
from pyflink.datastream.data_stream import DataStream
from pyflink.datastream.execution_mode import RuntimeExecutionMode
from pyflink.datastream.functions import SourceFunction
from pyflink.datastream.state_backend import _from_j_state_backend, StateBackend
from pyflink.datastream.time_characteristic import TimeCharacteristic
from pyflink.java_gateway import get_gateway
from pyflink.serializers import PickleSerializer
from pyflink.util.java_utils import load_java_class, add_jars_to_context_class_loader, invoke_method
__all__ = ['StreamExecutionEnvironment']
class StreamExecutionEnvironment(object):
"""
The StreamExecutionEnvironment is the context in which a streaming program is executed. A
*LocalStreamEnvironment* will cause execution in the attached JVM, a
*RemoteStreamEnvironment* will cause execution on a remote setup.
The environment provides methods to control the job execution (such as setting the parallelism
or the fault tolerance/checkpointing parameters) and to interact with the outside world (data
access).
"""
    def __init__(self, j_stream_execution_environment, serializer=PickleSerializer()):
        # Wrap the Java StreamExecutionEnvironment handle; `serializer` is used
        # when shipping Python objects across the Java gateway.
        # NOTE(review): the default serializer instance is created once at class
        # definition time and shared by every environment constructed without an
        # explicit serializer -- confirm PickleSerializer is stateless.
        self._j_stream_execution_environment = j_stream_execution_environment
        self.serializer = serializer
def get_config(self) -> ExecutionConfig:
"""
Gets the config object.
:return: The :class:`~pyflink.common.ExecutionConfig` object.
"""
return ExecutionConfig(self._j_stream_execution_environment.getConfig())
def set_parallelism(self, parallelism: int) -> 'StreamExecutionEnvironment':
"""
Sets the parallelism for operations executed through this environment.
Setting a parallelism of x here will cause all operators (such as map,
batchReduce) to run with x parallel instances. This method overrides the
default parallelism for this environment. The
*LocalStreamEnvironment* uses by default a value equal to the
number of hardware contexts (CPU cores / threads). When executing the
program via the command line client from a JAR file, the default degree
of parallelism is the one configured for that setup.
:param parallelism: The parallelism.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.setParallelism(parallelism)
return self
def set_max_parallelism(self, max_parallelism: int) -> 'StreamExecutionEnvironment':
"""
Sets the maximum degree of parallelism defined for the program. The upper limit (inclusive)
is 32767.
The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
defines the number of key groups used for partitioned state.
:param max_parallelism: Maximum degree of parallelism to be used for the program,
with 0 < maxParallelism <= 2^15 - 1.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.setMaxParallelism(max_parallelism)
return self
def register_slot_sharing_group(self, slot_sharing_group: SlotSharingGroup) \
-> 'StreamExecutionEnvironment':
"""
Register a slot sharing group with its resource spec.
Note that a slot sharing group hints the scheduler that the grouped operators CAN be
deployed into a shared slot. There's no guarantee that the scheduler always deploy the
grouped operators together. In cases grouped operators are deployed into separate slots, the
slot resources will be derived from the specified group requirements.
:param slot_sharing_group: Which contains name and its resource spec.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.registerSlotSharingGroup(
slot_sharing_group.get_java_slot_sharing_group())
return self
def get_parallelism(self) -> int:
"""
Gets the parallelism with which operation are executed by default.
Operations can individually override this value to use a specific
parallelism.
:return: The parallelism used by operations, unless they override that value.
"""
return self._j_stream_execution_environment.getParallelism()
def get_max_parallelism(self) -> int:
"""
Gets the maximum degree of parallelism defined for the program.
The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
defines the number of key groups used for partitioned state.
:return: Maximum degree of parallelism.
"""
return self._j_stream_execution_environment.getMaxParallelism()
def set_runtime_mode(self, execution_mode: RuntimeExecutionMode):
"""
Sets the runtime execution mode for the application
:class:`~pyflink.datastream.execution_mode.RuntimeExecutionMode`. This
is equivalent to setting the `execution.runtime-mode` in your application's
configuration file.
We recommend users to NOT use this method but set the `execution.runtime-mode` using
the command-line when submitting the application. Keeping the application code
configuration-free allows for more flexibility as the same application will be able to be
executed in any execution mode.
:param execution_mode: The desired execution mode.
:return: The execution environment of your application.
.. versionadded:: 1.13.0
"""
return self._j_stream_execution_environment.setRuntimeMode(
execution_mode._to_j_execution_mode())
def set_buffer_timeout(self, timeout_millis: int) -> 'StreamExecutionEnvironment':
"""
Sets the maximum time frequency (milliseconds) for the flushing of the
output buffers. By default the output buffers flush frequently to provide
low latency and to aid smooth developer experience. Setting the parameter
can result in three logical modes:
- A positive integer triggers flushing periodically by that integer
- 0 triggers flushing after every record thus minimizing latency
- -1 triggers flushing only when the output buffer is full thus maximizing throughput
:param timeout_millis: The maximum time between two output flushes.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.setBufferTimeout(timeout_millis)
return self
def get_buffer_timeout(self) -> int:
"""
Gets the maximum time frequency (milliseconds) for the flushing of the
output buffers. For clarification on the extremal values see
:func:`set_buffer_timeout`.
:return: The timeout of the buffer.
"""
return self._j_stream_execution_environment.getBufferTimeout()
def disable_operator_chaining(self) -> 'StreamExecutionEnvironment':
"""
Disables operator chaining for streaming operators. Operator chaining
allows non-shuffle operations to be co-located in the same thread fully
avoiding serialization and de-serialization.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.disableOperatorChaining()
return self
def is_chaining_enabled(self) -> bool:
"""
Returns whether operator chaining is enabled.
:return: True if chaining is enabled, false otherwise.
"""
return self._j_stream_execution_environment.isChainingEnabled()
def get_checkpoint_config(self) -> CheckpointConfig:
"""
Gets the checkpoint config, which defines values like checkpoint interval, delay between
checkpoints, etc.
:return: The :class:`~pyflink.datastream.CheckpointConfig`.
"""
j_checkpoint_config = self._j_stream_execution_environment.getCheckpointConfig()
return CheckpointConfig(j_checkpoint_config)
def enable_checkpointing(self, interval: int, mode: CheckpointingMode = None) \
-> 'StreamExecutionEnvironment':
"""
Enables checkpointing for the streaming job. The distributed state of the streaming
dataflow will be periodically snapshotted. In case of a failure, the streaming
dataflow will be restarted from the latest completed checkpoint.
The job draws checkpoints periodically, in the given interval. The system uses the
given :class:`~pyflink.datastream.CheckpointingMode` for the checkpointing ("exactly once"
vs "at least once"). The state will be stored in the configured state backend.
.. note::
Checkpointing iterative streaming dataflows in not properly supported at
the moment. For that reason, iterative jobs will not be started if used
with enabled checkpointing.
Example:
::
>>> env.enable_checkpointing(300000, CheckpointingMode.AT_LEAST_ONCE)
:param interval: Time interval between state checkpoints in milliseconds.
:param mode: The checkpointing mode, selecting between "exactly once" and "at least once"
guaranteed.
:return: This object.
"""
if mode is None:
self._j_stream_execution_environment = \
self._j_stream_execution_environment.enableCheckpointing(interval)
else:
j_checkpointing_mode = CheckpointingMode._to_j_checkpointing_mode(mode)
self._j_stream_execution_environment.enableCheckpointing(
interval,
j_checkpointing_mode)
return self
def get_checkpoint_interval(self) -> int:
"""
Returns the checkpointing interval or -1 if checkpointing is disabled.
Shorthand for get_checkpoint_config().get_checkpoint_interval().
:return: The checkpointing interval or -1.
"""
return self._j_stream_execution_environment.getCheckpointInterval()
def get_checkpointing_mode(self) -> CheckpointingMode:
"""
Returns the checkpointing mode (exactly-once vs. at-least-once).
Shorthand for get_checkpoint_config().get_checkpointing_mode().
:return: The :class:`~pyflink.datastream.CheckpointingMode`.
"""
j_checkpointing_mode = self._j_stream_execution_environment.getCheckpointingMode()
return CheckpointingMode._from_j_checkpointing_mode(j_checkpointing_mode)
def get_state_backend(self) -> StateBackend:
"""
Gets the state backend that defines how to store and checkpoint state.
.. seealso:: :func:`set_state_backend`
:return: The :class:`StateBackend`.
"""
j_state_backend = self._j_stream_execution_environment.getStateBackend()
return _from_j_state_backend(j_state_backend)
def set_state_backend(self, state_backend: StateBackend) -> 'StreamExecutionEnvironment':
"""
Sets the state backend that describes how to store and checkpoint operator state. It
defines both which data structures hold state during execution (for example hash tables,
RockDB, or other data stores) as well as where checkpointed data will be persisted.
The :class:`~pyflink.datastream.MemoryStateBackend` for example maintains the state in heap
memory, as objects. It is lightweight without extra dependencies, but can checkpoint only
small states(some counters).
In contrast, the :class:`~pyflink.datastream.FsStateBackend` stores checkpoints of the state
(also maintained as heap objects) in files. When using a replicated file system (like HDFS,
S3, MapR FS, Alluxio, etc) this will guarantee that state is not lost upon failures of
individual nodes and that streaming program can be executed highly available and strongly
consistent(assuming that Flink is run in high-availability mode).
The build-in state backend includes:
:class:`~pyflink.datastream.MemoryStateBackend`,
:class:`~pyflink.datastream.FsStateBackend`
and :class:`~pyflink.datastream.RocksDBStateBackend`.
.. seealso:: :func:`get_state_backend`
Example:
::
>>> env.set_state_backend(EmbeddedRocksDBStateBackend())
:param state_backend: The :class:`StateBackend`.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.setStateBackend(state_backend._j_state_backend)
return self
def enable_changelog_state_backend(self, enabled: bool) -> 'StreamExecutionEnvironment':
"""
Enable the change log for current state backend. This change log allows operators to persist
state changes in a very fine-grained manner. Currently, the change log only applies to keyed
state, so non-keyed operator state and channel state are persisted as usual. The 'state'
here refers to 'keyed state'. Details are as follows:
* Stateful operators write the state changes to that log (logging the state), in addition \
to applying them to the state tables in RocksDB or the in-mem Hashtable.
* An operator can acknowledge a checkpoint as soon as the changes in the log have reached \
the durable checkpoint storage.
* The state tables are persisted periodically, independent of the checkpoints. We call \
this the materialization of the state on the checkpoint storage.
* Once the state is materialized on checkpoint storage, the state changelog can be \
truncated to the corresponding point.
It establish a way to drastically reduce the checkpoint interval for streaming
applications across state backends. For more details please check the FLIP-158.
If this method is not called explicitly, it means no preference for enabling the change
log. Configs for change log enabling will override in different config levels
(job/local/cluster).
.. seealso:: :func:`is_changelog_state_backend_enabled`
:param enabled: True if enable the change log for state backend explicitly, otherwise
disable the change log.
:return: This object.
.. versionadded:: 1.14.0
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.enableChangelogStateBackend(enabled)
return self
def is_changelog_state_backend_enabled(self) -> Optional[bool]:
"""
Gets the enable status of change log for state backend.
.. seealso:: :func:`enable_changelog_state_backend`
:return: An :class:`Optional[bool]` for the enable status of change log for state backend.
Could be None if user never specify this by calling
:func:`enable_changelog_state_backend`.
.. versionadded:: 1.14.0
"""
j_ternary_boolean = self._j_stream_execution_environment.isChangelogStateBackendEnabled()
return j_ternary_boolean.getAsBoolean()
def set_default_savepoint_directory(self, directory: str) -> 'StreamExecutionEnvironment':
"""
Sets the default savepoint directory, where savepoints will be written to if none
is explicitly provided when triggered.
Example:
::
>>> env.set_default_savepoint_directory("hdfs://savepoints")
:param directory The savepoint directory
:return: This object.
"""
self._j_stream_execution_environment.setDefaultSavepointDirectory(directory)
return self
def get_default_savepoint_directory(self) -> Optional[str]:
"""
Gets the default savepoint directory for this Job.
"""
j_path = self._j_stream_execution_environment.getDefaultSavepointDirectory()
if j_path is None:
return None
else:
return j_path.toString()
def set_restart_strategy(self, restart_strategy_configuration: RestartStrategyConfiguration):
"""
Sets the restart strategy configuration. The configuration specifies which restart strategy
will be used for the execution graph in case of a restart.
Example:
::
>>> env.set_restart_strategy(RestartStrategies.no_restart())
:param restart_strategy_configuration: Restart strategy configuration to be set.
:return:
"""
self._j_stream_execution_environment.setRestartStrategy(
restart_strategy_configuration._j_restart_strategy_configuration)
def get_restart_strategy(self) -> RestartStrategyConfiguration:
"""
Returns the specified restart strategy configuration.
:return: The restart strategy configuration to be used.
"""
return RestartStrategies._from_j_restart_strategy(
self._j_stream_execution_environment.getRestartStrategy())
def add_default_kryo_serializer(self, type_class_name: str, serializer_class_name: str):
"""
Adds a new Kryo default serializer to the Runtime.
Example:
::
>>> env.add_default_kryo_serializer("com.aaa.bbb.TypeClass", "com.aaa.bbb.Serializer")
:param type_class_name: The full-qualified java class name of the types serialized with the
given serializer.
:param serializer_class_name: The full-qualified java class name of the serializer to use.
"""
type_clz = load_java_class(type_class_name)
j_serializer_clz = load_java_class(serializer_class_name)
self._j_stream_execution_environment.addDefaultKryoSerializer(type_clz, j_serializer_clz)
def register_type_with_kryo_serializer(self, type_class_name: str, serializer_class_name: str):
"""
Registers the given Serializer via its class as a serializer for the given type at the
KryoSerializer.
Example:
::
>>> env.register_type_with_kryo_serializer("com.aaa.bbb.TypeClass",
... "com.aaa.bbb.Serializer")
:param type_class_name: The full-qualified java class name of the types serialized with
the given serializer.
:param serializer_class_name: The full-qualified java class name of the serializer to use.
"""
type_clz = load_java_class(type_class_name)
j_serializer_clz = load_java_class(serializer_class_name)
self._j_stream_execution_environment.registerTypeWithKryoSerializer(
type_clz, j_serializer_clz)
def register_type(self, type_class_name: str):
"""
Registers the given type with the serialization stack. If the type is eventually
serialized as a POJO, then the type is registered with the POJO serializer. If the
type ends up being serialized with Kryo, then it will be registered at Kryo to make
sure that only tags are written.
Example:
::
>>> env.register_type("com.aaa.bbb.TypeClass")
:param type_class_name: The full-qualified java class name of the type to register.
"""
type_clz = load_java_class(type_class_name)
self._j_stream_execution_environment.registerType(type_clz)
def set_stream_time_characteristic(self, characteristic: TimeCharacteristic):
"""
Sets the time characteristic for all streams create from this environment, e.g., processing
time, event time, or ingestion time.
If you set the characteristic to IngestionTime of EventTime this will set a default
watermark update interval of 200 ms. If this is not applicable for your application
you should change it using
:func:`pyflink.common.ExecutionConfig.set_auto_watermark_interval`.
Example:
::
>>> env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
:param characteristic: The time characteristic, which could be
:data:`TimeCharacteristic.ProcessingTime`,
:data:`TimeCharacteristic.IngestionTime`,
:data:`TimeCharacteristic.EventTime`.
"""
j_characteristic = TimeCharacteristic._to_j_time_characteristic(characteristic)
self._j_stream_execution_environment.setStreamTimeCharacteristic(j_characteristic)
def get_stream_time_characteristic(self) -> 'TimeCharacteristic':
"""
Gets the time characteristic.
.. seealso:: :func:`set_stream_time_characteristic`
:return: The :class:`TimeCharacteristic`.
"""
j_characteristic = self._j_stream_execution_environment.getStreamTimeCharacteristic()
return TimeCharacteristic._from_j_time_characteristic(j_characteristic)
def add_python_file(self, file_path: str):
"""
Adds a python dependency which could be python files, python packages or
local directories. They will be added to the PYTHONPATH of the python UDF worker.
Please make sure that these dependencies can be imported.
:param file_path: The path of the python dependency.
"""
jvm = get_gateway().jvm
env_config = jvm.org.apache.flink.python.util.PythonConfigUtil\
.getEnvironmentConfig(self._j_stream_execution_environment)
python_files = env_config.getString(jvm.PythonOptions.PYTHON_FILES.key(), None)
if python_files is not None:
python_files = jvm.PythonDependencyUtils.FILE_DELIMITER.join([file_path, python_files])
else:
python_files = file_path
env_config.setString(jvm.PythonOptions.PYTHON_FILES.key(), python_files)
def set_python_requirements(self, requirements_file_path: str,
requirements_cache_dir: str = None):
"""
Specifies a requirements.txt file which defines the third-party dependencies.
These dependencies will be installed to a temporary directory and added to the
PYTHONPATH of the python UDF worker.
For the dependencies which could not be accessed in the cluster, a directory which contains
the installation packages of these dependencies could be specified using the parameter
"requirements_cached_dir". It will be uploaded to the cluster to support offline
installation.
Example:
::
# commands executed in shell
$ echo numpy==1.16.5 > requirements.txt
$ pip download -d cached_dir -r requirements.txt --no-binary :all:
# python code
>>> stream_env.set_python_requirements("requirements.txt", "cached_dir")
.. note::
Please make sure the installation packages matches the platform of the cluster
and the python version used. These packages will be installed using pip,
so also make sure the version of Pip (version >= 7.1.0) and the version of
SetupTools (version >= 37.0.0).
:param requirements_file_path: The path of "requirements.txt" file.
:param requirements_cache_dir: The path of the local directory which contains the
installation packages.
"""
jvm = get_gateway().jvm
python_requirements = requirements_file_path
if requirements_cache_dir is not None:
python_requirements = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
[python_requirements, requirements_cache_dir])
env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
.getEnvironmentConfig(self._j_stream_execution_environment)
env_config.setString(jvm.PythonOptions.PYTHON_REQUIREMENTS.key(), python_requirements)
def add_python_archive(self, archive_path: str, target_dir: str = None):
    """
    Adds a python archive file that will be extracted to the working directory of
    the python UDF workers.

    If ``target_dir`` is given, the archive is extracted to a directory with that
    name; otherwise it is extracted to a directory named after the archive file.
    If the python UDFs depend on a python version that does not exist on the
    cluster, this method can be used to upload a virtual environment. The path of
    the interpreter inside the uploaded environment should then be configured via
    :func:`pyflink.table.TableConfig.set_python_executable`. Files uploaded this
    way are also accessible in UDFs via relative paths.

    Example:
        ::

            # command executed in shell
            # assert the relative path of python interpreter is py_env/bin/python
            $ zip -r py_env.zip py_env

            # python code
            >>> stream_env.add_python_archive("py_env.zip")
            >>> stream_env.set_python_executable("py_env.zip/py_env/bin/python")

            # or
            >>> stream_env.add_python_archive("py_env.zip", "myenv")
            >>> stream_env.set_python_executable("myenv/py_env/bin/python")

            # the files contained in the archive file can be accessed in UDF
            >>> def my_udf():
            ...     with open("myenv/py_env/data/data.txt") as f:
            ...         ...

    .. note::

        Please make sure the uploaded python environment matches the platform that
        the cluster is running on and that the python version must be 3.6 or higher.

    .. note::

        Currently only zip-format is supported, i.e. zip, jar, whl, egg, etc.
        Other archive formats such as tar, tar.gz, 7z, rar, etc. are not supported.

    :param archive_path: The archive file path.
    :param target_dir: Optional, the target dir name that the archive file extracted to.
    """
    jvm = get_gateway().jvm
    entry = archive_path
    if target_dir is not None:
        # Pack "<archive>#<target_dir>" into a single entry for the Java side.
        entry = jvm.PythonDependencyUtils.PARAM_DELIMITER.join([archive_path, target_dir])
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    archives_key = jvm.PythonOptions.PYTHON_ARCHIVES.key()
    existing_archives = env_config.getString(archives_key, None)
    if existing_archives is None:
        merged_archives = entry
    else:
        # Append to any archives registered by earlier calls.
        merged_archives = jvm.PythonDependencyUtils.FILE_DELIMITER.join(
            [existing_archives, entry])
    env_config.setString(archives_key, merged_archives)
def set_python_executable(self, python_exec: str):
    """
    Sets the path of the python interpreter used to execute the python UDF
    workers, e.g. "/usr/local/bin/python3".

    If the python UDFs depend on a python version that does not exist on the
    cluster, a virtual environment can be uploaded via
    :func:`pyflink.datastream.StreamExecutionEnvironment.add_python_archive` and
    the path of the interpreter contained in that environment specified here.

    Example:
        ::

            # command executed in shell
            # assume that the relative path of python interpreter is py_env/bin/python
            $ zip -r py_env.zip py_env

            # python code
            >>> stream_env.add_python_archive("py_env.zip")
            >>> stream_env.set_python_executable("py_env.zip/py_env/bin/python")

    .. note::

        Please make sure the uploaded python environment matches the platform that
        the cluster is running on and that the python version must be 3.6 or higher.

    .. note::

        The python udf worker depends on Apache Beam (version == 2.27.0).
        Please ensure that the specified environment meets the above requirements.

    :param python_exec: The path of python interpreter.
    """
    jvm = get_gateway().jvm
    config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    config.setString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), python_exec)
def add_jars(self, *jars_path: str):
    """
    Adds a list of jar files that will be uploaded to the cluster and referenced
    by the job.

    :param jars_path: Path of jars.
    """
    # Make the jars visible to the local context class loader as well.
    add_jars_to_context_class_loader(jars_path)
    jvm = get_gateway().jvm
    jars_key = jvm.org.apache.flink.configuration.PipelineOptions.JARS.key()
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    previous = env_config.getString(jars_key, None)
    # Keep jars registered by earlier calls; ignore an empty/blank previous value.
    parts = [previous] if previous and previous.strip() else []
    parts.append(';'.join(jars_path))
    env_config.setString(jars_key, ';'.join(parts))
def add_classpaths(self, *classpaths: str):
    """
    Adds a list of URLs that are added to the classpath of each user code
    classloader of the program. Paths must specify a protocol (e.g. file://)
    and be accessible on all nodes.

    :param classpaths: Classpaths that will be added.
    """
    # Make the entries visible to the local context class loader as well.
    add_jars_to_context_class_loader(classpaths)
    jvm = get_gateway().jvm
    classpaths_key = jvm.org.apache.flink.configuration.PipelineOptions.CLASSPATHS.key()
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    previous = env_config.getString(classpaths_key, None)
    # Preserve classpaths registered by earlier calls; skip empty/blank values.
    parts = [previous] if previous and previous.strip() else []
    parts.append(';'.join(classpaths))
    env_config.setString(classpaths_key, ';'.join(parts))
def get_default_local_parallelism(self) -> int:
    """
    Gets the default parallelism that will be used for the local execution environment.

    :return: The default local parallelism.
    """
    # Pure delegation to the wrapped Java StreamExecutionEnvironment.
    return self._j_stream_execution_environment.getDefaultLocalParallelism()
def set_default_local_parallelism(self, parallelism: int):
    """
    Sets the default parallelism that will be used for the local execution environment.

    :param parallelism: The parallelism to use as the default local parallelism.
    """
    # Pure delegation to the wrapped Java StreamExecutionEnvironment.
    self._j_stream_execution_environment.setDefaultLocalParallelism(parallelism)
def execute(self, job_name: str = None) -> JobExecutionResult:
    """
    Triggers the program execution. The environment will execute all parts of
    the program that have resulted in a "sink" operation. Sink operations are
    for example printing results or forwarding them to a message queue.

    The program execution will be logged and displayed with the provided name.

    :param job_name: Desired name of the job, optional.
    :return: The result of the job execution, containing elapsed time and accumulators.
    """
    # Building the stream graph clears the buffered transformations.
    stream_graph = self._generate_stream_graph(clear_transformations=True, job_name=job_name)
    j_execution_result = self._j_stream_execution_environment.execute(stream_graph)
    return JobExecutionResult(j_execution_result)
def execute_async(self, job_name: str = 'Flink Streaming Job') -> JobClient:
    """
    Triggers the program asynchronously. The environment will execute all parts
    of the program that have resulted in a "sink" operation. Sink operations are
    for example printing results or forwarding them to a message queue.

    The program execution will be logged and displayed with a generated default name.

    :param job_name: Desired name of the job.
    :return: A JobClient that can be used to communicate with the submitted job,
             completed on submission succeeded.
    """
    # Building the stream graph clears the buffered transformations.
    stream_graph = self._generate_stream_graph(clear_transformations=True, job_name=job_name)
    j_job_client = self._j_stream_execution_environment.executeAsync(stream_graph)
    return JobClient(j_job_client=j_job_client)
def get_execution_plan(self) -> str:
    """
    Creates the plan with which the system will execute the program, and returns
    it as a String using a JSON representation of the execution data flow graph.
    Note that this needs to be called before the plan is executed.

    If the compiler could not be instantiated, or the master could not be
    contacted to retrieve information relevant to the execution planning,
    an exception will be thrown.

    :return: The execution plan of the program, as a JSON String.
    """
    # Unlike execute(), the buffered transformations are kept so the job
    # can still be submitted afterwards.
    return self._generate_stream_graph(clear_transformations=False) \
        .getStreamingPlanAsJSON()
@staticmethod
def get_execution_environment() -> 'StreamExecutionEnvironment':
    """
    Creates an execution environment that represents the context in which the
    program is currently executed. If the program is invoked standalone, this
    method returns a local execution environment.

    :return: The execution environment of the context in which the program is executed.
    """
    j_env = get_gateway().jvm.org.apache.flink.streaming.api.environment \
        .StreamExecutionEnvironment.getExecutionEnvironment()
    return StreamExecutionEnvironment(j_env)
def add_source(self, source_func: SourceFunction, source_name: str = 'Custom Source',
               type_info: TypeInformation = None) -> 'DataStream':
    """
    Adds a data source to the streaming topology.

    :param source_func: the user defined function.
    :param source_name: name of the data source. Optional.
    :param type_info: type of the returned stream. Optional.
    :return: the data stream constructed.
    """
    # Omitting type_info leaves the output type to the Java side.
    j_type_info = type_info.get_java_type_info() if type_info else None
    j_data_stream = self._j_stream_execution_environment.addSource(
        source_func.get_java_function(), source_name, j_type_info)
    return DataStream(j_data_stream=j_data_stream)
def from_source(self,
                source: Source,
                watermark_strategy: WatermarkStrategy,
                source_name: str,
                type_info: TypeInformation = None) -> 'DataStream':
    """
    Adds a data :class:`~pyflink.datastream.connectors.Source` to the environment
    to get a :class:`~pyflink.datastream.DataStream`.

    The result will be either a bounded data stream (that can be processed in a
    batch way) or an unbounded data stream (that must be processed in a streaming
    way), based on the boundedness property of the source.

    This method takes an explicit type information for the produced data stream,
    so that callers can define directly what type/serializer will be used for the
    produced stream. For sources that describe their produced type, the parameter
    type_info should not be specified to avoid specifying the produced type
    redundantly.

    .. versionadded:: 1.13.0
    """
    # Omitting type_info leaves the output type to the source itself.
    j_type_info = type_info.get_java_type_info() if type_info else None
    j_data_stream = self._j_stream_execution_environment.fromSource(
        source.get_java_function(),
        watermark_strategy._j_watermark_strategy,
        source_name,
        j_type_info)
    return DataStream(j_data_stream=j_data_stream)
def read_text_file(self, file_path: str, charset_name: str = "UTF-8") -> DataStream:
    """
    Reads the given file line-by-line and creates a DataStream that contains a
    string with the contents of each such line. The charset with the given name
    will be used to read the files.

    Note that this interface is not fault tolerant and is supposed to be used for
    test purposes.

    :param file_path: The path of the file, as a URI (e.g., "file:///some/local/file"
                      or "hdfs://host:port/file/path")
    :param charset_name: The name of the character set used to read the file.
    :return: The DataStream that represents the data read from the given file as text lines.
    """
    j_data_stream = self._j_stream_execution_environment.readTextFile(
        file_path, charset_name)
    return DataStream(j_data_stream)
def from_collection(self, collection: List[Any],
                    type_info: TypeInformation = None) -> DataStream:
    """
    Creates a data stream from the given non-empty collection. The type of the
    data stream is that of the elements in the collection.

    Note that this operation will result in a non-parallel data stream source,
    i.e. a data stream source with parallelism one.

    :param collection: The collection of elements to create the data stream from.
    :param type_info: The TypeInformation for the produced data stream.
    :return: the data stream representing the given collection.
    """
    if type_info is None:
        return self._from_collection(collection, None)
    # Convert each element to its internal representation first.
    internal = [type_info.to_internal_type(element) for element in collection]
    return self._from_collection(internal, type_info)
def _from_collection(self, elements: List[Any],
                     type_info: TypeInformation = None) -> DataStream:
    """
    Builds a bounded, non-parallel DataStream from a Python collection.

    The elements are serialized (pickled) to a temporary file which is read back
    on the Java side and wrapped in a collection input format.

    :param elements: The elements to create the data stream from.
    :param type_info: The TypeInformation of the produced stream; if omitted the
                      elements are carried as pickled byte arrays.
    :return: The data stream representing the given collection.
    """
    # BUGFIX: the temp file was created inside a fresh mkdtemp() directory which
    # was never removed (only the file was unlinked), leaking one directory per
    # call. Keep a handle on the directory and remove it in the finally block.
    temp_dir = tempfile.mkdtemp()
    temp_file = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir)
    serializer = self.serializer
    try:
        with temp_file:
            # dumps elements to a temporary file by pickle serializer.
            serializer.serialize(elements, temp_file)
        gateway = get_gateway()
        # if user does not defined the element data types, read the pickled data as a byte array
        # list.
        if type_info is None:
            j_objs = gateway.jvm.PythonBridgeUtils.readPickledBytes(temp_file.name)
            out_put_type_info = Types.PICKLED_BYTE_ARRAY()  # type: TypeInformation
        else:
            j_objs = gateway.jvm.PythonBridgeUtils.readPythonObjects(temp_file.name)
            out_put_type_info = type_info
        # Since flink python module depends on table module, we can make use of utils of it when
        # implementing python DataStream API.
        PythonTableUtils = gateway.jvm \
            .org.apache.flink.table.planner.utils.python.PythonTableUtils
        execution_config = self._j_stream_execution_environment.getConfig()
        j_input_format = PythonTableUtils.getCollectionInputFormat(
            j_objs,
            out_put_type_info.get_java_type_info(),
            execution_config
        )
        JInputFormatSourceFunction = gateway.jvm.org.apache.flink.streaming.api.functions. \
            source.InputFormatSourceFunction
        JBoundedness = gateway.jvm.org.apache.flink.api.connector.source.Boundedness
        # Invoke addSource through the helper with explicit parameter types so
        # the right Java overload is selected.
        j_data_stream_source = invoke_method(
            self._j_stream_execution_environment,
            "org.apache.flink.streaming.api.environment.StreamExecutionEnvironment",
            "addSource",
            [JInputFormatSourceFunction(j_input_format, out_put_type_info.get_java_type_info()),
             "Collection Source",
             out_put_type_info.get_java_type_info(),
             JBoundedness.BOUNDED],
            ["org.apache.flink.streaming.api.functions.source.SourceFunction",
             "java.lang.String",
             "org.apache.flink.api.common.typeinfo.TypeInformation",
             "org.apache.flink.api.connector.source.Boundedness"])
        # A collection source is inherently non-parallel.
        j_data_stream_source.forceNonParallel()
        return DataStream(j_data_stream=j_data_stream_source)
    finally:
        os.unlink(temp_file.name)
        os.rmdir(temp_dir)
def _generate_stream_graph(self, clear_transformations: bool = False, job_name: str = None) \
        -> JavaObject:
    """
    Generates the Java StreamGraph for the buffered transformations (resolving the
    registered Python dependencies on the Java side).

    :param clear_transformations: Whether the buffered transformations are cleared
                                  once the graph has been generated.
    :param job_name: Optional job name to set on the generated graph.
    :return: The Java StreamGraph object.
    """
    j_stream_graph = get_gateway().jvm \
        .org.apache.flink.python.util.PythonConfigUtil.generateStreamGraphWithDependencies(
            self._j_stream_execution_environment, clear_transformations)
    if job_name is not None:
        j_stream_graph.setJobName(job_name)
    return j_stream_graph
def is_unaligned_checkpoints_enabled(self):
    """
    Returns whether Unaligned Checkpoints are enabled.
    """
    # Pure delegation to the wrapped Java StreamExecutionEnvironment.
    return self._j_stream_execution_environment.isUnalignedCheckpointsEnabled()
def is_force_unaligned_checkpoints(self):
    """
    Returns whether Unaligned Checkpoints are force-enabled.
    """
    # Pure delegation to the wrapped Java StreamExecutionEnvironment.
    return self._j_stream_execution_environment.isForceUnalignedCheckpoints()
|
{
"content_hash": "789654944925204105b2d8fc079d4c32",
"timestamp": "",
"source": "github",
"line_count": 925,
"max_line_length": 100,
"avg_line_length": 45.99351351351351,
"alnum_prop": 0.6622320421210982,
"repo_name": "tillrohrmann/flink",
"id": "a83796ef04fab4075695cbc81c873cafb50a6a36",
"size": "43502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flink-python/pyflink/datastream/stream_execution_environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "20448"
},
{
"name": "Batchfile",
"bytes": "1863"
},
{
"name": "C",
"bytes": "847"
},
{
"name": "Clojure",
"bytes": "84400"
},
{
"name": "Dockerfile",
"bytes": "5563"
},
{
"name": "FreeMarker",
"bytes": "86639"
},
{
"name": "GAP",
"bytes": "139514"
},
{
"name": "HTML",
"bytes": "135625"
},
{
"name": "HiveQL",
"bytes": "78611"
},
{
"name": "Java",
"bytes": "83158201"
},
{
"name": "JavaScript",
"bytes": "1829"
},
{
"name": "Less",
"bytes": "65918"
},
{
"name": "Makefile",
"bytes": "5134"
},
{
"name": "Python",
"bytes": "2433935"
},
{
"name": "Scala",
"bytes": "10501870"
},
{
"name": "Shell",
"bytes": "525933"
},
{
"name": "TypeScript",
"bytes": "288472"
},
{
"name": "q",
"bytes": "7406"
}
],
"symlink_target": ""
}
|
import unittest
from ..pyjsongooglechart import GoogleChart
from ..pyjsongooglechart import (StringColumn, NumberColumn)
class GoogleChartTests(unittest.TestCase):
    """Unit tests for GoogleChart: columns, rows, options and struct output."""

    def test_base(self):
        """End-to-end check of chart construction and data-structure assembly."""
        # Verify we can create a new chart
        g = GoogleChart("Test")
        self.assertEqual(g.title, "Test")
        self.assertEqual(g.columns, [])
        # Verify we can add a column to it
        g.add_string_column("Column0")
        self.assertTrue(isinstance(g[0], StringColumn))
        self.assertEqual(g[0].label, "Column0")
        # Verify the behavior of column indexing.
        # NOTE(review): the column requested at index=2 ends up at g[1] while
        # g[2] raises IndexError - indexes apparently compact; confirm intent.
        g.add_column(NumberColumn("Column2"), index=2)
        self.assertRaises(IndexError, g.__getitem__, 2)
        self.assertTrue(isinstance(g[1], NumberColumn))
        # Verify the behavior of row insertion; the (7, "7") pair carries the
        # raw value and its formatted form (see the 'f'/'v' keys asserted below).
        g.insert_row("hullo", (7, "7"))
        self.assertEqual(g[0].values[0], ("hullo",))
        self.assertEqual(g[1].values[0], (7, "7"))
        # Verify the data structures can be assembled correctly
        rows = g._build_rows_struct()
        self.assertEqual(rows,
                         [{'c': [{'v': 'hullo'}, {'f': '7', 'v': 7}]}])
        cols = g._build_columns_struct()
        self.assertEqual(cols,
                         [{'pattern': '',
                           'type': 'string',
                           'id': '',
                           'p': '',
                           'label': 'Column0'},
                          {'pattern': '',
                           'type': 'number',
                           'id': '',
                           'p': '',
                           'label': 'Column2'}])

    def test_options(self):
        """Options set via attribute access must be reflected in the struct."""
        g = GoogleChart("Title")
        self.assertEqual(g.options._attributes, {})
        g.options.height = 50
        g.options.width = 55
        g.options.legend.position = 'bottom'  # nested option object
        # The constructor title is merged into the options automatically.
        expected = {"height": 50,
                    "width": 55,
                    "title": "Title",
                    "legend": {"position": "bottom"}
                    }
        self.assertEqual(g.build_options_struct(), expected)
        # Setting options.title overrides the constructor value.
        g.options.title = "New Title"
        expected['title'] = "New Title"
        self.assertEqual(g.build_options_struct(), expected)
|
{
"content_hash": "2c5a1dfab404ae91727b3d8217cb4a09",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 71,
"avg_line_length": 35.171875,
"alnum_prop": 0.494002665482008,
"repo_name": "msteffeck/pyjsongooglechart",
"id": "e0471df941da824cb487f42d201beff5742ad26d",
"size": "2251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/chart_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "16059"
}
],
"symlink_target": ""
}
|
from . import constants as c
from . import control
from . import setup
from . states import commonarea
from . states import mainmenu
import sys
import traceback
def main():
    """Set up the game, run its main loop, and exit cleanly on return.

    Any crash inside the game loop is printed rather than propagated, so the
    cleanup/exit code below always runs.
    """
    setup.start()
    states = {
        c.StateName.MAINMENU: mainmenu.MainMenu(),
        c.StateName.COMMONAREA: commonarea.CommonArea(),
    }
    game = control.Control(c.CAPTION)
    game.setup_states(states, c.StateName.MAINMENU)
    game.setup_game_ui()
    # Never let exceptions crash the game loop - but catch Exception, not a
    # bare `except:`, so SystemExit and KeyboardInterrupt still propagate
    # instead of being printed as crashes.
    try:
        game.game_loop()
    except Exception:
        traceback.print_exc()
    # The game loop has quit and we still have control of the program here.
    # Do any clean up necessary before exit.
    sys.exit()
# Script entry point.
if __name__ == "__main__":
    main()
|
{
"content_hash": "20f4149b09cdcf4db855edbe372c1bfc",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 75,
"avg_line_length": 20.63888888888889,
"alnum_prop": 0.6514131897711979,
"repo_name": "revainisdead/garden",
"id": "6fe4899ea9eccb6e3fdaffd64b7f23554c4da119",
"size": "743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "572"
},
{
"name": "Python",
"bytes": "164644"
},
{
"name": "Shell",
"bytes": "561"
}
],
"symlink_target": ""
}
|
"""Operations for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.util.tf_export import tf_export
# Linear algebra ops.
# These are thin aliases that re-export existing TensorFlow ops under the
# tf.linalg namespace; no new behavior is defined here.
band_part = array_ops.matrix_band_part
cholesky = linalg_ops.cholesky
cholesky_solve = linalg_ops.cholesky_solve
det = linalg_ops.matrix_determinant
# pylint: disable=protected-access
slogdet = gen_linalg_ops._log_matrix_determinant
# NOTE(review): the duplicated "disable" directive below was presumably meant
# to be "enable"; however expm/logm further down also touch protected names,
# so leaving the disable in effect is deliberate-looking - confirm.
# pylint: disable=protected-access
diag = array_ops.matrix_diag
diag_part = array_ops.matrix_diag_part
eigh = linalg_ops.self_adjoint_eig
eigvalsh = linalg_ops.self_adjoint_eigvals
einsum = special_math_ops.einsum
expm = gen_linalg_ops._matrix_exponential
eye = linalg_ops.eye
inv = linalg_ops.matrix_inverse
logm = gen_linalg_ops._matrix_logarithm
lstsq = linalg_ops.matrix_solve_ls
norm = linalg_ops.norm
qr = linalg_ops.qr
set_diag = array_ops.matrix_set_diag
solve = linalg_ops.matrix_solve
svd = linalg_ops.svd
tensordot = math_ops.tensordot
trace = math_ops.trace
transpose = array_ops.matrix_transpose
triangular_solve = linalg_ops.matrix_triangular_solve
@tf_export('linalg.logdet')
def logdet(matrix, name=None):
  """Computes log of the determinant of a hermitian positive definite matrix.

  ```python
  # Compute the determinant of a matrix while reducing the chance of over- or
  # underflow:
  A = ... # shape 10 x 10
  det = tf.exp(tf.logdet(A))  # scalar
  ```

  Args:
    matrix:  A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name:  A name to give this `Op`.  Defaults to `logdet`.

  Returns:
    The natural log of the determinant of `matrix`.

  @compatibility(numpy)
  Equivalent to numpy.linalg.slogdet, although no sign is returned since only
  hermitian positive definite matrices are supported.
  @end_compatibility
  """
  # This uses the property that the log det(A) = 2*sum(log(real(diag(C))))
  # where C is the cholesky decomposition of A.
  with ops.name_scope(name, 'logdet', [matrix]):
    chol = gen_linalg_ops.cholesky(matrix)
    return 2.0 * math_ops.reduce_sum(
        math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))),
        # `axis` replaces the deprecated `reduction_indices` alias; same
        # semantics (sum over the last dimension, i.e. the diagonal).
        axis=[-1])
@tf_export('linalg.adjoint')
def adjoint(matrix, name=None):
  """Transposes the last two dimensions of and conjugates tensor `matrix`.

  For example:

  ```python
  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
                   [4 + 4j, 5 + 5j, 6 + 6j]])
  tf.linalg.adjoint(x)  # [[1 - 1j, 4 - 4j],
                        #  [2 - 2j, 5 - 5j],
                        #  [3 - 3j, 6 - 6j]]
  ```

  Args:
    matrix:  A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name:  A name to give this `Op` (optional).

  Returns:
    The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of
    matrix.
  """
  with ops.name_scope(name, 'adjoint', [matrix]):
    matrix = ops.convert_to_tensor(matrix, name='matrix')
    # Conjugate-transpose only the trailing two dims; batch dims are preserved.
    return array_ops.matrix_transpose(matrix, conjugate=True)
|
{
"content_hash": "ba66d74a7129caabfe3b7c0211248054",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 78,
"avg_line_length": 33.306930693069305,
"alnum_prop": 0.6914387633769322,
"repo_name": "av8ramit/tensorflow",
"id": "d5bd916f80d8a03e5423c43d1ca039bc4dceff5e",
"size": "4053",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/linalg/linalg_impl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9096"
},
{
"name": "C",
"bytes": "332331"
},
{
"name": "C++",
"bytes": "37144977"
},
{
"name": "CMake",
"bytes": "193247"
},
{
"name": "Go",
"bytes": "1061627"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "544069"
},
{
"name": "Jupyter Notebook",
"bytes": "1940884"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48122"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "1487"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "32711532"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "422931"
}
],
"symlink_target": ""
}
|
"""
haproxyadmin.exceptions
~~~~~~~~~~~~~~~~~~~~~~~
This module contains the set of haproxyadmin's exceptions, with the following
hierarchy::
HAProxyBaseError
├── CommandFailed
├── HAProxyDataError
│ ├── IncosistentData
│ └── MultipleCommandResults
└── HAProxySocketError
├── SocketApplicationError
├── SocketConnectionError
├── SocketPermissionError
├── SocketTimeout
└── SocketTransportError
"""
class HAProxyBaseError(Exception):
    """Root of the haproxyadmin exception hierarchy.

    :param message: error message.
    :type message: ``string``
    """
    # Default message; subclasses override this class attribute.
    message = ''

    def __init__(self, message=''):
        # An explicit argument overrides the class-level default message.
        self.message = message or self.message
        super(HAProxyBaseError, self).__init__(self.message)
class CommandFailed(HAProxyBaseError):
    """Raised when a command sent to HAProxy returned an error."""
class HAProxyDataError(HAProxyBaseError):
    """Base class for errors about data returned by the HAProxy processes.

    :param results: A structure which contains data returned by each socket.
    :type results: ``list`` of ``list``
    """
    def __init__(self, results):
        # Keep the raw per-socket results so callers can inspect the mismatch.
        self.results = results
        super(HAProxyDataError, self).__init__()
class MultipleCommandResults(HAProxyDataError):
    """Command returned different results per HAProxy process."""
    message = 'Received different result per HAProxy process'
# NOTE(review): the class name misspells "Inconsistent"; it is part of the
# public API, so renaming would break existing callers.
class IncosistentData(HAProxyDataError):
    """Data across all processes is not the same."""
    message = 'Received different data per HAProxy process'
class HAProxySocketError(HAProxyBaseError):
    """Base class for socket-related errors.

    :param socket_file: socket file.
    :type socket_file: ``string``
    """
    def __init__(self, socket_file):
        self.socket_file = socket_file
        # Append the offending socket file to the subclass' default message.
        self.message = '{} {}'.format(self.message, socket_file)
        super(HAProxySocketError, self).__init__(self.message)
class SocketTimeout(HAProxySocketError):
    """Raised when we time out on the socket."""
    message = 'Socket timed out'
class SocketPermissionError(HAProxySocketError):
    """Raised when permissions are not granted to access the socket file."""
    message = 'No permissions are granted to access socket file'
class SocketConnectionError(HAProxySocketError):
    """Raised when the socket file is not bound to a process."""
    message = 'No process is bound to socket file'
class SocketApplicationError(HAProxySocketError):
    """Raised when we connect to a socket and HAProxy is not bound to it."""
    message = 'HAProxy is not bound to socket file'
class SocketTransportError(HAProxySocketError):
    """Raised when the endpoint of the socket hasn't closed an old connection.

    .. note::
        It only occurs in cases where HAProxy is at ~90% CPU utilization for
        processing traffic and we reconnect to the socket too
        fast and as a result HAProxy doesn't have enough time to close the
        previous connection.
    """
    message = 'Transport endpoint is already connected'
|
{
"content_hash": "b8f3f75335b04b0e620a84a2c1db6a56",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 76,
"avg_line_length": 28.961165048543688,
"alnum_prop": 0.6798524974857526,
"repo_name": "unixsurfer/haproxyadmin",
"id": "c83386d681dd364d69cd9fe6222ac9328fc68842",
"size": "3093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "haproxyadmin/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "110157"
},
{
"name": "Shell",
"bytes": "305"
}
],
"symlink_target": ""
}
|
"""
.. module:: django_core_models.location.tests.test_validation
:synopsis: location application validation unit test module.
*location* application validation unit test module.
"""
from __future__ import print_function
from django.core.exceptions import ValidationError
from django_core_utils.tests.test_utils import BaseModelTestCase
from . import factories
from ..validation import (country_validation, language_validation,
post_office_box_validation, postal_code_validation,
province_validation, state_validation)
from .factories import (CountryModelFactory, LanguageModelFactory,
ProvinceModelFactory, StateModelFactory)
class ValidationTestCase(BaseModelTestCase):
    """Base validation unit test class.

    Provides the country fixtures shared by the concrete validation tests.
    """
    def country_usa(self):
        # Country fixture for the USA.
        return factories.country_usa()

    def country_france(self):
        # Country fixture for France.
        return factories.country_france()
# Post-office-box spellings expected to pass post_office_box_validation (USA).
valid_post_office_boxes = (
    'PO Box 001', 'P.O. Box 002', 'po b 001', 'po bin 001',
    'Post O bin 001', 'P. Office bin 001',
    'P.O.Box 003')
# Spellings expected to be rejected (designator trailing or missing entirely).
invalid_post_office_boxes = ('004 P.O. Box', '005 PO Box', '006', 'abc')
class PostOfficeBoxValidationTestCase(ValidationTestCase):
    """Post office box validation unit test class."""
    def test_post_office_box_validation_usa(self):
        # All accepted spellings must validate without raising.
        for pob in valid_post_office_boxes:
            post_office_box_validation(self.country_usa(), pob)

    def test_usa_post_office_box_validation_exceptions_usa(self):
        # Every rejected spelling must raise ValidationError.
        for pob in invalid_post_office_boxes:
            with self.assertRaises(ValidationError):
                post_office_box_validation(self.country_usa(), pob)
# USA ZIP and ZIP+4 codes expected to pass postal_code_validation.
valid_postal_codes = ('12345', '12345-6789', '12345 - 6789')
# Too-short or letter-containing codes expected to be rejected.
invalid_postal_codes = ('1234', '1234A', '12345 A', '12345-6789A')
class PostalCodeValidationTestCase(ValidationTestCase):
    """Postal code validation unit test class."""
    def test_postal_code_validation_usa(self):
        # All accepted codes must validate without raising.
        for postal_box in valid_postal_codes:
            postal_code_validation(self.country_usa(), postal_box)

    def test_postal_code_validation_exceptions_usa(self):
        # Every rejected code must raise ValidationError.
        for pob in invalid_postal_codes:
            with self.assertRaises(ValidationError):
                postal_code_validation(self.country_usa(), pob)
class CountryValidationTestCase(ValidationTestCase):
    """Country validation unit test class."""
    def test_country_validation_usa(self):
        country_validation(self.country_usa())

    # NOTE(review): method name looks copy-pasted from the postal-code tests;
    # it actually exercises country_validation and should presumably be named
    # test_country_validation_exceptions_usa.
    def test_postal_code_validation_exceptions_usa(self):
        # "USA"/"US" is expected to be rejected (the canonical fixture differs).
        with self.assertRaises(ValidationError):
            country_validation(CountryModelFactory(
                name="USA", iso_code="US"))
class LanguageValidationTestCase(ValidationTestCase):
    """Language validation unit test class."""
    def test_language_validation_usa(self):
        language_validation(LanguageModelFactory(
            name=LanguageModelFactory.LANGUAGE_FRENCH,
            iso_code=LanguageModelFactory.ISO_639_2_FR))

    def test_language_validation_exceptions_usa(self):
        # NOTE(review): this calls country_validation with a CountryModelFactory
        # inside the *language* test case - it looks like a copy-paste of the
        # country test and was presumably meant to call language_validation with
        # a LanguageModelFactory; confirm before changing.
        with self.assertRaises(ValidationError):
            country_validation(CountryModelFactory(
                name="French", iso_code="zz"))
class ProvinceValidationTestCase(ValidationTestCase):
    """Province validation unit test class."""
    def test_province_validation(self):
        province_validation(ProvinceModelFactory(
            name=ProvinceModelFactory.PROVINCE_NORMANDY,
            iso_code=ProvinceModelFactory.ISO_3166_2_NORMANDY,
            country=self.country_france()))

    def test_province_validation_invalid_iso(self):
        # Truncated ISO 3166-2 code must be rejected.
        with self.assertRaises(ValidationError):
            province_validation(ProvinceModelFactory(
                name=ProvinceModelFactory.PROVINCE_NORMANDY,
                iso_code="FR-P",
                country=self.country_france()))

    def test_province_validation_invalid_name(self):
        # NOTE(review): StateModelFactory is used here (and below) inside the
        # province tests - presumably a copy-paste; ProvinceModelFactory seems
        # intended. Confirm whether province_validation cares about the factory.
        with self.assertRaises(ValidationError):
            province_validation(StateModelFactory(
                name="Bad name",
                iso_code=ProvinceModelFactory.ISO_3166_2_NORMANDY,
                country=self.country_france()))

    # NOTE(review): method name says "state" but this is the province test case.
    def test_state_validation_invalid_country(self):
        # A French province paired with the USA country must be rejected.
        with self.assertRaises(ValidationError):
            province_validation(StateModelFactory(
                name=ProvinceModelFactory.PROVINCE_NORMANDY,
                iso_code=ProvinceModelFactory.ISO_3166_2_NORMANDY,
                country=self.country_usa()))
class StateValidationTestCase(ValidationTestCase):
    """State validation unit test class."""
    def test_state_validation(self):
        state_validation(StateModelFactory(
            name="New Jersey", iso_code="US-NJ",
            country=self.country_usa()))

    def test_state_validation_invalid_iso(self):
        # Over-long ISO 3166-2 code must be rejected.
        with self.assertRaises(ValidationError):
            state_validation(StateModelFactory(
                name="New Jersey",
                iso_code="US-NJT",
                country=self.country_usa()))

    def test_state_validation_invalid_name(self):
        # Name that does not match the ISO code must be rejected.
        with self.assertRaises(ValidationError):
            state_validation(StateModelFactory(
                name="Old Jersey",
                iso_code="US-NJ",
                country=self.country_usa()))

    def test_state_validation_invalid_country(self):
        # A US state paired with the France country must be rejected.
        with self.assertRaises(ValidationError):
            state_validation(StateModelFactory(
                name="New Jersey",
                iso_code="US-NJ",
                country=self.country_france()))
|
{
"content_hash": "3ac13bb45f9f6caacd1bd1fb1c9fe593",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 77,
"avg_line_length": 37.02,
"alnum_prop": 0.6600036016567621,
"repo_name": "ajaniv/django-core-models",
"id": "7b96c2436d0110a8e44b9488d0581760d3bf1076",
"size": "5553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_core_models/locations/tests/test_validation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "402856"
},
{
"name": "Shell",
"bytes": "2154"
}
],
"symlink_target": ""
}
|
from selenium.webdriver.common.by import By
from pages.base import Page
class Message(Page):
    """Page region wrapping a single feedback message on the dashboard.

    All locators are resolved relative to the root element supplied to the
    constructor, so several Message regions can coexist on one page.
    """

    _type_locator = (By.CSS_SELECTOR, 'span.sprite')
    _body_locator = (By.CSS_SELECTOR, 'p')
    # BUG FIX: click_timestamp referenced self._time_locator, which was never
    # defined and raised AttributeError whenever it was called.  The meta
    # list's first item appears to hold the timestamp link.
    # TODO(review): confirm this selector against the message template.
    _time_locator = (By.CSS_SELECTOR, '.meta li:nth-child(1) a')
    _platform_locator = (By.CSS_SELECTOR, '.meta li:nth-child(3) a')
    _locale_locator = (By.CSS_SELECTOR, '.meta li:nth-child(4) a')
    _site_locator = (By.CSS_SELECTOR, '.meta li:nth-child(5)')
    _permalink_locator = (By.CSS_SELECTOR, '.meta li:nth-child(5) a')

    def __init__(self, testsetup, element):
        Page.__init__(self, testsetup)
        # Root element of this message; all lookups below are scoped to it.
        self._root_element = element

    def click_locale(self):
        """Follow this message's locale filter link."""
        self._root_element.find_element(*self._locale_locator).click()

    def click_timestamp(self):
        """Follow this message's timestamp link."""
        self._root_element.find_element(*self._time_locator).click()

    @property
    def is_locale_visible(self):
        """Whether the locale link is currently visible."""
        return self.is_element_visible(self._locale_locator)

    @property
    def type(self):
        """Text of the message-type sprite."""
        return self._root_element.find_element(*self._type_locator).text

    @property
    def body(self):
        """The message body text."""
        return self._root_element.find_element(*self._body_locator).text

    @property
    def locale(self):
        """Locale label text (e.g. a language tag)."""
        return self._root_element.find_element(*self._locale_locator).text

    @property
    def platform(self):
        """Platform label text."""
        return self._root_element.find_element(*self._platform_locator).text

    @property
    def site(self):
        """Site label text."""
        return self._root_element.find_element(*self._site_locator).text

    @property
    def response_id(self):
        """ID segment taken from the end of the permalink URL."""
        permalink = self._root_element.find_element(*self._permalink_locator)
        link = permalink.get_attribute('href')
        return link.split('/')[-1]
|
{
"content_hash": "27d3db4f0360f9849a2b1979ad0375da",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 31.07547169811321,
"alnum_prop": 0.6484517304189436,
"repo_name": "staranjeet/fjord",
"id": "4828ee9e548a65899e519adc627fd12ef6dd9ad4",
"size": "1870",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "smoketests/pages/regions/message.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "158619"
},
{
"name": "HTML",
"bytes": "127302"
},
{
"name": "JavaScript",
"bytes": "296754"
},
{
"name": "Python",
"bytes": "853569"
},
{
"name": "Shell",
"bytes": "11673"
},
{
"name": "Smarty",
"bytes": "780"
}
],
"symlink_target": ""
}
|
"""Descriptive statistics and diagnostic plots for a small integer sample.

Prints the relative frequency of each distinct value, then saves a box
plot, a histogram and a normal Q-Q plot of the data as PNG files.
"""
import numpy as np
import collections
import scipy.stats as stats
import matplotlib.pyplot as plt

x = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4, 4, 4, 4, 5, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 9, 9]

# calculate frequency of each distinct value
c = collections.Counter(x)
# calculate the number of instances in the list
count_sum = sum(c.values())
# BUG FIX: Counter.iteritems() is Python 2 only; .items() works on both 2 and 3.
for k, v in c.items():
    print("The frequency of number " + str(k) + " is " + str(float(v) / count_sum))

# create box plot
plt.figure()
plt.boxplot(x)
plt.savefig("x_array_boxplot.png")

# create histogram
# BUG FIX: open a fresh figure first -- previously the histogram was drawn on
# top of the box plot, so x_array_histogram.png contained both plots.
plt.figure()
plt.hist(x, histtype='bar')
plt.savefig("x_array_histogram.png")

# create qq plot against a normal distribution
# (removed unused `test_data` and `graph1` locals)
plt.figure()
stats.probplot(x, dist="norm", plot=plt)
plt.savefig("x_array_qqplot.png")
|
{
"content_hash": "cbf55746f01f2e4e3ca2f4a76e82133d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 100,
"avg_line_length": 23.055555555555557,
"alnum_prop": 0.6650602409638554,
"repo_name": "ttglennhall/simple_data_analysis_python",
"id": "2a0239d69295f1e8bcb2c92a22eaf3ccd1c41d73",
"size": "830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prob.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6468"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from pathlib import Path
import easyargs
import nbconvert
@contextmanager
def _tmpnotebook(data, fname="notebook"):
filepath = ""
try:
fid, filepath = tempfile.mkstemp(suffix=".py", prefix=f"{fname}_")
os.write(fid, data.encode("utf8"))
os.write(fid, b"\n")
os.close(fid)
yield filepath
finally:
if os.path.isfile(filepath):
print(filepath)
os.remove(filepath)
def _notebook_to_py(notebook):
    """Convert a notebook file to Python source containing only code cells."""
    exporter = nbconvert.PythonExporter()
    for section in ("markdown", "output", "raw", "unknown", "header"):
        setattr(exporter, f"exclude_{section}", True)
    source, _resources = exporter.from_file(notebook)
    # Comment out IPython magic invocations so flake8 can parse the result.
    source = source.replace("get_ipython()", "# get_ipython()")
    # The last two elements are a lint added by nbconvert (extra lines).
    return source[:-2]
@easyargs
def lint(notebook):
    """Run flake8 over a notebook's code cells and exit with flake8's status."""
    ignored_checks = [
        "E226",  # Missing whitespace around arithmetic operator.
        "E402",  # Module level import not at top of file.
        "E703",  # Statement ends with a semicolon.
        "E731",  # Do not assign a lambda expression, use a def.
        "I100",  # Your import statements are in the wrong order.
        "I201",  # Missing newline between import groups.
        "I202",  # Additional newline in a group of imports.
        "W391",  # Blank line at end of file.
        "W504",  # Line break after binary operator.
    ]
    source = _notebook_to_py(notebook)
    stem = Path(notebook).stem
    with _tmpnotebook(source, fname=stem) as tmpfile:
        result = subprocess.run(
            [
                "flake8",
                tmpfile,
                "--ignore={}".format(",".join(ignored_checks)),
                "--show-source",
                "--max-line-length=999",
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
    sys.stdout.write(result.stdout.decode())
    sys.stdout.flush()
    sys.exit(result.returncode)
if __name__ == "__main__":
    # CLI entry point: `python nbflake8.py <notebook>`; argument parsing is
    # handled by the @easyargs decorator on lint().
    lint()
|
{
"content_hash": "eaad771bfae9532a954ca389c13568d4",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 74,
"avg_line_length": 29.52,
"alnum_prop": 0.5934959349593496,
"repo_name": "ocefpaf/notebooks_demos",
"id": "1671664cfe3807bfb8d8cd4bc3934953712d17a2",
"size": "2214",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nbflake8.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "29957155"
},
{
"name": "Python",
"bytes": "6757"
},
{
"name": "Shell",
"bytes": "909"
},
{
"name": "Smarty",
"bytes": "2387"
}
],
"symlink_target": ""
}
|
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol
class EchoClientProtocol(WebSocketClientProtocol):
    """
    Example WebSocket client protocol: sends a greeting on connect and
    re-sends it one second after every echoed text frame.
    """

    def sendHello(self):
        """Send one UTF-8 text frame containing the greeting."""
        self.sendMessage("Hello, world!".encode('utf8'))

    def onOpen(self):
        """Start the echo loop as soon as the connection is established."""
        self.sendHello()

    def onMessage(self, payload, isBinary):
        """Log echoed text frames and schedule the next greeting."""
        if isBinary:
            return
        print("Text message received: {}".format(payload.decode('utf8')))
        self.factory.reactor.callLater(1, self.sendHello)
class EchoClientFactory(WebSocketClientFactory):
    """
    Example WebSocket client factory. This creates a new instance of our protocol
    when the client connects to the server.
    """

    # Protocol class instantiated once per established connection.
    protocol = EchoClientProtocol
if __name__ == '__main__':

    import sys
    import argparse

    from twisted.python import log
    from twisted.internet.endpoints import clientFromString

    # parse command line arguments
    ##
    parser = argparse.ArgumentParser()

    parser.add_argument("-d", "--debug", action="store_true",
                        help="Enable debug output.")

    parser.add_argument("--websocket", default="tcp:127.0.0.1:9000",
                        help='WebSocket client Twisted endpoint descriptor, e.g. "tcp:127.0.0.1:9000" or "unix:/tmp/mywebsocket".')

    parser.add_argument("--wsurl", default=u"ws://127.0.0.1:9000",
                        help='WebSocket URL (must suit the endpoint), e.g. ws://127.0.0.1:9000.')

    args = parser.parse_args()

    # start Twisted logging to stdout
    log.startLogging(sys.stdout)

    # we use an Autobahn utility to import the "best" available Twisted reactor
    # NOTE: install_reactor() must run before anything touches the default
    # reactor, which is why the endpoint is only created afterwards.
    from autobahn.choosereactor import install_reactor
    reactor = install_reactor()
    print("Running on reactor {}".format(reactor))

    # start a WebSocket client: the factory carries the WS URL, the endpoint
    # descriptor decides the actual transport to connect over.
    wsfactory = EchoClientFactory(args.wsurl)
    wsclient = clientFromString(reactor, args.websocket)
    wsclient.connect(wsfactory)

    # now enter the Twisted reactor loop
    reactor.run()
|
{
"content_hash": "9b11f86697ef9b35955e5bc9ce14188e",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 131,
"avg_line_length": 29.859154929577464,
"alnum_prop": 0.6698113207547169,
"repo_name": "RyanHope/AutobahnPython",
"id": "e053a0bfa0c2cc39e251cbc9708e0978c24a6068",
"size": "3397",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/twisted/websocket/echo_endpoints/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3648"
},
{
"name": "Python",
"bytes": "983364"
}
],
"symlink_target": ""
}
|
from coffee_machine_control.srv import *
import rospy, time
import RPi.GPIO as GPIO
import pickle
# capsule type: (dispenser position, capsule count)
coffee_capsule_dispenser={
    'coffee_type: chocolate':(1,10),
    'coffee_type: caramel':(2,10),
    'coffee_type: christmas':(3,10),
    'coffee_type: vanilla':(4,10)
}

# Last persisted dispenser position, restored across restarts.
# NOTE(review): module fails at import time if /home/pi/settings.p is missing.
current_coffee_capsule_dispenser_position = pickle.load(open("/home/pi/settings.p", 'rb'))

# Stepper steps that make up a quarter turn of the capsule carousel.
steps_per_quarter_rotation = 128
# Loader (tray) DC motor activation times, in seconds.
loader_motor_reverse_activation_time = 3
loader_motor_forward_activation_time = 1
loader_motor_partialforward_activation_time = 1
freq = 230
# NOTE(review): under Python 2 (this file uses print statements) 1/230 is
# integer division and yields 0; `period` is unused in this file -- verify
# before relying on it.
period = 1/freq

# BCM pin numbers for the stepper coils and the auxiliary outputs.
stepper_output_gpio_bus = [17, 22, 23, 4]
motor_reverse_gpio = 24
motor_forward_gpio = 25
coffee_switch_gpio = 27

# Half-step sequence for the 4-coil stepper (8 phases per electrical cycle).
step_sequence = [
    [1, 0, 0, 0],
    [1, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 1, 1, 0],
    [0, 0, 1, 0],
    [0, 0, 1, 1],
    [0, 0, 0, 1],
    [1, 0, 0, 1]
]
def setup_gpio():
    """Configure every GPIO channel used by the machine as a low output."""
    GPIO.setmode(GPIO.BCM)
    output_channels = stepper_output_gpio_bus + [
        motor_forward_gpio, motor_reverse_gpio, coffee_switch_gpio]
    for channel in output_channels:
        GPIO.setup(channel, GPIO.OUT)
        GPIO.output(channel, False)
def setup_coffee_for_manual_vending(coffee_type):
print "Coffee Machine: "+str(coffee_type)+" selected."
if (coffee_capsule_dispenser[str(coffee_type)][1] == 0):
return coffee_machineResponse(False, "Out of chosen capsules")
# Rotate capsule holder to select chosen capsule
print "Coffee Machine: Current capsule dispenser position is "+str(current_coffee_capsule_dispenser_position)
print "Coffee Machine: Rotating capsule dispenser to position "+str(coffee_capsule_dispenser[str(coffee_type)][0])
pickle.dump(coffee_capsule_dispenser[str(coffee_type)][0], open("/home/pi/settings.p", "wb"))
quarter_rotation_steps_needed = (coffee_capsule_dispenser[str(coffee_type)][0] - current_coffee_capsule_dispenser_position) % 4
for quarter_rotation_count in range(0, quarter_rotation_steps_needed):
for step_count in range(1, steps_per_quarter_rotation):
for step in range(0, 8):
for gpio_channel in range(0, 4):
if (step_sequence[step][gpio_channel] == 0):
GPIO.output(stepper_output_gpio_bus[gpio_channel], False)
else:
GPIO.output(stepper_output_gpio_bus[gpio_channel], True)
time.sleep(0.01)
# Open capsule tray to allow capsule to be loaded
GPIO.output(motor_forward_gpio, True)
time.sleep(loader_motor_partialforward_activation_time)
GPIO.output(motor_forward_gpio, False)
# Pause to let capsule drop
time.sleep(1)
# Complete capsule opening
GPIO.output(motor_forward_gpio, True)
time.sleep(loader_motor_forward_activation_time)
GPIO.output(motor_forward_gpio, False)
# Disable stepper to prevent overheating
for gpio_channel in stepper_output_gpio_bus:
GPIO.setup(gpio_channel, GPIO.OUT)
GPIO.output(gpio_channel, False)
print "Coffee Machine: Capsule selected and added to loader"
time.sleep(1)
# Load selected capsule into machine
print "Coffee Machine: Loading capsule into Nespresso machine"
GPIO.output(motor_reverse_gpio, True)
time.sleep(loader_motor_reverse_activation_time)
GPIO.output(motor_reverse_gpio, False)
print "Coffee Machine: Capsule loaded into Nespresso machine, ready for coffee vend"
# Dispense coffee
#Push Button
for count in range(0, 100):
GPIO.output(27, 1)
time.sleep(0.0016)
GPIO.output(27, 0)
time.sleep(0.0016)
#Retract ServoMotor
for count in range(0, 100):
GPIO.output(27, 1)
time.sleep(0.003)
GPIO.output(27, 0)
time.sleep(0.003)
# Decrement capsule count
coffee_capsule_dispenser[str(coffee_type)] = (coffee_capsule_dispenser[str(coffee_type)][0], coffee_capsule_dispenser[str(coffee_type)][1] - 1)
print "Coffee Machine: "+str(coffee_capsule_dispenser[str(coffee_type)][1])+" of "+str(coffee_type)+" remaining"
return coffee_machineResponse(True, "Ready")
def coffee_machine_control_server():
    # Initialise GPIO, register the ROS 'coffee_machine' service backed by
    # setup_coffee_for_manual_vending, then block in the ROS event loop.
    setup_gpio()
    rospy.init_node('coffee_machine_control_server')
    rospy.Service('coffee_machine', coffee_machine, setup_coffee_for_manual_vending)
    print "Ready to load capsules"
    rospy.spin()

if __name__ == "__main__":
    coffee_machine_control_server()
|
{
"content_hash": "ff30ba53e171c3aa0e8fe97372aba0c4",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 148,
"avg_line_length": 33.77037037037037,
"alnum_prop": 0.677780214959421,
"repo_name": "Barista-Bot/coffee-machine-control",
"id": "b3c1dbfd7e87045d53dd22297b39bc3635b85ef0",
"size": "4582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/coffee_machine_server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5126"
}
],
"symlink_target": ""
}
|
"""Prepares the views for point scoreboard widget."""
import datetime
from apps.managers.challenge_mgr import challenge_mgr
from apps.managers.player_mgr import player_mgr
from apps.managers.team_mgr import team_mgr
def supply(request, page_name):
    """Supply the view_objects content for this widget, which is all the scoreboard data."""
    user = request.user
    team = user.profile.team
    # The status page shows full standings; everywhere else is capped at 10.
    num_results = 10 if page_name != "status" else None
    round_standings = {}
    current_round = challenge_mgr.get_round_name()
    today = datetime.datetime.today()
    rounds = challenge_mgr.get_all_round_info()["rounds"]
    for key in rounds.keys():
        # 1. always display current round
        # 2. if not future round
        #    a. display the round with the "display_scoreboard" flag
        #    b. display in the status page
        if rounds[key]["start"] <= today and \
           (rounds[key]["display_scoreboard"] or page_name == "status"):
            round_standings[key] = {
                #"group_standings": team_mgr.group_points_leaders(num_results, key),
                "team_standings": team_mgr.team_points_leaders(num_results, key),
                "profile_standings": player_mgr.points_leaders(num_results, key),
                #"group_participation": team_mgr.group_active_participation(num_results, key) if \
                #    page_name == "status" else None,
                # Participation data is only needed on the status page.
                "team_participation": team_mgr.team_active_participation(num_results, key) if \
                    page_name == "status" else None,
                # Per-team leaders are shown only when the user has a team and
                # is not on the status page.
                "user_team_standings": team.points_leaders(num_results, key) if \
                    team and page_name != "status" else None,
            }
    # NOTE(review): the triple-quoted block below is dead code kept as a
    # disabled "Overall" scoreboard; it is an inert string expression.
    """
    # add an overall scoreboard
    round_standings["Overall"] = {
        #"group_standings": team_mgr.group_points_leaders(num_results, "Overall"),
        "team_standings": team_mgr.team_points_leaders(num_results, "Overall"),
        "profile_standings": player_mgr.points_leaders(num_results, "Overall"),
        #"group_participation": team_mgr.group_active_participation(num_results, "Overall") if\
        #    page_name == "status" else None,
        "team_participation": team_mgr.team_active_participation(num_results, "Overall") if \
            page_name == "status" else None,
    }
    """
    # Number of carousel panes = number of configured rounds.
    count = len(rounds)
    return {
        "profile": user.profile,
        "team": team,
        "current_round": current_round,
        "round_standings": round_standings,
        # The status page renders everything flat, without the carousel.
        "no_carousel": page_name == "status",
        "range": count,
        "user": user,
    }
def remote_supply(request, page_name):
    """Supply the same scoreboard view_objects content to remote views."""
    return supply(request, page_name=page_name)
|
{
"content_hash": "9c18a6b82eb26a366188ca135b590e7b",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 98,
"avg_line_length": 42.4,
"alnum_prop": 0.6066763425253991,
"repo_name": "yongwen/makahiki",
"id": "1ecf601755badb88fd9e93e0cbab1a104523def6",
"size": "2756",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "makahiki/apps/widgets/scoreboard/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "107603"
},
{
"name": "HTML",
"bytes": "568630"
},
{
"name": "JavaScript",
"bytes": "244377"
},
{
"name": "Python",
"bytes": "1489909"
},
{
"name": "Shell",
"bytes": "20118"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import inspect
from functools import wraps
from collections import OrderedDict
from contextlib import contextmanager
import copy
import tensorflow as tf
from tfutils.crossdevice_batchnorm import crossgpu_batch_norm, CRTPUBatchNormalization
import numpy as np
def initializer(kind='xavier', *args, **kwargs):
    """Return a TF variable initializer of the requested kind.

    'xavier' maps to the contrib Xavier initializer, 'normal' to the local
    fan-out-scaled normal_initializer, and anything else resolves to the
    `tf.<kind>_initializer` factory called with *args / **kwargs.
    """
    if kind == 'normal':
        return normal_initializer
    if kind == 'xavier':
        return tf.contrib.layers.xavier_initializer(*args, **kwargs)
    return getattr(tf, kind + '_initializer')(*args, **kwargs)
def normal_initializer(shape, dtype=None, partition_info=None):
    """Fan-out-scaled normal initializer (He-style), used for EfficientNets.

    Expects a conv kernel shape (H, W, C_in, C_out) and draws from
    N(0, sqrt(2 / (H * W * C_out))).
    """
    kernel_h, kernel_w, _, channels_out = shape
    fan_out = int(kernel_h * kernel_w * channels_out)
    stddev = np.sqrt(2.0 / fan_out)
    return tf.random_normal(shape, mean=0.0, stddev=stddev, dtype=dtype)
def groupnorm(inputs, G=32, data_format='channels_last', weight_decay=0.0, epsilon=1e-5, trainable=True, gamma_init=1, beta_init=0):
    '''
    Like LayerNorm, z-scores features along the channel dimension only.
    However, it only normalizes within G groups of C/G channels each.
    Optionally applies learnable scale/shift parameters.

    NOTE(review): uses static shapes from get_shape().as_list(), so the
    batch dimension must be known at graph-build time -- confirm callers
    never feed a dynamic batch size here.
    '''
    assert len(inputs.shape.as_list()) == 4, "Applies only to conv2D layers"
    # Internally always works in NHWC; transpose in and back out if needed.
    if data_format == 'channels_first':
        inputs = tf.transpose(inputs, [0,2,3,1])
    elif data_format == 'channels_last':
        pass
    else:
        raise ValueError("data_format must be 'channels_first' or 'channels_last'")
    B,H,W,C = inputs.shape.as_list()
    assert C % G == 0, "num groups G must divide C"
    CpG = C // G
    # Split channels into G groups and normalize over (H, W, channels-in-group).
    inputs = tf.reshape(inputs, [B,H,W,CpG,G])
    mean, var = tf.nn.moments(inputs, axes=[1,2,3], keep_dims=True)
    inputs = tf.div(inputs - mean, tf.sqrt(var + epsilon))
    inputs = tf.reshape(inputs, [B,H,W,C])
    if trainable:
        # Learnable per-channel scale/shift (weight decay intentionally off).
        gamma = tf.get_variable("groupnorm_scale", shape=[1,1,1,C], dtype=tf.float32,
                                initializer=initializer("constant", float(gamma_init)))
                                # regularizer=tf.contrib.layers.l2_regularizer(weight_decay))
        beta = tf.get_variable("groupnorm_shift", shape=[1,1,1,C], dtype=tf.float32,
                               initializer=initializer("constant", float(beta_init)))
                               # regularizer=tf.contrib.layers.l2_regularizer(weight_decay))
    else:
        gamma = tf.constant(gamma_init, dtype=tf.float32)
        beta = tf.constant(beta_init, dtype=tf.float32)
    inputs = gamma*inputs + beta
    if data_format == 'channels_first':
        inputs = tf.transpose(inputs, [0,3,1,2])
    print("applied group norm to", inputs.name.split('/')[:-1])
    return inputs
def batchnorm_corr(inputs, is_training, data_format='channels_last',
                   decay = 0.9, epsilon = 1e-5, init_zero=None, constant_init=None,
                   activation=None, time_suffix=None, bn_trainable=True,
                   use_crossgpu_bn=False, num_dev=None, use_crtpu_bn=False):
    """Batch-normalize `inputs`, dispatching to one of three backends.

    Backends: cross-GPU batch norm (use_crossgpu_bn), cross-replica TPU
    batch norm (use_crtpu_bn), or plain fused tf.layers batch norm.
    `time_suffix` gives each timestep its own BN statistics (variables are
    created per-suffix under tf.AUTO_REUSE); gamma is initialized from
    `constant_init` if given, else zeros when there is no activation and
    ones otherwise (the `init_zero` override).
    """
    if time_suffix is not None:
        bn_op_name = "post_conv_BN_" + time_suffix
        reuse_flag = tf.AUTO_REUSE  # create bn variables per timestep if they do not exist
    else:
        bn_op_name = "post_conv_BN"
        reuse_flag = None

    # if activation is none, should use zeros; else ones
    if constant_init is None:
        if init_zero is None:
            init_zero = True if activation is None else False
        if init_zero:
            gamma_init = tf.zeros_initializer()
        else:
            gamma_init = tf.ones_initializer()
    else:
        gamma_init = tf.constant_initializer(constant_init)

    if use_crossgpu_bn:
        output = crossgpu_batch_norm(inputs=inputs,
                                     decay=decay,
                                     epsilon=epsilon,
                                     is_training=is_training,
                                     data_format=data_format,
                                     trainable=bn_trainable,
                                     gamma_initializer=gamma_init,
                                     scope=bn_op_name,
                                     reuse=reuse_flag,
                                     num_dev=num_dev)
    elif use_crtpu_bn:
        axis = 1 if data_format == 'channels_first' else 3
        crtpu_bn_func = CRTPUBatchNormalization(axis=axis,
                                                momentum=decay,
                                                epsilon=epsilon,
                                                center=True,
                                                scale=True,
                                                trainable=bn_trainable,
                                                gamma_initializer=gamma_init,
                                                name=bn_op_name,
                                                _reuse=reuse_flag,
                                                _scope=bn_op_name)
        output = crtpu_bn_func(inputs, training=is_training)
    else:
        axis = 1 if data_format == 'channels_first' else 3
        output = tf.layers.batch_normalization(inputs=inputs,
                                               axis=axis,
                                               momentum=decay,
                                               epsilon=epsilon,
                                               center=True,
                                               scale=True,
                                               training=is_training,
                                               trainable=bn_trainable,
                                               fused=True,
                                               gamma_initializer=gamma_init,
                                               name=bn_op_name,
                                               reuse=reuse_flag)
    return output
def conv(inp,
         out_depth,
         ksize=[3,3],
         strides=[1,1,1,1],
         data_format='channels_last',
         padding='SAME',
         kernel_init='xavier',
         kernel_init_kwargs=None,
         use_bias=True,
         bias=0,
         weight_decay=None,
         activation='relu',
         batch_norm=False,
         group_norm=False,
         num_groups=32,
         is_training=False,
         batch_norm_decay=0.9,
         batch_norm_epsilon=1e-5,
         batch_norm_gamma_init=None,
         init_zero=None,
         dropout=None,
         dropout_seed=0,
         time_sep=False,
         time_suffix=None,
         bn_trainable=True,
         crossdevice_bn_kwargs={},
         name='conv'
         ):
    """2-D convolution with optional dropout, batch/group norm and activation.

    Order of ops: dropout -> conv2d -> bias -> batch/group norm -> activation.
    Bias is suppressed automatically when any normalization is enabled.
    `out_depth=None` keeps the input channel count.
    NOTE(review): in_depth is read from the last axis of `inp`, so the input
    is effectively assumed channels_last here -- confirm for NCHW callers.
    """
    # assert out_shape is not None
    if time_sep:
        assert time_suffix is not None

    if batch_norm or group_norm:
        use_bias = False

    if weight_decay is None:
        weight_decay = 0.
    # Allow scalar ksize/strides shorthand.
    if isinstance(ksize, int):
        ksize = [ksize, ksize]
    if isinstance(strides, int):
        strides = [1, strides, strides, 1]
    if kernel_init_kwargs is None:
        kernel_init_kwargs = {}
    in_depth = inp.get_shape().as_list()[-1]
    if out_depth is None:
        out_depth = in_depth

    # weights
    init = initializer(kernel_init, **kernel_init_kwargs)
    kernel = tf.get_variable(initializer=init,
                             shape=[ksize[0], ksize[1], in_depth, out_depth],
                             dtype=tf.float32,
                             regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                             name='weights')
    if use_bias:
        init = initializer(kind='constant', value=bias)
        biases = tf.get_variable(initializer=init,
                                 shape=[out_depth],
                                 dtype=tf.float32,
                                 regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                                 name='bias')
    # ops
    if dropout is not None:
        inp = tf.nn.dropout(inp, keep_prob=dropout, seed=dropout_seed, name='dropout')
    conv = tf.nn.conv2d(inp, kernel,
                        strides=strides,
                        padding=padding)
    if use_bias:
        output = tf.nn.bias_add(conv, biases, name=name)
    else:
        output = tf.identity(conv, name=name)

    if batch_norm:
        output = batchnorm_corr(inputs=output,
                                is_training=is_training,
                                data_format=data_format,
                                decay = batch_norm_decay,
                                epsilon = batch_norm_epsilon,
                                constant_init=batch_norm_gamma_init,
                                init_zero=init_zero,
                                activation=activation,
                                time_suffix=time_suffix,
                                bn_trainable=bn_trainable,
                                **crossdevice_bn_kwargs)
    elif group_norm:
        output = groupnorm(inputs=output,
                           G=num_groups,
                           data_format=data_format,
                           weight_decay=weight_decay,
                           gamma_init=(0.0 if init_zero else 1.0),
                           epsilon=batch_norm_epsilon)

    if activation is not None:
        output = getattr(tf.nn, activation)(output, name=activation)
    return output
def conv_bnf(inp,
             out_depth,
             ksize=[3,3],
             strides=[1,1,1,1],
             padding='SAME',
             kernel_init='xavier',
             kernel_init_kwargs=None,
             bias=0,
             weight_decay=None,
             activation='relu6',
             batch_norm=True,
             is_training=True,
             batch_norm_decay=0.9,
             batch_norm_epsilon=1e-5,
             init_zero=None,
             data_format='channels_last',
             time_sep=False,
             time_suffix=None,
             bn_trainable=True,
             crossdevice_bn_kwargs={},
             name='conv_bnf'
             ):
    """Conv2D followed by batch norm (no bias) or bias (no norm), then activation.

    MobileNet-style "conv + BN fused" building block: when `batch_norm` is
    True (the default) the conv output is normalized and no bias is added;
    otherwise a bias is added instead.
    """
    # assert out_shape is not None
    if time_sep:
        assert time_suffix is not None

    if weight_decay is None:
        weight_decay = 0.
    # Allow scalar ksize/strides shorthand.
    if isinstance(ksize, int):
        ksize = [ksize, ksize]
    if isinstance(strides, int):
        strides = [1, strides, strides, 1]
    if kernel_init_kwargs is None:
        kernel_init_kwargs = {}
    in_depth = inp.get_shape().as_list()[-1]

    # weights
    init = initializer(kernel_init, **kernel_init_kwargs)
    kernel = tf.get_variable(initializer=init,
                             shape=[ksize[0], ksize[1], in_depth, out_depth],
                             dtype=tf.float32,
                             regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                             name='weights')

    # ops
    conv = tf.nn.conv2d(inp, kernel,
                        strides=strides,
                        padding=padding)

    if batch_norm:
        # if activation is none, should use zeros; else ones
        # BUG FIX: this previously passed `inputs=output`, but `output` was
        # not yet assigned on this path, raising UnboundLocalError on the
        # default batch_norm=True configuration.  Normalize the conv result.
        output = batchnorm_corr(inputs=conv,
                                is_training=is_training,
                                data_format=data_format,
                                decay = batch_norm_decay,
                                epsilon = batch_norm_epsilon,
                                init_zero=init_zero,
                                activation=activation,
                                time_suffix=time_suffix,
                                bn_trainable=bn_trainable,
                                **crossdevice_bn_kwargs)
    else:
        init = initializer(kind='constant', value=bias)
        biases = tf.get_variable(initializer=init,
                                 shape=[out_depth],
                                 dtype=tf.float32,
                                 regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                                 name='bias')
        output = tf.nn.bias_add(conv, biases, name=name)

    if activation is not None:
        output = getattr(tf.nn, activation)(output, name=activation)
    return output
def depthsep_conv(inp,
                  out_depth,
                  multiplier=1,
                  ksize=3,
                  strides=1,
                  dep_padding='SAME',
                  sep_padding='SAME',
                  batch_norm = True,
                  is_training=True,
                  name='depthsep_conv',
                  *args,
                  **kwargs
                  ):
    """Depthwise-separable convolution: depthwise conv then 1x1 pointwise conv.

    Extra positional/keyword args are forwarded to both sub-layers.
    """
    with tf.variable_scope('depthwise_conv'):
        depthwise_out = depth_conv(inp,
                                   multiplier=multiplier,
                                   ksize=ksize,
                                   strides=strides,
                                   padding=dep_padding,
                                   batch_norm=batch_norm,
                                   is_training=is_training,
                                   *args, **kwargs)
    with tf.variable_scope('pointwise_conv'):
        # we batch norm first according to mobilenet paper
        pointwise_out = conv_bnf(depthwise_out,
                                 out_depth=out_depth,
                                 ksize=1,
                                 strides=1,
                                 padding=sep_padding,
                                 batch_norm=batch_norm,
                                 is_training=is_training,
                                 *args, **kwargs)
    return pointwise_out
def depth_conv(inp,
               multiplier=1,
               out_depth=None,
               ksize=3,
               strides=1,
               padding='SAME',
               kernel_init='xavier',
               kernel_init_kwargs=None,
               activation='relu6',
               weight_decay=None,
               batch_norm = False,
               group_norm=False,
               num_groups=32,
               use_bias=False,
               is_training=True,
               batch_norm_decay=0.9,
               batch_norm_epsilon=1e-5,
               batch_norm_gamma_init=None,
               init_zero=None,
               data_format='channels_last',
               time_sep=False,
               time_suffix=None,
               bn_trainable=True,
               crossdevice_bn_kwargs={},
               name='depth_conv'
               ):
    """Depthwise 2-D convolution with optional batch/group norm or bias,
    followed by an optional activation.

    Output channels are always in_depth * multiplier.
    """
    # assert out_shape is not None
    if time_sep:
        assert time_suffix is not None

    if weight_decay is None:
        weight_decay = 0.
    # Allow scalar ksize/strides shorthand.
    if isinstance(ksize, int):
        ksize = [ksize, ksize]
    if isinstance(strides, int):
        strides = [1, strides, strides, 1]
    if kernel_init_kwargs is None:
        kernel_init_kwargs = {}
    in_depth = inp.get_shape().as_list()[-1]
    # NOTE(review): the `out_depth` parameter is unconditionally overwritten
    # here, so any caller-supplied value is silently ignored (it is only used
    # for the bias shape below, where in_depth * multiplier is the correct
    # size anyway) -- confirm before honoring the parameter.
    out_depth = multiplier * in_depth

    # weights
    init = initializer(kernel_init, **kernel_init_kwargs)
    kernel = tf.get_variable(initializer=init,
                             shape=[ksize[0], ksize[1], in_depth, multiplier],
                             dtype=tf.float32,
                             regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                             name='weights')

    output = tf.nn.depthwise_conv2d(inp, kernel,
                                    strides=strides,
                                    padding=padding)

    if batch_norm:
        output = batchnorm_corr(inputs=output,
                                is_training=is_training,
                                data_format=data_format,
                                decay = batch_norm_decay,
                                epsilon = batch_norm_epsilon,
                                constant_init=batch_norm_gamma_init,
                                init_zero=init_zero,
                                activation=activation,
                                time_suffix=time_suffix,
                                bn_trainable=bn_trainable,
                                **crossdevice_bn_kwargs)
    elif group_norm:
        output = groupnorm(inputs=output,
                           G=num_groups,
                           data_format=data_format,
                           weight_decay=weight_decay,
                           gamma_init=(0.0 if init_zero else 1.0),
                           epsilon=batch_norm_epsilon)
    elif use_bias:
        # Bias is only added when no normalization is applied.
        init = initializer(kind='constant', value=1.0)
        biases = tf.get_variable(initializer=init,
                                 shape=[out_depth],
                                 dtype=tf.float32,
                                 regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                                 name='bias')
        output = tf.nn.bias_add(output, biases, name=name)

    if activation is not None:
        output = getattr(tf.nn, activation)(output, name=activation)
    return output
def fc(inp,
       out_depth,
       kernel_init='xavier',
       kernel_init_kwargs=None,
       use_bias=True,
       bias=1,
       weight_decay=None,
       activation='relu',
       batch_norm=False,
       is_training=False,
       batch_norm_decay=0.9,
       batch_norm_epsilon=1e-5,
       init_zero=None,
       dropout=None,
       dropout_seed=0,
       time_sep=False,
       time_suffix=None,
       bn_trainable=True,
       crossdevice_bn_kwargs={},
       name='fc'):
    """Fully-connected layer: flatten -> dropout -> matmul -> bias ->
    activation -> optional batch norm.

    Note that unlike `conv`, batch norm here is applied AFTER the
    activation; bias is suppressed when batch norm is on.
    """
    if batch_norm:
        use_bias = False

    if weight_decay is None:
        weight_decay = 0.
    # assert out_shape is not None
    if kernel_init_kwargs is None:
        kernel_init_kwargs = {}
    # Flatten everything but the batch dimension.
    resh = tf.reshape(inp, [inp.get_shape().as_list()[0], -1], name='reshape')
    in_depth = resh.get_shape().as_list()[-1]

    # weights
    init = initializer(kernel_init, **kernel_init_kwargs)
    kernel = tf.get_variable(initializer=init,
                             shape=[in_depth, out_depth],
                             dtype=tf.float32,
                             regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                             name='weights')
    if use_bias:
        init = initializer(kind='constant', value=bias)
        biases = tf.get_variable(initializer=init,
                                 shape=[out_depth],
                                 dtype=tf.float32,
                                 regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                                 name='bias')

    # ops
    if dropout is not None:
        resh = tf.nn.dropout(resh, keep_prob=dropout, seed=dropout_seed, name='dropout')
    fcm = tf.matmul(resh, kernel)
    if use_bias:
        output = tf.nn.bias_add(fcm, biases, name=name)
    else:
        output = tf.identity(fcm, name=name)

    if activation is not None:
        output = getattr(tf.nn, activation)(output, name=activation)

    if batch_norm:
        # if activation is none, should use zeros; else ones
        if init_zero is None:
            init_zero = True if activation is None else False
        if init_zero:
            gamma_init = tf.zeros_initializer()
        else:
            gamma_init = tf.ones_initializer()

        if time_suffix is not None:
            bn_op_name = "post_conv_BN_" + time_suffix
            reuse_flag = tf.AUTO_REUSE  # create bn variables per timestep if they do not exist
        else:
            bn_op_name = "post_conv_BN"
            reuse_flag = None

        use_crossgpu_bn = crossdevice_bn_kwargs.get('use_crossgpu_bn', False)
        use_crtpu_bn = crossdevice_bn_kwargs.get('use_crtpu_bn', False)
        if use_crossgpu_bn:
            cg_bn_kw = copy.deepcopy(crossdevice_bn_kwargs)
            cg_bn_kw.pop('use_crossgpu_bn', False)
            cg_bn_kw.pop('use_crtpu_bn', False)
            # BUG FIX: this previously passed `inputs=inputs`, but no name
            # `inputs` exists in fc (the parameter is `inp`, and the tensor
            # to normalize at this point is `output`), raising NameError.
            # Also use `is_training=` to match the crossgpu_batch_norm call
            # in batchnorm_corr.
            output = crossgpu_batch_norm(inputs=output,
                                         decay=batch_norm_decay,
                                         epsilon=batch_norm_epsilon,
                                         is_training=is_training,
                                         trainable=bn_trainable,
                                         gamma_initializer=gamma_init,
                                         scope=bn_op_name,
                                         reuse=reuse_flag,
                                         **cg_bn_kw)
        elif use_crtpu_bn:
            crtpu_bn_func = CRTPUBatchNormalization(axis=-1,
                                                    momentum=batch_norm_decay,
                                                    epsilon=batch_norm_epsilon,
                                                    center=True,
                                                    scale=True,
                                                    trainable=bn_trainable,
                                                    gamma_initializer=gamma_init,
                                                    name=bn_op_name,
                                                    _reuse=reuse_flag,
                                                    _scope=bn_op_name)
            output = crtpu_bn_func(output, training=is_training)
        else:
            output = tf.layers.batch_normalization(inputs=output,
                                                   axis=-1,
                                                   momentum=batch_norm_decay,
                                                   epsilon=batch_norm_epsilon,
                                                   center=True,
                                                   scale=True,
                                                   training=is_training,
                                                   trainable=bn_trainable,
                                                   fused=True,
                                                   gamma_initializer=gamma_init,
                                                   name=bn_op_name,
                                                   reuse=reuse_flag)
    return output
def global_pool(inp, kind='avg', keep_dims=False, name=None):
    """Global spatial pooling over an NHWC tensor.

    Pools over the full height/width with the requested kind ('avg' or
    'max'); returns either the 4-D result (keep_dims) or a flattened
    (batch, channels) tensor.
    """
    if kind not in ['max', 'avg']:
        raise ValueError('Only global avg or max pool is allowed, but'
                         'you requested {}.'.format(kind))
    if name is None:
        name = 'global_{}_pool'.format(kind)
    height, width = inp.get_shape().as_list()[1:3]
    pool_fn = getattr(tf.nn, kind + '_pool')
    pooled = pool_fn(inp,
                     ksize=[1, height, width, 1],
                     strides=[1, 1, 1, 1],
                     padding='VALID')
    if keep_dims:
        return tf.identity(pooled, name=name)
    batch = pooled.get_shape().as_list()[0]
    return tf.reshape(pooled, [batch, -1], name=name)
def avg_pool2d(inp, kernel_size, stride=2, padding='VALID', name=None):
    """2-D average pooling (thin wrapper over tf.contrib.layers.avg_pool2d)."""
    if name is None:
        name = 'avg_pool2d'
    # BUG FIX: `name` was computed but never used, so the op silently ignored
    # it; contrib layers take the op name via the `scope` argument.
    output = tf.contrib.layers.avg_pool2d(inp, kernel_size=kernel_size,
                                          stride=stride, padding=padding,
                                          scope=name)
    return output
class ConvNet(object):
    # Methods served directly by __getattribute__ rather than wrapped as
    # layer-building functions.
    INTERNAL_FUNC = ['arg_scope', '_func_wrapper', '_val2list', 'layer',
                     '_reuse_scope_name', '__call__', '_get_func']
    # Layer constructors defined in this module, preferred over tf.nn
    # functions of the same name.
    CUSTOM_FUNC = [conv, fc, global_pool, conv_bnf, depthsep_conv, depth_conv, avg_pool2d]
def __init__(self, defaults=None, name=None):
    """Set up an empty network specification.

    A quick convolutional neural network constructor: a wrapper over many
    tf.nn functions for building a standard CNN (2-D convolutions, pooling,
    fully-connected layers, and most other tf.nn methods), storing layers
    and their parameters for easy access per tfutils' approach of saving
    everything.

    Kwargs:
        - defaults: default kwargs values for functions, complementary to
          `arg_scope`.
        - name (default: ''): if '', the existing scope is used.
    """
    self.name = name
    self.state = None
    self.output = None
    self.layers = OrderedDict()
    self.params = OrderedDict()
    self._defaults = {} if defaults is None else defaults
    self._layer = None
    self._scope_initialized = False
def __getattribute__(self, attr):
    """Route attribute access: instance attributes first, then the
    internal methods listed in INTERNAL_FUNC, otherwise resolve the name
    to a layer function and return it wrapped (recording, not executing).
    """
    # object.__getattribute__ is used directly to avoid recursing back
    # into this override.
    attrs = object.__getattribute__(self, '__dict__')
    internal_func = object.__getattribute__(self, 'INTERNAL_FUNC')
    if attr in attrs:  # is it an attribute?
        return attrs[attr]
    elif attr in internal_func:  # is it one of the internal functions?
        return object.__getattribute__(self, attr)
    else:
        func = self._get_func(attr)
        return self._func_wrapper(func)
def _get_func(self, attr):
    """Resolve *attr* to one of this module's layer functions, falling
    back to the tf.nn function of the same name."""
    custom_funcs = object.__getattribute__(self, 'CUSTOM_FUNC')
    by_name = {f.__name__: f for f in custom_funcs}
    if attr in by_name:  # is it one of the custom functions?
        return by_name[attr]
    return getattr(tf.nn, attr)  # ok, so it is a tf.nn function
def _func_wrapper(self, func):
"""
A wrapper on top of *any* function that is called.
- Pops `inp` and `layer` from kwargs,
- All args are turned into kwargs
- Default values from arg_scope are set
- Sets the name in kwargs to func.__name__ if not specified
- Expands `strides` from an int or list inputs for
all functions and expands `ksize` for pool functions.
If `layer` is not None, a new scope is created, else the existing scope
is reused.
Finally, all params are stored.
"""
@wraps(func)
def wrapper(*args, **kwargs):
kwargs['func_name'] = func.__name__
# convert args to kwargs
varnames = inspect.getargspec(func).args
for i, arg in enumerate(args):
kwargs[varnames[i+1]] = arg # skip the first (inputs)
layer = kwargs.pop('layer', self._layer)
if layer not in self.params:
self.params[layer] = OrderedDict()
# update kwargs with default values defined by user
if func.__name__ in self._defaults:
kwargs.update(self._defaults[func.__name__])
if 'name' not in kwargs:
fname = func.__name__
if fname in self.params[layer]:
if fname in self.params[layer]:
i = 1
while fname + '_{}'.format(i) in self.params[layer]:
i += 1
fname += '_{}'.format(i)
kwargs['name'] = fname
spec = ['avg_pool', 'max_pool', 'max_pool_with_argmax']
if 'ksize' in kwargs and func.__name__ in spec:
kwargs['ksize'] = self._val2list(kwargs['ksize'])
if 'strides' in kwargs:
kwargs['strides'] = self._val2list(kwargs['strides'])
self.params[layer][kwargs['name']] = kwargs
return wrapper
def __call__(self, inp=None):
output = inp
for layer, params in self.params.items():
with tf.variable_scope(layer):
for func_name, kwargs in params.items():
with tf.variable_scope(func_name):
output = kwargs.get('inp', output)
if output is None:
raise ValueError('Layer {} function {} got None as input'.format(layer, func_name))
kw = {k:v for k,v in kwargs.items() if k not in ['func_name', 'inp']}
func = self._get_func(kwargs['func_name'])
output = tf.identity(func(output, **kw), name='output')
self.layers[layer] = tf.identity(output, name='output')
self.output = output
return output
def _val2list(self, value):
if isinstance(value, int):
out = [1, value, value, 1]
elif len(value) == 2:
out = [1, value[0], value[1], 1]
else:
out = value
return out
@contextmanager
def arg_scope(self, defaults):
"""
Sets the arg_scope.
Pass a dict of {<func_name>: {<arg_name>: <arg_value>, ...}, ...}. These
values will then override the default values for the specified functions
whenever that function is called.
"""
self._defaults = defaults
yield
self._defaults = {}
@contextmanager
def layer(self, name):
"""
Sets the scope. Can be used with `with`.
"""
if name is None or name == '':
raise ValueError('Layer name cannot be None or an empty string')
self._layer = name
yield
def _reuse_scope_name(self, name):
graph = tf.get_default_graph()
if graph._name_stack is not None and graph._name_stack != '':
name = graph._name_stack + '/' + name + '/' # this will reuse the already-created scope
else:
name += '/'
return name
def mnist(train=True, seed=0):
    """Build the small 3-layer fully-connected MNIST model.

    :Kwargs:
        - train (default: True): unused here; kept for interface parity
        - seed (default: 0): seed for the weight initializers
    """
    fc_defaults = {
        'kernel_init': 'truncated_normal',
        'kernel_init_kwargs': {'stddev': .01, 'seed': seed},
        'dropout': None,
        'batch_norm': False,
    }
    model = ConvNet()
    with model.arg_scope({'fc': fc_defaults}):
        model.fc(128, layer='hidden1')
        model.fc(32, layer='hidden2')
        model.fc(10, activation=None, layer='softmax_linear')
    return model
def alexnet(train=True, norm=True, seed=0, **kwargs):
    """Build an AlexNet-style model (5 conv + 3 fc layers).

    :Kwargs:
        - train (default: True): enables dropout (.5) on fc6/fc7
        - norm (default: True): adds local response normalization after conv1/conv2
        - seed (default: 0): seed for the weight initializers
    """
    conv_defaults = {'batch_norm': False,
                     'kernel_init': 'xavier',
                     'kernel_init_kwargs': {'seed': seed}}
    fc_defaults = {'batch_norm': False,
                   'kernel_init': 'truncated_normal',
                   'kernel_init_kwargs': {'stddev': .01, 'seed': seed},
                   'weight_decay': .0005,
                   'dropout_seed': 0}
    defaults = {'conv': conv_defaults,
                'weight_decay': .0005,
                'max_pool': {'padding': 'SAME'},
                'fc': fc_defaults}

    model = ConvNet(defaults=defaults)
    dropout = None
    if train:
        dropout = .5

    model.conv(96, 11, 4, padding='VALID', layer='conv1')
    if norm:
        model.lrn(depth_radius=5, bias=1, alpha=.0001, beta=.75, layer='conv1')
    model.max_pool(3, 2, layer='conv1')

    model.conv(256, 5, 1, layer='conv2')
    if norm:
        model.lrn(depth_radius=5, bias=1, alpha=.0001, beta=.75, layer='conv2')
    model.max_pool(3, 2, layer='conv2')

    model.conv(384, 3, 1, layer='conv3')
    model.conv(384, 3, 1, layer='conv4')
    model.conv(256, 3, 1, layer='conv5')
    model.max_pool(3, 2, layer='conv5')

    model.fc(4096, dropout=dropout, bias=.1, layer='fc6')
    model.fc(4096, dropout=dropout, bias=.1, layer='fc7')
    model.fc(1000, activation=None, dropout=None, bias=0, layer='fc8')
    return model
def mnist_tfutils(inputs, train=True, **kwargs):
    """tfutils entry point: build the MNIST model and return (output, params)."""
    model = mnist(train=train)
    output = model(inputs['images'])
    return output, model.params
def alexnet_tfutils(inputs, **kwargs):
    """tfutils entry point: build AlexNet and return (output, params)."""
    model = alexnet(**kwargs)
    output = model(inputs['images'])
    return output, model.params
|
{
"content_hash": "e427cda99a231db1dd8e6012a7073edc",
"timestamp": "",
"source": "github",
"line_count": 808,
"max_line_length": 132,
"avg_line_length": 38.01980198019802,
"alnum_prop": 0.49261067708333334,
"repo_name": "neuroailab/tfutils",
"id": "eb3db7c4fd84b6660cc99edad33a07a0c3dfee10",
"size": "30720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfutils/model_tool_old.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "405509"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
def read(fname):
    """Return the text contents of *fname*, resolved relative to this file.

    Used below to load README.rst as the package's long description.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    # Bug fix: the original leaked the file handle; a context manager
    # guarantees it is closed even if read() raises.
    with open(path) as f:
        return f.read()
# Package metadata for Flask-Bootstrap (declarative setuptools call).
setup(
    name='Flask-Bootstrap',
    # Version tracks the bundled Bootstrap release plus a packaging suffix.
    version='3.0.0.2.dev1',
    url='http://github.com/mbr/flask-bootstrap',
    license='BSD',
    author='Marc Brinkmann',
    author_email='git@marcbrinkmann.de',
    description='An extension that includes Twitter\'s Bootstrap in your '
                'project, without any boilerplate code.',
    long_description=read('README.rst'),
    packages=['flask_bootstrap'],
    # Static assets (the bundled Bootstrap files) must ship unzipped.
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    install_requires=[
        'Flask>=0.8',
    ],
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
|
{
"content_hash": "0bc1fc8971f967734b141b2ca1df5b54",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 74,
"avg_line_length": 29.02777777777778,
"alnum_prop": 0.6220095693779905,
"repo_name": "miguelgrinberg/flask-bootstrap",
"id": "5c42da9bec839d472036f6f410ffc52136324257",
"size": "1045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "136711"
},
{
"name": "JavaScript",
"bytes": "58458"
},
{
"name": "Python",
"bytes": "16740"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) Bojan Mihelac and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# https://raw.githubusercontent.com/django-import-export/django-import-export/master/import_export/formats/base_formats.py
from importlib import import_module
import tablib
class Format:
    """Abstract interface for an import/export file format.

    Concrete formats implement dataset creation/export; the query methods
    (binary-ness, read mode, extension, content type) drive how files are
    opened and served.
    """

    def get_title(self):
        # Returns the class itself; TablibFormat overrides this to return
        # the tablib format's title string.
        return type(self)

    def create_dataset(self, in_stream):
        """
        Create dataset from given string.
        """
        raise NotImplementedError()

    def export_data(self, dataset, **kwargs):
        """
        Returns format representation for given dataset.
        """
        raise NotImplementedError()

    def is_binary(self):
        """
        Returns if this format is binary.
        """
        return True

    def get_read_mode(self):
        """
        Returns mode for opening files.
        """
        return 'rb'

    def get_extension(self):
        """
        Returns extension for this format files.
        """
        return ""

    def get_content_type(self):
        # For content types see
        # https://www.iana.org/assignments/media-types/media-types.xhtml
        return 'application/octet-stream'

    @classmethod
    def is_available(cls):
        """Whether this format's backing library is importable."""
        return True

    def can_import(self):
        """Whether this format supports reading datasets."""
        return False

    def can_export(self):
        """Whether this format supports writing datasets."""
        return False
class TablibFormat(Format):
    """Format backed by a tablib format module (set via TABLIB_MODULE)."""

    # Dotted path of the tablib format module, e.g. 'tablib.formats._csv'.
    TABLIB_MODULE = None
    CONTENT_TYPE = 'application/octet-stream'

    def get_format(self):
        """
        Import and returns tablib module.
        """
        try:
            # Available since tablib 1.0
            from tablib.formats import registry
        except ImportError:
            # Older tablib: the format is a plain importable module.
            return import_module(self.TABLIB_MODULE)
        else:
            # Newer tablib: look up by key, e.g. 'tablib.formats._csv' -> 'csv'.
            key = self.TABLIB_MODULE.split('.')[-1].replace('_', '')
            return registry.get_format(key)

    @classmethod
    def is_available(cls):
        # Probe by actually resolving the format; failure means the backing
        # library (or tablib support for it) is not installed.
        try:
            cls().get_format()
        except (tablib.core.UnsupportedFormat, ImportError):
            return False
        return True

    def get_title(self):
        return self.get_format().title

    def create_dataset(self, in_stream, **kwargs):
        return tablib.import_set(in_stream, format=self.get_title())

    def export_data(self, dataset, **kwargs):
        return dataset.export(self.get_title(), **kwargs)

    def get_extension(self):
        # First registered extension is treated as canonical.
        return self.get_format().extensions[0]

    def get_content_type(self):
        return self.CONTENT_TYPE

    def can_import(self):
        return hasattr(self.get_format(), 'import_set')

    def can_export(self):
        return hasattr(self.get_format(), 'export_set')
class TextFormat(TablibFormat):
    """Tablib-backed format whose on-disk representation is plain text."""

    def is_binary(self):
        """Text formats are never binary."""
        return False

    def get_read_mode(self):
        """Open files in text mode."""
        return 'r'
class CSV(TextFormat):
    """Comma-separated values, via tablib's csv format module."""
    TABLIB_MODULE = 'tablib.formats._csv'
    CONTENT_TYPE = 'text/csv'
    # Cleanup: a create_dataset() override that merely delegated to super()
    # with identical arguments was removed; the inherited implementation is
    # behaviorally identical.
class JSON(TextFormat):
    """JSON import/export via tablib's json format module."""
    TABLIB_MODULE = 'tablib.formats._json'
    CONTENT_TYPE = 'application/json'
class YAML(TextFormat):
    """YAML import/export via tablib's yaml format module."""
    TABLIB_MODULE = 'tablib.formats._yaml'
    # See https://stackoverflow.com/questions/332129/yaml-mime-type
    CONTENT_TYPE = 'text/yaml'
class TSV(TextFormat):
    """Tab-separated values, via tablib's tsv format module."""
    TABLIB_MODULE = 'tablib.formats._tsv'
    CONTENT_TYPE = 'text/tab-separated-values'
    # Cleanup: a create_dataset() override that merely delegated to super()
    # with identical arguments was removed; the inherited implementation is
    # behaviorally identical.
class ODS(TextFormat):
    """OpenDocument spreadsheet, via tablib's ods format module."""
    TABLIB_MODULE = 'tablib.formats._ods'
    CONTENT_TYPE = 'application/vnd.oasis.opendocument.spreadsheet'
class HTML(TextFormat):
    """HTML table export via tablib's html format module."""
    TABLIB_MODULE = 'tablib.formats._html'
    CONTENT_TYPE = 'text/html'
class XLS(TablibFormat):
    """Legacy Excel (.xls) binary format, read via xlrd."""
    TABLIB_MODULE = 'tablib.formats._xls'
    CONTENT_TYPE = 'application/vnd.ms-excel'

    def create_dataset(self, in_stream):
        """Build a tablib Dataset from the first sheet of an XLS workbook.

        Row 0 becomes the headers; all remaining rows become data rows.
        """
        import xlrd
        workbook = xlrd.open_workbook(file_contents=in_stream)
        first_sheet = workbook.sheets()[0]
        dataset = tablib.Dataset()
        dataset.headers = first_sheet.row_values(0)
        for row_index in range(1, first_sheet.nrows):
            dataset.append(first_sheet.row_values(row_index))
        return dataset
class XLSX(TablibFormat):
    """Office Open XML (.xlsx) spreadsheet, read via openpyxl."""
    TABLIB_MODULE = 'tablib.formats._xlsx'
    CONTENT_TYPE = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'

    def create_dataset(self, in_stream):
        """Build a tablib Dataset from the workbook's active (first) sheet.

        The first row becomes the headers; remaining rows become data rows.
        """
        from io import BytesIO
        import openpyxl
        workbook = openpyxl.load_workbook(BytesIO(in_stream), read_only=True)
        worksheet = workbook.active
        # `rows` is a generator; consuming the first item leaves the data rows.
        row_iter = worksheet.rows
        dataset = tablib.Dataset()
        dataset.headers = [cell.value for cell in next(row_iter)]
        for row in row_iter:
            dataset.append([cell.value for cell in row])
        return dataset
#: These are the default formats for import and export. Whether they can be
#: used or not is depending on their implementation in the tablib library.
_CANDIDATE_FORMATS = (
    CSV,
    XLS,
    XLSX,
    TSV,
    ODS,
    JSON,
    YAML,
    HTML,
)
DEFAULT_FORMATS = [fmt for fmt in _CANDIDATE_FORMATS if fmt.is_available()]
|
{
"content_hash": "0f46b1bb06d6fc2f0d0d73028a9edf20",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 122,
"avg_line_length": 28.54824561403509,
"alnum_prop": 0.6543247810723614,
"repo_name": "takeflight/wagtail",
"id": "a56f1a82861be323d23ec08743810cdce734dfa0",
"size": "6509",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtail/contrib/redirects/base_formats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "181889"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "367981"
},
{
"name": "JavaScript",
"bytes": "255453"
},
{
"name": "Makefile",
"bytes": "738"
},
{
"name": "Python",
"bytes": "3459754"
},
{
"name": "Shell",
"bytes": "7868"
}
],
"symlink_target": ""
}
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import re
import shutil
import sys
import time
from unittest import SkipTest, skipUnless
import warnings
from django.conf import settings
from django.core import management
from django.core.management import execute_from_command_line
from django.core.management.utils import find_command
from django.test import SimpleTestCase
from django.test import override_settings
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import six
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
# Target locale used by all extraction tests below.
LOCALE = 'de'
# makemessages shells out to xgettext; all tests are skipped when it's missing.
has_xgettext = find_command('xgettext')
# Absolute directory of this test module (upath handles py2 byte paths).
this_directory = os.path.dirname(upath(__file__))
@skipUnless(has_xgettext, 'xgettext is mandatory for extraction tests')
class ExtractorTests(SimpleTestCase):
    """Base class for makemessages extraction tests.

    Provides helpers to run the command from the fixture directory and to
    assert on msgids and location comments in the generated .po file.
    """

    # Fixture project directory the command is run from.
    test_dir = os.path.abspath(os.path.join(this_directory, 'commands'))
    # Path (relative to test_dir) of the .po file makemessages writes.
    PO_FILE = 'locale/%s/LC_MESSAGES/django.po' % LOCALE

    def setUp(self):
        # Remember the original cwd; tests chdir into test_dir and must restore.
        self._cwd = os.getcwd()

    def _rmrf(self, dname):
        # Safety guard: never delete anything outside test_dir.
        if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
            return
        shutil.rmtree(dname)

    def rmfile(self, filepath):
        """Remove *filepath* if it exists (used as an addCleanup callback)."""
        if os.path.exists(filepath):
            os.remove(filepath)

    def tearDown(self):
        os.chdir(self.test_dir)
        try:
            # Drop the locale tree generated by this test run.
            self._rmrf('locale/%s' % LOCALE)
        except OSError:
            pass
        os.chdir(self._cwd)

    def _run_makemessages(self, **options):
        """Run makemessages in test_dir; return (command stdout, .po contents)."""
        os.chdir(self.test_dir)
        out = StringIO()
        management.call_command('makemessages', locale=[LOCALE], verbosity=2,
                                stdout=out, **options)
        output = out.getvalue()
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = fp.read()
        return output, po_contents

    def assertMsgId(self, msgid, s, use_quotes=True):
        """Assert a `msgid "<msgid>"` line appears in the po contents *s*."""
        q = '"'
        if use_quotes:
            msgid = '"%s"' % msgid
            q = "'"
        needle = 'msgid %s' % msgid
        msgid = re.escape(msgid)
        return self.assertTrue(re.search('^msgid %s' % msgid, s, re.MULTILINE), 'Could not find %(q)s%(n)s%(q)s in generated PO file' % {'n': needle, 'q': q})

    def assertNotMsgId(self, msgid, s, use_quotes=True):
        """Assert that no `msgid` line for *msgid* appears in *s*."""
        if use_quotes:
            msgid = '"%s"' % msgid
        msgid = re.escape(msgid)
        return self.assertTrue(not re.search('^msgid %s' % msgid, s, re.MULTILINE))

    def _assertPoLocComment(self, assert_presence, po_filename, line_number, *comment_parts):
        # Shared implementation for assertLocationComment(Not)Present below.
        with open(po_filename, 'r') as fp:
            po_contents = force_text(fp.read())
        if os.name == 'nt':
            # #: .\path\to\file.html:123
            cwd_prefix = '%s%s' % (os.curdir, os.sep)
        else:
            # #: path/to/file.html:123
            cwd_prefix = ''
        parts = ['#: ']
        parts.append(os.path.join(cwd_prefix, *comment_parts))
        if line_number is not None:
            parts.append(':%d' % line_number)
        needle = ''.join(parts)
        if assert_presence:
            return self.assertIn(needle, po_contents, '"%s" not found in final .po file.' % needle)
        else:
            return self.assertNotIn(needle, po_contents, '"%s" shouldn\'t be in final .po file.' % needle)

    def assertLocationCommentPresent(self, po_filename, line_number, *comment_parts):
        """
        self.assertLocationCommentPresent('django.po', 42, 'dirA', 'dirB', 'foo.py')
        verifies that the django.po file has a gettext-style location comment of the form
        `#: dirA/dirB/foo.py:42`
        (or `#: .\dirA\dirB\foo.py:42` on Windows)
        None can be passed for the line_number argument to skip checking of the :42 suffix part.
        """
        return self._assertPoLocComment(True, po_filename, line_number, *comment_parts)

    def assertLocationCommentNotPresent(self, po_filename, line_number, *comment_parts):
        """Check the opposite of assertLocationComment()"""
        return self._assertPoLocComment(False, po_filename, line_number, *comment_parts)

    def assertRecentlyModified(self, path):
        """
        Assert that file was recently modified (modification time was less than 10 seconds ago).
        """
        delta = time.time() - os.stat(path).st_mtime
        self.assertLess(delta, 10, "%s was recently modified" % path)

    def assertNotRecentlyModified(self, path):
        """
        Assert that file was not recently modified (modification time was more than 10 seconds ago).
        """
        delta = time.time() - os.stat(path).st_mtime
        self.assertGreater(delta, 10, "%s wasn't recently modified" % path)
class BasicExtractorTests(ExtractorTests):
    """Core makemessages extraction behavior: comments, templates, errors."""

    def test_comments_extractor(self):
        """Translator-targeted comments are extracted; plain ones are not."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with io.open(self.PO_FILE, 'r', encoding='utf-8') as fp:
            po_contents = fp.read()
            self.assertIn('#. Translators: This comment should be extracted', po_contents)
            self.assertNotIn('This comment should not be extracted', po_contents)
            # Comments in templates
            self.assertIn('#. Translators: Django template comment for translators', po_contents)
            self.assertIn("#. Translators: Django comment block for translators\n#. string's meaning unveiled", po_contents)
            self.assertIn('#. Translators: One-line translator comment #1', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #1\n#. continued here.', po_contents)
            self.assertIn('#. Translators: One-line translator comment #2', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #2\n#. continued here.', po_contents)
            self.assertIn('#. Translators: One-line translator comment #3', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #3\n#. continued here.', po_contents)
            self.assertIn('#. Translators: One-line translator comment #4', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #4\n#. continued here.', po_contents)
            self.assertIn('#. Translators: One-line translator comment #5 -- with non ASCII characters: áéíóúö', po_contents)
            self.assertIn('#. Translators: Two-line translator comment #5 -- with non ASCII characters: áéíóúö\n#. continued here.', po_contents)

    def test_templatize_trans_tag(self):
        # ticket #11240: % signs in {% trans %} literals are escaped as %%.
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
        self.assertMsgId('Literal with a percent symbol at the end %%', po_contents)
        self.assertMsgId('Literal with a percent %% symbol in the middle', po_contents)
        self.assertMsgId('Completed 50%% of all the tasks', po_contents)
        self.assertMsgId('Completed 99%% of all the tasks', po_contents)
        self.assertMsgId("Shouldn't double escape this sequence: %% (two percent signs)", po_contents)
        self.assertMsgId("Shouldn't double escape this sequence %% either", po_contents)
        self.assertMsgId("Looks like a str fmt spec %%s but shouldn't be interpreted as such", po_contents)
        self.assertMsgId("Looks like a str fmt spec %% o but shouldn't be interpreted as such", po_contents)

    def test_templatize_blocktrans_tag(self):
        # ticket #11966: same escaping rules inside {% blocktrans %}.
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
        self.assertMsgId('I think that 100%% is more that 50%% of anything.', po_contents)
        self.assertMsgId('I think that 100%% is more that 50%% of %(obj)s.', po_contents)
        self.assertMsgId("Blocktrans extraction shouldn't double escape this: %%, a=%(a)s", po_contents)

    def test_blocktrans_trimmed(self):
        """{% blocktrans trimmed %} collapses whitespace; plain blocktrans doesn't."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
        # should not be trimmed
        self.assertNotMsgId('Text with a few line breaks.', po_contents)
        # should be trimmed
        self.assertMsgId("Again some text with a few line breaks, this time should be trimmed.", po_contents)
        # #21406 -- Should adjust for eaten line numbers
        self.assertMsgId("I'm on line 97", po_contents)
        self.assertLocationCommentPresent(self.PO_FILE, 97, 'templates', 'test.html')

    def test_force_en_us_locale(self):
        """Value of locale-munging option used by the command is the right one"""
        from django.core.management.commands.makemessages import Command
        self.assertTrue(Command.leave_locale_alone)

    def test_extraction_error(self):
        """A broken template aborts extraction with a descriptive SyntaxError."""
        os.chdir(self.test_dir)
        self.assertRaises(SyntaxError, management.call_command, 'makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
        with self.assertRaises(SyntaxError) as context_manager:
            management.call_command('makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
        six.assertRegex(
            self, str(context_manager.exception),
            r'Translation blocks must not include other block tags: blocktrans \(file templates[/\\]template_with_error\.tpl, line 3\)'
        )
        # Check that the temporary file was cleaned up
        self.assertFalse(os.path.exists('./templates/template_with_error.tpl.py'))

    def test_unicode_decode_error(self):
        """Non-UTF-8 files are skipped with a warning rather than crashing."""
        os.chdir(self.test_dir)
        shutil.copyfile('./not_utf8.sample', './not_utf8.txt')
        self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'not_utf8.txt'))
        out = StringIO()
        management.call_command('makemessages', locale=[LOCALE], stdout=out)
        self.assertIn("UnicodeDecodeError: skipped file not_utf8.txt in .",
                      force_text(out.getvalue()))

    def test_extraction_warning(self):
        """test xgettext warning about multiple bare interpolation placeholders"""
        os.chdir(self.test_dir)
        shutil.copyfile('./code.sample', './code_sample.py')
        self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'code_sample.py'))
        out = StringIO()
        management.call_command('makemessages', locale=[LOCALE], stdout=out)
        self.assertIn("code_sample.py:4", force_text(out.getvalue()))

    def test_template_message_context_extractor(self):
        """
        Ensure that message contexts are correctly extracted for the
        {% trans %} and {% blocktrans %} template tags.
        Refs #14806.
        """
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            # {% trans %}
            self.assertIn('msgctxt "Special trans context #1"', po_contents)
            self.assertMsgId("Translatable literal #7a", po_contents)
            self.assertIn('msgctxt "Special trans context #2"', po_contents)
            self.assertMsgId("Translatable literal #7b", po_contents)
            self.assertIn('msgctxt "Special trans context #3"', po_contents)
            self.assertMsgId("Translatable literal #7c", po_contents)
            # {% blocktrans %}
            self.assertIn('msgctxt "Special blocktrans context #1"', po_contents)
            self.assertMsgId("Translatable literal #8a", po_contents)
            self.assertIn('msgctxt "Special blocktrans context #2"', po_contents)
            self.assertMsgId("Translatable literal #8b-singular", po_contents)
            self.assertIn("Translatable literal #8b-plural", po_contents)
            self.assertIn('msgctxt "Special blocktrans context #3"', po_contents)
            self.assertMsgId("Translatable literal #8c-singular", po_contents)
            self.assertIn("Translatable literal #8c-plural", po_contents)
            self.assertIn('msgctxt "Special blocktrans context #4"', po_contents)
            self.assertMsgId("Translatable literal #8d %(a)s", po_contents)

    def test_context_in_single_quotes(self):
        """msgctxt extraction works with both single- and double-quoted contexts."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            # {% trans %}
            self.assertIn('msgctxt "Context wrapped in double quotes"', po_contents)
            self.assertIn('msgctxt "Context wrapped in single quotes"', po_contents)
            # {% blocktrans %}
            self.assertIn('msgctxt "Special blocktrans context wrapped in double quotes"', po_contents)
            self.assertIn('msgctxt "Special blocktrans context wrapped in single quotes"', po_contents)

    def test_template_comments(self):
        """Template comment tags on the same line of other constructs (#19552)"""
        os.chdir(self.test_dir)
        # Test detection/end user reporting of old, incorrect templates
        # translator comments syntax
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter('always')
            management.call_command('makemessages', locale=[LOCALE], extensions=['thtml'], verbosity=0)
            self.assertEqual(len(ws), 3)
            for w in ws:
                self.assertTrue(issubclass(w.category, TranslatorCommentWarning))
            six.assertRegex(
                self, str(ws[0].message),
                r"The translator-targeted comment 'Translators: ignored i18n comment #1' \(file templates[/\\]comments.thtml, line 4\) was ignored, because it wasn't the last item on the line\."
            )
            six.assertRegex(
                self, str(ws[1].message),
                r"The translator-targeted comment 'Translators: ignored i18n comment #3' \(file templates[/\\]comments.thtml, line 6\) was ignored, because it wasn't the last item on the line\."
            )
            six.assertRegex(
                self, str(ws[2].message),
                r"The translator-targeted comment 'Translators: ignored i18n comment #4' \(file templates[/\\]comments.thtml, line 8\) was ignored, because it wasn't the last item on the line\."
            )
        # Now test .po file contents
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
            self.assertMsgId('Translatable literal #9a', po_contents)
            self.assertNotIn('ignored comment #1', po_contents)
            self.assertNotIn('Translators: ignored i18n comment #1', po_contents)
            self.assertMsgId("Translatable literal #9b", po_contents)
            self.assertNotIn('ignored i18n comment #2', po_contents)
            self.assertNotIn('ignored comment #2', po_contents)
            self.assertMsgId('Translatable literal #9c', po_contents)
            self.assertNotIn('ignored comment #3', po_contents)
            self.assertNotIn('ignored i18n comment #3', po_contents)
            self.assertMsgId('Translatable literal #9d', po_contents)
            self.assertNotIn('ignored comment #4', po_contents)
            self.assertMsgId('Translatable literal #9e', po_contents)
            self.assertNotIn('ignored comment #5', po_contents)
            self.assertNotIn('ignored i18n comment #4', po_contents)
            self.assertMsgId('Translatable literal #9f', po_contents)
            self.assertIn('#. Translators: valid i18n comment #5', po_contents)
            self.assertMsgId('Translatable literal #9g', po_contents)
            self.assertIn('#. Translators: valid i18n comment #6', po_contents)
            self.assertMsgId('Translatable literal #9h', po_contents)
            self.assertIn('#. Translators: valid i18n comment #7', po_contents)
            self.assertMsgId('Translatable literal #9i', po_contents)
            six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #8')
            six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #9')
            self.assertMsgId("Translatable literal #9j", po_contents)

    def test_makemessages_find_files(self):
        """
        Test that find_files only discover files having the proper extensions.
        """
        from django.core.management.commands.makemessages import Command
        cmd = Command()
        cmd.ignore_patterns = ['CVS', '.*', '*~', '*.pyc']
        cmd.symlinks = False
        cmd.domain = 'django'
        cmd.extensions = ['html', 'txt', 'py']
        cmd.verbosity = 0
        cmd.locale_paths = []
        cmd.default_locale_path = os.path.join(self.test_dir, 'locale')
        found_files = cmd.find_files(self.test_dir)
        found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
        self.assertEqual(found_exts.difference({'.py', '.html', '.txt'}), set())
        cmd.extensions = ['js']
        cmd.domain = 'djangojs'
        found_files = cmd.find_files(self.test_dir)
        found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
        self.assertEqual(found_exts.difference({'.js'}), set())
class JavascriptExtractorTests(ExtractorTests):
    """Extraction from JavaScript sources (djangojs domain)."""

    # djangojs domain writes to its own .po file.
    PO_FILE = 'locale/%s/LC_MESSAGES/djangojs.po' % LOCALE

    def test_javascript_literals(self):
        """gettext()/gettext_noop() literals in JS are extracted correctly."""
        os.chdir(self.test_dir)
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId('This literal should be included.', po_contents)
        self.assertMsgId('gettext_noop should, too.', po_contents)
        self.assertMsgId('This one as well.', po_contents)
        self.assertMsgId(r'He said, \"hello\".', po_contents)
        self.assertMsgId("okkkk", po_contents)
        self.assertMsgId("TEXT", po_contents)
        self.assertMsgId("It's at http://example.com", po_contents)
        self.assertMsgId("String", po_contents)
        self.assertMsgId("/* but this one will be too */ 'cause there is no way of telling...", po_contents)
        self.assertMsgId("foo", po_contents)
        self.assertMsgId("bar", po_contents)
        self.assertMsgId("baz", po_contents)
        self.assertMsgId("quz", po_contents)
        self.assertMsgId("foobar", po_contents)

    @override_settings(
        STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
        MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
    def test_media_static_dirs_ignored(self):
        """
        Regression test for #23583.
        """
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId("Static content inside app should be included.", po_contents)
        self.assertNotMsgId("Content from STATIC_ROOT should not be included", po_contents)

    @override_settings(STATIC_ROOT=None, MEDIA_ROOT='')
    def test_default_root_settings(self):
        """
        Regression test for #23717.
        """
        _, po_contents = self._run_makemessages(domain='djangojs')
        self.assertMsgId("Static content inside app should be included.", po_contents)
class IgnoredExtractorTests(ExtractorTests):
    """Behavior of --ignore patterns and the built-in STATIC/MEDIA skips."""

    def test_ignore_directory(self):
        """A whole directory matched by an ignore pattern is skipped."""
        out, po_contents = self._run_makemessages(ignore_patterns=[
            os.path.join('ignore_dir', '*'),
        ])
        self.assertIn("ignoring directory ignore_dir", out)
        self.assertMsgId('This literal should be included.', po_contents)
        self.assertNotMsgId('This should be ignored.', po_contents)

    def test_ignore_subdirectory(self):
        """Patterns can target nested subdirectories."""
        out, po_contents = self._run_makemessages(ignore_patterns=[
            'templates/*/ignore.html',
            'templates/subdir/*',
        ])
        self.assertIn("ignoring directory subdir", out)
        self.assertNotMsgId('This subdir should be ignored too.', po_contents)

    def test_ignore_file_patterns(self):
        """Patterns can match individual files by name."""
        out, po_contents = self._run_makemessages(ignore_patterns=[
            'xxx_*',
        ])
        self.assertIn("ignoring file xxx_ignored.html", out)
        self.assertNotMsgId('This should be ignored too.', po_contents)

    @override_settings(
        STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
        MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
    def test_media_static_dirs_ignored(self):
        """STATIC_ROOT and MEDIA_ROOT are skipped automatically."""
        out, _ = self._run_makemessages()
        self.assertIn("ignoring directory static", out)
        self.assertIn("ignoring directory media_root", out)
class SymlinkExtractorTests(ExtractorTests):
    """Extraction through symlinked template directories (--symlinks)."""

    def setUp(self):
        super(SymlinkExtractorTests, self).setUp()
        # Symlink created on demand inside test_symlink; removed in tearDown.
        self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')

    def tearDown(self):
        super(SymlinkExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            os.remove(self.symlinked_dir)
        except OSError:
            pass
        os.chdir(self._cwd)

    def test_symlink(self):
        # On Python < 3.2 os.symlink() exists only on Unix
        if hasattr(os, 'symlink'):
            if os.path.exists(self.symlinked_dir):
                self.assertTrue(os.path.islink(self.symlinked_dir))
            else:
                # On Python >= 3.2, os.symlink() always exists but can still
                # fail at runtime when the user lacks the needed permissions on
                # Windows versions that support symbolic links (>= 6/Vista).
                # See Python issue 9333 (http://bugs.python.org/issue9333).
                # Skip the test in that case.
                try:
                    os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
                except (OSError, NotImplementedError):
                    raise SkipTest("os.symlink() is available on this OS but can't be used by this user.")
            os.chdir(self.test_dir)
            management.call_command('makemessages', locale=[LOCALE], verbosity=0, symlinks=True)
            self.assertTrue(os.path.exists(self.PO_FILE))
            with open(self.PO_FILE, 'r') as fp:
                po_contents = force_text(fp.read())
            self.assertMsgId('This literal should be included.', po_contents)
            self.assertIn('templates_symlinked/test.html', po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
    """Plural-Forms headers are copied from Django's catalogs (and overridable)."""

    PO_FILE_ES = 'locale/es/LC_MESSAGES/django.po'

    def tearDown(self):
        super(CopyPluralFormsExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            self._rmrf('locale/es')
        except OSError:
            pass
        os.chdir(self._cwd)

    def test_copy_plural_forms(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
        # The header must carry the plural rule copied from Django's own catalog.
        self.assertIn('Plural-Forms: nplurals=2; plural=(n != 1)', po_contents)

    def test_override_plural_forms(self):
        """Ticket #20311."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=['es'], extensions=['djtpl'], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE_ES))
        with io.open(self.PO_FILE_ES, 'r', encoding='utf-8') as fp:
            po_contents = fp.read()
        # Exactly one Plural-Forms header must survive in the generated file.
        found = re.findall(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', po_contents, re.MULTILINE | re.DOTALL)
        self.assertEqual(1, len(found))
class NoWrapExtractorTests(ExtractorTests):
    """The --no-wrap flag controls line wrapping of long msgids."""

    def test_no_wrap_enabled(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=True)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
        # With wrapping disabled the long literal stays on a single line.
        self.assertMsgId('This literal should also be included wrapped or not wrapped depending on the use of the --no-wrap option.', po_contents)

    def test_no_wrap_disabled(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=False)
        self.assertTrue(os.path.exists(self.PO_FILE))
        with open(self.PO_FILE, 'r') as fp:
            po_contents = force_text(fp.read())
        # With wrapping on, the msgid is split across quoted lines, so the
        # expected value embeds the quoting itself (hence use_quotes=False).
        self.assertMsgId('""\n"This literal should also be included wrapped or not wrapped depending on the "\n"use of the --no-wrap option."', po_contents, use_quotes=False)
class LocationCommentsTests(ExtractorTests):
    """The --no-location switch toggles '#: file:line' comments (#16903)."""

    def test_no_location_enabled(self):
        """Behavior is correct if --no-location switch is specified. See #16903."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=True)
        self.assertTrue(os.path.exists(self.PO_FILE))
        self.assertLocationCommentNotPresent(self.PO_FILE, 55, 'templates', 'test.html.py')

    def test_no_location_disabled(self):
        """Behavior is correct if --no-location switch isn't specified."""
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=False)
        self.assertTrue(os.path.exists(self.PO_FILE))
        # #16903 -- Standard comment with source file relative path should be present
        self.assertLocationCommentPresent(self.PO_FILE, 55, 'templates', 'test.html')
        # #21208 -- Leaky paths in comments on Windows e.g. #: path\to\file.html.py:123
        self.assertLocationCommentNotPresent(self.PO_FILE, None, 'templates', 'test.html.py')
class KeepPotFileExtractorTests(ExtractorTests):
    """The --keep-pot flag controls whether the intermediate .pot survives."""

    POT_FILE = 'locale/django.pot'

    def setUp(self):
        # Adds nothing over the parent; kept for symmetry with tearDown.
        super(KeepPotFileExtractorTests, self).setUp()

    def tearDown(self):
        super(KeepPotFileExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            os.unlink(self.POT_FILE)
        except OSError:
            # The .pot only exists after the --keep-pot test ran.
            pass
        os.chdir(self._cwd)

    def test_keep_pot_disabled_by_default(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        self.assertFalse(os.path.exists(self.POT_FILE))

    def test_keep_pot_explicitly_disabled(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0,
                                keep_pot=False)
        self.assertFalse(os.path.exists(self.POT_FILE))

    def test_keep_pot_enabled(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=[LOCALE], verbosity=0,
                                keep_pot=True)
        self.assertTrue(os.path.exists(self.POT_FILE))
class MultipleLocaleExtractionTests(ExtractorTests):
    """Passing several --locale values creates one catalog per locale."""

    PO_FILE_PT = 'locale/pt/LC_MESSAGES/django.po'
    PO_FILE_DE = 'locale/de/LC_MESSAGES/django.po'
    LOCALES = ['pt', 'de', 'ch']

    def tearDown(self):
        # NOTE(review): unlike sibling classes this does not call
        # super().tearDown(); confirm the parent cleanup is not required here.
        os.chdir(self.test_dir)
        for locale in self.LOCALES:
            try:
                self._rmrf('locale/%s' % locale)
            except OSError:
                pass
        os.chdir(self._cwd)

    def test_multiple_locales(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=['pt', 'de'], verbosity=0)
        self.assertTrue(os.path.exists(self.PO_FILE_PT))
        self.assertTrue(os.path.exists(self.PO_FILE_DE))
class ExcludedLocaleExtractionTests(ExtractorTests):
    """The --exclude option prevents selected locales from being updated."""

    LOCALES = ['en', 'fr', 'it']
    PO_FILE = 'locale/%s/LC_MESSAGES/django.po'

    test_dir = os.path.abspath(os.path.join(this_directory, 'exclude'))

    def _set_times_for_all_po_files(self):
        """
        Set access and modification times to the Unix epoch time for all the .po files.
        """
        for locale in self.LOCALES:
            os.utime(self.PO_FILE % locale, (0, 0))

    def setUp(self):
        super(ExcludedLocaleExtractionTests, self).setUp()
        os.chdir(self.test_dir)  # ExtractorTests.tearDown() takes care of restoring.
        # Work on a fresh copy of the canned catalogs, with epoch mtimes, so
        # the (Not)RecentlyModified assertions below can discriminate which
        # catalogs the command actually rewrote.
        shutil.copytree('canned_locale', 'locale')
        self._set_times_for_all_po_files()
        self.addCleanup(self._rmrf, os.path.join(self.test_dir, 'locale'))

    def test_command_help(self):
        old_stdout, old_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = StringIO(), StringIO()
        try:
            # `call_command` bypasses the parser; by calling
            # `execute_from_command_line` with the help subcommand we
            # ensure that there are no issues with the parser itself.
            execute_from_command_line(['django-admin', 'help', 'makemessages'])
        finally:
            sys.stdout, sys.stderr = old_stdout, old_stderr

    def test_one_locale_excluded(self):
        management.call_command('makemessages', exclude=['it'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')

    def test_multiple_locales_excluded(self):
        management.call_command('makemessages', exclude=['it', 'fr'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')

    def test_one_locale_excluded_with_locale(self):
        # --exclude wins over an explicit --locale mention.
        management.call_command('makemessages', locale=['en', 'fr'], exclude=['fr'], stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')

    def test_multiple_locales_excluded_with_locale(self):
        management.call_command('makemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'],
                                stdout=StringIO())
        self.assertRecentlyModified(self.PO_FILE % 'en')
        self.assertNotRecentlyModified(self.PO_FILE % 'fr')
        self.assertNotRecentlyModified(self.PO_FILE % 'it')
class CustomLayoutExtractionTests(ExtractorTests):
    """Extraction in a project tree that has no top-level 'locale' directory."""

    def setUp(self):
        # NOTE(review): does not call super().setUp(); it replicates the
        # attributes ExtractorTests would set -- confirm this stays in sync.
        self._cwd = os.getcwd()
        self.test_dir = os.path.join(this_directory, 'project_dir')

    def test_no_locale_raises(self):
        os.chdir(self.test_dir)
        # NOTE(review): other tests pass locale=[LOCALE]; here a bare string
        # is passed -- the command errors out before using it, but confirm
        # this is intentional.
        with six.assertRaisesRegex(self, management.CommandError,
                "Unable to find a locale path to store translations for file"):
            management.call_command('makemessages', locale=LOCALE, verbosity=0)

    @override_settings(
        LOCALE_PATHS=(os.path.join(
            this_directory, 'project_dir', 'project_locale'),)
    )
    def test_project_locale_paths(self):
        """
        Test that:
        * translations for an app containing a locale folder are stored in that folder
        * translations outside of that app are in LOCALE_PATHS[0]
        """
        os.chdir(self.test_dir)
        self.addCleanup(shutil.rmtree,
            os.path.join(settings.LOCALE_PATHS[0], LOCALE), True)
        self.addCleanup(shutil.rmtree,
            os.path.join(self.test_dir, 'app_with_locale', 'locale', LOCALE), True)

        management.call_command('makemessages', locale=[LOCALE], verbosity=0)
        project_de_locale = os.path.join(
            self.test_dir, 'project_locale', 'de', 'LC_MESSAGES', 'django.po')
        app_de_locale = os.path.join(
            self.test_dir, 'app_with_locale', 'locale', 'de', 'LC_MESSAGES', 'django.po')
        self.assertTrue(os.path.exists(project_de_locale))
        self.assertTrue(os.path.exists(app_de_locale))

        with open(project_de_locale, 'r') as fp:
            po_contents = force_text(fp.read())
        self.assertMsgId('This app has no locale directory', po_contents)
        self.assertMsgId('This is a project-level string', po_contents)
        with open(app_de_locale, 'r') as fp:
            po_contents = force_text(fp.read())
        self.assertMsgId('This app has a locale directory', po_contents)
|
{
"content_hash": "03bdcf2516bf7608ebd0eb5872bef051",
"timestamp": "",
"source": "github",
"line_count": 713,
"max_line_length": 194,
"avg_line_length": 45.859747545582046,
"alnum_prop": 0.6322099210960915,
"repo_name": "szopu/django",
"id": "3081776d96a269eaa1f1ba604645c78aa9cc936e",
"size": "32710",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/i18n/test_extraction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53429"
},
{
"name": "JavaScript",
"bytes": "103687"
},
{
"name": "Python",
"bytes": "10219556"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
}
|
"""
Provides utility classes to develop REST-like API and to simplify the routing
of HTTP requests.
:author: Thomas Calmant
:copyright: Copyright 2020, Thomas Calmant
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import inspect
import re
import uuid
# Standard typing module should be optional
try:
# pylint: disable=W0611
from typing import Any, Callable, Dict, Pattern, Tuple
from pelix.http import (
AbstractHTTPServletRequest,
AbstractHTTPServletResponse,
)
except ImportError:
pass
# Pelix utility methods
from pelix.utilities import get_method_arguments
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
HTTP_ROUTE_ATTRIBUTE = "__pelix_http_route__"
"""
Name of the attribute injected in methods to indicate their configuration
"""

# Same types as in Flask:
# string accepts any text without a slash (the default)
# int accepts integers
# float like int but for floating point values
# path like the default but also accepts slashes
# uuid accepts UUID strings
# TODO: handle missing types
# any matches one of the items provided

# Type name -> regex pattern
TYPE_PATTERNS = {
    "string": r"(?:[^/]+)",
    "int": r"(?:[+\-]?\d+)",
    "float": r"(?:[+\-]?\d+\.?\d*)",
    "path": r"(?:[\w\s/]+)",
    "uuid": r"(?:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}"
    r"-[0-9a-fA-F]{12})",
}

# Untyped markers ("<name>") fall back to the "string" pattern
TYPE_PATTERNS[None] = TYPE_PATTERNS["string"]

# Constant patterns
_MARKER_PATTERN = re.compile(r"<[^<>]*>")  # any "<...>" marker in a route
_TYPED_MARKER_PATTERN = re.compile(r"<(\w+):?(\w+)?>")  # splits "<name:type>"
def path_filter(path):
    # type: (str) -> str
    """
    Strips a single trailing '/' from the given path, if present

    :param path: A parsed path
    :return: The same path without its trailing slash
    """
    if path and path.endswith("/"):
        return path[:-1]
    return path
# Type name -> conversion method (for types other than str)
# Types absent from this mapping keep the raw matched string.
TYPE_CONVERTERS = {
    "int": int,
    "float": float,
    "path": path_filter,
    "uuid": uuid.UUID,
}  # type: Dict[str, Callable[[str], Any]]
# ------------------------------------------------------------------------------
class Http(object):
    """
    Decorator indicating which route a method handles
    """

    def __init__(self, route, methods=None):
        """
        :param route: Path handled by the method (beginning with a '/')
        :param methods: List of HTTP methods allowed (GET, POST, ...)
        """
        # Normalize the route: default to the root path, else trim spaces
        route = route.strip() if route else "/"

        if methods and not isinstance(methods, (list, tuple, set, frozenset)):
            raise TypeError("methods should be a list")

        self._route = route
        # Default to GET-only when no verb list is given
        self._methods = methods or ["GET"]

    def __call__(self, decorated_method):
        """
        Injects the HTTP_ROUTE_ATTRIBUTE to the decorated method to store the
        description of the route

        :param decorated_method: The decorated method
        """
        if not inspect.isroutine(decorated_method):
            raise TypeError(
                "@Http can decorate only methods, not {0}".format(
                    type(decorated_method).__name__
                )
            )

        try:
            config = getattr(decorated_method, HTTP_ROUTE_ATTRIBUTE)
        except AttributeError:
            # First decoration of this method: attach a fresh configuration
            config = {}
            setattr(decorated_method, HTTP_ROUTE_ATTRIBUTE, config)

        # Sets avoid duplicated routes/verbs when decorators are stacked
        config.setdefault("routes", set()).add(self._route)
        config.setdefault("methods", set()).update(self._methods)
        return decorated_method
class HttpGet(Http):
    """
    Decorates a method handling GET requests
    (shortcut for ``@Http(route, methods=["GET"])``)
    """

    def __init__(self, route):
        """
        :param route: Path handled by the method (beginning with a '/')
        """
        super(HttpGet, self).__init__(route, methods=["GET"])
class HttpHead(Http):
    """
    Decorates a method handling HEAD requests
    (shortcut for ``@Http(route, methods=["HEAD"])``)
    """

    def __init__(self, route):
        """
        :param route: Path handled by the method (beginning with a '/')
        """
        super(HttpHead, self).__init__(route, methods=["HEAD"])
class HttpPost(Http):
    """
    Decorates a method handling POST requests
    (shortcut for ``@Http(route, methods=["POST"])``)
    """

    def __init__(self, route):
        """
        :param route: Path handled by the method (beginning with a '/')
        """
        super(HttpPost, self).__init__(route, methods=["POST"])
class HttpPut(Http):
    """
    Decorates a method handling PUT requests
    (shortcut for ``@Http(route, methods=["PUT"])``)
    """

    def __init__(self, route):
        """
        :param route: Path handled by the method (beginning with a '/')
        """
        super(HttpPut, self).__init__(route, methods=["PUT"])
class HttpDelete(Http):
    """
    Decorates a method handling DELETE requests
    (shortcut for ``@Http(route, methods=["DELETE"])``)
    """

    def __init__(self, route):
        """
        :param route: Path handled by the method (beginning with a '/')
        """
        super(HttpDelete, self).__init__(route, methods=["DELETE"])
# ------------------------------------------------------------------------------
class RestDispatcher(object):
    """
    Parent class for servlets: dispatches requests according to the @Http
    decorator
    """

    def __init__(self):
        """
        Looks for the methods where to dispatch requests
        """
        # HTTP verb -> route pattern -> function
        self.__routes = {}
        # function -> arg name -> arg converter
        self.__methods_args = {}
        # Find all REST methods
        self._setup_rest_dispatcher()

    def do_GET(self, request, response):
        # pylint: disable=C0103
        """
        Handles a GET request
        """
        self._rest_dispatch(request, response)

    def do_HEAD(self, request, response):
        # pylint: disable=C0103
        """
        Handles a HEAD request
        """
        self._rest_dispatch(request, response)

    def do_POST(self, request, response):
        # pylint: disable=C0103
        """
        Handles a POST request
        """
        self._rest_dispatch(request, response)

    def do_PUT(self, request, response):
        # pylint: disable=C0103
        """
        Handles a PUT request
        """
        self._rest_dispatch(request, response)

    def do_DELETE(self, request, response):
        # pylint: disable=C0103
        """
        Handles a DELETE request
        """
        self._rest_dispatch(request, response)

    def _rest_dispatch(self, request, response):
        # type: (AbstractHTTPServletRequest, AbstractHTTPServletResponse) -> None
        """
        Dispatches the request to the decorated method whose route matches
        the request sub-path, or answers a 404.

        :param request: Request bean
        :param response: Response bean
        """
        # Extract request information
        http_verb = request.get_command()
        sub_path = request.get_sub_path()

        # Find the best matching method, according to the number of
        # readable arguments
        max_valid_args = -1
        best_method = None
        best_args = None
        best_match = None

        for route, method in self.__routes.get(http_verb, {}).items():
            # Parse the request path
            match = route.match(sub_path)
            if not match:
                continue

            # Count the number of valid arguments
            method_args = self.__methods_args[method]
            nb_valid_args = 0
            for name in method_args:
                try:
                    match.group(name)
                    nb_valid_args += 1
                except IndexError:
                    # Argument not found in this match
                    pass

            if nb_valid_args > max_valid_args:
                # Found a better match
                max_valid_args = nb_valid_args
                best_method = method
                best_args = method_args
                best_match = match

        if best_method is None:
            # No match: return a 404 plain text error
            response.send_content(
                404,
                "No method to handle path {0}".format(sub_path),
                "text/plain",
            )
        else:
            # Found a method
            # ... convert arguments
            kwargs = {}
            if best_args:
                for name, converter in best_args.items():
                    try:
                        str_value = best_match.group(name)
                    except IndexError:
                        # Argument is missing: do nothing
                        pass
                    else:
                        if str_value:
                            # Keep the default value when an argument is
                            # missing, i.e. don't give it in kwargs
                            if converter is not None:
                                # Convert the argument
                                kwargs[name] = converter(str_value)
                            else:
                                # Use the string value as is
                                kwargs[name] = str_value

            # Prepare positional arguments
            extra_pos_args = []
            if kwargs:
                # Ignore the first two parameters (request and response):
                # remaining declared names, in order, can be promoted from
                # kwargs to positional arguments.
                # FIX: the previous slice was ``args[:2]``, which *kept*
                # only the leading parameters instead of skipping them, so
                # no route argument was ever passed positionally.
                # Names not present in kwargs (e.g. ``self``/``response``
                # offsets for bound methods) are skipped harmlessly below.
                method_args = get_method_arguments(best_method).args[2:]
                for pos_arg in method_args:
                    try:
                        extra_pos_args.append(kwargs.pop(pos_arg))
                    except KeyError:
                        pass

            # ... call the method (exceptions will be handled by the server)
            best_method(request, response, *extra_pos_args, **kwargs)

    def _setup_rest_dispatcher(self):
        """
        Finds all methods to call when handling a route
        """
        for _, method in inspect.getmembers(self, inspect.isroutine):
            try:
                config = getattr(method, HTTP_ROUTE_ATTRIBUTE)
            except AttributeError:
                # Not a REST method
                continue

            for route in config["routes"]:
                pattern, arguments = self.__convert_route(route)
                self.__methods_args.setdefault(method, {}).update(arguments)
                for http_verb in config["methods"]:
                    self.__routes.setdefault(http_verb, {})[pattern] = method

    @staticmethod
    def __convert_route(route):
        # type: (str) -> Tuple[Pattern[str], Dict[str, Callable[[str], Any]]]
        """
        Converts a route pattern into a regex.

        The result is a tuple containing the regex pattern to match and a
        dictionary associating arguments names and their converter (if any)

        A route can be: "/hello/<name>/<age:int>"

        :param route: A route string, i.e. a path with type markers
        :return: A tuple (pattern, {argument name: converter})
        :raise ValueError: Invalid argument declaration
        """
        arguments = {}  # type: Dict[str, Callable[[str], Any]]
        last_idx = 0
        final_pattern = []
        match_iter = _MARKER_PATTERN.finditer(route)
        for match_pattern in match_iter:
            # Copy intermediate string
            final_pattern.append(route[last_idx : match_pattern.start()])
            # Skip the character right after the marker (assumed to be the
            # '/' separator, or end of route): the generated pattern below
            # re-adds it as an optional slash.
            last_idx = match_pattern.end() + 1

            # Extract type declaration
            match_type = _TYPED_MARKER_PATTERN.match(match_pattern.group())
            if not match_type:
                raise ValueError(
                    "Invalid argument declaration: {0}".format(
                        match_pattern.group()
                    )
                )

            name, kind = match_type.groups()
            if kind:
                kind = kind.lower()

            # Choose a pattern for each type (can raise a KeyError)
            regex = TYPE_PATTERNS[kind]

            # Keep track of argument name and converter
            arguments[name] = TYPE_CONVERTERS.get(kind)

            # Generate the regex pattern for this part: the whole segment,
            # including its trailing slash, is optional
            final_pattern.append("((?P<")
            final_pattern.append(match_type.group(1))
            final_pattern.append(">")
            final_pattern.append(regex)
            final_pattern.append(")/?)?")

        # Copy trailing string
        final_pattern.append(route[last_idx:])
        # Ensure we don't accept trailing values
        final_pattern.append("$")
        return re.compile("".join(final_pattern)), arguments
|
{
"content_hash": "acd334d25e91665e344aa7da1a0c2db2",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 81,
"avg_line_length": 30.707373271889402,
"alnum_prop": 0.5396563367599609,
"repo_name": "tcalmant/ipopo",
"id": "3d43dfe3e526efd1467880c6dc520b3f3e70e40e",
"size": "13381",
"binary": false,
"copies": "1",
"ref": "refs/heads/v1",
"path": "pelix/http/routing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2183067"
}
],
"symlink_target": ""
}
|
import errno
import os
import os.path
def get_boolean(arg):
    """Coerce *arg* to a bool.

    Accepts actual booleans and the (case-insensitive) strings 'true' and
    'false'.

    :param arg: Value to interpret
    :return: The corresponding boolean
    :raises ValueError: if *arg* cannot be interpreted as a boolean
    """
    if isinstance(arg, bool):
        return arg
    elif isinstance(arg, str):
        lowered = arg.lower()
        if lowered == 'true':
            return True
        elif lowered == 'false':
            return False
        else:
            # FIX: the message is now actually interpolated; previously the
            # format arguments were passed as extra ValueError args
            # (logging style), so the placeholder was never filled in.
            raise ValueError('%s (string) cannot be parsed to a boolean.' % arg)
    else:
        raise ValueError('%s (%s) cannot be parsed to a boolean.' % (arg, type(arg)))
def ensure_directory_exists(path):
    """Create directory *path* if it does not exist yet.

    :param path: Directory to create (parent must already exist)
    :raises OSError: if *path* exists but is not a directory, or if
        creation fails for any other reason (e.g. missing parent) --
        previously such errors were silently swallowed.
    """
    try:
        os.mkdir(path)
    except OSError as e:
        # FIX: compare error codes instead of the original
        # `'File exists' in repr(e)` check, which depended on the exact
        # platform error-message text.
        if e.errno != errno.EEXIST or not os.path.isdir(path):
            raise
|
{
"content_hash": "45a6e1e090b1449e0a960dbaad21b94f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 82,
"avg_line_length": 26.375,
"alnum_prop": 0.5529225908372828,
"repo_name": "IS-ENES-Data/esgf-pid",
"id": "64022d96bd295b0559536a6ad0dad3bfb4316142",
"size": "633",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "esgfpid/utils/miscutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "774918"
}
],
"symlink_target": ""
}
|
import fnmatch
import os
import lxml.html
# Get all missing images on FS from nav's htmls
# =================================
# just some fancy colors for output
# ANSI terminal escape codes used to colorize the script's output.
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'  # resets all attributes
BOLD = "\033[1m"
def disable():
    """Turn off colored output by blanking the ANSI escape sequences.

    FIX: the original version only created *local* variables with these
    names, which had no effect on the module-level constants.
    (BOLD is deliberately left untouched, as in the original.)
    """
    global HEADER, OKBLUE, OKGREEN, WARNING, FAIL, ENDC
    HEADER = ''
    OKBLUE = ''
    OKGREEN = ''
    WARNING = ''
    FAIL = ''
    ENDC = ''
def infog(msg):
    """Print *msg* in green (success)."""
    # Parenthesized print works under both Python 2 and 3 for a single
    # argument; the original `print X` statement is a Py3 syntax error.
    print(OKGREEN + msg + ENDC)
def info(msg):
    """Print *msg* in blue (informational)."""
    # Parenthesized print works under both Python 2 and 3 for a single
    # argument; the original `print X` statement is a Py3 syntax error.
    print(OKBLUE + msg + ENDC)
def warn(msg):
    """Print *msg* in yellow (warning)."""
    # Parenthesized print works under both Python 2 and 3 for a single
    # argument; the original `print X` statement is a Py3 syntax error.
    print(WARNING + msg + ENDC)
def err(msg):
    """Print *msg* in red, prefixed with 'ERROR: '."""
    # Parenthesized print works under both Python 2 and 3 for a single
    # argument; the original `print X` statement is a Py3 syntax error.
    print(FAIL + 'ERROR: ' + msg + ENDC)
# =================================
def main():
    """Walk the current directory for *nav*html files and report every HTML
    or image file they reference that is missing from the filesystem."""
    # nav_matches contains all found *nav*html files. Useful later for
    # checking against opf files.
    nav_matches = []
    for root, dirnames, filenames in os.walk('.'):
        for filename in fnmatch.filter(filenames, '*nav*html'):
            nav_matches.append(os.path.join(root, filename))

    # Check if all files in nav files exist and check if images exist
    for nav_file in nav_matches:
        # Parenthesized print keeps this valid under both Python 2 and 3.
        print('====== Analysing navigation HTML: ' + nav_file)
        nav_htmls = lxml.html.parse(nav_file).xpath("//a/@href")
        for nav_html in nav_htmls:
            # hrefs are relative to the nav file's directory
            nav_html = os.path.join(os.path.dirname(nav_file), nav_html)
            if not os.path.isfile(nav_html):
                err('HTML file {0} in {1} does not exist!'.format(nav_html, os.path.basename(nav_file)))
                continue
            img_links = lxml.html.parse(nav_html).xpath("//img/@src")
            for img_link in img_links:
                # Image sources are relative to the referencing HTML file
                img_file = os.path.join(os.path.dirname(nav_html), img_link)
                if not os.path.isfile(img_file):
                    err('Image file {0} in {1} does not exist!'.format(img_file, nav_html))
# =================================
if __name__ == '__main__':
    # Run the scan only when executed directly (not on import).
    main()
|
{
"content_hash": "dd51dc66a95cb2fb216d55d5a4a27c7e",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 104,
"avg_line_length": 29.029850746268657,
"alnum_prop": 0.5465295629820052,
"repo_name": "therealmarv/epub-validator",
"id": "75d9b66b5009dfc17c3611090a245138cbe9509b",
"size": "1945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get-all-missing-images.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10111"
}
],
"symlink_target": ""
}
|
from google.cloud import automl_v1beta1 as automl
def import_dataset(
    project_id="YOUR_PROJECT_ID",
    dataset_id="YOUR_DATASET_ID",
    path="gs://YOUR_BUCKET_ID/path/to/data.csv",
):
    """Import a dataset."""
    automl_client = automl.AutoMlClient()

    # Fully-qualified resource name of the target dataset.
    dataset_full_id = automl_client.dataset_path(
        project_id, "us-central1", dataset_id
    )

    # ``path`` may list several comma-separated Cloud Storage URIs.
    input_config = automl.InputConfig(
        gcs_source=automl.GcsSource(input_uris=path.split(","))
    )

    # Start the import; result() blocks until the long-running operation ends.
    response = automl_client.import_data(
        name=dataset_full_id, input_config=input_config
    )

    print("Processing import...")
    print("Data imported. {}".format(response.result()))


# [END automl_import_data_beta]
|
{
"content_hash": "cd334b788348accd3030528ee7e0ce91",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 82,
"avg_line_length": 34.75,
"alnum_prop": 0.6786570743405276,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "d091061557cc0127302e85d7672898189934c11a",
"size": "1443",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "automl/beta/import_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
}
|
from tempest_lib.common.utils import data_utils
from tempest.api.compute import base
from tempest import test
class MultipleCreateTestJSON(base.BaseV2ComputeTest):
    """Tests creating several servers in one request (min_count/max_count)."""

    _name = 'multiple-create-test'

    def _generate_name(self):
        # Random suffix avoids name clashes between test runs.
        return data_utils.rand_name(self._name)

    def _create_multiple_servers(self, name=None, wait_until=None, **kwargs):
        """
        This is the right way to create_multiple servers and manage to get the
        created servers into the servers list to be cleaned up after all.
        """
        kwargs['name'] = kwargs.get('name', self._generate_name())
        if wait_until:
            # FIX: forward the requested state so create_test_server blocks
            # until the created servers reach it. Previously the parameter
            # captured the caller's wait_until='ACTIVE' and silently
            # dropped it.
            kwargs['wait_until'] = wait_until
        body = self.create_test_server(**kwargs)
        return body

    @test.idempotent_id('61e03386-89c3-449c-9bb1-a06f423fd9d1')
    def test_multiple_create(self):
        body = self._create_multiple_servers(wait_until='ACTIVE',
                                             min_count=1,
                                             max_count=2)
        # NOTE(maurosr): do status response check and also make sure that
        # reservation_id is not in the response body when the request send
        # contains return_reservation_id=False
        self.assertNotIn('reservation_id', body)

    @test.idempotent_id('864777fb-2f1e-44e3-b5b9-3eb6fa84f2f7')
    def test_multiple_create_with_reservation_return(self):
        body = self._create_multiple_servers(wait_until='ACTIVE',
                                             min_count=1,
                                             max_count=2,
                                             return_reservation_id=True)
        self.assertIn('reservation_id', body)
|
{
"content_hash": "48061b538bb3241f70053f512e2a1c63",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 41.743589743589745,
"alnum_prop": 0.5939803439803439,
"repo_name": "eggmaster/tempest",
"id": "eed3be8c01d1c93c350f5978847e19b20af0b26a",
"size": "2252",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/api/compute/servers/test_multiple_create.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2724691"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|
"""Support for iOS push notifications."""
import logging
import requests
from homeassistant.components import ios
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
BaseNotificationService,
)
from homeassistant.const import HTTP_CREATED, HTTP_TOO_MANY_REQUESTS
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
PUSH_URL = "https://ios-push.home-assistant.io/push"
# pylint: disable=invalid-name
def log_rate_limits(hass, target, resp, level=20):
    """Output rate limit log line at given level."""
    limits = resp["rateLimits"]

    # Time remaining until the push quota resets.
    remaining = dt_util.parse_datetime(limits["resetsAt"]) - dt_util.utcnow()

    _LOGGER.log(
        level,
        "iOS push notification rate limits for %s: "
        "%d sent, %d allowed, %d errors, "
        "resets in %s",
        ios.device_name_for_push_id(hass, target),
        limits["successful"],
        limits["maximum"],
        limits["errors"],
        # Drop the microseconds from the timedelta's string form.
        str(remaining).split(".", maxsplit=1)[0],
    )
def get_service(hass, config, discovery_info=None):
    """Get the iOS notification service."""
    registered = hass.config.components
    if "notify.ios" not in registered:
        # Need this to enable requirements checking in the app.
        registered.add("notify.ios")

    # Without any push-enabled device there is nothing to notify.
    return iOSNotificationService() if ios.devices_with_push(hass) else None
class iOSNotificationService(BaseNotificationService):
    """Implement the notification service for iOS."""

    def __init__(self):
        """Initialize the service."""

    @property
    def targets(self):
        """Return a dictionary of registered targets."""
        return ios.devices_with_push(self.hass)

    def send_message(self, message="", **kwargs):
        """Send a message to the Lambda APNS gateway."""
        data = {ATTR_MESSAGE: message}

        # Remove default title from notifications.
        if (
            kwargs.get(ATTR_TITLE) is not None
            and kwargs.get(ATTR_TITLE) != ATTR_TITLE_DEFAULT
        ):
            data[ATTR_TITLE] = kwargs.get(ATTR_TITLE)

        targets = kwargs.get(ATTR_TARGET)

        if not targets:
            # No explicit target: push to every enabled device.
            targets = ios.enabled_push_ids(self.hass)

        if kwargs.get(ATTR_DATA) is not None:
            data[ATTR_DATA] = kwargs.get(ATTR_DATA)

        for target in targets:
            if target not in ios.enabled_push_ids(self.hass):
                # FIX: log the single offending push ID, not the whole
                # target list -- the message text is singular.
                # NOTE(review): the early return also aborts any remaining
                # valid targets; confirm a `continue` wasn't intended.
                _LOGGER.error("The target (%s) does not exist in .ios.conf", target)
                return

            data[ATTR_TARGET] = target

            req = requests.post(PUSH_URL, json=data, timeout=10)

            if req.status_code != HTTP_CREATED:
                fallback_error = req.json().get("errorMessage", "Unknown error")
                fallback_message = (
                    f"Internal server error, please try again later: {fallback_error}"
                )
                message = req.json().get("message", fallback_message)
                if req.status_code == HTTP_TOO_MANY_REQUESTS:
                    # Rate limited: warn and surface the limits at WARNING level.
                    _LOGGER.warning(message)
                    log_rate_limits(self.hass, target, req.json(), 30)
                else:
                    _LOGGER.error(message)
            else:
                log_rate_limits(self.hass, target, req.json())
|
{
"content_hash": "c5cdfe673b0d37b76bb5ab23d5f75e48",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 86,
"avg_line_length": 31.84259259259259,
"alnum_prop": 0.6027915091596394,
"repo_name": "sander76/home-assistant",
"id": "a096a43ac85d4aad75cf80421093023494cc614f",
"size": "3439",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ios/notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import autocomplete_light
from models import Client, RelationType
# This will generate a ClientAutocomplete class
autocomplete_light.register(Client,
    # Just like in ModelAdmin.search_fields
    search_fields=['^first_name', 'last_name'],
    attrs={
        # This will set the input placeholder attribute:
        'placeholder': 'Client name ?',
        # This will set the yourlabs.Autocomplete.minimumCharacters
        # options, the naming conversion is handled by jQuery
        'data-autocomplete-minimum-characters': 1,
    },
    # This will set the data-widget-maximum-values attribute on the
    # widget container element, and will be set to
    # yourlabs.Widget.maximumValues (jQuery handles the naming
    # conversion).
    widget_attrs={
        'data-widget-maximum-values': 4,
        # Enable modern-style widget !
        'class': 'modern-style',
    },
)
class AutoCompleteRelationType(autocomplete_light.AutocompleteModelBase):
    """Autocomplete for RelationType, filterable by text and contact type."""

    autocomplete_js_attributes = {'placeholder': 'relation with client ...'}

    def choices_for_request(self):
        """Restrict choices using the ``q`` and ``contact_type`` GET params."""
        query = self.request.GET.get('q', '')
        contact_type = self.request.GET.get('contact_type', None)

        candidates = self.choices.all()
        if query:
            candidates = candidates.filter(type_en__icontains=query)
        if contact_type:
            candidates = candidates.filter(contact_type=contact_type)

        # Apply the configured ordering and cap the number of results.
        return self.order_choices(candidates)[0:self.limit_choices]
autocomplete_light.register(RelationType, AutoCompleteRelationType)
|
{
"content_hash": "5630ca600a86d6cd94028c5ca3e97e5c",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 36.095238095238095,
"alnum_prop": 0.679419525065963,
"repo_name": "delphcf/sis",
"id": "702f4875416f58b1d84b1fbd1f1d361541f957e6",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sis/clients/autocomplete_light_registry.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "238944"
},
{
"name": "HTML",
"bytes": "152294"
},
{
"name": "JavaScript",
"bytes": "615251"
},
{
"name": "Python",
"bytes": "273835"
}
],
"symlink_target": ""
}
|
import unittest2
from uuid import uuid4
from .data import \
InMemoryDatabase, UserSpecifiedIdProvider, FileSystemDatabase, \
StringDelimitedKeyBuilder
import shutil
class InMemoryDatabaseTest(unittest2.TestCase):
    """Write/read round-trip tests for InMemoryDatabase streams."""

    def setUp(self):
        self.db = InMemoryDatabase()

    def set(self, k, v):
        """Store *v* under key *k* through a write stream."""
        stream = self.db.write_stream(k, 'text/plain')
        stream.write(v)
        stream.close()

    def get(self, k):
        """Return a read stream for key *k*."""
        return self.db.read_stream(k)

    def test_can_write_data(self):
        self.set('key', b'test data')

    def test_can_read_data(self):
        self.set('key', b'test data')
        self.assertEqual(b'test data', self.get('key').read())

    def test_can_overwrite_key(self):
        # First value is readable...
        self.set('key', b'test data')
        self.assertEqual(b'test data', self.get('key').read())
        # ...then replaced by a second write to the same key.
        self.set('key', b'test data2')
        self.assertEqual(b'test data2', self.get('key').read())
class UserSpecifiedIdProviderTest(unittest2.TestCase):
    """Contract test for UserSpecifiedIdProvider construction."""

    def test_raises_when_no_key_is_provided(self):
        # Building the provider without a key must fail with ValueError.
        with self.assertRaises(ValueError):
            UserSpecifiedIdProvider()
class FileSystemDatabaseTests(unittest2.TestCase):
    """FileSystemDatabase behaviour around path creation and empty writes."""

    def setUp(self):
        self._key_builder = StringDelimitedKeyBuilder()
        # Unique temp directory per test so concurrent runs never collide.
        self._path = '/tmp/{path}'.format(path=uuid4().hex)

    def tearDown(self):
        try:
            shutil.rmtree(self._path)
        except OSError:
            # The directory may never have been created; ignore.
            pass

    def _make_db(self, createdirs):
        return FileSystemDatabase(
            path=self._path,
            key_builder=self._key_builder,
            createdirs=createdirs)

    def test_creates_path_when_asked(self):
        db = self._make_db(createdirs=True)
        with db.write_stream('key', 'text/plain') as stream:
            stream.write('text')
        with db.read_stream('key') as stream:
            self.assertEqual(b'text', stream.read())

    def test_does_not_create_path_when_not_asked(self):
        db = self._make_db(createdirs=False)

        def attempt_write():
            with db.write_stream('key', 'text/plain') as stream:
                stream.write('value')

        # Without createdirs the target directory is missing, so IOError.
        self.assertRaises(IOError, attempt_write)

    def test_key_does_not_exist_when_no_bytes_written(self):
        db = self._make_db(createdirs=True)
        with db.write_stream('key', 'text/plain'):
            pass
        self.assertFalse('key' in db)

    def test_key_does_not_exist_when_no_zero_bytes_written(self):
        db = self._make_db(createdirs=True)
        with db.write_stream('key', 'text/plain') as stream:
            stream.write('')
        self.assertFalse('key' in db)
|
{
"content_hash": "b6b75c71503b578744d446a5123f9be3",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 72,
"avg_line_length": 30,
"alnum_prop": 0.6003831417624521,
"repo_name": "JohnVinyard/featureflow",
"id": "1e5e9a316a1b648fa5f47698e5199191d05e95c6",
"size": "2610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "featureflow/test_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "164752"
},
{
"name": "Shell",
"bytes": "64"
}
],
"symlink_target": ""
}
|
from behave import given
@given(u'🎸')
def step_impl(context):
    """Demonstrates that step text may consist entirely of emoji."""
    pass
|
{
"content_hash": "2c35d06abcbf21930c95542282dc401c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 52,
"avg_line_length": 18,
"alnum_prop": 0.6666666666666666,
"repo_name": "jenisys/behave",
"id": "381d2bbdf29035b2bdb535ea63f17f2b64879d7e",
"size": "195",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "features/steps/i18n_emoji_steps.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "8799"
},
{
"name": "Python",
"bytes": "720530"
},
{
"name": "Shell",
"bytes": "272"
}
],
"symlink_target": ""
}
|
import json
import glob
import os
from tornado.httpclient import AsyncHTTPClient
from vizydrop.sdk.source import SourceFilter
from vizydrop.fields import *
class FileChooserFilter(SourceFilter):
    """Source filter letting the user pick one of the bundled flat files."""

    def get_file_options(account, **kwargs):
        # Runs as a Tornado coroutine (see gen.coroutine) but does not need
        # the decorator here.  Lists every regular file in the sibling
        # data/ directory next to this module.
        data_dir = os.path.join(os.path.dirname(__file__), 'data')
        names = [name for name in os.listdir(data_dir)
                 if os.path.isfile(os.path.join(data_dir, name))]
        return [{"title": name, "value": name} for name in names]

    file = TextField(name="File", description="Name of the flatfile to open", optional=False,
                     get_options=get_file_options)
|
{
"content_hash": "98d98bf680d134815eb34bbddc01e2e4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 94,
"avg_line_length": 39.05263157894737,
"alnum_prop": 0.6846361185983828,
"repo_name": "vizydrop/vizydrop-python-sdk",
"id": "23966a1d705852f715b5faa7a0138b766d88d9d5",
"size": "742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/flatfile/filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54553"
}
],
"symlink_target": ""
}
|
"""cavelanguage URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, patterns, include
from django.contrib import admin
urlpatterns = patterns('cavelanguage.views',
    # NOTE: patterns() with dotted-string view names is the pre-Django-1.8
    # idiom; each name below resolves inside the 'cavelanguage.views' module.
    url(r'^$','home'),
    url(r'^symbols/$','symbol_library'),
    url(r'^symbol/(?P<slug>[\w-]+)/$','symbol'),
    url(r'^collection/(?P<slug>[\w-]+)/$','collection'),
    # Category pages are scoped under their parent collection's slug.
    url(r'^collection/(?P<collection_slug>[\w-]+)/category/(?P<slug>[\w-]+)/$','category'),
    # Diagram detail takes a numeric id plus a slug for readable URLs.
    url(r'^diagram/(?P<diagram_id>\d+)/(?P<diagram_slug>[\w-]+)/$','diagram'),
    url(r'^diagrams/$','diagrams'),
    url(r'^contributors/$','contributors'),
)
# urlpatterns = [
# url(r'^admin/', admin.site.urls),
# ]
|
{
"content_hash": "118e40301434370672316b062f8e7531",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 91,
"avg_line_length": 39.4375,
"alnum_prop": 0.6465927099841522,
"repo_name": "Axilent/cave-language",
"id": "00092841f207797599baa7196c31b36500a37d9a",
"size": "1262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cavelanguage/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "245"
},
{
"name": "HTML",
"bytes": "23813"
},
{
"name": "Python",
"bytes": "24840"
}
],
"symlink_target": ""
}
|
from gevent import monkey; monkey.patch_all()
import os
VERSION = (0, 1, 'alpha')
def get_version(version=None):
  """Format a 3-tuple version (falling back to module VERSION) as 'X.Y stage'."""
  chosen = version or VERSION
  assert(len(chosen) == 3)
  return '%s.%s %s' % chosen
# The file path will have `metis.zip` in it if its being run on Spark workers.
# In that case we don't want to run the following initialization code because
# it can (and does) break things.
if 'metis.zip' in str(__file__):
  # Spark-worker context: expose a placeholder so `metis.app` still imports.
  app = None
else:
  from flask import Flask
  METIS_PATH = os.path.realpath(os.path.dirname(__file__))
  app = Flask(__name__)
  # Settings file lives relative to this package, not the working directory.
  app.config.from_pyfile('%s/conf/default_settings.py' % METIS_PATH)
  app.config['PATH'] = METIS_PATH
  # Bottom import for side effects only (the noqa silences the unused-import
  # lint); presumably it registers routes on `app` — TODO confirm.
  import metis.views  # noqa
|
{
"content_hash": "e84777c2c46403f191e4a9097b8315fb",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 24.821428571428573,
"alnum_prop": 0.6834532374100719,
"repo_name": "Locu/chronology",
"id": "98ea3e2bf7ae377468dfaf39696c2325c7dd37e1",
"size": "695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metis/metis/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "74202"
},
{
"name": "Go",
"bytes": "20558"
},
{
"name": "HTML",
"bytes": "26265"
},
{
"name": "JavaScript",
"bytes": "371721"
},
{
"name": "Makefile",
"bytes": "2332"
},
{
"name": "Mako",
"bytes": "816"
},
{
"name": "Nginx",
"bytes": "2785"
},
{
"name": "Python",
"bytes": "468475"
},
{
"name": "Shell",
"bytes": "3556"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
# Package metadata for the 'remix' script collection.
setup(
    name='remix',
    version='1.0',
    description='Collection of scripts',
    author='Samar',
    author_email='samar@enstino.com',
    license='MIT',
    packages=find_packages(),
    package_dir={'': '.'},
    zip_safe=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development'
    ],
)
|
{
"content_hash": "00dee501da2db2e481fef138ea107fb5",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 49,
"avg_line_length": 29.625,
"alnum_prop": 0.5822784810126582,
"repo_name": "samar-agrawal/remix",
"id": "00c96b71b10790b9e093afcb521f1f780c123daf",
"size": "711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "296"
},
{
"name": "Python",
"bytes": "9290"
},
{
"name": "Shell",
"bytes": "381"
}
],
"symlink_target": ""
}
|
import mock
from neutron.agent.linux import bridge_lib
from neutron.tests import base
class BridgeLibTest(base.BaseTestCase):
    """A test suite to exercise the bridge libraries """

    _NAMESPACE = 'test-namespace'
    _BR_NAME = 'test-br'
    _IF_NAME = 'test-if'

    def setUp(self):
        super(BridgeLibTest, self).setUp()
        # Patch IPWrapper so no real brctl/sysctl commands run; every command
        # is captured on the mocked namespace-aware execute().
        ip_wrapper = mock.patch('neutron.agent.linux.ip_lib.IPWrapper').start()
        self.execute = ip_wrapper.return_value.netns.execute

    def _verify_bridge_mock(self, cmd):
        # Assert exactly one root-privileged call, then reset so each step of
        # a multi-step test is checked in isolation.
        self.execute.assert_called_once_with(cmd, run_as_root=True)
        self.execute.reset_mock()

    def _verify_bridge_mock_check_exit_code(self, cmd):
        # Same as _verify_bridge_mock but for calls that also check exit code.
        self.execute.assert_called_once_with(cmd, run_as_root=True,
                                             check_exit_code=True)
        self.execute.reset_mock()

    def test_is_bridged_interface(self):
        # Only tapOK has a brport entry in the faked sysfs.
        exists = lambda path: path == "/sys/class/net/tapOK/brport"
        with mock.patch('os.path.exists', side_effect=exists):
            self.assertTrue(bridge_lib.is_bridged_interface("tapOK"))
            self.assertFalse(bridge_lib.is_bridged_interface("tapKO"))

    def test_get_interface_bridge(self):
        # First readlink returns "prefix/br0"; the second raises OSError,
        # which must map to a None result rather than propagate.
        with mock.patch('os.readlink', side_effect=["prefix/br0", OSError()]):
            br = bridge_lib.BridgeDevice.get_interface_bridge('tap0')
            self.assertIsInstance(br, bridge_lib.BridgeDevice)
            self.assertEqual("br0", br.name)
            br = bridge_lib.BridgeDevice.get_interface_bridge('tap0')
            self.assertIsNone(br)

    def _test_br(self, namespace=None):
        # Walk the full bridge lifecycle (create, configure, add/remove
        # interface, delete) verifying each emitted command in turn.
        br = bridge_lib.BridgeDevice.addbr(self._BR_NAME, namespace)
        self.assertEqual(namespace, br.namespace)
        self._verify_bridge_mock(['brctl', 'addbr', self._BR_NAME])
        br.setfd(0)
        self._verify_bridge_mock(['brctl', 'setfd', self._BR_NAME, '0'])
        br.disable_stp()
        self._verify_bridge_mock(['brctl', 'stp', self._BR_NAME, 'off'])
        br.disable_ipv6()
        cmd = 'net.ipv6.conf.%s.disable_ipv6=1' % self._BR_NAME
        self._verify_bridge_mock_check_exit_code(['sysctl', '-w', cmd])
        br.addif(self._IF_NAME)
        self._verify_bridge_mock(
            ['brctl', 'addif', self._BR_NAME, self._IF_NAME])
        br.delif(self._IF_NAME)
        self._verify_bridge_mock(
            ['brctl', 'delif', self._BR_NAME, self._IF_NAME])
        br.delbr()
        self._verify_bridge_mock(['brctl', 'delbr', self._BR_NAME])

    def test_addbr_with_namespace(self):
        self._test_br(self._NAMESPACE)

    def test_addbr_without_namespace(self):
        self._test_br()

    def test_addbr_exists(self):
        # When the underlying command fails but the bridge already exists,
        # addbr must not raise; calling twice exercises that path.
        self.execute.side_effect = RuntimeError()
        with mock.patch.object(bridge_lib.BridgeDevice, 'exists',
                               return_value=True):
            bridge_lib.BridgeDevice.addbr(self._BR_NAME)
            bridge_lib.BridgeDevice.addbr(self._BR_NAME)

    def test_owns_interface(self):
        br = bridge_lib.BridgeDevice('br-int')
        # Fake sysfs: only 'abc' appears under this bridge's brif directory.
        exists = lambda path: path == "/sys/class/net/br-int/brif/abc"
        with mock.patch('os.path.exists', side_effect=exists):
            self.assertTrue(br.owns_interface("abc"))
            self.assertFalse(br.owns_interface("def"))

    def test_get_interfaces(self):
        br = bridge_lib.BridgeDevice('br-int')
        interfaces = ["tap1", "tap2"]
        # Second os.listdir raises OSError -> get_interfaces returns [].
        with mock.patch('os.listdir', side_effect=[interfaces, OSError()]):
            self.assertEqual(interfaces, br.get_interfaces())
            self.assertEqual([], br.get_interfaces())
|
{
"content_hash": "e4f2624be828fbafac24ad138c991107",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 38.67741935483871,
"alnum_prop": 0.6105087572977481,
"repo_name": "cloudbase/neutron",
"id": "10b8d2cfe69274c47b5b130bb0efc0d93997ad1e",
"size": "4355",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/agent/linux/test_bridge_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9942988"
},
{
"name": "Shell",
"bytes": "14325"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import responses
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from sentry.testutils import APITestCase
from sentry.utils import json
class SentryAppInstallationExternalRequestsEndpointTest(APITestCase):
    """Tests for the sentry-app installation external-requests endpoint.

    The endpoint proxies a select-field query to the sentry app's service
    (mocked here with `responses`) and normalizes the JSON answer into
    ``{"choices": [[value, label], ...]}``.
    """

    def setUp(self):
        self.user = self.create_user(email="boop@example.com")
        self.org = self.create_organization(owner=self.user)
        self.project = self.create_project(organization=self.org)
        self.sentry_app = self.create_sentry_app(
            name="Testin", organization=self.org, webhook_url="https://example.com"
        )
        self.install = self.create_sentry_app_installation(
            organization=self.org, slug=self.sentry_app.slug, user=self.user
        )
        self.url = reverse(
            "sentry-api-0-sentry-app-installation-external-requests", args=[self.install.uuid]
        )

    @responses.activate
    def test_makes_external_request(self):
        self.login_as(user=self.user)
        options = [{"label": "Project Name", "value": "1234"}]
        responses.add(
            method=responses.GET,
            url=u"https://example.com/get-projects?projectSlug={}&installationId={}&query=proj".format(
                self.project.slug, self.install.uuid
            ),
            json=options,
            status=200,
            content_type="application/json",
            match_querystring=True,
        )
        url = self.url + u"?projectId={}&uri={}&query={}".format(
            self.project.id, "/get-projects", "proj"
        )
        response = self.client.get(url, format="json")
        assert response.status_code == 200
        assert response.data == {"choices": [["1234", "Project Name"]]}

    @responses.activate
    def test_makes_external_request_with_dependent_data(self):
        self.login_as(user=self.user)
        options = [{"label": "Project Name", "value": "1234"}]
        query = {
            "projectSlug": self.project.slug,
            "installationId": self.install.uuid,
            "query": "proj",
            "dependentData": json.dumps({"org_id": "A"}),
        }
        responses.add(
            method=responses.GET,
            url=u"https://example.com/get-projects?%s" % urlencode(query),
            json=options,
            status=200,
            content_type="application/json",
            match_querystring=True,
        )
        query = {
            "projectId": self.project.id,
            "uri": "/get-projects",
            "query": "proj",
            "dependentData": json.dumps({"org_id": "A"}),
        }
        url = u"%s?%s" % (self.url, urlencode(query))
        response = self.client.get(url, format="json")
        assert response.status_code == 200
        assert response.data == {"choices": [["1234", "Project Name"]]}

    @responses.activate
    def test_external_request_fails(self):
        self.login_as(user=self.user)
        # BUG FIX: the url template previously had one "{}" but two format
        # arguments, so installationId was filled with the project slug and
        # the install uuid was silently dropped (str.format ignores extra
        # positional args).  Name both parameters explicitly.
        responses.add(
            method=responses.GET,
            url=u"https://example.com/get-projects?projectSlug={}&installationId={}".format(
                self.project.slug, self.install.uuid
            ),
            status=500,
            content_type="application/json",
        )
        # BUG FIX: "uri" was previously formatted with self.project.id while
        # "/get-projects" was silently ignored; send both query params.
        url = self.url + u"?projectId={}&uri={}".format(
            self.project.id, "/get-projects"
        )
        response = self.client.get(url, format="json")
        # Upstream 500 surfaces to the API caller as a 400.
        assert response.status_code == 400
|
{
"content_hash": "33a5d563b8fcd638d9afeecef6af8bf5",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 103,
"avg_line_length": 37.108695652173914,
"alnum_prop": 0.5837727006444053,
"repo_name": "beeftornado/sentry",
"id": "9203053e149be606468f0561b67c8320f2792568",
"size": "3414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/api/endpoints/test_sentry_app_installation_external_requests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
}
|
"""Utility for eagerly executing operations in parallel on multiple devices."""
import threading
import weakref
from tensorflow.python import _pywrap_parallel_device
from tensorflow.python.distribute import device_util
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util import nest
# Monotonically increasing suffix used to name parallel devices; guarded by
# the lock so concurrent ParallelDevice construction yields unique names.
_next_device_number = 0
_next_device_number_lock = threading.Lock()
# Maps device name -> ParallelDevice; weak values so devices can be
# garbage-collected.  Lets the module-level unpack() find a tensor's device.
_all_parallel_devices = weakref.WeakValueDictionary()
def unpack(tensor):
  """Unpacks `tensor` into per-component tensors via its parallel device.

  Raises:
    ValueError: if `tensor` is not placed on a registered parallel device.
  """
  owner = _all_parallel_devices.get(tensor.device, None)
  if owner is None:
    raise ValueError("{} is not a parallel device".format(tensor.device))
  return owner.unpack(tensor)
# TODO(allenl): Expand this docstring once things like getting components on and
# off the device are stable.
#
# TODO(allenl): Make multi-client work; we need an offset for device IDs, and an
# indication of how many other devices there are total for collectives which
# don't have a number of participants hard-coded in their attributes.
class ParallelDevice(object):
  """A device which executes operations in parallel."""

  def __init__(self, components):
    """Creates a device which executes operations in parallel on `components`.

    Args:
      components: A list of device names. Each operation executed on the
        returned device executes on these component devices.

    Returns:
      A string with the name of the newly created device.
    """
    global _next_device_number, _next_device_number_lock
    self.components = tuple(device_util.canonicalize(d) for d in components)
    if not self.components:
      raise ValueError("ParallelDevice requires at least one component.")
    ctx = context.context()
    with _next_device_number_lock:
      # TODO(allenl): Better names for parallel devices (right now "CUSTOM" is
      # special-cased).
      self._name = "{}/device:CUSTOM:{}".format(ctx.host_address_space(),
                                                _next_device_number)
      _next_device_number += 1
    device, device_info = _pywrap_parallel_device.GetParallelDeviceCapsules(
        self._name, self.components)
    context.register_custom_device(device, self._name, device_info)
    self._device_ids = None
    self._device_scope = None
    # Register globally so the module-level unpack() helper can resolve this
    # device by name later.
    _all_parallel_devices[self._name] = self

  def _pack_tensor(self, *tensors):
    """Helper to pack plain-old-tensors, not structures or composites."""
    for tensor in tensors:
      if not isinstance(tensor, (ops.Tensor, composite_tensor.CompositeTensor,
                                 variables.Variable)):
        raise ValueError(
            ("Every component to pack onto the ParallelDevice must already be "
             "a tensor, got {}. Consider running `tf.constant` or "
             "`tf.convert_to_tensor` first on literal values.")
            .format(tensors))
    with ops.device(None):
      # Explicitly read variable values. This can not be done on the parallel
      # device since the tensors are to be packed.
      tensors = [t.read_value() if isinstance(t, variables.Variable)
                 else t for t in tensors]
    with ops.device(self._name):
      return tpu_ops.tpu_replicated_input(inputs=tensors)

  def pack(self, tensors):
    """Create a tensor on the parallel device from a sequence of tensors.

    Args:
      tensors: A list of tensors, one per device in `self.components`. The list
        can contain composite tensors and nests (lists, dicts, etc. supported by
        `tf.nest`) with the same structure for each device, but every component
        of nests must already be a `tf.Tensor` or composite. Passing
        `tf.Variable` objects reads their value, it does not share a mutable
        reference between the packed and unpacked forms.

    Returns:
      A tensor placed on the ParallelDevice. For nested structures, returns a
      single structure containing tensors placed on the ParallelDevice (same
      structure as each component of `tensors`).

    Raises:
      ValueError: If the length of `tensors` does not match the number of
        component devices, or if there are non-tensor inputs.
    """
    self._assert_eager()
    if len(tensors) != len(self.components):
      raise ValueError(
          ("Creating a parallel tensor requires one tensor per component. "
           "Got {} but was expecting {}.")
          .format(len(tensors), len(self.components)))
    # map_structure walks each per-device structure in lockstep, packing the
    # matching leaves from every device together.
    return nest.map_structure(self._pack_tensor, *tensors,
                              expand_composites=True)

  def _unpack_tensor(self, parallel_tensor):
    """Helper to unpack a single tensor."""
    if not isinstance(parallel_tensor, (
        ops.Tensor, composite_tensor.CompositeTensor, variables.Variable)):
      raise ValueError(
          "Expected a tensor, got {}.".format(parallel_tensor))
    with ops.device(self._name):
      return tpu_ops.tpu_replicated_output(
          parallel_tensor, num_replicas=len(self.components))

  def unpack(self, parallel_tensor):
    """Unpack a parallel tensor into its components.

    Args:
      parallel_tensor: A tensor, composite tensor, or `tf.nest` of such placed
        on the ParallelDevice. Passing `tf.Variable` objects reads their value,
        it does not share a mutable reference between the packed and unpacked
        forms.

    Returns:
      A list with the same length as `self.components` each with the same
      structure as `parallel_tensor`, containing component tensors.
    """
    self._assert_eager()
    # Flatten (including composites), unpack each flat tensor, then rebuild
    # one structure per component device.
    unpacked_components = [[] for _ in range(len(self.components))]
    for tensor in nest.flatten(parallel_tensor, expand_composites=True):
      for accumulator, unpacked_tensor in zip(
          unpacked_components, self._unpack_tensor(tensor)):
        accumulator.append(unpacked_tensor)
    return [nest.pack_sequence_as(parallel_tensor, unpacked,
                                  expand_composites=True)
            for unpacked in unpacked_components]

  @property
  def device_ids(self):
    """A parallel tensor with scalar integers numbering component devices.

    Each device ID is placed on its corresponding device, in the same order as
    the `components` constructor argument.

    Returns:
      A parallel tensor containing 0 on the first device, 1 on the second, etc.
    """
    if self._device_ids is None:
      # device_ids may be called from inside a tf.function, in which case the
      # function captures the eager tensor. We can't pack tensors in a function
      # at the moment, and even if we could we don't want to hold on to a
      # symbolic tensor, so we need to init_scope out of the function
      # temporarily.
      with ops.init_scope():
        # TODO(allenl): Functions which capture eager device ID tensors won't be
        # saveable in SavedModels. Ideally we'd run a DeviceID op every time
        # device IDs are required, with functions using the op in their bodies
        # but not hard-coding a fixed number of devices (so they can be re-used
        # with a different replica count).
        device_ids_list = []
        for index, device in enumerate(self.components):
          with ops.device(device):
            # The identity op ensures each device ID tensor is placed on its
            # device.
            device_ids_list.append(
                array_ops.identity(constant_op.constant(index)))
        self._device_ids = self.pack(device_ids_list)
    return self._device_ids

  def _assert_eager(self):
    """Verifies that tracing is not active."""
    if not context.executing_eagerly():
      raise NotImplementedError(
          "ParallelDevice is currently not supported inside `tf.function`. It "
          "can however run calls to a `tf.function` in parallel:\n\n"
          "with ParallelDevice() as p:\n f()")

  def __enter__(self):
    """Runs ops in parallel, makes variables which save independent buffers."""
    if self._device_scope is not None:
      raise AssertionError(
          "Re-entered a ParallelDevice scope without first exiting it.")
    self._assert_eager()
    self._device_scope = ops.device(self._name)
    self._device_scope.__enter__()
    return self

  def __exit__(self, typ, exc, tb):
    # Always clear the saved scope so the device can be re-entered later.
    self._device_scope.__exit__(typ, exc, tb)
    self._device_scope = None
|
{
"content_hash": "301a3c32edda0ccbca1fb80af9f97dfe",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 80,
"avg_line_length": 42.41379310344828,
"alnum_prop": 0.6797909407665506,
"repo_name": "Intel-Corporation/tensorflow",
"id": "6da7c2fd078dc75e1a75481a499765bb8f61629c",
"size": "9299",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/parallel_device/parallel_device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
}
|
import datetime
import logging
import simplejson
from oauth import oauth
from django.conf import settings
from django.core import mail
from google.appengine.api import images
from common import api
from common import exception
from common import mail as common_mail
from common import models
from common import oauth_util
from common import profile
from common.protocol import sms
from common.test import base
class ApiIntegrationTest(base.FixturesTestCase):
  """End-to-end tests of the /api/json endpoint: method dispatch, OAuth
  verification modes, and presence queries.  (Python 2 code.)"""

  def setUp(self):
    super(ApiIntegrationTest, self).setUp()
    # Canonical signed-request parameters reused across the OAuth tests.
    self.post_params = {"method": "post",
                        "nick": "popular@example.com",
                        "message": "message_1",
                        }
    self.url = "http://%s/api/json" % settings.DOMAIN
    # Save api.utcnow so tests that monkeypatch it can be undone in tearDown.
    self.old_utcnow = api.utcnow
    self.now = api.utcnow()

  def tearDown(self):
    api.utcnow = self.old_utcnow
    # Reset any settings overrides made by individual tests.
    settings._target = None
    super(ApiIntegrationTest, self).tearDown()

  def get(self, url, parameters={}):
    # GET the url and decode the JSON response body.
    # NOTE(review): mutable default dict; harmless here since it is never
    # mutated, but worth replacing with None in a behavior-change pass.
    response = self.client.get(url, parameters)
    try:
      rv = simplejson.loads(response.content)
      return rv
    except ValueError, e:
      # Dump the raw body to aid debugging non-JSON error pages.
      print response.content
      raise

  def test_json_no_method(self):
    rv = self.get('/api/json')
    self.assertEqual(rv['status'], 'error')
    self.assertEqual(rv['code'], exception.NO_METHOD)

  def test_json_invalid_method(self):
    rv = self.get('/api/json', {"method": "INVALID_METHOD"})
    self.assertEqual(rv['status'], 'error')
    self.assertEqual(rv['code'], exception.INVALID_METHOD)

  def test_json_invalid_args(self):
    # Skip OAuth so the argument validation path is what fails.
    settings.API_DISABLE_VERIFICATION = True
    rv = self.get('/api/json', {"method": "actor_get"})
    self.assertEqual(rv['status'], 'error')
    self.assertEqual(rv['code'], exception.INVALID_ARGUMENTS)

  def test_json_invalid_oauth_request_no_oauth(self):
    settings.API_DISABLE_VERIFICATION = False
    rv = self.get('/api/json', {"method": "post"})
    self.assertEqual(rv['status'], 'error')
    # TODO(termie): update this when we have an error code, it is no longer
    # an OAUTH_ERROR
    #self.assertEqual(rv['code'], exception.OAUTH_ERROR)

  def test_json_invalid_oauth_request_bad_tokens(self):
    settings.API_DISABLE_VERIFICATION = False
    settings.API_ALLOW_ROOT_PLAINTEXT = True
    sig_method = oauth_util.PLAINTEXT
    # test invalid consumer
    bad_consumer = oauth.OAuthConsumer('BAD_KEY', 'BAD_SECRET')
    request = oauth.OAuthRequest.from_consumer_and_token(
        bad_consumer,
        oauth_util.ROOT_TOKEN,
        http_url=self.url,
        parameters=self.post_params)
    request.sign_request(sig_method, bad_consumer, oauth_util.ROOT_TOKEN)
    rv = self.get('/api/json', request.parameters)
    self.assertEqual(rv['status'], 'error')
    self.assertEqual(rv['code'], exception.OAUTH_ERROR)
    # test invalid access token
    pass

  def test_json_hmacsha1_root_access(self):
    # Root HMAC-SHA1 access must fail while disabled and succeed once the
    # setting is flipped on.
    settings.API_DISABLE_VERIFICATION = False
    settings.API_ALLOW_ROOT_HMAC_SHA1 = False
    def _req():
      request = oauth.OAuthRequest.from_consumer_and_token(
          oauth_util.ROOT_CONSUMER,
          oauth_util.ROOT_TOKEN,
          http_url=self.url,
          parameters=self.post_params,
          )
      request.sign_request(oauth_util.HMAC_SHA1,
                           oauth_util.ROOT_CONSUMER,
                           oauth_util.ROOT_TOKEN
                           )
      rv = self.get('/api/json', request.parameters)
      return rv
    # fail
    rv = _req()
    self.assertEqual(rv['status'], 'error', "root hmac_sha1 should be disabled")
    self.assertEqual(rv['code'], exception.OAUTH_ERROR)
    # succeed
    settings.API_ALLOW_ROOT_HMAC_SHA1 = True
    rv = _req()
    self.assertEqual(rv['status'], 'ok', str(rv))

  def test_json_plaintext_root_access(self):
    # Same on/off pattern as the HMAC-SHA1 test, for PLAINTEXT signatures.
    settings.API_DISABLE_VERIFICATION = False
    settings.API_ALLOW_ROOT_PLAINTEXT = False
    def _req():
      request = oauth.OAuthRequest.from_consumer_and_token(
          oauth_util.ROOT_CONSUMER,
          oauth_util.ROOT_TOKEN,
          http_url=self.url,
          parameters=self.post_params,
          )
      request.sign_request(oauth_util.PLAINTEXT,
                           oauth_util.ROOT_CONSUMER,
                           oauth_util.ROOT_TOKEN
                           )
      rv = self.get('/api/json', request.parameters)
      return rv
    # fail
    rv = _req()
    self.assertEqual(rv['status'], 'error', "root plaintext should be disabled")
    self.assertEqual(rv['code'], exception.OAUTH_ERROR)
    # succeed
    settings.API_ALLOW_PLAINTEXT = True
    settings.API_ALLOW_ROOT_PLAINTEXT = True
    rv = _req()
    self.assertEqual(rv['status'], 'ok', str(rv))

  def test_json_params(self):
    # The API also accepts all parameters bundled as one JSON blob.
    settings.API_DISABLE_VERIFICATION = True
    params = { 'json_params': simplejson.dumps(self.post_params) }
    rv = self.get('/api/json', params)
    self.assertEqual(rv['status'], 'ok', str(rv))

  def test_presence_get_contacts(self):
    settings.API_DISABLE_VERIFICATION = True
    # Two presence updates at distinct fixed times; since_time between them
    # should still return both contacts (newest first).
    timestamp1 = datetime.datetime(2007, 01, 01, 02, 03, 04)
    timestamp2 = datetime.datetime(2008, 01, 01, 02, 03, 04)
    timestamp_between = datetime.datetime(2007, 06, 01, 02, 03, 04)
    api.utcnow = lambda: timestamp1
    rv = self.get('/api/json', {'method': 'presence_set',
                                'nick': 'root@example.com',
                                'status': 'bar'})
    api.utcnow = lambda: timestamp2
    rv = self.get('/api/json', {'method': 'presence_set',
                                'nick': 'celebrity@example.com',
                                'status': 'baz'})
    rv = self.get('/api/json', {'method': 'presence_get_contacts',
                                'nick': 'popular@example.com',
                                'since_time': str(timestamp_between)})
    self.assertEqual(rv.get('servertime', ''), str(timestamp2), str(rv))
    self.assertEqual(len(rv.get('rv', {}).get('contacts', [])), 2, str(rv))
    self.assertEqual(rv['rv']['contacts'][0]['presence'].get('extra', {}).get('status', ''),
                     'baz', str(rv))
    self.assertEqual(rv['rv']['contacts'][0]['presence'].get('extra', {}).get('given_name', ''),
                     'Cele', str(rv))
    self.assertEqual(rv['rv']['contacts'][0]['presence'].get('extra', {}).get('family_name', ''),
                     'Brity', str(rv))
    # Asking for updates strictly after the newest one returns only one entry.
    rv = self.get('/api/json', {'method': 'presence_get_contacts',
                                'nick': 'popular@example.com',
                                'since_time': str(timestamp2 +
                                                  datetime.timedelta(0, 1))})
    self.assertEqual(len(rv.get('rv', {}).get('contacts', [])), 1, str(rv))
class ApiUnitTest(base.FixturesTestCase):
  """a plethora of tests to make sure all the interfaces keep working """

  # Fixture nicks shared by all subclasses; they correspond to actors in the
  # test fixtures loaded by FixturesTestCase.
  unpopular_nick = 'unpopular@example.com'
  popular_nick = 'popular@example.com'
  celebrity_nick = 'celebrity@example.com'
  hermit_nick = 'hermit@example.com'
  nonexist_nick = 'nonexist@example.com'
  annoying_nick = 'annoying@example.com'
  obligated_nick = 'obligated@example.com'
  root_nick = 'root@example.com'

  def setUp(self):
    super(ApiUnitTest, self).setUp()
    self.popular = api.actor_get(api.ROOT, self.popular_nick)
    self.hermit = api.actor_get(api.ROOT, self.hermit_nick)
    # Tests here run with the model cache enabled, reset per test.
    models.CachingModel.reset_cache()
    models.CachingModel.enable_cache(True)

  def tearDown(self):
    super(ApiUnitTest, self).tearDown()
    models.CachingModel.enable_cache(False)

  def _exercise_permissions(self, f, expected, *args, **kw):
    """helper to run through the 4 permissions on a target"""
    # `expected` maps access_level -> whether the call should be permitted.
    # Each iteration re-initializes the datastore so results are independent.
    for k, v in expected.iteritems():
      self._pre_setup() # Reinit the datastore
      self.setUp() # Standard setup
      threw = False
      try:
        temp_actor = models.Actor(nick='root@example.com')
        temp_actor.access_level = k
        f(temp_actor, *args, **kw)
      except exception.ApiException, e:
        threw = True
        # Permitted calls must not raise PERMISSION_ERROR, and vice versa.
        perm_error = (e.code == exception.PERMISSION_ERROR)
        matched = (v != perm_error)
        self.assert_(matched, "Permissions %s, %s" % (k, v))
      except:
        # No 'finally' in 2.4
        self.tearDown()
        raise
      # no 'else' in 2.4
      if not threw:
        self.assert_(v, "Permissions %s, %s" % (k, v))
      self.tearDown()
class ApiUnitTestBasic(ApiUnitTest):
  """Tests for the basic actor-level api calls."""

  def test_actor_get(self):
    """actor_get works for public and private actors from any viewer."""
    # root public case
    root_public = api.actor_get(api.ROOT, self.popular_nick)
    self.assertEqual(root_public.nick, self.popular_nick)
    # root private case
    root_private = api.actor_get(api.ROOT, self.celebrity_nick)
    self.assertEqual(root_private.nick, self.celebrity_nick)
    # contact public case
    contact_public = api.actor_get(self.popular, self.popular_nick)
    self.assertEqual(contact_public.nick, self.popular_nick)
    # contact private case
    contact_private = api.actor_get(self.popular, self.celebrity_nick)
    self.assertEqual(contact_private.nick, self.celebrity_nick)
    # test user public case
    test_public = api.actor_get(self.hermit, self.popular_nick)
    self.assertEqual(test_public.nick, self.popular_nick)
    # test perms
    self._exercise_permissions(api.actor_get,
                               {'read': True,
                                'write': True,
                                'delete': True,
                                'admin': True},
                               self.popular_nick)

  def test_actor_get_actors(self):
    """actor_get_actors returns every requested actor the viewer may see."""
    popular_nicks = [self.popular_nick, self.unpopular_nick]
    half_nicks = [self.popular_nick, self.celebrity_nick]
    celebrity_nicks = [self.celebrity_nick]
    # root public case
    root_public = api.actor_get_actors(api.ROOT, popular_nicks)
    self.assertEqual(len(root_public), 2)
    # root half case
    root_half = api.actor_get_actors(api.ROOT, half_nicks)
    self.assertEqual(len(root_half), 2)
    # root private case
    root_private = api.actor_get_actors(api.ROOT, celebrity_nicks)
    self.assertEqual(len(root_private), 1)
    # contact public case
    contact_public = api.actor_get_actors(self.popular, popular_nicks)
    self.assertEqual(len(contact_public), 2)
    # contact half case
    contact_half = api.actor_get_actors(self.popular, half_nicks)
    self.assertEqual(len(contact_half), 2)
    # contact private case
    contact_private = api.actor_get_actors(self.popular, celebrity_nicks)
    self.assertEqual(len(contact_private), 1)
    # test public case
    test_public = api.actor_get_actors(self.hermit, popular_nicks)
    self.assertEqual(len(test_public), 2)
    # test perms
    self._exercise_permissions(api.actor_get_actors,
                               {'read': True,
                                'write': True,
                                'delete': True,
                                'admin': True},
                               [self.popular_nick])

  def test_actor_has_contact(self):
    """actor_has_contact reports existing and missing contact edges."""
    # root public case
    root_public = api.actor_has_contact(api.ROOT, self.popular_nick,
                                        self.celebrity_nick)  # YES
    self.assert_(root_public)
    root_private = api.actor_has_contact(api.ROOT, self.celebrity_nick,
                                         self.popular_nick)  # YES
    self.assert_(root_private)
    root_notcontact = api.actor_has_contact(api.ROOT, self.popular_nick,
                                            self.unpopular_nick)  # NO
    self.assert_(not root_notcontact)
    # test perms
    self._exercise_permissions(api.actor_has_contact,
                               {'read': True,
                                'write': True,
                                'delete': True,
                                'admin': True},
                               self.popular_nick,
                               self.celebrity_nick)

  def test_actor_add_contact(self):
    """actor_add_contact updates counts, is idempotent, needs write perms."""
    # Make sure it works between local users though
    actor_before = api.actor_get(api.ROOT, self.popular_nick)
    other_before = api.actor_get(api.ROOT, self.unpopular_nick)
    contacts_before = actor_before.extra['contact_count']
    followers_before = other_before.extra['follower_count']
    is_contact_before = api.actor_has_contact(api.ROOT,
                                              self.popular_nick,
                                              self.unpopular_nick)
    self.assert_(not is_contact_before)
    api.actor_add_contact(api.ROOT, self.popular_nick, self.unpopular_nick)
    actor_after = api.actor_get(api.ROOT, self.popular_nick)
    other_after = api.actor_get(api.ROOT, self.unpopular_nick)
    contacts_after = actor_after.extra['contact_count']
    followers_after = other_after.extra['follower_count']
    is_contact_after = api.actor_has_contact(api.ROOT,
                                             self.popular_nick,
                                             self.unpopular_nick)
    self.assertEqual(contacts_before + 1, contacts_after, "contacts count")
    self.assertEqual(followers_before + 1, followers_after, "followers count")
    self.assert_(is_contact_after)
    # Make sure we can repeat the whole process without nasty sideeffects
    actor_before = api.actor_get(api.ROOT, self.popular_nick)
    other_before = api.actor_get(api.ROOT, self.unpopular_nick)
    contacts_before = actor_before.extra['contact_count']
    followers_before = other_before.extra['follower_count']
    is_contact_before = api.actor_has_contact(api.ROOT,
                                              self.popular_nick,
                                              self.unpopular_nick)
    self.assert_(is_contact_before)
    api.actor_add_contact(api.ROOT, self.popular_nick, self.unpopular_nick)
    actor_after = api.actor_get(api.ROOT, self.popular_nick)
    other_after = api.actor_get(api.ROOT, self.unpopular_nick)
    contacts_after = actor_after.extra['contact_count']
    followers_after = other_after.extra['follower_count']
    is_contact_after = api.actor_has_contact(api.ROOT,
                                             self.popular_nick,
                                             self.unpopular_nick)
    self.assertEqual(contacts_before, contacts_after, "contacts count")
    self.assertEqual(followers_before, followers_after, "followers count")
    self.assert_(is_contact_after)
    # Make sure it requires write permissions
    self._exercise_permissions(api.actor_add_contact,
                               {'read': False,
                                'write': True,
                                'delete': True,
                                'admin': True},
                               self.popular_nick,
                               self.unpopular_nick)
    # Make sure we can't modify other people
    def _modify_other():
      api.actor_add_contact(self.hermit, self.popular_nick,
                            self.unpopular_nick)
    self.assertRaises(exception.ApiException, _modify_other)
    # TODO notification checks

  def test_actor_remove_contact(self):
    """actor_remove_contact rejects non-contacts and needs delete perms."""
    # must be a contact
    def _not_a_contact():
      api.actor_remove_contact(api.ROOT, self.popular_nick,
                               self.unpopular_nick)
    self.assertRaises(exception.ApiException, _not_a_contact)
    # test perms
    self._exercise_permissions(api.actor_remove_contact,
                               {'read': False,
                                'write': False,
                                'delete': True,
                                'admin': True},
                               self.unpopular_nick,
                               self.popular_nick)

  def test_actor_add_contact_subscriptions(self):
    """Adding/removing contacts creates/removes stream subscriptions."""
    # test that after we add a public user we are subscribed to
    # all their streams
    subscriber = api.actor_get(api.ROOT, self.popular_nick)
    subscriber_inbox = "inbox/%s/overview" % subscriber.nick
    public_target = api.actor_get(api.ROOT, self.unpopular_nick)
    public_streams = api.stream_get_actor(api.ROOT, self.unpopular_nick)
    self.assert_(len(public_streams))
    api.actor_add_contact(api.ROOT, subscriber.nick, public_target.nick)
    for stream in public_streams:
      self.assert_(api.subscription_is_active(api.ROOT,
                                              stream.key().name(),
                                              subscriber_inbox))
    # test that after we add a contacts only user we have created
    # pending subscriptions for all their streams
    private_target = api.actor_get(api.ROOT, self.hermit_nick)
    private_streams = api.stream_get_actor(api.ROOT, private_target.nick)
    self.assert_(len(private_streams))
    api.actor_add_contact(api.ROOT, subscriber.nick, private_target.nick)
    for stream in private_streams:
      self.assert_(api.subscription_exists(api.ROOT,
                                           stream.key().name(),
                                           subscriber_inbox))
      self.assert_(not api.subscription_is_active(api.ROOT,
                                                  stream.key().name(),
                                                  subscriber_inbox))
    # test that after a contacts-only user adds a user their subscriptions
    # that were marked as pending are allowed
    # XXX this test relies on the results of the previous test being correct
    subscriber_streams = api.stream_get_actor(api.ROOT, subscriber.nick)
    self.assert_(len(subscriber_streams))
    api.actor_add_contact(api.ROOT, private_target.nick, subscriber.nick)
    private_inbox = "inbox/%s/overview" % private_target.nick
    for stream in subscriber_streams:
      self.assert_(api.subscription_is_active(api.ROOT,
                                              stream.key().name(),
                                              private_inbox))
    for stream in private_streams:
      self.assert_(api.subscription_is_active(api.ROOT,
                                              stream.key().name(),
                                              subscriber_inbox))
    # test that after we remove a user we are unsubscribed
    # from all their feeds
    api.actor_remove_contact(api.ROOT, subscriber.nick, public_target.nick)
    for stream in public_streams:
      self.assert_(not api.subscription_exists(api.ROOT,
                                               stream.key().name(),
                                               subscriber_inbox))
    # test that after a private user removes a user they are
    # unsubscribed from all their feeds
    api.actor_remove_contact(api.ROOT, private_target.nick, subscriber.nick)
    for stream in subscriber_streams:
      self.assert_(not api.subscription_exists(api.ROOT,
                                               stream.key().name(),
                                               private_inbox))
    for stream in private_streams:
      # the subscription survives but is demoted back to inactive
      self.assert_(not api.subscription_is_active(api.ROOT,
                                                  stream.key().name(),
                                                  subscriber_inbox))
      self.assert_(api.subscription_exists(api.ROOT,
                                           stream.key().name(),
                                           subscriber_inbox))

  def test_login_forgot_email(self):
    """login_forgot sends exactly one password-reset email."""
    api.login_forgot(api.ROOT, self.celebrity_nick)
    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].subject, 'Password reset')
    # Bug fix: the previous assertTrue(body, '...' > 0) passed the comparison
    # as the assertion *message*, so the body text was never checked.
    # NOTE(review): substring taken from the old test's intent — confirm it
    # matches the actual mail template.
    self.assert_(mail.outbox[0].body.find('password has been reset.') > -1)

  def test_invite_request_email(self):
    """invite_request_email sends one invitation email with the right text."""
    api.invite_request_email(api.ROOT, self.celebrity_nick, 'foo@bar.com')
    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].subject,
                     'Cele Brity invited you to %s' % (settings.SITE_NAME))
    # Bug fix: same assertTrue(body, msg) misuse as test_login_forgot_email;
    # actually verify the invitation text appears in the body.
    # NOTE(review): confirm this string matches the invite mail template.
    self.assert_(mail.outbox[0].body.find(
        'Cele Brity (celebrity) has invited you to join %s' %
        (settings.SITE_NAME)) > -1)
class ApiUnitTestSubscriptions(ApiUnitTest):
  """Tests for subscription requests, posting, comments and keyvalues."""

  def test_subscription_request(self):
    """subscription_request is immediate for public, pending for private."""
    topic = "stream/%s/presence"
    inbox = "inbox/%s/overview"
    # local to public local
    local_public = api.subscription_request(self.popular,
                                            topic % self.unpopular_nick,
                                            inbox % self.popular_nick)
    self.assertEqual(local_public.state, 'subscribed')
    # local to private local
    local_private = api.subscription_request(self.popular,
                                             topic % self.hermit_nick,
                                             inbox % self.popular_nick)
    self.assertEqual(local_private.state, 'pending')
    # local to nonexist local
    def _local_nonexist():
      local_public = api.subscription_request(self.popular,
                                              topic % self.nonexist_nick,
                                              inbox % self.popular_nick)
    self.assertRaises(exception.ApiException, _local_nonexist)
    # local to private already contact
    local_already = api.subscription_request(self.popular,
                                             topic % self.annoying_nick,
                                             inbox % self.popular_nick)
    self.assertEqual(local_already.state, 'subscribed')
    # can't add subscription with other user's target
    def _local_sneaky():
      local_sneaky = api.subscription_request(self.popular,
                                              topic % self.unpopular_nick,
                                              inbox % self.unpopular_nick)
    self.assertRaises(exception.ApiException, _local_sneaky)

  def test_post(self):
    """api.post: entry creation, lookups, validation and visibility."""
    public_actor = api.actor_get(api.ROOT, self.popular_nick)
    private_actor = api.actor_get(api.ROOT, self.hermit_nick)
    test_message = "test message"
    public_stream = 'stream/%s/presence' % public_actor.nick
    # minimal posting
    entry_ref = api.post(public_actor,
                         nick=public_actor.nick,
                         message=test_message)
    self.exhaust_queue_any()
    self.assert_(entry_ref)
    self.assert_(entry_ref.uuid)
    # the entry must be retrievable both by uuid and by key name
    entry_ref_from_uuid = api.entry_get_uuid(public_actor, entry_ref.uuid)
    self.assert_(entry_ref_from_uuid)
    entry_key = entry_ref.key().name()
    entry_ref_from_key = api.entry_get(public_actor, entry_key)
    for check_ref in (entry_ref, entry_ref_from_uuid, entry_ref_from_key):
      self.assertEqual(check_ref.stream,
                       public_stream)
      self.assertEqual(check_ref.owner,
                       public_actor.nick)
      self.assertEqual(check_ref.actor,
                       public_actor.nick)
      self.assertEqual(entry_ref.stream, check_ref.stream)
      self.assertEqual(entry_ref.key(), check_ref.key())
    # prevent duplicates via uuid
    def _duplicate_uuid_same_actor():
      api.post(public_actor,
               nick=public_actor.nick,
               message=test_message,
               uuid=entry_ref.uuid)
    self.assertRaises(exception.ApiException, _duplicate_uuid_same_actor)
    def _duplicate_uuid_different_actor():
      api.post(private_actor,
               nick=private_actor.nick,
               message=test_message,
               uuid=entry_ref.uuid)
    self.assertRaises(exception.ApiException, _duplicate_uuid_different_actor)
    # prevent non-owner
    def _nonowner_nick():
      api.post(public_actor,
               nick=private_actor.nick,
               message=test_message,
               )
    self.assertRaises(exception.ApiException, _nonowner_nick)
    # prevent invalid nick
    def _invalid_nick(nick):
      api.post(public_actor,
               nick=nick,
               message=test_message,
               )
    for nick in ('!@#', ):
      self.assertRaises((exception.ApiException, exception.ValidationError),
                        _invalid_nick, nick)
    # prevent unknown nick
    def _unknown_nick():
      api.post(public_actor,
               nick=self.nonexist_nick,
               message=test_message,
               )
    self.assertRaises(exception.ApiException, _unknown_nick)
    # prevent invalid message
    def _invalid_message(msg):
      api.post(public_actor,
               nick=public_actor.nick,
               message=msg,
               )
    for msg in ('', ' '):
      self.assertRaises(exception.ApiException, _invalid_message, msg)
    # message is viewable to all active subscribers
    public_subscribers = api.subscription_get_topic(api.ROOT, public_stream)
    inbox_key = 'inboxentry/%s' % entry_key
    for sub in public_subscribers:
      sub_ref = api.actor_get(api.ROOT, sub.subscriber)
      overview_inbox = api.inbox_get_actor_overview(sub_ref, sub_ref.nick)
      self.assertEqual(overview_inbox[0], entry_key)
    # private posting creates a message
    private_stream = 'stream/%s/presence' % private_actor.nick
    other_public_actor_ref = api.actor_get(api.ROOT, 'unpopular@example.com')
    # create an active subscription
    api.actor_add_contact(public_actor,
                          public_actor.nick,
                          private_actor.nick
                          )
    api.actor_add_contact(private_actor,
                          private_actor.nick,
                          public_actor.nick,
                          )
    # and an inactive one
    api.actor_add_contact(other_public_actor_ref,
                          other_public_actor_ref.nick,
                          private_actor.nick)
    # minimal posting
    entry_ref = api.post(private_actor,
                         nick=private_actor.nick,
                         message=test_message)
    self.exhaust_queue_any()
    self.assert_(entry_ref)
    self.assert_(entry_ref.uuid)
    entry_key = entry_ref.key().name()
    # private entries must not be readable by a non-contact
    def _private_entry_from_uuid():
      entry_ref_from_uuid = api.entry_get_uuid(other_public_actor_ref,
                                               entry_ref.uuid)
    self.assertRaises(exception.ApiException, _private_entry_from_uuid)
    def _private_entry_from_key():
      entry_ref_from_key = api.entry_get(other_public_actor_ref, entry_key)
    self.assertRaises(exception.ApiException, _private_entry_from_key)
    # but readable by a contact and by the owner
    entry_ref_from_uuid = api.entry_get_uuid(public_actor, entry_ref.uuid)
    entry_ref_from_key = api.entry_get(public_actor, entry_key)
    entry_ref_from_uuid_self = api.entry_get_uuid(private_actor, entry_ref.uuid)
    entry_ref_from_key_self = api.entry_get(private_actor, entry_key)
    for check_ref in (entry_ref,
                      entry_ref_from_uuid,
                      entry_ref_from_key,
                      entry_ref_from_uuid_self,
                      entry_ref_from_key_self):
      self.assertEqual(check_ref.stream,
                       private_stream)
      self.assertEqual(check_ref.owner,
                       private_actor.nick)
      self.assertEqual(check_ref.actor,
                       private_actor.nick)
      self.assertEqual(entry_ref.stream, check_ref.stream)
      self.assertEqual(entry_ref.key(), check_ref.key())
    # message is not viewable to all inactive subscribers
    private_subscribers = api.subscription_get_topic(api.ROOT, private_stream)
    inbox_key = 'inboxentry/%s' % entry_key
    self.assert_(private_subscribers)
    for sub in private_subscribers:
      sub_ref = api.actor_get(api.ROOT, sub.subscriber)
      overview_inbox = api.inbox_get_actor_overview(sub_ref, sub_ref.nick)
      if sub.state == 'pending':
        self.assertNotEqual(overview_inbox[0], entry_key)
      elif sub.state == 'subscribed':
        self.assertEqual(overview_inbox[0], entry_key)
    # thumbnail_url
    entry_ref = api.post(public_actor,
                         nick=public_actor.nick,
                         message=test_message,
                         thumbnail_url='http://flickr.com/')
    self.assertEqual(entry_ref.extra['thumbnail_url'], 'http://flickr.com/')
    pass

  def test_post_channel(self):
    # test that using the hash (#) notation posts to a channel
    pass

  def test_post_reply(self):
    # test that using the at (@) notation attempts to reply
    pass

  def test_comment(self):
    """ test that commentors are subscribed to further comments on posts
    they have commented on
    """
    popular_ref = api.actor_get(api.ROOT, 'popular')
    celebrity_ref = api.actor_get(api.ROOT, 'celebrity')
    unpopular_ref = api.actor_get(api.ROOT, 'unpopular')
    hermit_ref = api.actor_get(api.ROOT, 'hermit')
    # an entry with no comments
    entry_ref = api.entry_get(
        api.ROOT, 'stream/popular@example.com/presence/12346')
    # first comment, unpopular should not see it
    comment_first_ref = api.entry_add_comment(
        hermit_ref,
        stream=entry_ref.stream,
        entry=entry_ref.keyname(),
        nick=hermit_ref.nick,
        content='hermit comment')
    self.exhaust_queue_any()
    unpopular_inbox = api.inbox_get_actor_overview(unpopular_ref, unpopular_ref.nick)
    self.assertNotEqual(unpopular_inbox[0], comment_first_ref.keyname())
    # unpopular comments, a subscription should be created
    comment_second_ref = api.entry_add_comment(
        unpopular_ref,
        stream=entry_ref.stream,
        entry=entry_ref.keyname(),
        nick=unpopular_ref.nick,
        content='unpopular comment')
    self.exhaust_queue_any()
    unpopular_inbox = api.inbox_get_actor_overview(unpopular_ref, unpopular_ref.nick)
    self.assertEqual(unpopular_inbox[0], comment_second_ref.keyname())
    # hermit comments again, unpopular should see this one
    comment_third_ref = api.entry_add_comment(
        hermit_ref,
        stream=entry_ref.stream,
        entry=entry_ref.keyname(),
        nick=hermit_ref.nick,
        content='hermit comment 2')
    self.exhaust_queue_any()
    unpopular_inbox = api.inbox_get_actor_overview(unpopular_ref, unpopular_ref.nick)
    self.assertEqual(unpopular_inbox[0], comment_third_ref.keyname())
    # Should see them via the post
    comments = api.entry_get_comments(unpopular_ref, entry_ref.keyname())
    self.assertEqual(3, len(comments))
    comments = api.entry_get_comments(popular_ref, entry_ref.keyname())
    self.assertEqual(3, len(comments))
    comments = api.entry_get_comments(celebrity_ref, entry_ref.keyname())
    self.assertEqual(3, len(comments))
    # test commenting via entry's uuid, unpopular should see
    comment_fourth_ref = api.entry_add_comment_with_entry_uuid(
        unpopular_ref,
        entry_uuid=entry_ref.uuid,
        nick=unpopular_ref.nick,
        content='unpopular comment 3')
    self.exhaust_queue_any()
    unpopular_inbox = api.inbox_get_actor_overview(unpopular_ref, unpopular_ref.nick)
    self.assertEqual(unpopular_inbox[0], comment_fourth_ref.keyname())

  def test_keyvalue(self):
    """keyvalue put/get/prefix_list plus owner-only access enforcement."""
    key = 'key1'
    value = 'value1'
    put_keyvalue = api.keyvalue_put(api.ROOT, self.popular_nick,
                                    key, value)
    got_keyvalue = api.keyvalue_get(api.ROOT, self.popular_nick,
                                    key);
    for keyvalue in (put_keyvalue, got_keyvalue):
      self.assertEquals(keyvalue.keyname, key)
      self.assertEquals(keyvalue.value, value)
    got_keyvalue = api.keyvalue_get(api.ROOT, self.popular_nick,
                                    'nosuchkey');
    self.assertEquals(got_keyvalue, None)
    # keyvalues are private to their owner: neither read nor write by others
    def _write_somebody_elses():
      put_keyvalue = api.keyvalue_put(self.popular, self.unpopular_nick,
                                      key, value)
    self.assertRaises(exception.ApiException, _write_somebody_elses)
    def _read_somebody_elses():
      got_keyvalue = api.keyvalue_get(self.popular, self.unpopular_nick,
                                      key)
    self.assertRaises(exception.ApiException, _read_somebody_elses)
    # prefix listing is scoped per owner and per key prefix
    put_keyvalue = api.keyvalue_put(api.ROOT, self.popular_nick,
                                    'p1', value)
    put_keyvalue = api.keyvalue_put(api.ROOT, self.popular_nick,
                                    'p2', value)
    put_keyvalue = api.keyvalue_put(api.ROOT, self.unpopular_nick,
                                    'p2', value)
    put_keyvalue = api.keyvalue_put(api.ROOT, self.popular_nick,
                                    'r', value)
    values = api.keyvalue_prefix_list(api.ROOT, self.popular_nick, 'p')
    self.assertEquals(len(values), 2)
    values = api.keyvalue_prefix_list(api.ROOT, self.popular_nick, 'p1')
    self.assertEquals(len(values), 1)
    values = api.keyvalue_prefix_list(api.ROOT, self.popular_nick, 'r')
    self.assertEquals(len(values), 1)
    values = api.keyvalue_prefix_list(api.ROOT, self.popular_nick, 'x')
    self.assertEquals(len(values), 0)
    values = api.keyvalue_prefix_list(api.ROOT, self.unpopular_nick, 'p')
    self.assertEquals(len(values), 1)

  def test_stream_get_streams(self):
    """stream_get_streams filters out private and nonexistent streams."""
    public_streams = ['stream/popular@example.com/presence',
                      'stream/unpopular@example.com/presence']
    private_streams = ['stream/hermit@example.com/presence']
    nonexist_streams = ['stream/nonexist@example.com/presence']
    all_streams = public_streams + private_streams + nonexist_streams
    # basic test
    streams = api.stream_get_streams(self.popular, public_streams)
    for x in public_streams:
      self.assert_(streams[x])
    # filter private
    streams = api.stream_get_streams(self.popular, all_streams)
    for x in public_streams:
      self.assert_(streams[x])
    for x in (private_streams + nonexist_streams):
      self.assert_(not streams.get(x, None))
class ApiUnitTestRemove(ApiUnitTest):
  """Tests for removing entries and comments and their inbox fan-out."""

  def setUp(self):
    super(ApiUnitTestRemove, self).setUp()
    self.public_actor = api.actor_get(api.ROOT, self.popular_nick)

  def _post(self, actor):
    """Post a test message as actor and return the fetched entry ref."""
    test_message = "test message"
    entry_ref = api.post(actor,
                         nick = actor.nick,
                         message = test_message)
    self.exhaust_queue_any()
    keyname = entry_ref.key().name()
    got_entry = api.entry_get(actor, keyname)
    self.assertTrue(got_entry)
    return got_entry

  def _comment(self, actor, entry):
    """Add one comment to entry as actor and return the comment ref."""
    test_comment = "test comment"
    comment = api.entry_add_comment(
        actor,
        nick = actor.nick,
        content = test_comment,
        entry = entry.key().name(),
        stream = entry.stream)
    self.exhaust_queue_any()
    keyname = comment.key().name()
    got_entry = api.entry_get(actor, keyname)
    self.assertTrue(got_entry)
    comments = api.entry_get_comments(actor, entry.key().name())
    self.assertEqual(len(comments), 1)
    entry_and_comments = api.entry_get_comments_with_entry_uuid(actor,
                                                               entry.uuid)
    self.assertEqual(len(entry_and_comments), 1)
    self.assertEqual(entry_and_comments.to_api()['entry']['uuid'], entry.uuid)
    return got_entry

  def _test_in_overview(self, stream, keyname, should_exist):
    """Assert keyname's presence in every subscriber's overview inbox."""
    public_subscribers = api.subscription_get_topic(api.ROOT, stream)
    self.assertTrue(len(public_subscribers) > 0)
    for sub in public_subscribers:
      sub_ref = api.actor_get(api.ROOT, sub.subscriber)
      # getting the list of overview entries doesn't yet filter out
      # deleted items
      overview_inbox = api.inbox_get_actor_overview(sub_ref, sub_ref.nick)
      overview_inbox = api.entry_get_entries_dict(sub_ref, overview_inbox)
      if should_exist:
        self.assertTrue(keyname in overview_inbox)
      else:
        self.assertFalse(keyname in overview_inbox)

  def test_remove_post(self):
    """Removing an entry hides it from lookups and subscriber overviews."""
    entry_ref = self._post(self.public_actor)
    keyname = entry_ref.key().name()
    stream = entry_ref.stream
    self._test_in_overview(stream, keyname, True)
    api.entry_remove(self.public_actor, keyname)
    no_entry = api.entry_get_safe(self.public_actor, keyname)
    self.assertFalse(no_entry)
    self._test_in_overview(stream, keyname, False)

  def test_remove_comment(self):
    """Removing a comment hides it and decrements the entry comment_count."""
    entry = self._post(self.public_actor)
    comment = self._comment(self.public_actor, entry)
    stream = comment.stream
    keyname = comment.key().name()
    self._test_in_overview(stream, keyname, True)
    entry_ref_pre = api.entry_get(api.ROOT, entry.key().name())
    self.assertEqual(entry_ref_pre.extra['comment_count'], 1)
    api.entry_remove_comment(self.public_actor, keyname)
    no_entry = api.entry_get_safe(self.public_actor, keyname)
    self.assertFalse(no_entry)
    self._test_in_overview(stream, keyname, False)
    comments = [c for c in api.entry_get_comments(self.public_actor,
                                                  entry.key().name()) if c]
    self.assertEqual(len(comments), 0)
    entry_ref_post = api.entry_get(api.ROOT, entry.key().name())
    self.assertEqual(entry_ref_post.extra['comment_count'], 0)

  def test_remove_post_with_comment(self):
    """Removing an entry also removes its comments everywhere."""
    entry = self._post(self.public_actor)
    comment = self._comment(self.public_actor, entry)
    stream = comment.stream
    comment_keyname = comment.key().name()
    self._test_in_overview(stream, comment_keyname, True)
    api.entry_remove(self.public_actor, entry.key().name())
    no_entry = api.entry_get_safe(self.public_actor, comment_keyname)
    self.assertFalse(no_entry)
    self._test_in_overview(stream, comment_keyname, False)
    self.assertRaises(exception.ApiException, api.entry_get_comments, self.public_actor, entry.key().name())
class ApiUnitTestPrivacy(ApiUnitTest):
  """Tests for changing an actor's privacy level."""

  def test_change_public_to_private(self):
    """After going contacts-only, old entries stay visible to contacts only."""
    public_actor = api.actor_get(api.ROOT, self.popular_nick)
    test_message = "test message"
    public_stream = 'stream/%s/presence' % public_actor.nick
    entry_ref = api.post(public_actor,
                         nick=public_actor.nick,
                         message=test_message)
    public_subscribers = api.subscription_get_topic(api.ROOT, public_stream)
    self.assertTrue(public_subscribers)
    # while public, every subscriber sees the new entry at the top
    for sub in public_subscribers:
      sub_ref = api.actor_get(api.ROOT, sub.subscriber)
      overview_inbox = api.inbox_get_actor_overview(sub_ref, sub_ref.nick)
      self.assertEqual(overview_inbox[0], entry_ref.keyname())
    api.settings_change_privacy(public_actor, public_actor.nick,
                                api.PRIVACY_CONTACTS)
    for sub in public_subscribers:
      sub_ref = api.actor_get(api.ROOT, sub.subscriber)
      overview_inbox = api.inbox_get_actor_overview(sub_ref, sub_ref.nick)
      overview_entries = api.entry_get_entries(sub_ref, overview_inbox)
      if (api.actor_has_contact(api.ROOT, public_actor.nick, sub_ref.nick) or
          public_actor.nick == sub_ref.nick):
        # contacts (and the owner) still see the entry
        self.assertEqual(overview_entries[0].keyname(), entry_ref.keyname())
      else:
        if not overview_entries:
          # an empty overview trivially satisfies "can't see it"
          pass
        else:
          self.assertNotEqual(overview_entries[0].keyname(),
                              entry_ref.keyname(),
                              "non-contact %s sees entry %s" % (
                                  sub_ref.nick, entry_ref.keyname()))
class ApiUnitTestPresence(ApiUnitTest):
  """Tests for presence set/get, history and permissions.

  setUp monkeypatches api.utcnow so tests control the clock; tearDown
  restores it.
  """

  def setUp(self):
    super(ApiUnitTestPresence, self).setUp()
    self.public_actor = api.actor_get(api.ROOT, self.popular_nick)
    # freeze api.utcnow so presence timestamps are deterministic
    self.old_utcnow = api.utcnow
    self.now = api.utcnow()
    api.utcnow = lambda: self.now

  def tearDown(self):
    api.utcnow = self.old_utcnow
    # Bug fix: the super call was missing, so ApiUnitTest.tearDown never ran
    # and the model cache enabled in setUp leaked into subsequent tests.
    super(ApiUnitTestPresence, self).tearDown()

  def _set(self, actor, nick, timestamp, status):
    """Set presence at the given (frozen) timestamp and sanity-check it."""
    if timestamp:
      self.now = timestamp
    else:
      utcnow = self.old_utcnow
      self.now = utcnow()
    presence = api.presence_set(
        actor, nick = nick, senders_timestamp = timestamp, status = status)
    self.assertTrue(presence)
    if timestamp:
      self.assertEqual(presence.updated_at, timestamp)
      self.assertEqual(presence.extra['senders_timestamp'], timestamp)
    return presence

  def test_set_and_get(self):
    """presence_get returns what was set; unset fields are preserved."""
    timestamp = datetime.datetime.utcnow()
    presence = self._set(self.public_actor, self.public_actor.nick,
                         timestamp, 'pl1')
    got_presence = api.presence_get(self.public_actor, self.public_actor.nick)
    self.assertTrue(got_presence)
    self.assertEqual(got_presence.updated_at, timestamp)
    self.assertEqual(got_presence, presence)
    api.presence_set(self.public_actor, self.public_actor.nick,
                     location = 'loc1')
    # test previous fields are kept unless overridden
    got_presence = api.presence_get(self.public_actor, self.public_actor.nick)
    self.assertEqual(got_presence.extra['status'], 'pl1')
    self.assertEqual(got_presence.extra['location'], 'loc1')

  def test_history(self):
    """presence_get(at_time=...) returns the presence in effect then."""
    # (octal literals like 01 normalized to plain decimal; same values)
    timestamp1 = datetime.datetime(2007, 1, 1, 2, 3, 4, 5)
    timestamp2 = datetime.datetime(2008, 1, 1, 2, 3, 4, 5)
    timestamp_between = datetime.datetime(2007, 6, 1, 2, 3, 4, 5)
    timestamp_before = datetime.datetime(2006, 1, 1, 2, 3, 4, 5)
    timestamp_after = datetime.datetime(2009, 1, 1, 2, 3, 4, 5)
    self._set(self.public_actor, self.public_actor.nick, timestamp1, 'bar')
    self._set(self.public_actor, self.public_actor.nick, timestamp2, 'baz')
    presence = api.presence_get(
        self.public_actor, self.public_actor.nick)
    presence1 = api.presence_get(
        self.public_actor, self.public_actor.nick, at_time = timestamp1)
    presence2 = api.presence_get(
        self.public_actor, self.public_actor.nick, at_time = timestamp2)
    presence_between = api.presence_get(
        self.public_actor, self.public_actor.nick, at_time = timestamp_between)
    self.assertEquals(presence1.extra['status'], 'bar')
    self.assertEquals(presence, presence2)
    self.assertEquals(presence2.extra['status'], 'baz')
    self.assertEquals(presence_between, presence1)
    # before any presence was set there is nothing to return
    presence_before = api.presence_get(
        self.public_actor, self.public_actor.nick, at_time = timestamp_before)
    self.assertEquals(presence_before, None)
    presence_after = api.presence_get(
        self.public_actor, self.public_actor.nick, at_time = timestamp_after)
    self.assertEquals(presence_after, presence2)

  def test_permissions(self):
    """Only the actor may set presence; private presence is contacts-only."""
    private_actor = api.actor_get(api.ROOT, self.celebrity_nick)
    unpopular_actor = api.actor_get(api.ROOT, self.unpopular_nick)
    hermit_actor = api.actor_get(api.ROOT, self.hermit_nick)
    def _set_by_other():
      self._set(self.public_actor, private_actor.nick, None, '')
    self.assertRaises(exception.ApiException, _set_by_other)
    self._set(private_actor, private_actor.nick, None, '')
    def _get_private_by_noncontact():
      api.presence_get(unpopular_actor, private_actor.nick)
    self.assertRaises(exception.ApiException, _get_private_by_noncontact)
    # Get private by contact
    api.presence_get(self.public_actor, private_actor.nick)
    # Get public by non-contact
    self._set(self.public_actor, self.public_actor.nick, None, '')
    api.presence_get(hermit_actor, self.public_actor.nick)

  def test_contacts(self):
    """presence_get_contacts filters by since-timestamp."""
    publics_contact_1 = api.actor_get(api.ROOT, self.celebrity_nick)
    publics_contact_2 = api.actor_get(api.ROOT, self.root_nick)
    timestamp1 = datetime.datetime(2007, 1, 1, 2, 3, 4, 5)
    timestamp2 = datetime.datetime(2008, 1, 1, 2, 3, 4, 5)
    timestamp_before = datetime.datetime(2006, 1, 1, 2, 3, 4, 5)
    timestamp_between = datetime.datetime(2007, 6, 1, 2, 3, 4, 5)
    timestamp_after = datetime.datetime(2009, 1, 1, 2, 3, 4, 5)
    self._set(publics_contact_1, publics_contact_1.nick, timestamp1, 'bar')
    self._set(publics_contact_2, publics_contact_2.nick, timestamp2, 'baz')
    # Get current (set + autogenerated at 2008)
    presences = api.presence_get_contacts(self.public_actor,
                                          self.public_actor.nick)
    self.assertEquals(len(presences), 3)
    # Get all by timestamp (set + autogenerated at 2008)
    presences = api.presence_get_contacts(self.public_actor,
                                          self.public_actor.nick,
                                          timestamp_before)
    self.assertEquals(len(presences), 3)
    # Get one by timestamp (set + autogenerated at 2008)
    presences = api.presence_get_contacts(self.public_actor,
                                          self.public_actor.nick,
                                          timestamp_between)
    self.assertEquals(len(presences), 2)
    # Get none by timestamp
    presences = api.presence_get_contacts(self.public_actor,
                                          self.public_actor.nick,
                                          timestamp_after)
    self.assertEquals(len(presences), 0)
class ApiUnitTestActivation(ApiUnitTest):
  """Tests for email and mobile activation requests."""

  def test_activation_request_email(self):
    """Requesting email activation sends one welcome/confirm message."""
    actor = api.actor_get(api.ROOT, self.celebrity_nick)
    self.assertTrue(actor)
    activation_ref = api.activation_request_email(actor, actor.nick, settings.DEFAULT_UNITTEST_TO_EMAIL)
    self.assertTrue(activation_ref)
    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].subject, 'Welcome! Confirm your email')
    # Bug fix: the previous assertTrue(body, '...' > 0) passed the comparison
    # as the assertion *message*, so the body was never really checked.
    # NOTE(review): substring taken from the old test's intent — confirm it
    # matches the actual mail template.
    self.assert_(mail.outbox[0].body.find('Thanks for joining') > -1)

  def test_activation_clear_old_email(self):
    """A new email activation replaces any outstanding one for the actor."""
    settings.EMAIL_LIMIT_DOMAIN = None
    actor = api.actor_get(api.ROOT, self.celebrity_nick)
    activation_ref = api.activation_request_email(
        actor, actor.nick, 'example@example.com')
    activations = api.activation_get_actor_email(api.ROOT, actor.nick)
    self.assertEqual(len(activations), 1)
    activation_ref = api.activation_request_email(
        actor, actor.nick, 'example2@example.com')
    activations = api.activation_get_actor_email(api.ROOT, actor.nick)
    self.assertEqual(len(activations), 1)

  def test_activation_request_mobile(self):
    """A new mobile activation sends an SMS and replaces any old one."""
    actor = api.actor_get(api.ROOT, self.celebrity_nick)
    activation_ref = api.activation_request_mobile(
        actor, actor.nick, '+14085551212')
    self.assertEqual(len(sms.outbox), 1)
    activations = api.activation_get_actor_mobile(api.ROOT, actor.nick)
    self.assertEqual(len(activations), 1)
    activation_ref = api.activation_request_mobile(
        actor, actor.nick, '+16505551212')
    activations = api.activation_get_actor_mobile(api.ROOT, actor.nick)
    self.assertEqual(len(activations), 1)

  def test_activation_activate_mobile(self):
    """Activating a mobile binds the number; repeats/duplicates fail."""
    actor_ref = api.actor_get(api.ROOT, self.celebrity_nick)
    mobile = '+14085551212'
    activation_ref = api.activation_request_mobile(
        actor_ref, actor_ref.nick, mobile)
    rel_ref = api.activation_activate_mobile(
        actor_ref, actor_ref.nick, activation_ref.code)
    lookup_ref = api.actor_lookup_mobile(actor_ref, mobile)
    self.assertEqual(lookup_ref.nick, actor_ref.nick)
    # an activation code may only be used once
    def _checkRepeat():
      rel_ref = api.activation_activate_mobile(
          actor_ref, actor_ref.nick, activation_ref.code)
    self.assertRaises(exception.ApiException, _checkRepeat)
    # an already-bound number may not be requested again
    def _checkDuplicate():
      activation_ref = api.activation_request_mobile(
          actor_ref, actor_ref.nick, mobile)
    self.assertRaises(exception.ApiException, _checkDuplicate)
class ApiUnitTestPost(ApiUnitTest):
  """Exercises api.post with plain, channel-addressed and too-long messages."""

  def test_post_simple(self):
    """A plain message lands in the poster's own presence stream."""
    actor_ref = api.actor_get(api.ROOT, self.popular_nick)
    label = profile.label('api_simple_post')
    entry_ref = api.post(actor_ref,
                         nick=actor_ref.nick,
                         message="test_message")
    label.stop()
    self.assertEqual(entry_ref.stream, 'stream/popular@example.com/presence')

  def test_post_channel(self):
    """Messages prefixed with '#channel' notation post to that channel."""
    label = profile.label('api_actor_get_as_root')
    actor_ref = api.actor_get(api.ROOT, self.popular_nick)
    label.stop()
    cases = [('#popular', 'test_message'),
             ('#popular@example.com', 'test message 2'),
             ('#popular:', 'test message')]
    for target, message in cases:
      label = profile.label('api_post_channel')
      entry_ref = api.post(actor_ref,
                           nick=actor_ref.nick,
                           message='%s %s' % (target, message))
      label.stop()
      # all three address forms resolve to the same channel stream
      self.assertEqual(entry_ref.stream,
                       'stream/#popular@example.com/presence')
      self.assertEqual(entry_ref.extra['title'], message)

  def test_post_too_long(self):
    """Over-length messages are truncated to 140 characters for the title."""
    actor_ref = api.actor_get(api.ROOT, self.popular_nick)
    long_message = "a" * 200
    entry_ref = api.post(actor_ref,
                         nick=actor_ref.nick,
                         message=long_message)
    self.assertEqual(entry_ref.stream, 'stream/popular@example.com/presence')
    self.assertEqual(entry_ref.extra['title'], long_message[:140])
class ApiUnitTestSpam(ApiUnitTest):
  """Covers marking an entry as spam by one or several users."""

  def setUp(self):
    # NOTE(review): does not chain to ApiUnitTest.setUp — confirm intentional
    self.popular_ref = api.actor_get(api.ROOT, self.popular_nick)
    self.unpopular_ref = api.actor_get(api.ROOT, self.unpopular_nick)
    self.celebrity_ref = api.actor_get(api.ROOT, self.celebrity_nick)
    # an entry by popular that the other actors will report
    self.entry_ref = api.post(self.popular_ref,
                              nick=self.popular_nick,
                              message='foo')

  def test_entry_mark_as_spam_single_user(self):
    abuse_ref = api.entry_mark_as_spam(self.unpopular_ref,
                                       self.entry_ref.keyname())
    self.assertEqual(abuse_ref.entry, self.entry_ref.keyname())
    self.assertEqual(abuse_ref.actor, self.popular_nick)
    self.assertEqual(abuse_ref.reports, [self.unpopular_nick])
    self.assertEqual(abuse_ref.count, 1)

  def test_entry_mark_as_spam_single_user_multiple_times(self):
    entry_key = self.entry_ref.keyname()
    api.entry_mark_as_spam(self.unpopular_ref, entry_key)
    abuse_ref = api.entry_mark_as_spam(self.unpopular_ref, entry_key)
    self.assertEqual(abuse_ref.entry, entry_key)
    self.assertEqual(abuse_ref.actor, self.popular_nick)
    self.assertEqual(abuse_ref.reports, [self.unpopular_nick])
    # the count shouldn't increase just because the same user marks spam twice
    self.assertEqual(abuse_ref.count, 1)

  def test_entry_mark_as_spam_multiple_users(self):
    api.entry_mark_as_spam(self.unpopular_ref, self.entry_ref.keyname())
    abuse_ref = api.entry_mark_as_spam(self.celebrity_ref,
                                       self.entry_ref.keyname())
    # two distinct reporters, so the count reflects both
    self.assertEqual(abuse_ref.count, 2)
    self.assertEqual(set(abuse_ref.reports),
                     set([self.unpopular_nick, self.celebrity_nick]))
class ApiUnitTestOAuthAccess(ApiUnitTest):
  """Makes OAuth-signed requests against the web endpoints."""

  def setUp(self):
    super(ApiUnitTestOAuthAccess, self).setUp()
    # force real signature verification for these tests
    settings.API_DISABLE_VERIFICATION = False
    settings.API_ALLOW_ROOT_HMAC_SHA1 = False

  def popular_request(self, url):
    """Build an HMAC-SHA1 signed OAuth request for the popular user."""
    consumer = oauth.OAuthConsumer('TESTDESKTOPCONSUMER', 'secret')
    access_token = oauth.OAuthToken('POPULARDESKTOPACCESSTOKEN', 'secret')
    signed = oauth.OAuthRequest.from_consumer_and_token(
        consumer,
        access_token,
        http_url='http://%s%s' % (settings.DOMAIN, url),
    )
    signed.sign_request(oauth_util.HMAC_SHA1, consumer, access_token)
    return signed

  def test_overview(self):
    request = self.popular_request('/user/popular/overview')
    r = self.client.get('/user/popular/overview', request.parameters)
    self.assertContains(r, "Hi popular! Here's the latest from your contacts")
    self.assertTemplateUsed(r, 'actor/templates/overview.html')
class EmailTest(ApiUnitTest):
  """Tests for the outgoing-mail helpers in common_mail."""

  default_recipient = settings.DEFAULT_UNITTEST_TO_EMAIL

  def test_send_email(self):
    # Underlying Django's send_email method uses a mock object for SMTP server,
    # when running under tests.
    r = common_mail.send(self.default_recipient,
                         'Unit tests single email',
                         'Send at ' + str(datetime.datetime.now()))
    # exactly one message handed to the (mock) backend
    # (was assertEquals — deprecated alias; rest of the file uses assertEqual)
    self.assertEqual(r, 1)

  def test_send_mass_email(self):
    subject = 'Unit tests mass email'
    message = 'Send at ' + str(datetime.datetime.now())
    recipients = [
        [self.default_recipient, 'teemu+unittest1@google.com'],
        [self.default_recipient]
    ]
    message_tuples = [(subject, message, 'root@example.com', r)
                      for r in recipients]
    # NOTE(review): no assertion on r — consider checking the send count
    r = common_mail.mass_send(message_tuples)

  # If a real email sending is not working in your environment,
  # comment out this test to test your setup.
  #def test_smtp_server(self):
  #  import smtplib
  #  from_addr = settings.DEFAULT_FROM_EMAIL
  #  to_addrs = [settings.DEFAULT_FROM_EMAIL]
  #  msg = 'A Python standard library level email test, sent at ' + str(datetime.datetime.now())
  #  server = smtplib.SMTP('localhost')
  #  server.set_debuglevel(1)
  #  server.sendmail(from_addr, to_addrs, msg)
  #  server.quit()

  def test_email_confirmation_message(self):
    # with a given name, the personalised greeting is used
    actor_with_name = api.actor_get(api.ROOT, self.celebrity_nick)
    (subject, message, html_message) = common_mail.email_confirmation_message(
        actor_with_name,
        '4124')
    self.assertTrue(message.count(actor_with_name.extra['given_name']) > 0)
    self.assertTrue(html_message.count(actor_with_name.extra['given_name']) > 0)
    # without a given name, fall back to the display nick
    actor_without_name = api.actor_get(api.ROOT, self.popular_nick)
    (subject, message, html_message) = common_mail.email_confirmation_message(
        actor_without_name,
        '4124')
    self.assertTrue(message.count(actor_without_name.display_nick()) > 0)
    self.assertTrue(html_message.count(actor_without_name.display_nick()) > 0)
class ImageErrorDecoratorTest(ApiUnitTest):
"""Tests the image error decorator transforms image error to api exception"""
def _test_specific_error_message(self, callable, error_message):
try:
callable()
self.fail("did not raise ApiException")
except exception.ApiException, e:
self.assertEquals(error_message, e.message)
@staticmethod
@api.catch_image_error
def no_error():
return True
def test_no_error(self):
self.assertTrue(ImageErrorDecoratorTest.no_error())
@staticmethod
@api.catch_image_error
def large_image_error():
raise images.LargeImageError()
def test_large_image_error(self):
self._test_specific_error_message(ImageErrorDecoratorTest.large_image_error,
"Uploaded image size is too large")
@staticmethod
@api.catch_image_error
def not_image_error():
raise images.NotImageError()
def test_not_image_error(self):
self._test_specific_error_message(ImageErrorDecoratorTest.not_image_error,
"Uploaded image is not in a recognized image format")
@staticmethod
@api.catch_image_error
def generic_image_error():
raise images.Error()
def test_generic_image_error(self):
self.assertRaises(exception.ApiException,
ImageErrorDecoratorTest.generic_image_error)
|
{
"content_hash": "735c09972b9f7823c8aedb50b1665808",
"timestamp": "",
"source": "github",
"line_count": 1391,
"max_line_length": 108,
"avg_line_length": 39.48598130841121,
"alnum_prop": 0.6202275830678198,
"repo_name": "jimpick/jaikuengine",
"id": "b3c82369c9b417a4528cbfdd8337fbd6b0fd40c2",
"size": "55501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/test/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "101456"
},
{
"name": "Python",
"bytes": "829875"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "4091"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, division
from contextlib import contextmanager
from petl.compat import string_types
from petl.errors import ArgumentError
from petl.util.base import Table, iterpeek, data
from petl.io.numpy import infer_dtype
def fromhdf5(source, where=None, name=None, condition=None,
             condvars=None, start=None, stop=None, step=None):
    """
    Provides access to an HDF5 table. E.g.::

        >>> import petl as etl
        >>> import tables
        >>> # set up a new hdf5 table to demonstrate with
        ... h5file = tables.open_file('example.h5', mode='w',
        ...                          title='Example file')
        >>> h5file.create_group('/', 'testgroup', 'Test Group')
        /testgroup (Group) 'Test Group'
          children := []
        >>> class FooBar(tables.IsDescription):
        ...     foo = tables.Int32Col(pos=0)
        ...     bar = tables.StringCol(6, pos=2)
        ...
        >>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar,
        ...                               'Test Table')
        >>> # load some data into the table
        ... table1 = (('foo', 'bar'),
        ...           (1, b'asdfgh'),
        ...           (2, b'qwerty'),
        ...           (3, b'zxcvbn'))
        >>> for row in table1[1:]:
        ...     for i, f in enumerate(table1[0]):
        ...         h5table.row[f] = row[i]
        ...     h5table.row.append()
        ...
        >>> h5file.flush()
        >>> h5file.close()
        >>> #
        ... # now demonstrate use of fromhdf5
        ... table1 = etl.fromhdf5('example.h5', '/testgroup', 'testtable')
        >>> table1
        +-----+-----------+
        | foo | bar       |
        +=====+===========+
        |   1 | b'asdfgh' |
        +-----+-----------+
        |   2 | b'qwerty' |
        +-----+-----------+
        |   3 | b'zxcvbn' |
        +-----+-----------+

        >>> # alternatively just specify path to table node
        ... table1 = etl.fromhdf5('example.h5', '/testgroup/testtable')
        >>> # ...or use an existing tables.File object
        ... h5file = tables.open_file('example.h5')
        >>> table1 = etl.fromhdf5(h5file, '/testgroup/testtable')
        >>> # ...or use an existing tables.Table object
        ... h5tbl = h5file.get_node('/testgroup/testtable')
        >>> table1 = etl.fromhdf5(h5tbl)
        >>> # use a condition to filter data
        ... table2 = etl.fromhdf5(h5tbl, condition='foo < 3')
        >>> table2
        +-----+-----------+
        | foo | bar       |
        +=====+===========+
        |   1 | b'asdfgh' |
        +-----+-----------+
        |   2 | b'qwerty' |
        +-----+-----------+

        >>> h5file.close()

    """

    # construction is cheap: no I/O happens until the returned table is
    # iterated (see HDF5View.__iter__)
    return HDF5View(source, where=where, name=name,
                    condition=condition, condvars=condvars,
                    start=start, stop=stop, step=step)
class HDF5View(Table):
    """Lazy table view over an HDF5 table node (see :func:`fromhdf5`)."""

    def __init__(self, source, where=None, name=None, condition=None,
                 condvars=None, start=None, stop=None, step=None):
        # stash everything; no I/O happens until iteration
        self.source = source
        self.where = where
        self.name = name
        self.condition = condition
        self.condvars = condvars
        self.start = start
        self.stop = stop
        self.step = step

    def __iter__(self):
        view_args = (self.source, self.where, self.name, self.condition,
                     self.condvars, self.start, self.stop, self.step)
        return iterhdf5(*view_args)
@contextmanager
def _get_hdf5_table(source, where, name, mode='r'):
    """Context manager yielding a tables.Table resolved from `source`.

    `source` may be a tables.Table (yielded as-is), a tables.File (the node
    at `where`/`name` is looked up) or a file name (the file is opened in
    `mode` and closed again on exit). Raises ArgumentError otherwise.
    """
    import tables
    needs_closing = False
    h5file = None
    # allow for polymorphic args
    if isinstance(source, tables.Table):
        # source is a table
        h5tbl = source
    elif isinstance(source, string_types):
        # assume source is the name of an HDF5 file, try to open it
        h5file = tables.open_file(source, mode=mode)
        needs_closing = True
        try:
            h5tbl = h5file.get_node(where, name=name)
        except Exception:
            # bug fix: previously a failed node lookup leaked the file
            # handle, because the close only happened in the finally
            # clause guarding the yield below
            h5file.close()
            raise
    elif isinstance(source, tables.File):
        # source is an HDF5 file object
        h5file = source
        h5tbl = h5file.get_node(where, name=name)
    else:
        # invalid source
        raise ArgumentError('invalid source argument, expected file name or '
                            'tables.File or tables.Table object, found: %r'
                            % source)
    try:
        yield h5tbl
    finally:
        # tidy up
        if needs_closing:
            h5file.close()
@contextmanager
def _get_hdf5_file(source, mode='r'):
    """Context manager yielding a tables.File; when `source` is a file name
    the file is opened in `mode` and closed again on exit."""
    import tables
    if isinstance(source, string_types):
        # assume source names an HDF5 file on disk; we own the handle
        h5file = tables.open_file(source, mode=mode)
        try:
            yield h5file
        finally:
            h5file.close()
    elif isinstance(source, tables.File):
        # caller owns the handle, so don't close it
        yield source
    else:
        raise ArgumentError('invalid source argument, expected file name or '
                            'tables.File object, found: %r' % source)
def iterhdf5(source, where, name, condition, condvars, start, stop, step):
    """Yield the header row then each data row (as a tuple) of the table."""

    with _get_hdf5_table(source, where, name) as h5tbl:

        # header row comes straight from the column names
        yield tuple(h5tbl.colnames)

        # with a condition, let pytables filter; otherwise plain iteration
        if condition is None:
            rows = h5tbl.iterrows(start=start, stop=stop, step=step)
        else:
            rows = h5tbl.where(condition, condvars=condvars,
                               start=start, stop=stop, step=step)

        for r in rows:
            yield r[:]  # slice converts the pytables row to a tuple
def fromhdf5sorted(source, where=None, name=None, sortby=None, checkCSI=False,
                   start=None, stop=None, step=None):
    """
    Provides access to an HDF5 table, sorted by an indexed column, e.g.::

        >>> import petl as etl
        >>> import tables
        >>> # set up a new hdf5 table to demonstrate with
        ... h5file = tables.open_file('example.h5', mode='w', title='Test file')
        >>> h5file.create_group('/', 'testgroup', 'Test Group')
        /testgroup (Group) 'Test Group'
          children := []
        >>> class FooBar(tables.IsDescription):
        ...     foo = tables.Int32Col(pos=0)
        ...     bar = tables.StringCol(6, pos=2)
        ...
        >>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar, 'Test Table')
        >>> # load some data into the table
        ... table1 = (('foo', 'bar'),
        ...           (3, b'asdfgh'),
        ...           (2, b'qwerty'),
        ...           (1, b'zxcvbn'))
        >>> for row in table1[1:]:
        ...     for i, f in enumerate(table1[0]):
        ...         h5table.row[f] = row[i]
        ...     h5table.row.append()
        ...
        >>> h5table.cols.foo.create_csindex()  # CS index is required
        0
        >>> h5file.flush()
        >>> h5file.close()
        >>> #
        ... # access the data, sorted by the indexed column
        ... table2 = etl.fromhdf5sorted('example.h5', '/testgroup', 'testtable',
        ...                             sortby='foo')
        >>> table2
        +-----+-----------+
        | foo | bar       |
        +=====+===========+
        |   1 | b'zxcvbn' |
        +-----+-----------+
        |   2 | b'qwerty' |
        +-----+-----------+
        |   3 | b'asdfgh' |
        +-----+-----------+

    """

    # explicit raise instead of the previous `assert`, which is silently
    # stripped under ``python -O``; ArgumentError matches the module's other
    # argument validation
    if sortby is None:
        raise ArgumentError('no column specified to sort by')
    return HDF5SortedView(source, where=where, name=name,
                          sortby=sortby, checkCSI=checkCSI,
                          start=start, stop=stop, step=step)
class HDF5SortedView(Table):
    """Lazy sorted view over an HDF5 table node (see :func:`fromhdf5sorted`)."""

    def __init__(self, source, where=None, name=None, sortby=None,
                 checkCSI=False, start=None, stop=None, step=None):
        # no I/O here; the table is only opened on iteration
        self.source = source
        self.where = where
        self.name = name
        self.sortby = sortby
        self.checkCSI = checkCSI
        self.start = start
        self.stop = stop
        self.step = step

    def __iter__(self):
        view_args = (self.source, self.where, self.name, self.sortby,
                     self.checkCSI, self.start, self.stop, self.step)
        return iterhdf5sorted(*view_args)
def iterhdf5sorted(source, where, name, sortby, checkCSI, start, stop, step):
    """Yield the header row then each data row of the table in `sortby` order."""

    with _get_hdf5_table(source, where, name) as h5tbl:

        # header row comes straight from the column names
        yield tuple(h5tbl.colnames)

        for r in h5tbl.itersorted(sortby, checkCSI=checkCSI,
                                  start=start, stop=stop, step=step):
            yield r[:]  # slice converts the pytables row to a tuple
def tohdf5(table, source, where=None, name=None, create=False, drop=False,
           description=None, title='', filters=None, expectedrows=10000,
           chunkshape=None, byteorder=None, createparents=False,
           sample=1000):
    """
    Write to an HDF5 table. If `create` is `False`, assumes the table
    already exists, and attempts to truncate it before loading. If `create`
    is `True`, a new table will be created, and if `drop` is True,
    any existing table will be dropped first. If `description` is `None`,
    the description will be guessed. E.g.::

        >>> import petl as etl
        >>> table1 = (('foo', 'bar'),
        ...           (1, b'asdfgh'),
        ...           (2, b'qwerty'),
        ...           (3, b'zxcvbn'))
        >>> etl.tohdf5(table1, 'example.h5', '/testgroup', 'testtable',
        ...            drop=True, create=True, createparents=True)
        >>> etl.fromhdf5('example.h5', '/testgroup', 'testtable')
        +-----+-----------+
        | foo | bar       |
        +=====+===========+
        |   1 | b'asdfgh' |
        +-----+-----------+
        |   2 | b'qwerty' |
        +-----+-----------+
        |   3 | b'zxcvbn' |
        +-----+-----------+

    """

    import tables
    it = iter(table)

    if create:
        with _get_hdf5_file(source, mode='a') as h5file:

            if drop:
                # remove any existing node at the target location first
                try:
                    h5file.get_node(where, name)
                except tables.NoSuchNodeError:
                    pass
                else:
                    h5file.remove_node(where, name)

            # determine datatype
            if description is None:
                # peek at a sample of the rows; iterpeek hands back a
                # replacement iterator so no rows are lost for the load below
                peek, it = iterpeek(it, sample)
                # use a numpy dtype
                description = infer_dtype(peek)

            # create the table
            h5file.create_table(where, name, description,
                                title=title,
                                filters=filters,
                                expectedrows=expectedrows,
                                chunkshape=chunkshape,
                                byteorder=byteorder,
                                createparents=createparents)

    with _get_hdf5_table(source, where, name, mode='a') as h5table:

        # truncate the existing table
        h5table.truncate(0)

        # load the data
        _insert(it, h5table)


Table.tohdf5 = tohdf5
def appendhdf5(table, source, where=None, name=None):
    """
    As :func:`petl.io.hdf5.tohdf5` but don't truncate the target table before
    loading.

    """

    # open for append and add the rows to whatever is already there
    with _get_hdf5_table(source, where, name, mode='a') as h5tbl:
        _insert(table, h5tbl)


Table.appendhdf5 = appendhdf5
def _insert(table, h5table):
    """Append each data row of `table` into `h5table`, matching fields by
    position rather than by name, then flush."""
    colnames = h5table.colnames
    for row in data(table):  # data() drops the header row
        # depends on order of fields being the same in input table
        # and hd5 table, but field names don't need to match
        for idx, fieldname in enumerate(colnames):
            h5table.row[fieldname] = row[idx]
        h5table.row.append()
    h5table.flush()
|
{
"content_hash": "ed626778b8f285cd0e95d144dca063a9",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 90,
"avg_line_length": 31.400523560209425,
"alnum_prop": 0.506294289287203,
"repo_name": "psnj/petl",
"id": "841110d6a3aa5c587984191022d25e13abc93925",
"size": "12019",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "petl/io/pytables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1823"
},
{
"name": "Jupyter Notebook",
"bytes": "146738"
},
{
"name": "Python",
"bytes": "880922"
}
],
"symlink_target": ""
}
|
from oslo.config import cfg
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import constants as const
from neutron.common import topics
from neutron.openstack.common import importutils
from neutron.openstack.common import rpc
from neutron.plugins.nicira.common import config
from neutron.plugins.nicira.dhcp_meta import rpc as nvp_rpc
class DhcpMetadataAccess(object):
    """Mixin wiring a plugin into DHCP and metadata handling for NVP.

    Depending on cfg.CONF.NVP.agent_mode it either sets up agent-based RPC
    plus per-event delegate callables (AGENT mode) or, for AGENTLESS mode,
    strips the agent-related extensions and raises NotImplementedError.
    """

    def setup_dhcpmeta_access(self):
        """Initialize support for DHCP and Metadata services."""
        if cfg.CONF.NVP.agent_mode == config.AgentModes.AGENT:
            self._setup_rpc_dhcp_metadata()
            # the delegates are plain functions stored as instance
            # attributes, so they do NOT bind self; the handle_* wrappers
            # below pass self explicitly where the delegate expects it
            self.handle_network_dhcp_access_delegate = (
                nvp_rpc.handle_network_dhcp_access
            )
            self.handle_port_dhcp_access_delegate = (
                nvp_rpc.handle_port_dhcp_access
            )
            self.handle_port_metadata_access_delegate = (
                nvp_rpc.handle_port_metadata_access
            )
            self.handle_metadata_access_delegate = (
                nvp_rpc.handle_router_metadata_access
            )
        elif cfg.CONF.NVP.agent_mode == config.AgentModes.AGENTLESS:
            # In agentless mode the following extensions, and related
            # operations, are not supported; so do not publish them
            if "agent" in self.supported_extension_aliases:
                self.supported_extension_aliases.remove("agent")
            if "dhcp_agent_scheduler" in self.supported_extension_aliases:
                self.supported_extension_aliases.remove(
                    "dhcp_agent_scheduler")
            # TODO(armando-migliaccio): agentless support is not yet complete
            # so it's better to raise an exception for now, in case some admin
            # decides to jump the gun
            raise NotImplementedError()

    def _setup_rpc_dhcp_metadata(self):
        # register this plugin as an RPC consumer on the plugin topic and
        # set up the DHCP agent notifier and the network scheduler driver
        self.topic = topics.PLUGIN
        self.conn = rpc.create_connection(new=True)
        self.dispatcher = nvp_rpc.NVPRpcCallbacks().create_rpc_dispatcher()
        self.conn.create_consumer(self.topic, self.dispatcher,
                                  fanout=False)
        self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
        self.conn.consume_in_thread()
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver
        )

    def handle_network_dhcp_access(self, context, network, action):
        self.handle_network_dhcp_access_delegate(self, context,
                                                 network, action)

    def handle_port_dhcp_access(self, context, port_data, action):
        self.handle_port_dhcp_access_delegate(self, context, port_data, action)

    def handle_port_metadata_access(self, context, port, is_delete=False):
        # NOTE(review): unlike the other delegates this one is not passed
        # self — presumably nvp_rpc.handle_port_metadata_access has a
        # different signature; confirm against dhcp_meta/rpc.py
        self.handle_port_metadata_access_delegate(context, port, is_delete)

    def handle_router_metadata_access(self, context,
                                      router_id, do_create=True):
        self.handle_metadata_access_delegate(self, context,
                                             router_id, do_create)
|
{
"content_hash": "3c98d97d20c2876d026a6cf9ead18067",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 45.88405797101449,
"alnum_prop": 0.6295009475679091,
"repo_name": "rickerc/neutron_audit",
"id": "150d0feecd8361cb06ac68c416981e8515da7a51",
"size": "3840",
"binary": false,
"copies": "4",
"ref": "refs/heads/cis-havana-staging",
"path": "neutron/plugins/nicira/dhcpmeta_modes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "7052151"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
"""
WSGI config for chatdemo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "chatdemo.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "23b30b16945b17fd2d89644fb480842b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.928571428571427,
"alnum_prop": 0.7749360613810742,
"repo_name": "Thinker-ru/sepiida-chat",
"id": "a76a4f46fb210fbbfa911560c1275f133730b64c",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chatdemo/chatdemo/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3398"
}
],
"symlink_target": ""
}
|
def orientation_from_dcm_header(header):
    """Return the slice orientation implied by a DICOM header.

    Derives the slice normal from the header's ImageOrientationPatient
    direction cosines (row vector then column vector) and classifies the
    acquisition plane.

    Args:
        header: object exposing an ImageOrientationPatient attribute with
            six direction-cosine values.

    Returns:
        One of "sagittal", "coronal" or "transverse".

    Raises:
        ValueError: if the header or cosines are missing, malformed,
            non-orthogonal or non-normal.
    """
    # borrowed from hcp xnat dicom juggling java
    # get and check the base values
    if not header:
        raise ValueError("didn't get a header")
    o = getattr(header, "ImageOrientationPatient", None)
    # bug fix: check presence *before* converting — iterating None raised an
    # unrelated TypeError instead of the intended ValueError
    if not o:
        raise ValueError("couldn't find ImageOrientationPatient in header")
    o = [float(a) for a in o]
    if len(o) != 6:
        raise ValueError("cannot be translated to cosine vectors")
    # consistency checks (use epsilon for both; the first check previously
    # repeated the literal 0.001)
    epsilon = 0.001
    if abs(o[0] * o[3] + o[1] * o[4] + o[2] * o[5]) > epsilon:
        raise ValueError("cosine vectors not orthogonal")
    if abs(1.0 - o[0] * o[0] - o[1] * o[1] - o[2] * o[2]) > epsilon:
        raise ValueError("cosine vectors not normal")
    # looks like we're good to go. derive the value: the slice normal is the
    # cross product of the row and column cosine vectors
    absNormalX = abs(o[1] * o[5] - o[2] * o[4])
    absNormalY = abs(o[2] * o[3] - o[0] * o[5])
    absNormalZ = abs(o[0] * o[4] - o[1] * o[3])
    if absNormalX > absNormalY:
        return "sagittal" if absNormalX > absNormalZ else "transverse"
    else:
        return "coronal" if absNormalY > absNormalZ else "transverse"
def numberfy(s):
    """Return float(s) when s parses as a number, otherwise s unchanged."""
    try:
        return float(s)
    except Exception:
        return s
def float_or_none(s):
    """Return float(s) when s parses as a number, otherwise None."""
    try:
        return float(s)
    except Exception:
        return None
def int_or_none(s):
    """Return int(s) when s can be converted, otherwise None.

    Also swallows TypeError (e.g. int(None)) so non-numeric, non-string
    inputs behave like unparseable strings — previously only ValueError was
    caught and int_or_none(None) raised, unlike float_or_none.
    """
    try:
        return int(s)
    except (ValueError, TypeError):
        return None
|
{
"content_hash": "1f2b29200d108f2cb76a163a8df4e4e7",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 75,
"avg_line_length": 30.142857142857142,
"alnum_prop": 0.5761679079214624,
"repo_name": "beOn/hcpre",
"id": "f030b32fa77755897204410757ac9b98918dd7f3",
"size": "1477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hcpre/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "145966"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from kivy.config import Config
from kivy.logger import Logger
from zipfile import ZipFile
import subprocess
import shutil
import sys
import os
# Avoid kivy log spam when importing main
Config.set('kivy', 'log_level', 'error')
try:
    # pull package metadata from the app itself so the .deb matches it
    os.chdir('mmplayer/')
    sys.path.append(os.getcwd())
    from mmplayer.app import __version__, __author__, __author_email__
    from mmplayer.app import __description__, __url__, __icon_path__
except Exception as e:
    Logger.exception('make_deb_package: %s' % (e))
    # NOTE(review): re-raising as a fresh Exception loses the original
    # traceback; a bare `raise` would preserve it
    raise Exception(e)
Config.set('kivy', 'log_level', 'info')
os.chdir('..')

# build a binary zip distribution with setuptools first
Logger.info('Running setup.py')
subprocess.check_call(['python', 'setup.py', 'bdist', '--format=zip'])
Logger.info('Done setup.py')

# Debian package layout: everything is staged under DIR_ROOT and later
# handed to `dpkg-deb --build`
APP_NAME = 'mmplayer'
APP_VERSION = str(__version__)
DIR_ROOT = '{}_{}'.format(APP_NAME, APP_VERSION)
DIR_APP = '{}/'.format(DIR_ROOT)
DIR_APP_INSTALL = '/usr/local/share/{}'.format(APP_NAME)
DIR_BIN0 = '/usr/local/bin'
DIR_BIN = '{}{}'.format(DIR_ROOT, DIR_BIN0)
DIR_SHARE = '{}/usr/local/share'.format(DIR_ROOT)
DIR_APPS = '{}/usr/share/applications'.format(DIR_ROOT)
DIR_DEBIAN = '{}/DEBIAN'.format(DIR_ROOT)
PATH_DESKTOP_FILE = '{}/{}.desktop'.format(DIR_APPS, APP_NAME)
PATH_BIN_LAUNCHER = '{}/{}'.format(DIR_BIN, APP_NAME)
PATH_DEBIAN_CONTROL = '{}/control'.format(DIR_DEBIAN)
PATH_DEBIAN_POSTINST = '{}/postinst'.format(DIR_DEBIAN)
PATH_EXEC = '/usr/local/bin/mmplayer'
PATH_ICON = '{}/{}'.format(DIR_APP, __icon_path__)
MAKEDIRS = (DIR_BIN, DIR_SHARE, DIR_APPS, DIR_DEBIAN)

# start from a clean staging tree
if os.path.exists(DIR_ROOT):
    shutil.rmtree(DIR_ROOT)
for d in MAKEDIRS:
    if not os.path.exists(d):
        os.makedirs(d)
        Logger.info('Made missing dir {}'.format(d))

# NOTE(review): the zip name hard-codes version 11.0 instead of deriving it
# from __version__/APP_VERSION — confirm this still tracks the bdist output
fzip = 'mmplayer-11.0.linux-x86_64.zip'
path_fzip = 'dist/%s' % (fzip)
Logger.info('Extracting {} into {}'.format(path_fzip, DIR_APP))
fzip = ZipFile(path_fzip)
fzip.extractall(path=DIR_APP)
Logger.info('Extracting done')
class TextFile(object):
    """Template renderer: reads path_open, substitutes the %PLACEHOLDER%
    tokens listed in `replacables`, writes the result to path_save, warns
    about leftover % markers and optionally chmods the output."""

    path_open = ''
    path_save = ''
    chmod = 0
    replacables = (
        ('%APP_NAME%', APP_NAME), ('%DESCRIPTION%', __description__),
        ('%AUTHOR%', __author__), ('%AUTHOR_EMAIL%', __author_email__),
        ('%PATH_ICON%', PATH_ICON),
        ('%APP_VERSION%', APP_VERSION), ('%VERSION%', APP_VERSION),
        ('%PATH_BIN_LAUNCHER%', PATH_BIN_LAUNCHER),
        ('%PATH_EXEC%', PATH_EXEC)
    )

    def __init__(self, path_open, path_save, chmod=0):
        self.path_open = path_open
        self.path_save = path_save
        self.chmod = chmod

    def do_it(self):
        """Render the template and apply chmod when requested."""
        with open(self.path_open, 'r') as source:
            text = source.read()
        for marker, value in self.replacables:
            text = text.replace(marker, value)
        with open(self.path_save, 'w') as target:
            target.write(text)
        # flag any placeholder that survived the substitution pass
        for lineno, line in enumerate(text.splitlines()):
            if '%' in line:
                Logger.warning(
                    'TextFile: % character still remains in line '
                    '{} of file {}: "{}"'.format(lineno, self.path_open, line))
        if self.chmod:
            cmd = ('chmod', str(self.chmod), self.path_save)
            Logger.info('TextFile: subprocess {}'.format(' '.join(cmd)))
            subprocess.call(cmd)
# Render each template into the staging tree
# NOTE(review): do_it() returns None, so the TEXT_* names are always None
Logger.info('Parsing and saving launchers, text files')
res_linux = 'resources/linux/'
TEXT_LAUNCHER = TextFile(
    '%s/launcher.in' % (res_linux), PATH_BIN_LAUNCHER).do_it()
TEXT_DESKTOP_FILE = TextFile(
    '%s/mmplayer.desktop.in' % (res_linux), PATH_DESKTOP_FILE).do_it()
TEXT_DEBIAN = TextFile(
    '%s/debian_control.in' % (res_linux), PATH_DEBIAN_CONTROL).do_it()
# postinst must be executable for dpkg to run it
TEXT_POSTINST = TextFile(
    '%s/postinst.in' % (res_linux), PATH_DEBIAN_POSTINST, chmod=555).do_it()
Logger.info('Done')

# assemble the .deb from the staging tree
cmd_dpkg = ('dpkg-deb', '--build', DIR_ROOT)
Logger.info('Running {}'.format(' '.join(cmd_dpkg)))
subprocess.call(cmd_dpkg)

# remove build artifacts and move the finished package into dist/
Logger.info('Cleaning up')
for x in ('build', DIR_ROOT, APP_NAME + '.egg-info'):
    if os.path.exists(x):
        shutil.rmtree(x)
deb_name = '%s_%s.deb' % (APP_NAME, APP_VERSION)
shutil.move(deb_name, 'dist/' + deb_name)
Logger.info('Done')
|
{
"content_hash": "ce6328462814f8398abe718f1cab8a12",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 76,
"avg_line_length": 34.56666666666667,
"alnum_prop": 0.6125843780135005,
"repo_name": "Bakterija/mmplayer",
"id": "d8ccb79f78735921c2ee6da36849ea4a01e0ee15",
"size": "4148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "make_deb_package.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "450641"
}
],
"symlink_target": ""
}
|
import subprocess
import os
def _find_matching_file(filename):
"""Finds a matching filename, defaults to the original filename
if it exists. If it doesn't exist, try to find a file with the same
name but different extension
Args:
filename(str): The filename to match includes name and extension
Returns:
string representing the file to convert.
Empty string ("") if no file can be found.
(This is so that os.path.isfile doesn't break)
Notes:
This is because YoutubeDL seems to give the wrong file extension
of the downloaded file sometimes.
"""
# We want the chosen filename to have the first priority
if os.path.isfile(filename):
return filename
# Match same filename, different extension
name, _ = os.path.splitext(filename)
path = os.path.join(os.getcwd(), 'temp')
files = [f for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f))]
print("Found all files:", " ".join(files))
files = [f for f in files if f.startswith(os.path.basename(name))]
if len(files) > 0:
new_filename = os.path.join(path, files[0])
print("Converting", new_filename, "instead of", filename)
return new_filename
else:
print("Cannot find matching filename for", filename)
return ""
def convert(source, target, download):
    """Converts the file from 'source' to 'target' using FFMPEG

    Args:
        source(str): Filepath to the source file
        target(str): Filepath to the target file
        download(DownloadRequest): The download request.

    Returns:
        The (stdout, stderr) of the conversion pipeline, or None when the
        conversion is skipped (missing source / already-existing target).
    """
    import shlex  # local import: used only to shell-quote user-derived values

    print('Converting file:', source, 'to', target)
    # Find any matching file
    source = _find_matching_file(source)
    if not os.path.isfile(source):
        print('Not converting because source file is missing')
        return
    if os.path.isfile(target):
        print('Not converting because target file exists')
        return

    # The command must go through the shell (it uses a pipe), so every
    # interpolated value is quoted: previously a filename containing spaces
    # or shell metacharacters broke the command or allowed shell injection.
    if download.should_time_trim():
        start_time, duration = download.get_time_trimming_data()
        command = 'ffmpeg -y -i {source} -ss {start_time} -t {duration} -f wav - | lame -V 0 - {target}'.format(
            start_time=shlex.quote(str(start_time)),
            duration=shlex.quote(str(duration)),
            source=shlex.quote(source),
            target=shlex.quote(target),
        )
    else:
        command = 'ffmpeg -y -i {source} -f wav - | lame -V 0 - {target}'.format(
            source=shlex.quote(source),
            target=shlex.quote(target),
        )
    print('Running command:', command)
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
        shell=True,
    )
    # Blocking call
    # NOTE(review): wait() before communicate() can deadlock if the pipeline
    # fills the stdout/stderr pipe buffers; consider communicate() alone
    process.wait()

    # Save some disk space
    if source != target:
        os.remove(source)

    return process.communicate()
|
{
"content_hash": "f2d593955940166bc35b29fd93e83254",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 112,
"avg_line_length": 30.54736842105263,
"alnum_prop": 0.618538938662991,
"repo_name": "tpcstld/youtube",
"id": "d12beb74c3495210a9f76746f69ea54f391a9b51",
"size": "2902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "youtube/convertor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "504"
},
{
"name": "HTML",
"bytes": "3638"
},
{
"name": "JavaScript",
"bytes": "3881"
},
{
"name": "Python",
"bytes": "24373"
}
],
"symlink_target": ""
}
|
import datetime
import os
from sqlalchemy.exc import IntegrityError
from sqlalchemy import func
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask import Flask, flash, redirect, render_template, request, url_for
import redis
from rq import Queue
import requests
from urllib.parse import urlparse, urlunparse
from indexer import Indexer
app = Flask(__name__)
# Database / Redis / session settings come from the environment, with
# development fallbacks for the Redis URL and the secret key.
# NOTE(review): the hard-coded SECRET_KEY fallback is unsafe for production —
# ensure SECRET_KEY is always set in deployed environments.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL')
app.config['REDIS_URL'] = os.environ.get('REDIS_URL', 'redis://localhost:6379')
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'aohi49fjnorj')
db = SQLAlchemy(app)
migrate = Migrate(app, db)
redis_conn = redis.from_url(app.config['REDIS_URL'])
# single default RQ queue used to run crawl jobs (see index_esri_server)
q = Queue(connection=redis_conn)
from models import EsriServer, Service, Layer
def index_esri_server(server_id):
    """Crawl an ESRI server and persist its services and layers.

    Runs as an RQ job. Marks the server 'importing' while crawling, then
    'imported' on success or 'errored' on known failures, and stamps
    last_crawled either way.

    Args:
        server_id: primary key of the EsriServer row to index.
    """
    app.logger.info('Indexing ESRI server %s', server_id)

    server = EsriServer.query.get(server_id)

    if not server:
        app.logger.error('ESRI server %s was not found', server_id)
        return

    server.status = 'importing'
    db.session.add(server)
    db.session.commit()

    resulting_status = 'errored'
    try:
        indexer = Indexer(app.logger)
        services = indexer.spider_services(server.url)

        for service in services:
            service_url = service.get('url')
            try:
                service_details = indexer.get_service_details(service_url)
            except ValueError:
                # skip this service but keep crawling the rest
                app.logger.exception('Error getting details for service %s', service_url)
                continue

            # upsert the Service row for this (server, name, type)
            db_service = Service.query.filter_by(
                server=server,
                name=service.get('name'),
                service_type=service.get('type'),
            ).first()

            if not db_service:
                db_service = Service(
                    server=server,
                    name=service.get('name'),
                    service_type=service.get('type'),
                )

            db_service.service_data = service_details
            db.session.add(db_service)

            # upsert each Layer row belonging to the service
            layers = service_details.get('layers', [])
            for layer in layers:
                db_layer = Layer.query.filter_by(
                    service=db_service,
                    name=layer.get('name'),
                ).first()

                if not db_layer:
                    db_layer = Layer(
                        service=db_service,
                        name=layer.get('name'),
                    )

                db_layer.layer_data = layer
                db.session.add(db_layer)

        resulting_status = 'imported'
    except (requests.exceptions.RequestException, ValueError):
        # both failure modes were handled identically; merged the two
        # duplicate except blocks into one
        app.logger.exception('Problem indexing ESRI server %s', server_id)

    server.status = resulting_status
    server.job_id = None
    server.last_crawled = func.now()
    db.session.add(server)
    db.session.commit()
@app.route('/', methods=['GET', 'POST'])
def index():
    """List known servers; on POST, register a new server URL and queue a crawl."""
    if request.method == 'POST':
        url = request.form['url']

        url_parts = urlparse(url)
        if url_parts.scheme not in ('http', 'https'):
            flash("That doesn't seem to be a valid URL", category="error")
            return redirect(url_for('index'))

        server = EsriServer(url=urlunparse(url_parts))
        db.session.add(server)
        try:
            db.session.commit()
        except IntegrityError:
            # the URL is unique; roll back so the session is usable again
            # (previously the failed transaction was left open)
            db.session.rollback()
            flash("That URL has already been added", category="error")
            return redirect(url_for('index'))

        job = q.enqueue_call(
            func='app.index_esri_server',
            args=(server.id,),
            result_ttl=5000,
        )

        server.status = 'queued'
        server.job_id = job.get_id()
        db.session.add(server)
        db.session.commit()

        flash("Queued the server for crawling.", category="success")

        return redirect(url_for('index'))

    # guard against non-numeric ?page= values instead of raising a 500
    # (int() on arbitrary query-string input raised ValueError)
    page = request.args.get('page', 1)
    if not isinstance(page, int):
        page = int(page) if page.isdigit() else 1
    servers = EsriServer.query.paginate(page=page)

    return render_template('index.html', servers=servers)
@app.route('/servers/<int:server_id>', methods=['GET', 'POST'])
def show_server(server_id):
    """Display one ESRI server; POSTing 'Spider Again' re-queues a crawl."""
    server = EsriServer.query.get_or_404(server_id)

    if request.method == 'POST' and request.form.get('action') == 'Spider Again':
        # Only finished servers (errored or imported) may be re-crawled.
        if server.status not in ('errored', 'imported'):
            flash("Can't re-crawl a server with state %s" % server.status)
            return redirect(url_for('index'))

        crawl_job = q.enqueue_call(
            func='app.index_esri_server',
            args=(server.id,),
            result_ttl=5000,
        )
        server.status = 'queued'
        server.job_id = crawl_job.get_id()
        db.session.add(server)
        db.session.commit()
        flash("Queued the server for crawling.", category="success")
        return redirect(url_for('index'))

    return render_template('show_server.html', server=server)
@app.route('/servers/<int:server_id>/services/<int:service_id>', methods=['GET'])
def show_service(server_id, service_id):
    """Show one service, 404ing unless it actually belongs to the server.

    Previously the service was fetched by id alone, so any service could be
    viewed under any server's URL; scoping the query to the server fixes that.
    """
    server = EsriServer.query.get_or_404(server_id)
    service = Service.query.filter_by(id=service_id, server=server).first_or_404()
    return render_template('show_service.html', server=server, service=service)
@app.route('/search', methods=['GET'])
def search():
    """Search layers by name, optionally narrowed by server or service type."""
    results = Layer.query \
        .filter(Layer.name.ilike('%{}%'.format(request.args.get('q'))))

    which_server = request.args.get('server_id')
    service_type = request.args.get('service_type')

    # Join Service at most once; the old code called .join(Service) twice
    # when both filters were supplied, producing a duplicate join.
    if (which_server and which_server.isdigit()) or service_type:
        results = results.join(Service)
    if which_server and which_server.isdigit():
        results = results.filter(Service.server_id == int(which_server))
    if service_type:
        results = results.filter(Service.service_type == service_type)

    # paginate() expects an int; coerce the validated query value.
    page = request.args.get('page', 1)
    if not isinstance(page, int) and not page.isdigit():
        page = 1
    results = results.paginate(page=int(page))
    return render_template('show_search.html', results=results)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
|
{
"content_hash": "ee71d163a326d0f906f6564c9e902ead",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 89,
"avg_line_length": 31.989690721649485,
"alnum_prop": 0.5978085723493394,
"repo_name": "openaddresses/esri-indexer",
"id": "3a744c857f77871839d4bfe09e9c5d46b22ac843",
"size": "6206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5780"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "16439"
}
],
"symlink_target": ""
}
|
"""The tests for the signal_messenger platform."""
import os
import tempfile
import unittest
from pysignalclirestapi import SignalCliRestApi
import requests_mock
import homeassistant.components.signal_messenger.notify as signalmessenger
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
BASE_COMPONENT = "notify"
async def test_signal_messenger_init(hass):
    """Test that service loads successfully."""
    config = {
        BASE_COMPONENT: {
            "name": "test",
            "platform": "signal_messenger",
            "url": "http://127.0.0.1:8080",
            "number": "+43443434343",
            "recipients": ["+435565656565"],
        }
    }
    send_patch = patch(
        "pysignalclirestapi.SignalCliRestApi.send_message", return_value=None
    )
    with send_patch:
        assert await async_setup_component(hass, BASE_COMPONENT, config)
        await hass.async_block_till_done()

        # The platform should have registered a notify service named "test".
        assert hass.services.has_service(BASE_COMPONENT, "test")
class TestSignalMesssenger(unittest.TestCase):
    """Test the signal_messenger notify."""

    def setUp(self):
        """Set up things to be run when tests are started."""
        recipients = ["+435565656565"]
        number = "+43443434343"
        client = SignalCliRestApi("http://127.0.0.1:8080", number)
        self._signalmessenger = signalmessenger.SignalNotificationService(
            recipients, client
        )

    @staticmethod
    def _register_endpoints(mock):
        """Register the two REST endpoints every test hits on the mocker.

        Extracted because all three tests previously duplicated these two
        register_uri calls verbatim.
        """
        mock.register_uri(
            "POST", "http://127.0.0.1:8080/v2/send", status_code=201,
        )
        mock.register_uri(
            "GET",
            "http://127.0.0.1:8080/v1/about",
            status_code=200,
            json={"versions": ["v1", "v2"]},
        )

    @requests_mock.Mocker()
    def test_send_message(self, mock):
        """Test send message."""
        message = "Testing Signal Messenger platform :)"
        self._register_endpoints(mock)
        with self.assertLogs(
            "homeassistant.components.signal_messenger.notify", level="DEBUG"
        ) as context:
            self._signalmessenger.send_message(message)
        self.assertIn("Sending signal message", context.output[0])
        self.assertTrue(mock.called)
        # One call each to the about and send endpoints.
        self.assertEqual(mock.call_count, 2)

    @requests_mock.Mocker()
    def test_send_message_should_show_deprecation_warning(self, mock):
        """Test that the deprecated 'attachment' option logs a warning."""
        message = "Testing Signal Messenger platform with attachment :)"
        self._register_endpoints(mock)
        with self.assertLogs(
            "homeassistant.components.signal_messenger.notify", level="WARNING"
        ) as context:
            with tempfile.NamedTemporaryFile(
                suffix=".png", prefix=os.path.basename(__file__)
            ) as tf:
                data = {"data": {"attachment": tf.name}}
                self._signalmessenger.send_message(message, **data)
        self.assertIn(
            "The 'attachment' option is deprecated, please replace it with 'attachments'. This option will become invalid in version 0.108.",
            context.output[0],
        )
        self.assertTrue(mock.called)
        self.assertEqual(mock.call_count, 2)

    @requests_mock.Mocker()
    def test_send_message_with_attachment(self, mock):
        """Test send message with the 'attachments' option."""
        message = "Testing Signal Messenger platform :)"
        self._register_endpoints(mock)
        with self.assertLogs(
            "homeassistant.components.signal_messenger.notify", level="DEBUG"
        ) as context:
            with tempfile.NamedTemporaryFile(
                suffix=".png", prefix=os.path.basename(__file__)
            ) as tf:
                data = {"data": {"attachments": [tf.name]}}
                self._signalmessenger.send_message(message, **data)
        self.assertIn("Sending signal message", context.output[0])
        self.assertTrue(mock.called)
        self.assertEqual(mock.call_count, 2)
|
{
"content_hash": "15be81f2fb026f4a8409c74f17dd4158",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 141,
"avg_line_length": 35.72357723577236,
"alnum_prop": 0.5914883932635412,
"repo_name": "robbiet480/home-assistant",
"id": "a44be249f223858aba226b1f90d6a01e446b9e87",
"size": "4394",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/signal_messenger/test_notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18837456"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
import time
from ...protobuf import *
from ...protobuf import gt_req_pb2
from ...payload.APNPayload import APNPayload, DictionaryAlertMsg
class BaseTemplate:
    """Base class for Getui push-message templates.

    Builds the protobuf ``Transparent`` message common to all template
    types; subclasses override getActionChains() to contribute actions.
    """

    def __init__(self):
        self.appKey = ""
        self.appId = ""
        self.pushInfo = gt_req_pb2.PushInfo()
        self.pushInfo.invalidAPN = True
        self.pushInfo.invalidMPN = True
        self.duration = 0

    def getTransparent(self):
        """Assemble and return the Transparent protobuf for this template."""
        transparent = gt_req_pb2.Transparent()
        transparent.id = ""
        transparent.action = "pushmessage"
        transparent.taskId = ""
        transparent.appKey = self.appKey
        transparent.appId = self.appId
        transparent.messageId = ""
        transparent.pushInfo.CopyFrom(self.getPushInfo())
        actionChains = self.getActionChains()
        for actionChain in actionChains:
            tmp = transparent.actionChain.add()
            tmp.CopyFrom(actionChain)
        # add condition
        transparent.condition.append(self.getDurCondition())
        return transparent

    def getActionChains(self):
        """Hook for subclasses; the base template contributes no actions."""
        return []

    def getPushInfo(self):
        return self.pushInfo

    def setApnInfo(self, payload):
        """Attach an APNPayload to pushInfo, enforcing the APN size limit."""
        if payload is None:
            return
        payload = payload.getPayload()
        # Was `payload is None or payload is ""` -- identity comparison with
        # a string literal only works by CPython interning accident; use
        # truthiness, which covers both None and the empty string.
        if not payload:
            return
        length = len(payload)
        if length > APNPayload.PAYLOAD_MAX_BYTES:
            raise Exception("APN payload length overlength (" + str(length) + ">"
                            + str(APNPayload.PAYLOAD_MAX_BYTES) + ")")
        self.pushInfo.apnJson = payload
        self.pushInfo.invalidAPN = False

    def setPushInfo(self, actionLocKey, badge, message, sound, payload, locKey, locArgs, launchImage,
                    contentAvailable=0):
        """Populate APN push info from the given alert fields.

        String arguments are expected as UTF-8 encoded byte strings
        (the code calls .decode("utf-8") on each); empty/None values are
        skipped.  Every `x is not ""` identity test below was replaced
        with a plain truthiness check.
        """
        self.pushInfo = gt_req_pb2.PushInfo()
        self.pushInfo.invalidAPN = True
        self.pushInfo.invalidMPN = True
        alertMsg = DictionaryAlertMsg()
        if locKey:
            alertMsg.locKey = locKey.decode("utf-8")
        if locArgs:
            alertMsg.locArgs.append(locArgs.decode("utf-8"))
        if actionLocKey:
            alertMsg.actionLocKey = actionLocKey.decode("utf-8")
        if message:
            alertMsg.body = message.decode("utf-8")
        if launchImage:
            alertMsg.launchImage = launchImage.decode("utf-8")
        apn = APNPayload()
        apn.alertMsg = alertMsg
        if badge is not None:
            alertMsg.badge = badge
        if sound:
            apn.sound = sound.decode("utf-8")
        if contentAvailable is not None:
            apn.contentAvailable = contentAvailable
        if payload:
            apn.addCustomMsg("payload", payload.decode("utf-8"))
        self.setApnInfo(apn)

    def getDurCondition(self):
        """Return the push-duration condition string, e.g. "duration=0"."""
        return "duration=" + str(self.getDuration())

    def getDuration(self):
        return self.duration

    def setDuration(self, begin, end):
        """Set the valid push window from "%Y-%m-%d %H:%M:%S" strings.

        Stores the window as "<start_ms>-<end_ms>" in epoch milliseconds.
        Raises ValueError on a bad format or an inverted interval.
        """
        # int() auto-promotes past the machine word in Python 2, so the
        # Python-2-only long() call was unnecessary (and breaks Python 3).
        s = int(time.mktime(time.strptime(begin, "%Y-%m-%d %H:%M:%S")) * 1000)
        e = int(time.mktime(time.strptime(end, "%Y-%m-%d %H:%M:%S")) * 1000)
        if s <= 0 or e <= 0:
            raise ValueError("DateFormat: yyyy-MM-dd HH:mm:ss")
        if s > e:
            raise ValueError("startTime should be smaller than endTime")
        self.duration = str(s) + "-" + str(e)
|
{
"content_hash": "f44fad6a226a73f5d6b77094425de5b1",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 101,
"avg_line_length": 37.44,
"alnum_prop": 0.5737179487179487,
"repo_name": "jerryjobs/thirdpartPushSystem",
"id": "d62ae6ad019363c34c291c3e0ea6e547a27a4bad",
"size": "3769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "push/getui/igetui/template/igt_base_template.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "82155"
},
{
"name": "HTML",
"bytes": "5277"
},
{
"name": "Python",
"bytes": "728813"
},
{
"name": "Shell",
"bytes": "68"
}
],
"symlink_target": ""
}
|
import numpy as np
import sys, os
from PIL import Image, ImageFilter
sys.path.append(os.path.abspath("./"))
from storage import ParamStorage
from interface import command
'''
This tool loads the model weight configuration stored in ./results/params.pkl and visualizes the weights in the first layer.
The weight configuration of each kernel is converted to an RGB image. The tool assumes there are exactly 64 kernels in the
first layer. A stored model can also be supplied (-model).
'''
is_model, model_path = command.get_command('-model', default='./results/params.pkl')
def make_visual(layer_weights):
    """Rescale filter weights to the 0-255 uint8 range for display.

    Min and max are reduced over the last two axes, so each leading-axis
    slice (each channel) is independently stretched to the full range.

    Args:
        layer_weights: numpy array of shape (..., h, w) of raw weights.

    Returns:
        uint8 array of the same shape, linearly rescaled per slice.
    """
    max_scale = layer_weights.max(axis=-1).max(axis=-1)[...,
                                                        np.newaxis, np.newaxis]
    min_scale = layer_weights.min(axis=-1).min(axis=-1)[...,
                                                        np.newaxis, np.newaxis]
    # Guard against divide-by-zero for perfectly flat slices, which
    # previously produced NaN pixels; a flat slice now maps to 0.
    span = max_scale - min_scale
    span[span == 0] = 1
    return (255 * (layer_weights - min_scale) / span).astype(np.uint8)
# Load the stored parameters and pull out the first layer's weights.
store = ParamStorage()
data = store.load_params(path=model_path)
first_layer = np.array(
    data['params'][-2].eval())
print(first_layer.shape)
# NOTE(review): original author observed "weird results" here; presumably
# params[-2] is the first conv layer's weight tensor -- confirm against the
# training code.
i = 0
filters = []
for filter in first_layer:
    filter_image = make_visual(filter)
    #filter_image[0,:,:] = 0
    #filter_image[1,:,:] = 0
    # Two rollaxis calls cycle the axes; presumably (c, h, w) -> (h, w, c)
    # so PIL can interpret the array as an RGB image -- TODO confirm.
    filter_image = np.rollaxis(filter_image, 2)
    filter_image = np.rollaxis(filter_image, 2)
    image = Image.fromarray(filter_image)
    image = image.resize((100, 100), Image.NEAREST)
    #image = image.filter(ImageFilter.GaussianBlur(radius=12))
    filters.append(image)
#64 filters. 100 pixels plus 5 pixel border. 8*8
#or 4*16 = 4 * 100 + 25, 16*100 *
# White canvas sized for a grid of 100px tiles with 5px borders
# (425 = 4*105 + 5 rows, 1685 = 16*105 + 5 columns).
width = 1685
height = 425
filter_showcase = np.zeros((height, width, 3), dtype=np.uint8)
filter_showcase[:, :, :] = (255, 255, 255)
for i in range(5,height, 105):
    for j in range(5, width, 105):
        # pop() takes filters from the end, so tiles fill in reverse order.
        filter_image = filters.pop()
        pixels = np.array(filter_image.getdata())
        pixels = pixels.reshape(100, 100, 3)
        filter_showcase[i: i+100, j: j+100, :] = pixels[:,:,:]
im = Image.fromarray(filter_showcase)
im.show()
|
{
"content_hash": "8dda1e8ecc9c59ac59690fe45bdc5d1f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 123,
"avg_line_length": 34.07936507936508,
"alnum_prop": 0.6422915696320447,
"repo_name": "olavvatne/CNN",
"id": "f3b0c55bdc26dcd5c2d543c5a02d4b60f930d83e",
"size": "2147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/layer/visualize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88387"
},
{
"name": "Shell",
"bytes": "822"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import six
from collections import defaultdict
from django.db.models import Q
from django.db.models.aggregates import Count
from sentry.api.serializers import register, serialize, Serializer
from sentry.models import (
Project, ProjectBookmark, ProjectOption, ProjectPlatform, ProjectStatus,
UserOption
)
# Maps internal ProjectStatus values to the public API status strings;
# every deletion-related state is surfaced simply as 'deleted'.
STATUS_LABELS = {
    ProjectStatus.VISIBLE: 'active',
    ProjectStatus.HIDDEN: 'deleted',
    ProjectStatus.PENDING_DELETION: 'deleted',
    ProjectStatus.DELETION_IN_PROGRESS: 'deleted',
}
@register(Project)
class ProjectSerializer(Serializer):
    """API serializer for Project models.

    get_attrs batches all per-user/per-project lookups (bookmarks, mail
    options, default environments, platforms, processing-issue counts) so
    serializing a list of projects avoids N+1 queries.
    """

    def get_attrs(self, item_list, user):
        """Batch-fetch attributes for every project in item_list."""
        project_ids = [i.id for i in item_list]
        if user.is_authenticated() and item_list:
            bookmarks = set(ProjectBookmark.objects.filter(
                user=user,
                project_id__in=project_ids,
            ).values_list('project_id', flat=True))
            user_options = {
                (u.project_id, u.key): u.value
                for u in UserOption.objects.filter(
                    Q(user=user, project__in=item_list, key='mail:alert') |
                    Q(user=user, key='subscribe_by_default', project__isnull=True)
                )
            }
            # user_options is keyed by (project_id, key); the global
            # 'subscribe_by_default' row has a NULL project, so its key is
            # (None, 'subscribe_by_default').  The previous lookup used the
            # bare string and therefore always fell back to the '1' default.
            default_subscribe = (
                user_options.get((None, 'subscribe_by_default'), '1') == '1'
            )
            default_environments = {
                o.project_id: o.value
                for o in ProjectOption.objects.filter(
                    key='sentry:default_environment',
                    project__in=project_ids,
                )
            }
        else:
            bookmarks = set()
            user_options = {}
            default_subscribe = False
            default_environments = {}

        reviewed_callsigns = {
            p.project_id: p.value
            for p in ProjectOption.objects.filter(
                project__in=item_list,
                key='sentry:reviewed-callsign',
            )
        }

        platforms = ProjectPlatform.objects.filter(
            project_id__in=project_ids,
        ).values_list('project_id', 'platform')
        platforms_by_project = defaultdict(list)
        for project_id, platform in platforms:
            platforms_by_project[project_id].append(platform)

        num_issues_projects = Project.objects.filter(
            id__in=project_ids
        ).annotate(num_issues=Count('processingissue')) \
            .values_list('id', 'num_issues')

        processing_issues_by_project = {}
        for project_id, num_issues in num_issues_projects:
            processing_issues_by_project[project_id] = num_issues

        result = {}
        for item in item_list:
            result[item] = {
                'is_bookmarked': item.id in bookmarks,
                'is_subscribed': bool(user_options.get(
                    (item.id, 'mail:alert'),
                    default_subscribe,
                )),
                'default_environment': default_environments.get(item.id),
                'reviewed-callsign': reviewed_callsigns.get(item.id),
                'platforms': platforms_by_project[item.id],
                'processing_issues': processing_issues_by_project.get(item.id, 0),
            }
        return result

    def serialize(self, obj, attrs, user):
        """Render a single project using the pre-fetched attrs."""
        from sentry import features

        feature_list = []
        for feature in ('global-events', 'data-forwarding', 'rate-limits'):
            if features.has('projects:' + feature, obj, actor=user):
                feature_list.append(feature)
        if obj.flags.has_releases:
            feature_list.append('releases')

        status_label = STATUS_LABELS.get(obj.status, 'unknown')

        return {
            'id': six.text_type(obj.id),
            'slug': obj.slug,
            'name': obj.name,
            'isPublic': obj.public,
            'isBookmarked': attrs['is_bookmarked'],
            'defaultEnvironment': attrs['default_environment'],
            'callSign': obj.callsign,
            'color': obj.color,
            # TODO(mitsuhiko): eventually remove this when we will treat
            # all short names as reviewed.
            'callSignReviewed': bool(attrs['reviewed-callsign']),
            'dateCreated': obj.date_added,
            'firstEvent': obj.first_event,
            'features': feature_list,
            'status': status_label,
            'platforms': attrs['platforms'],
            'processingIssues': attrs['processing_issues'],
        }
class ProjectWithOrganizationSerializer(ProjectSerializer):
    """Project serializer that also embeds the owning organization."""

    def get_attrs(self, item_list, user):
        attrs = super(ProjectWithOrganizationSerializer, self).get_attrs(
            item_list, user
        )
        # Serialize each distinct organization once, indexed by its id.
        organizations = list(set(i.organization for i in item_list))
        serialized_orgs = {}
        for org_data in serialize(organizations, user):
            serialized_orgs[org_data['id']] = org_data
        for item in item_list:
            attrs[item]['organization'] = serialized_orgs[
                six.text_type(item.organization_id)
            ]
        return attrs

    def serialize(self, obj, attrs, user):
        data = super(ProjectWithOrganizationSerializer, self).serialize(
            obj, attrs, user
        )
        data['organization'] = attrs['organization']
        return data
class SharedProjectSerializer(Serializer):
    """Trimmed-down serialization for projects exposed outside their org."""

    def serialize(self, obj, attrs, user):
        from sentry import features

        enabled_features = [
            feature
            for feature in ('global-events',)
            if features.has('projects:' + feature, obj, actor=user)
        ]
        return {
            'slug': obj.slug,
            'name': obj.name,
            'callSign': obj.callsign,
            'color': obj.color,
            'features': enabled_features,
            'organization': {
                'slug': obj.organization.slug,
                'name': obj.organization.name,
            },
        }
|
{
"content_hash": "a681a121f935e0345aa3601c483cec7b",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 83,
"avg_line_length": 34.576470588235296,
"alnum_prop": 0.5592038108200068,
"repo_name": "JackDanger/sentry",
"id": "65043e89dedecbf3918bd782ebb506e9683943ce",
"size": "5878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/api/serializers/models/project.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583430"
},
{
"name": "HTML",
"bytes": "319622"
},
{
"name": "JavaScript",
"bytes": "624672"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "6279717"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class SessionDefault(Document):
    """Frappe DocType controller for Session Default; no custom behavior."""
    pass
|
{
"content_hash": "b2d9d7ddded34eda6cd04abdca405fa2",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 42,
"avg_line_length": 23,
"alnum_prop": 0.8043478260869565,
"repo_name": "vjFaLk/frappe",
"id": "8a8db46ff1a6622847371c9344ac77324fce750b",
"size": "272",
"binary": false,
"copies": "3",
"ref": "refs/heads/parsimony-production",
"path": "frappe/core/doctype/session_default/session_default.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "290337"
},
{
"name": "HTML",
"bytes": "179507"
},
{
"name": "JavaScript",
"bytes": "2179734"
},
{
"name": "Less",
"bytes": "146135"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "2774237"
},
{
"name": "SCSS",
"bytes": "15721"
},
{
"name": "Shell",
"bytes": "3875"
},
{
"name": "Vue",
"bytes": "95109"
}
],
"symlink_target": ""
}
|
from Tkinter import *
import subprocess
class GUI(Tk):
    """Tkinter front-end that collects parameter flags and runs a binary."""

    def __init__(self, binary):
        Tk.__init__(self)
        self.bin = "./" + binary

        # One IntVar per selectable parameter; the checkbutton onvalue
        # encodes the 1-based parameter index.
        self.A = IntVar()
        self.B = IntVar()
        self.GD = IntVar()
        self.GE = IntVar()
        self.GM = IntVar()
        self.SE = IntVar()
        self.SD = IntVar()
        self.VD = IntVar()
        self.time = StringVar()
        self.params = [self.A, self.B, self.GD, self.GE, self.GM,
                       self.SD, self.SE, self.VD]

        self.container = Frame(self)
        self.container.pack(fill=BOTH, expand=1)

        # (variable, caption, onvalue, grid row) for each checkbutton; note
        # the energy/diffusion rows are deliberately swapped (GE=2, GD=3).
        checkbox_specs = [
            (self.A, "A", "1", 0),
            (self.B, "B", "2", 1),
            (self.GE, "Grainboundary energy", "4", 2),
            (self.GD, "Grainboundary diffusion", "3", 3),
            (self.GM, "Grainboundary mobility", "5", 4),
            (self.SD, "Surface diffusion", "6", 5),
            (self.SE, "Surface energy", "7", 6),
            (self.VD, "Volume diffusion", "8", 7),
        ]
        for var, caption, onval, row in checkbox_specs:
            box = Checkbutton(self.container, text=caption, variable=var,
                              onvalue=onval)
            box.grid(row=row, column=0, sticky="W")

        simulTime = Entry(self.container, width=5, text="final timer",
                          textvariable=self.time)
        simulTime.grid(row=9, column=2, sticky="WE")
        label = Label(self.container, text="Final time", bg="black", fg="white")
        label.grid(row=9, column=1, sticky="W")
        command = Button(self.container, bg="green", text="Run",
                         command=self.process, width=10)
        command.grid(row=10, column=2, sticky="E")

    def process(self):
        """Collect the checked parameters and invoke the binary."""
        selected = []
        for var in self.params:
            value = var.get()
            if value != 0:
                # onvalue is 1-based; the binary receives 0-based indices.
                selected.append(str(value - 1))
        final_time = self.time.get()
        # Argument layout: binary, time, time, count, then the indices.
        cmd = [self.bin, final_time, final_time, str(len(selected))] + selected
        subprocess.call(cmd)
if __name__ == "__main__":
app = GUI("combinations") # Enter the name of the binary in the parenthesis
app.geometry("350x250")
app.title("Parameter search App")
app.mainloop()
|
{
"content_hash": "d233e92e0277e04eb4b87e645157c080",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 94,
"avg_line_length": 32.44,
"alnum_prop": 0.5095561035758323,
"repo_name": "CleverChuk/ices",
"id": "d7555904e931716585c8322b56294070b613e23c",
"size": "3294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "C++/GUI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "30825"
},
{
"name": "Python",
"bytes": "27531"
}
],
"symlink_target": ""
}
|
"""LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class crazyflie_pos_t(object):
    """LCM message: timestamp plus position (x, y, z) and attitude
    (roll, pitch, yaw).

    Auto-generated by lcm-gen (see file header); the wire format is an
    8-byte type fingerprint followed by the packed fields.
    """
    __slots__ = ["timestamp", "x", "y", "z", "roll", "pitch", "yaw"]

    def __init__(self):
        self.timestamp = 0
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0
        self.roll = 0.0
        self.pitch = 0.0
        self.yaw = 0.0

    def encode(self):
        """Serialize this message: fingerprint header plus packed fields."""
        buf = BytesIO()
        buf.write(crazyflie_pos_t._get_packed_fingerprint())
        self._encode_one(buf)
        return buf.getvalue()

    def _encode_one(self, buf):
        # ">qdddddd": big-endian int64 timestamp followed by six doubles.
        buf.write(struct.pack(">qdddddd", self.timestamp, self.x, self.y, self.z, self.roll, self.pitch, self.yaw))

    def decode(data):
        """Decode bytes or a file-like object, validating the fingerprint."""
        if hasattr(data, 'read'):
            buf = data
        else:
            buf = BytesIO(data)
        if buf.read(8) != crazyflie_pos_t._get_packed_fingerprint():
            raise ValueError("Decode error")
        return crazyflie_pos_t._decode_one(buf)
    decode = staticmethod(decode)

    def _decode_one(buf):
        # 56 bytes = 8 (int64) + 6 * 8 (doubles).
        self = crazyflie_pos_t()
        self.timestamp, self.x, self.y, self.z, self.roll, self.pitch, self.yaw = struct.unpack(">qdddddd", buf.read(56))
        return self
    _decode_one = staticmethod(_decode_one)

    _hash = None

    def _get_hash_recursive(parents):
        # Per-type hash constant; this message references no other LCM types.
        if crazyflie_pos_t in parents: return 0
        tmphash = (0x83f9d8d6ef5b5533) & 0xffffffffffffffff
        tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
        return tmphash
    _get_hash_recursive = staticmethod(_get_hash_recursive)

    _packed_fingerprint = None

    def _get_packed_fingerprint():
        # Lazily cache the 8-byte big-endian fingerprint written by encode().
        if crazyflie_pos_t._packed_fingerprint is None:
            crazyflie_pos_t._packed_fingerprint = struct.pack(">Q", crazyflie_pos_t._get_hash_recursive([]))
        return crazyflie_pos_t._packed_fingerprint
    _get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
|
{
"content_hash": "64a8a46d381a4dd89e926b4fde9d9559",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 121,
"avg_line_length": 32.84126984126984,
"alnum_prop": 0.6143064282261962,
"repo_name": "peteflorence/crazyflie-tools",
"id": "6d9300119a588197157e2426f7fa0e9701518fee",
"size": "2069",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lcm/crazyflie_t/crazyflie_pos_t.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9573"
},
{
"name": "Limbo",
"bytes": "93"
},
{
"name": "M",
"bytes": "1235"
},
{
"name": "Matlab",
"bytes": "202375"
},
{
"name": "Python",
"bytes": "104885"
},
{
"name": "Shell",
"bytes": "155"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from . import views
# URL routes for the landing page plus the patient and doctor dashboards.
urlpatterns = [
    url(r'^$', views.Landing.as_view(), name='landing'),
    url(r'^patient/$', views.PatientDashboard.as_view(), name='patient-dashboard'),
    url(r'^patient/history/$', views.PatientHistory.as_view(), name='patient-history'),
    # The two datatable patterns were missing the '$' anchor all sibling
    # patterns have, so e.g. 'datatable-anything' also matched them.
    url(r'^patient/history/datatable$', views.PatientHistoryDatatable.as_view(), name='patient-history-datatable'),
    url(r'^patient/edit/$', views.PatientEdit.as_view(), name='patient-edit'),
    url(r'^doctor/$', views.DoctorDashboard.as_view(), name='doctor-dashboard'),
    url(r'^doctor/patients/$', views.DoctorPatients.as_view(), name='doctor-patients'),
    url(r'^doctor/patients/datatable$', views.DoctorPatientsDatatable.as_view(), name='doctor-patients-datatable'),
    url(r'^doctor/edit/$', views.DoctorEdit.as_view(), name='doctor-edit'),
]
|
{
"content_hash": "e4ade9c39b739f79ebcdac64b81abf9c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 114,
"avg_line_length": 61.07142857142857,
"alnum_prop": 0.6994152046783626,
"repo_name": "marcin-pwr/isrp",
"id": "557766637048846e4401cae274f7f02b0cf5f1c0",
"size": "855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isrp_app/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3197"
},
{
"name": "HTML",
"bytes": "29301"
},
{
"name": "JavaScript",
"bytes": "11565"
},
{
"name": "Python",
"bytes": "35960"
}
],
"symlink_target": ""
}
|
from six.moves import range
from congress.datasources import datasource_driver
def d6service(name, keys, inbox, datapath, args):
    """Create a dataservice instance.

    This method is called by d6cage to create a dataservice
    instance. There are a couple of parameters we found useful
    to add to that call, so we included them here instead of
    modifying d6cage (and all the d6cage.createservice calls).

    Returns:
        BenchmarkDriver: the freshly constructed benchmark datasource.
    """
    return BenchmarkDriver(name, keys, inbox, datapath, args)
class BenchmarkDriver(datasource_driver.DataSourceDriver):
    """Synthetic datasource that manufactures rows for benchmarking."""

    BENCHTABLE = 'benchtable'

    value_trans = {'translation-type': 'VALUE'}
    translator = {
        'translation-type': 'HDICT',
        'table-name': BENCHTABLE,
        'selector-type': 'DICT_SELECTOR',
        'field-translators':
            ({'fieldname': 'field1', 'translator': value_trans},
             {'fieldname': 'field2', 'translator': value_trans})}

    TRANSLATORS = [translator]

    def __init__(self, name='', keys='', inbox=None, datapath=None, args=None):
        super(BenchmarkDriver, self).__init__(name, keys,
                                              inbox, datapath, args)
        # used by update_from_datasources to manufacture data. Default small.
        self.datarows = 10
        self._init_end_start_poll()

    def update_from_datasource(self):
        """Regenerate self.state with self.datarows synthetic rows."""
        # TODO(sh): using self.convert_objs() takes about 10x the time. Needs
        # optimization efforts.
        self.state = {}
        for i in range(self.datarows):
            row = ('val1_%d' % i, 'val2_%d' % i)
            self.state.setdefault(self.BENCHTABLE, set()).add(row)

    def get_credentials(self, *args, **kwargs):
        return {}
|
{
"content_hash": "d3ad6ba33419c55f7acc2c15dc1666f6",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 79,
"avg_line_length": 37.326530612244895,
"alnum_prop": 0.6112629852378348,
"repo_name": "ekcs/congress",
"id": "5faf56b7f61e11e81e37b267381ba422d89b6287",
"size": "2403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "congress/datasources/benchmark_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2744"
},
{
"name": "GAP",
"bytes": "7778"
},
{
"name": "HTML",
"bytes": "19644"
},
{
"name": "JavaScript",
"bytes": "9896"
},
{
"name": "Makefile",
"bytes": "503"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "1874341"
},
{
"name": "Shell",
"bytes": "8824"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.contrib.contenttypes import generic
from django.db import models
from pennyblack import settings
from pennyblack.options import NewsletterReceiverMixin, JobUnitMixin, JobUnitAdmin
from django.utils.timezone import now
class NewsletterSubscriberManager(models.Manager):
    """
    Custom manager for NewsletterSubscriber to provide extra functionality
    """
    use_for_related_fields = True

    def get_or_add(self, email, **kwargs):
        """
        Gets a subscriber, if he doesn't exist it creates him.
        """
        try:
            subscriber = self.get(email__iexact=email)
        except self.model.DoesNotExist:
            # New subscribers are stored with a lower-cased address.
            subscriber = self.create(email=email.lower(), **kwargs)
        return subscriber

    def active(self):
        """
        Gives only the active subscribers
        """
        return self.filter(is_active=True)
newsletter_subscriber_manager = NewsletterSubscriberManager()
class NewsletterSubscriber(models.Model, NewsletterReceiverMixin):
    """
    A generic newsletter subscriber
    """
    # from pennyblack.models.mail import Mail
    email = models.EmailField(verbose_name="email address", unique=True)
    groups = models.ManyToManyField('subscriber.SubscriberGroup', verbose_name="Groups",
        related_name='subscribers')
    date_subscribed = models.DateTimeField(verbose_name="Subscribe Date",
        default=now)
    mails = generic.GenericRelation('pennyblack.Mail')
    is_active = models.BooleanField(verbose_name="Active", default=True)

    objects = newsletter_subscriber_manager
    default_manager = newsletter_subscriber_manager

    class Meta:
        verbose_name = "Subscriber"
        verbose_name_plural = "Subscribers"

    def __unicode__(self):
        return self.email

    def on_bounce(self, mail):
        """
        A mail got bounced, consider deactivating this subscriber.

        Counts bounces since the last viewed mail; once the configured
        threshold is reached the subscriber is deactivated.
        """
        bounce_count = 0
        # The loop previously shadowed the `mail` parameter; iterate under
        # a distinct name so the triggering mail is not clobbered.
        for past_mail in self.mails.order_by('pk'):
            bounce_count += past_mail.bounced
            if past_mail.viewed:
                bounce_count = 0
        if bounce_count >= settings.SUBSCRIBER_BOUNCES_UNTIL_DEACTIVATION:
            self.is_active = False
            self.save()

    def unsubscribe(self):
        """Mark this subscriber inactive without deleting the record."""
        self.is_active = False
        self.save()

    @classmethod
    def register_extension(cls, register_fn):
        """
        Call the register function of an extension. You must override this
        if you provide a custom ModelAdmin class and want your extensions to
        be able to patch stuff in.
        """
        register_fn(cls, NewsletterSubscriberAdmin)
class NewsletterSubscriberAdmin(admin.ModelAdmin):
    """Django admin configuration for newsletter subscribers."""
    search_fields = ('email',)
    list_filter = ('groups', 'is_active')
    list_display = ('__unicode__', 'is_active')
    filter_horizontal = ('groups',)
class SubscriberGroupManager(models.Manager):
    """
    Custom manager for SubscriberGroup to provide extra functionality
    """
    def get_or_add(self, name, **kwargs):
        """
        Gets a group, if she doesn't exist it creates her.
        """
        try:
            group = self.get(name__iexact=name)
        except self.model.DoesNotExist:
            group = self.create(name=name, **kwargs)
        return group
class SubscriberGroup(models.Model, JobUnitMixin):
    """
    Groups to add newsletter subscribers
    """
    name = models.CharField(max_length=50, verbose_name="Name", unique=True)

    objects = SubscriberGroupManager()

    class Meta:
        verbose_name = "Subscriber Group"
        verbose_name_plural = "Subscriber Groups"

    def __unicode__(self):
        return self.name

    @property
    def member_count(self):
        # Counts only active subscribers in this group.
        return self.subscribers.active().count()

    def get_member_count(self):
        # Plain-method accessor so the admin can attach a column label.
        return self.member_count
    get_member_count.short_description = "Member Count"

    def get_newsletter_receiver_collections(self):
        """
        Every Group has only one collection
        """
        return (('all', {}),)

    def get_receiver_queryset(self):
        """
        Return all group members
        """
        return self.subscribers.active()
class SubscriberGroupAdmin(JobUnitAdmin):
    """Admin for subscriber groups, showing the live member count."""
    list_display = ('__unicode__', 'get_member_count')
# register view links
# Hook the subscriber unsubscribe view into the newsletter link registry.
from pennyblack.models import Newsletter
from pennyblack.module.subscriber.views import unsubscribe
Newsletter.register_view_link('subscriber.unsubscribe', unsubscribe)
|
{
"content_hash": "c2b190d22043dfe603e5c42db213547f",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 88,
"avg_line_length": 30.401360544217688,
"alnum_prop": 0.6464533452673976,
"repo_name": "nickburlett/pennyblack",
"id": "0499f4207de10cf68954232fd8e37962a0a8a304",
"size": "4469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pennyblack/module/subscriber/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "20283"
},
{
"name": "Python",
"bytes": "90654"
}
],
"symlink_target": ""
}
|
from PyQt4.QtGui import QDialog
from PyQt4.QtGui import QVBoxLayout
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QPixmap
from PyQt4.QtCore import Qt
from bii_ide.common.style.icons import GUI_ICON_SUDO_128
class SudoError(QDialog):
    """Modal dialog informing the user that bii-IDE must be launched with sudo."""

    def __init__(self, parent=None):
        QDialog.__init__(self, parent, Qt.Dialog)
        self.setWindowTitle(self.tr("Sudo required"))

        layout = QVBoxLayout(self)

        # Icon on the left, rich-text title on the right.
        self.lblIcon = QLabel()
        self.lblIcon.setPixmap(QPixmap(GUI_ICON_SUDO_128))

        row = QHBoxLayout()
        row.addWidget(self.lblIcon)

        lblTitle = QLabel(
            '<h1>Sudo required</h1>\n<h3>Execute bii-IDE as sudo</h3>')
        lblTitle.setTextFormat(Qt.RichText)
        lblTitle.setAlignment(Qt.AlignLeft)
        row.addWidget(lblTitle)

        layout.addLayout(row)
|
{
"content_hash": "9f36b772d14a8aac16d9538c9f0423df",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 29.766666666666666,
"alnum_prop": 0.671892497200448,
"repo_name": "ArduinoIDE/Arduino-PyQt-IDE",
"id": "7e8d71451a773e41896142a0bf33c024829fca44",
"size": "893",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bii-ide/bii_ide/gui/widgets/popup/sudo_error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "330"
},
{
"name": "Python",
"bytes": "68657"
},
{
"name": "Shell",
"bytes": "576"
}
],
"symlink_target": ""
}
|
"""
Test cases for twisted.names.
"""
from __future__ import nested_scopes
import socket, operator, copy
from twisted.trial import unittest
from twisted.internet import reactor, defer, error
from twisted.internet.defer import succeed
from twisted.names import client, server, common, authority, hosts, dns
from twisted.python import failure
from twisted.names.error import DNSFormatError, DNSServerError, DNSNameError
from twisted.names.error import DNSNotImplementedError, DNSQueryRefusedError
from twisted.names.error import DNSUnknownError
from twisted.names.dns import EFORMAT, ESERVER, ENAME, ENOTIMP, EREFUSED
from twisted.names.dns import Message
from twisted.names.client import Resolver
def justPayload(results):
    """Extract the payload of every record in the answers section (index 0)
    of a lookup result tuple."""
    payloads = []
    for record in results[0]:
        payloads.append(record.payload)
    return payloads
class NoFileAuthority(authority.FileAuthority):
    """A FileAuthority built from in-memory records instead of a zone file."""

    def __init__(self, soa, records):
        # Deliberately skip FileAuthority.__init__ (it would try to load a
        # zone file); initialise the resolver base directly.
        common.ResolverBase.__init__(self)
        self.soa = soa
        self.records = records
# SOA for the forward zone 'test-domain.com'.  Its 'expire' value
# (19283784) is what the tests below expect as the served TTL for records
# that set none explicitly (see testZoneTransfer's default_ttl).
soa_record = dns.Record_SOA(
    mname = 'test-domain.com',
    rname = 'root.test-domain.com',
    serial = 100,
    refresh = 1234,
    minimum = 7654,
    expire = 19283784,
    retry = 15,
    ttl=1
)

# SOA for the reverse (in-addr.arpa) zone used by the PTR lookup test.
reverse_soa = dns.Record_SOA(
    mname = '93.84.28.in-addr.arpa',
    rname = '93.84.28.in-addr.arpa',
    serial = 120,
    refresh = 54321,
    minimum = 382,
    expire = 11193983,
    retry = 30,
    ttl=3
)

# SOA for 'my-domain.com'; intentionally has no explicit ttl so
# testSomeRecordsWithTTLs can check the TTL fallback behaviour.
my_soa = dns.Record_SOA(
    mname = 'my-domain.com',
    rname = 'postmaster.test-domain.com',
    serial = 130,
    refresh = 12345,
    minimum = 1,
    expire = 999999,
    retry = 100,
)
# In-memory authority for the forward zone 'test-domain.com', carrying one
# record of (nearly) every supported type so each lookup test has a target.
test_domain_com = NoFileAuthority(
    soa = ('test-domain.com', soa_record),
    records = {
        'test-domain.com': [
            soa_record,
            dns.Record_A('127.0.0.1'),
            dns.Record_NS('39.28.189.39'),
            dns.Record_MX(10, 'host.test-domain.com'),
            dns.Record_HINFO(os='Linux', cpu='A Fast One, Dontcha know'),
            dns.Record_CNAME('canonical.name.com'),
            dns.Record_MB('mailbox.test-domain.com'),
            dns.Record_MG('mail.group.someplace'),
            dns.Record_TXT('A First piece of Text', 'a SecoNd piece'),
            dns.Record_A6(0, 'ABCD::4321', ''),
            dns.Record_A6(12, '0:0069::0', 'some.network.tld'),
            dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net'),
            dns.Record_TXT('Some more text, haha! Yes. \0 Still here?'),
            dns.Record_MR('mail.redirect.or.whatever'),
            dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box'),
            dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com'),
            dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text'),
            dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP, '\x12\x01\x16\xfe\xc1\x00\x01'),
            dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF')],
        'http.tcp.test-domain.com': [
            dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool')
        ],
        'host.test-domain.com': [
            dns.Record_A('123.242.1.5'),
            dns.Record_A('0.255.0.255'),
        ],
        'host-two.test-domain.com': [
            #
            # Python bug
            # dns.Record_A('255.255.255.255'),
            #
            dns.Record_A('255.255.255.254'),
            dns.Record_A('0.0.0.0')
        ],
        'cname.test-domain.com': [
            dns.Record_CNAME('test-domain.com')
        ],
        'anothertest-domain.com': [
            dns.Record_A('1.2.3.4')],
    }
)

# Authority for the reverse zone queried by testPTR.
reverse_domain = NoFileAuthority(
    soa = ('93.84.28.in-addr.arpa', reverse_soa),
    records = {
        '123.93.84.28.in-addr.arpa': [
            dns.Record_PTR('test.host-reverse.lookup.com'),
            reverse_soa
        ]
    }
)

# Authority whose records carry explicit TTL strings ('1S', '2M', ...);
# exercised by testSomeRecordsWithTTLs.
my_domain_com = NoFileAuthority(
    soa = ('my-domain.com', my_soa),
    records = {
        'my-domain.com': [
            my_soa,
            dns.Record_A('1.2.3.4', ttl='1S'),
            dns.Record_NS('ns1.domain', ttl='2M'),
            dns.Record_NS('ns2.domain', ttl='3H'),
            dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')
        ]
    }
)
class ServerDNSTestCase(unittest.TestCase):
    """Test cases for DNS server and client.

    setUp starts a real DNSServerFactory serving the in-memory authorities
    defined above on an ephemeral local port (both TCP and UDP) and points
    a client.Resolver at it; every test performs a live loopback lookup.
    """

    def setUp(self):
        self.factory = server.DNSServerFactory([
            test_domain_com, reverse_domain, my_domain_com
        ], verbose=2)

        p = dns.DNSDatagramProtocol(self.factory)

        # Bind TCP on an ephemeral port, then try to grab the same port
        # number for UDP; retry with a fresh TCP port until both succeed.
        while 1:
            self.listenerTCP = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
            port = self.listenerTCP.getHost().port

            try:
                self.listenerUDP = reactor.listenUDP(port, p, interface="127.0.0.1")
            except error.CannotListenError:
                self.listenerTCP.stopListening()
            else:
                break

        self.resolver = client.Resolver(servers=[('127.0.0.1', port)])

    def tearDown(self):
        """Asynchronously disconnect listenerTCP, listenerUDP and resolver"""
        d1 = self.listenerTCP.loseConnection()
        d2 = defer.maybeDeferred(self.listenerUDP.stopListening)
        d = defer.gatherResults([d1, d2])

        def disconnectTransport(ignored):
            # The resolver's own datagram protocol may still hold an open port.
            if getattr(self.resolver.protocol, 'transport', None) is not None:
                return self.resolver.protocol.transport.stopListening()
        d.addCallback(disconnectTransport)

        d.addCallback(lambda x : self.failUnless(
            self.listenerUDP.disconnected
            and self.listenerTCP.disconnected))
        return d

    def namesTest(self, d, r):
        """Assert that lookup Deferred C{d} yields exactly the payloads C{r}."""
        self.response = None
        def setDone(response):
            self.response = response

        def checkResults(ignored):
            if isinstance(self.response, failure.Failure):
                # NOTE(review): re-raising a Failure instance directly relies
                # on trial/Python 2 semantics -- confirm before porting.
                raise self.response
            results = justPayload(self.response)
            assert len(results) == len(r), "%s != %s" % (map(str, results), map(str, r))
            for rec in results:
                assert rec in r, "%s not in %s" % (rec, map(str, r))

        d.addBoth(setDone)
        d.addCallback(checkResults)
        return d

    def testAddressRecord1(self):
        """Test simple DNS 'A' record queries"""
        return self.namesTest(
            self.resolver.lookupAddress('test-domain.com'),
            [dns.Record_A('127.0.0.1', ttl=19283784)]
        )

    def testAddressRecord2(self):
        """Test DNS 'A' record queries with multiple answers"""
        return self.namesTest(
            self.resolver.lookupAddress('host.test-domain.com'),
            [dns.Record_A('123.242.1.5', ttl=19283784), dns.Record_A('0.255.0.255', ttl=19283784)]
        )

    # BUG FIX: was misspelled 'testAdressRecord3'; renamed for consistency
    # with testAddressRecord1/2 (still discovered by trial's 'test' prefix).
    def testAddressRecord3(self):
        """Test DNS 'A' record queries with edge cases"""
        return self.namesTest(
            self.resolver.lookupAddress('host-two.test-domain.com'),
            [dns.Record_A('255.255.255.254', ttl=19283784), dns.Record_A('0.0.0.0', ttl=19283784)]
        )

    def testAuthority(self):
        """Test DNS 'SOA' record queries"""
        return self.namesTest(
            self.resolver.lookupAuthority('test-domain.com'),
            [soa_record]
        )

    def testMailExchangeRecord(self):
        """Test DNS 'MX' record queries"""
        return self.namesTest(
            self.resolver.lookupMailExchange('test-domain.com'),
            [dns.Record_MX(10, 'host.test-domain.com', ttl=19283784)]
        )

    def testNameserver(self):
        """Test DNS 'NS' record queries"""
        return self.namesTest(
            self.resolver.lookupNameservers('test-domain.com'),
            [dns.Record_NS('39.28.189.39', ttl=19283784)]
        )

    def testHINFO(self):
        """Test DNS 'HINFO' record queries"""
        return self.namesTest(
            self.resolver.lookupHostInfo('test-domain.com'),
            [dns.Record_HINFO(os='Linux', cpu='A Fast One, Dontcha know', ttl=19283784)]
        )

    def testPTR(self):
        """Test DNS 'PTR' record queries"""
        return self.namesTest(
            self.resolver.lookupPointer('123.93.84.28.in-addr.arpa'),
            [dns.Record_PTR('test.host-reverse.lookup.com', ttl=11193983)]
        )

    def testCNAME(self):
        """Test DNS 'CNAME' record queries"""
        return self.namesTest(
            self.resolver.lookupCanonicalName('test-domain.com'),
            [dns.Record_CNAME('canonical.name.com', ttl=19283784)]
        )

    def testCNAMEAdditional(self):
        """Test additional processing for CNAME records"""
        return self.namesTest(
            self.resolver.lookupAddress('cname.test-domain.com'),
            [dns.Record_CNAME('test-domain.com', ttl=19283784), dns.Record_A('127.0.0.1', ttl=19283784)]
        )

    def testMB(self):
        """Test DNS 'MB' record queries"""
        return self.namesTest(
            self.resolver.lookupMailBox('test-domain.com'),
            [dns.Record_MB('mailbox.test-domain.com', ttl=19283784)]
        )

    def testMG(self):
        """Test DNS 'MG' record queries"""
        return self.namesTest(
            self.resolver.lookupMailGroup('test-domain.com'),
            [dns.Record_MG('mail.group.someplace', ttl=19283784)]
        )

    def testMR(self):
        """Test DNS 'MR' record queries"""
        return self.namesTest(
            self.resolver.lookupMailRename('test-domain.com'),
            [dns.Record_MR('mail.redirect.or.whatever', ttl=19283784)]
        )

    def testMINFO(self):
        """Test DNS 'MINFO' record queries"""
        return self.namesTest(
            self.resolver.lookupMailboxInfo('test-domain.com'),
            [dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box', ttl=19283784)]
        )

    def testSRV(self):
        """Test DNS 'SRV' record queries"""
        return self.namesTest(
            self.resolver.lookupService('http.tcp.test-domain.com'),
            [dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl=19283784)]
        )

    def testAFSDB(self):
        """Test DNS 'AFSDB' record queries"""
        return self.namesTest(
            self.resolver.lookupAFSDatabase('test-domain.com'),
            [dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com', ttl=19283784)]
        )

    def testRP(self):
        """Test DNS 'RP' record queries"""
        return self.namesTest(
            self.resolver.lookupResponsibility('test-domain.com'),
            [dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text', ttl=19283784)]
        )

    def testTXT(self):
        """Test DNS 'TXT' record queries"""
        return self.namesTest(
            self.resolver.lookupText('test-domain.com'),
            [dns.Record_TXT('A First piece of Text', 'a SecoNd piece', ttl=19283784),
             dns.Record_TXT('Some more text, haha! Yes. \0 Still here?', ttl=19283784)]
        )

    def testWKS(self):
        """Test DNS 'WKS' record queries"""
        return self.namesTest(
            self.resolver.lookupWellKnownServices('test-domain.com'),
            [dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP, '\x12\x01\x16\xfe\xc1\x00\x01', ttl=19283784)]
        )

    def testSomeRecordsWithTTLs(self):
        # Records with explicit TTL strings must come back unchanged; the SOA
        # itself falls back to its own 'expire' value.
        result_soa = copy.copy(my_soa)
        result_soa.ttl = my_soa.expire
        return self.namesTest(
            self.resolver.lookupAllRecords('my-domain.com'),
            [result_soa,
             dns.Record_A('1.2.3.4', ttl='1S'),
             dns.Record_NS('ns1.domain', ttl='2M'),
             dns.Record_NS('ns2.domain', ttl='3H'),
             dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')]
        )

    def testAAAA(self):
        """Test DNS 'AAAA' record queries (IPv6)"""
        return self.namesTest(
            self.resolver.lookupIPV6Address('test-domain.com'),
            [dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF', ttl=19283784)]
        )

    def testA6(self):
        """Test DNS 'A6' record queries (IPv6)"""
        return self.namesTest(
            self.resolver.lookupAddress6('test-domain.com'),
            [dns.Record_A6(0, 'ABCD::4321', '', ttl=19283784),
             dns.Record_A6(12, '0:0069::0', 'some.network.tld', ttl=19283784),
             dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net', ttl=19283784)]
        )

    def testZoneTransfer(self):
        """Test DNS 'AXFR' queries (Zone transfer)"""
        default_ttl = soa_record.expire
        # Flatten every record of the zone; records without a TTL are served
        # with the SOA's expire value, so patch the expectation accordingly.
        results = [copy.copy(r) for r in reduce(operator.add, test_domain_com.records.values())]
        for r in results:
            if r.ttl is None:
                r.ttl = default_ttl
        return self.namesTest(
            # Drop the trailing SOA record that terminates the transfer.
            self.resolver.lookupZone('test-domain.com').addCallback(lambda r: (r[0][:-1],)),
            results
        )

    def testSimilarZonesDontInterfere(self):
        """Tests that unrelated zones don't mess with each other."""
        return self.namesTest(
            self.resolver.lookupAddress("anothertest-domain.com"),
            [dns.Record_A('1.2.3.4', ttl=19283784)]
        )
class HelperTestCase(unittest.TestCase):
    def testSerialGenerator(self):
        """Successive serials drawn from the same state file strictly increase."""
        path = self.mktemp()
        previous = authority.getSerial(path)
        for _ in range(20):
            current = authority.getSerial(path)
            self.failUnless(previous < current)
            previous = current
class AXFRTest(unittest.TestCase):
    """Tests for the AXFR (zone transfer) controller.

    A transfer is a stream of answer messages opened and closed by the same
    SOA record; both single-message (BIND) and one-record-per-message (djbdns)
    framings must produce the full record list.
    """

    def setUp(self):
        self.results = None
        self.d = defer.Deferred()
        self.d.addCallback(self._gotResults)
        self.controller = client.AXFRController('fooby.com', self.d)
        # The SOA appears twice in self.records: it both opens and closes
        # the zone transfer.
        self.soa = dns.RRHeader(name='fooby.com', type=dns.SOA, cls=dns.IN, ttl=86400, auth=False,
                                payload=dns.Record_SOA(mname='fooby.com',
                                                       rname='hooj.fooby.com',
                                                       serial=100,
                                                       refresh=200,
                                                       retry=300,
                                                       expire=400,
                                                       minimum=500,
                                                       ttl=600))
        self.records = [
            self.soa,
            dns.RRHeader(name='fooby.com', type=dns.NS, cls=dns.IN, ttl=700, auth=False,
                         payload=dns.Record_NS(name='ns.twistedmatrix.com', ttl=700)),
            dns.RRHeader(name='fooby.com', type=dns.MX, cls=dns.IN, ttl=700, auth=False,
                         payload=dns.Record_MX(preference=10, exchange='mail.mv3d.com', ttl=700)),
            dns.RRHeader(name='fooby.com', type=dns.A, cls=dns.IN, ttl=700, auth=False,
                         payload=dns.Record_A(address='64.123.27.105', ttl=700)),
            self.soa
        ]

    def _makeMessage(self):
        # hooray they all have the same message format
        return dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1, rCode=0, trunc=0, maxSize=0)

    def testBindAndTNamesStyle(self):
        # Bind style = One big single message
        m = self._makeMessage()
        m.queries = [dns.Query('fooby.com', dns.AXFR, dns.IN)]
        m.answers = self.records
        self.controller.messageReceived(m, None)
        self.assertEquals(self.results, self.records)

    def _gotResults(self, result):
        # Callback target for self.d; stores the transferred record list.
        self.results = result

    def testDJBStyle(self):
        # DJB style = message per record
        records = self.records[:]
        while records:
            m = self._makeMessage()
            m.queries = [] # DJB *doesn't* specify any queries.. hmm..
            m.answers = [records.pop(0)]
            self.controller.messageReceived(m, None)
        self.assertEquals(self.results, self.records)
class HostsTestCase(unittest.TestCase):
    """Tests for the /etc/hosts-style resolver (twisted.names.hosts)."""

    def setUp(self):
        # Write a small hosts fixture into the working directory; the
        # resolver under test parses it directly.
        f = open('EtcHosts', 'w')
        f.write('''
1.1.1.1 EXAMPLE EXAMPLE.EXAMPLETHING
1.1.1.2 HOOJY
::1 ip6thingy
''')
        f.close()
        self.resolver = hosts.Resolver('EtcHosts')

    def testGetHostByName(self):
        # Each alias on a hosts line must resolve to that line's address.
        data = [('EXAMPLE', '1.1.1.1'),
                ('EXAMPLE.EXAMPLETHING', '1.1.1.1'),
                ('HOOJY', '1.1.1.2'),
                ]
        ds = [self.resolver.getHostByName(n).addCallback(self.assertEqual, ip)
              for n, ip in data]
        return defer.gatherResults(ds)

    def testLookupAddress(self):
        d = self.resolver.lookupAddress('HOOJY')
        d.addCallback(lambda x: self.assertEqual(x[0][0].payload.dottedQuad(),
                                                 '1.1.1.2'))
        return d

    def testIPv6(self):
        d = self.resolver.lookupIPV6Address('ip6thingy')
        d.addCallback(self.assertEqual, '::1')
        return d
    # Skipped until the hosts resolver grows IPv6 support.
    testIPv6.skip = 'IPv6 support is not in our hosts resolver yet'

    def testNotImplemented(self):
        # Only address lookups are supported by the hosts resolver.
        return self.assertFailure(self.resolver.lookupMailExchange('EXAMPLE'),
                                  NotImplementedError)

    def testQuery(self):
        d = self.resolver.query(dns.Query('EXAMPLE'))
        d.addCallback(lambda x: self.assertEqual(x[0][0].payload.dottedQuad(),
                                                 '1.1.1.1'))
        return d

    def testNotFound(self):
        return self.assertFailure(self.resolver.lookupAddress('foueoa'),
                                  dns.DomainError)
class FakeDNSDatagramProtocol(object):
    """Stand-in for a DNS datagram protocol that records every query it is
    asked to send and immediately fails it with a timeout."""

    # Truthy sentinel so callers see a "connected" transport.
    transport = object()

    def __init__(self):
        self.queries = []

    def query(self, address, queries, timeout=10, id=None):
        """Record the attempt, then fail as if the server never answered."""
        attempt = (address, queries, timeout, id)
        self.queries.append(attempt)
        return defer.fail(dns.DNSQueryTimeoutError(queries))

    def removeResend(self, id):
        """No-op; resend bookkeeping is irrelevant for this fake."""
        # Ignore this for the time being.
        pass
class RetryLogic(unittest.TestCase):
    """Verify the resolver's retry schedule: every configured server is tried
    once per timeout step, with the timeouts escalating (1, 3, 11, 45 s)."""

    testServers = [
        '1.2.3.4',
        '4.3.2.1',
        'a.b.c.d',
        'z.y.x.w']

    def testRoundRobinBackoff(self):
        addrs = [(x, 53) for x in self.testServers]
        r = client.Resolver(resolv=None, servers=addrs)
        # Swap in a protocol that times out every query and records them.
        r.protocol = proto = FakeDNSDatagramProtocol()
        return r.lookupAddress("foo.example.com"
            ).addCallback(self._cbRoundRobinBackoff
            ).addErrback(self._ebRoundRobinBackoff, proto
            )

    def _cbRoundRobinBackoff(self, result):
        raise unittest.FailTest("Lookup address succeeded, should have timed out")

    def _ebRoundRobinBackoff(self, failure, fakeProto):
        failure.trap(defer.TimeoutError)

        # Assert that each server is tried with a particular timeout
        # before the timeout is increased and the attempts are repeated.
        for t in (1, 3, 11, 45):
            tries = fakeProto.queries[:len(self.testServers)]
            del fakeProto.queries[:len(self.testServers)]

            tries.sort()
            expected = list(self.testServers)
            expected.sort()

            for ((addr, query, timeout, id), expectedAddr) in zip(tries, expected):
                self.assertEquals(addr, (expectedAddr, 53))
                self.assertEquals(timeout, t)
        # After the final round no further queries may have been issued.
        self.failIf(fakeProto.queries)
class ResolvConfHandling(unittest.TestCase):
    """When resolv.conf is missing or empty, the resolver must fall back to
    the local host (127.0.0.1:53)."""

    def testMissing(self):
        """A nonexistent resolv.conf yields the localhost fallback server."""
        missingPath = self.mktemp()
        resolver = client.Resolver(resolv=missingPath)
        self.assertEquals(resolver.dynServers, [('127.0.0.1', 53)])
        resolver._parseCall.cancel()

    def testEmpty(self):
        """An empty resolv.conf likewise yields the localhost fallback."""
        emptyPath = self.mktemp()
        open(emptyPath, 'w').close()
        resolver = client.Resolver(resolv=emptyPath)
        self.assertEquals(resolver.dynServers, [('127.0.0.1', 53)])
        resolver._parseCall.cancel()
class FilterAnswersTests(unittest.TestCase):
    """
    Test L{twisted.names.client.Resolver.filterAnswers}'s handling of various
    error conditions it might encounter.
    """
    def setUp(self):
        # Create a resolver pointed at an invalid server - we won't be hitting
        # the network in any of these tests.
        self.resolver = Resolver(servers=[('0.0.0.0', 0)])

    def test_truncatedMessage(self):
        """
        Test that a truncated message results in an equivalent request made via
        TCP.
        """
        m = Message(trunc=True)
        m.addQuery('example.com')

        def queryTCP(queries):
            self.assertEqual(queries, m.queries)
            response = Message()
            response.answers = ['answer']
            response.authority = ['authority']
            response.additional = ['additional']
            return succeed(response)
        self.resolver.queryTCP = queryTCP
        d = self.resolver.filterAnswers(m)
        d.addCallback(
            self.assertEqual, (['answer'], ['authority'], ['additional']))
        return d

    def _rcodeTest(self, rcode, exc):
        # filterAnswers returns a Failure for messages carrying an error
        # rCode; trap() asserts it wraps the expected exception type.
        m = Message(rCode=rcode)
        err = self.resolver.filterAnswers(m)
        err.trap(exc)

    def test_formatError(self):
        """
        Test that a message with a result code of C{EFORMAT} results in a
        failure wrapped around L{DNSFormatError}.
        """
        return self._rcodeTest(EFORMAT, DNSFormatError)

    def test_serverError(self):
        """
        Like L{test_formatError} but for C{ESERVER}/L{DNSServerError}.
        """
        return self._rcodeTest(ESERVER, DNSServerError)

    def test_nameError(self):
        """
        Like L{test_formatError} but for C{ENAME}/L{DNSNameError}.
        """
        return self._rcodeTest(ENAME, DNSNameError)

    def test_notImplementedError(self):
        """
        Like L{test_formatError} but for C{ENOTIMP}/L{DNSNotImplementedError}.
        """
        return self._rcodeTest(ENOTIMP, DNSNotImplementedError)

    def test_refusedError(self):
        """
        Like L{test_formatError} but for C{EREFUSED}/L{DNSQueryRefusedError}.
        """
        return self._rcodeTest(EREFUSED, DNSQueryRefusedError)

    # BUG FIX: this method was previously also named test_refusedError, which
    # shadowed the EREFUSED test above and silently disabled it.
    def test_unknownError(self):
        """
        Like L{test_formatError} but for an unrecognized error code and
        L{DNSUnknownError}.
        """
        return self._rcodeTest(EREFUSED + 1, DNSUnknownError)
|
{
"content_hash": "d774669c642803dd77594c3cc0f2ba9c",
"timestamp": "",
"source": "github",
"line_count": 649,
"max_line_length": 110,
"avg_line_length": 34.58859784283513,
"alnum_prop": 0.569627583749109,
"repo_name": "santisiri/popego",
"id": "768626cd68e0eb70263b29fad7ac49b0bd143c17",
"size": "22587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/names/test/test_names.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
}
|
import os
import dill
import matplotlib.ticker as ticker
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.AllenCahn_1D_FD import (
allencahn_front_fullyimplicit,
allencahn_front_semiimplicit,
)
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.playgrounds.Allen_Cahn.AllenCahn_monitor_Bayreuth import monitor
def setup_parameters():
    """
    Helper routine to fill in all relevant parameters

    Note that this file will be used for all versions of SDC, containing more than necessary for each individual run

    Returns:
        description (dict)
        controller_params (dict)
    """
    # level parameters
    level_params = {
        'restol': 1e-03,
        'dt': 16.0 / 1,
        'nsweeps': 1,
    }

    # sweeper parameters
    sweeper_params = {
        'quad_type': 'RADAU-RIGHT',
        'num_nodes': [3],
        'Q1': ['LU'],
        'Q2': ['LU'],
        'QI': ['LU'],
        'QE': ['EE'],
        'initial_guess': 'zero',
    }

    # This comes as read-in for the problem class
    problem_params = {
        'nvars': 127,
        'dw': -0.04,
        'eps': 0.04,
        'newton_maxiter': 100,
        'newton_tol': 1e-04,
        'lin_tol': 1e-08,
        'lin_maxiter': 100,
        'radius': 0.25,
        'interval': (-0.5, 0.5),
    }

    # step parameters
    step_params = {'maxiter': 50}

    # controller parameters
    controller_params = {
        'logger_level': 20,
        'hook_class': monitor,
    }

    # fill description dictionary for easy step instantiation; problem and
    # sweeper classes are chosen by the caller (see run_SDC_variant)
    description = {
        'problem_class': None,
        'problem_params': problem_params,
        'sweeper_class': None,
        'sweeper_params': sweeper_params,
        'level_params': level_params,
        'step_params': step_params,
    }

    return description, controller_params
def run_SDC_variant(variant=None, inexact=False):
    """
    Routine to run particular SDC variant

    Args:
        variant (str): string describing the variant
        inexact (bool): flag to use inexact nonlinear solve (or nor)

    Returns:
        results and statistics of the run

    Raises:
        NotImplementedError: if an unknown variant is requested
    """
    # load (incomplete) default parameters
    description, controller_params = setup_parameters()

    # add stuff based on variant
    if variant == 'fully-implicit':
        description['problem_class'] = allencahn_front_fullyimplicit
        # description['problem_class'] = allencahn_front_finel
        description['sweeper_class'] = generic_implicit
        if inexact:
            description['problem_params']['newton_maxiter'] = 200
    elif variant == 'semi-implicit':
        description['problem_class'] = allencahn_front_semiimplicit
        description['sweeper_class'] = imex_1st_order
        if inexact:
            description['problem_params']['lin_maxiter'] = 10
    # elif variant == 'multi-implicit':
    #     description['problem_class'] = allencahn_multiimplicit
    #     description['sweeper_class'] = multi_implicit
    #     if inexact:
    #         description['problem_params']['newton_maxiter'] = 1
    #         description['problem_params']['lin_maxiter'] = 10
    # elif variant == 'multi-implicit_v2':
    #     description['problem_class'] = allencahn_multiimplicit_v2
    #     description['sweeper_class'] = multi_implicit
    #     if inexact:
    #         description['problem_params']['newton_maxiter'] = 1
    else:
        # BUG FIX: was `raise NotImplemented(...)` -- NotImplemented is a
        # non-callable sentinel, not an exception; this would raise TypeError.
        raise NotImplementedError('Wrong variant specified, got %s' % variant)

    if inexact:
        out = 'Working on inexact %s variant...' % variant
    else:
        out = 'Working on exact %s variant...' % variant
    print(out)

    # setup parameters "in time"
    t0 = 0
    Tend = 32.0

    # instantiate controller
    controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)

    # get initial values on finest level
    P = controller.MS[0].levels[0].prob
    uinit = P.u_exact(t0)

    # plt_helper.plt.plot(uinit.values)
    # plt_helper.savefig('uinit', save_pdf=False, save_pgf=False, save_png=True)
    #
    # uex = P.u_exact(Tend)
    # plt_helper.plt.plot(uex.values)
    # plt_helper.savefig('uex', save_pdf=False, save_pgf=False, save_png=True)
    # exit()

    # call main function to get things done...
    uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)

    # plt_helper.plt.plot(uend.values)
    # plt_helper.savefig('uend', save_pdf=False, save_pgf=False, save_png=True)
    # exit()

    # filter statistics by variant (number of iterations)
    iter_counts = get_sorted(stats, type='niter', sortby='time')

    # compute and print statistics
    niters = np.array([item[1] for item in iter_counts])
    out = '   Mean number of iterations: %4.2f' % np.mean(niters)
    print(out)
    out = '   Range of values for number of iterations: %2i ' % np.ptp(niters)
    print(out)
    out = '   Position of max/min number of iterations: %2i -- %2i' % (int(np.argmax(niters)), int(np.argmin(niters)))
    print(out)
    out = '   Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
    print(out)

    print('   Iteration count (nonlinear/linear): %i / %i' % (P.newton_itercount, P.lin_itercount))
    print(
        '   Mean Iteration count per call: %4.2f / %4.2f'
        % (P.newton_itercount / max(P.newton_ncalls, 1), P.lin_itercount / max(P.lin_ncalls, 1))
    )

    timing = get_sorted(stats, type='timing_run', sortby='time')
    print('Time to solution: %6.4f sec.' % timing[0][1])
    print()

    return stats
def show_results(fname, cwd=''):
    """
    Plotting routine

    Args:
        fname (str): file name to read in and name plots
        cwd (str): current working directory
    """
    # Load the pickled results dict written by main(): keys are
    # (variant, exact/inexact) tuples, values are pySDC stats objects.
    file = open(cwd + fname + '.pkl', 'rb')
    results = dill.load(file)
    file.close()

    # plt_helper.mpl.style.use('classic')
    plt_helper.setup_mpl()

    # set up plot for timings
    fig, ax1 = plt_helper.newfig(textwidth=238.96, scale=1.5, ratio=0.4)

    # Gather total runtime and mean iteration count per variant.
    timings = {}
    niters = {}
    for key, item in results.items():
        timings[key] = get_sorted(item, type='timing_run', sortby='time')[0][1]
        iter_counts = get_sorted(item, type='niter', sortby='time')
        niters[key] = np.mean(np.array([item[1] for item in iter_counts]))

    # Sort variants by descending runtime; keep iteration counts aligned.
    xcoords = [i for i in range(len(timings))]
    sorted_timings = sorted([(key, timings[key]) for key in timings], reverse=True, key=lambda tup: tup[1])
    sorted_niters = [(k, niters[k]) for k in [key[0] for key in sorted_timings]]
    heights_timings = [item[1] for item in sorted_timings]
    heights_niters = [item[1] for item in sorted_niters]
    keys = [(item[0][1] + ' ' + item[0][0]).replace('-', '\n').replace('_v2', ' mod.') for item in sorted_timings]

    # Timing bars grow left of each tick, iteration bars right (twin axes).
    ax1.bar(xcoords, heights_timings, align='edge', width=-0.3, label='timings (left axis)')
    ax1.set_ylabel('time (sec)')

    ax2 = ax1.twinx()
    ax2.bar(xcoords, heights_niters, color='r', align='edge', width=0.3, label='iterations (right axis)')
    ax2.set_ylabel('mean number of iterations')

    ax1.set_xticks(xcoords)
    ax1.set_xticklabels(keys, rotation=90, ha='center')

    # ask matplotlib for the plotted objects and their labels
    lines, labels = ax1.get_legend_handles_labels()
    lines2, labels2 = ax2.get_legend_handles_labels()
    ax2.legend(lines + lines2, labels + labels2, loc=0)

    # save plot, beautify
    f = fname + '_timings'
    plt_helper.savefig(f)

    assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'
    # assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'
    assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'

    # set up plot for radii
    fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)

    exact_radii = []
    for key, item in results.items():
        computed_radii = get_sorted(item, type='computed_radius', sortby='time')

        xcoords = [item0[0] for item0 in computed_radii]
        radii = [item0[1] for item0 in computed_radii]
        # Only the fully-implicit exact run is drawn as a reference curve.
        if key[0] + ' ' + key[1] == 'fully-implicit exact':
            ax.plot(xcoords, radii, label=(key[0] + ' ' + key[1]).replace('_v2', ' mod.'))

        exact_radii = get_sorted(item, type='exact_radius', sortby='time')

        # NOTE(review): thresholds 0.07 and (0.028, 0.03) look like empirically
        # chosen regression bounds -- confirm before tightening.
        diff = np.array([abs(item0[1] - item1[1]) for item0, item1 in zip(exact_radii, computed_radii)])
        max_pos = int(np.argmax(diff))
        assert max(diff) < 0.07, 'ERROR: computed radius is too far away from exact radius, got %s' % max(diff)
        assert 0.028 < computed_radii[max_pos][0] < 0.03, (
            'ERROR: largest difference is at wrong time, got %s' % computed_radii[max_pos][0]
        )

    xcoords = [item[0] for item in exact_radii]
    radii = [item[1] for item in exact_radii]
    ax.plot(xcoords, radii, color='k', linestyle='--', linewidth=1, label='exact')

    ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
    ax.set_ylabel('radius')
    ax.set_xlabel('time')
    ax.grid()
    ax.legend(loc=3)

    # save plot, beautify
    f = fname + '_radii'
    plt_helper.savefig(f)

    assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'
    # assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'
    assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'

    # set up plot for interface width
    fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)

    interface_width = []
    for key, item in results.items():
        interface_width = get_sorted(item, type='interface_width', sortby='time')
        xcoords = [item[0] for item in interface_width]
        width = [item[1] for item in interface_width]
        if key[0] + ' ' + key[1] == 'fully-implicit exact':
            ax.plot(xcoords, width, label=key[0] + ' ' + key[1])

    xcoords = [item[0] for item in interface_width]
    # Horizontal reference line at the initial interface width.
    init_width = [interface_width[0][1]] * len(xcoords)
    ax.plot(xcoords, init_width, color='k', linestyle='--', linewidth=1, label='exact')

    ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
    ax.set_ylabel(r'interface width ($\epsilon$)')
    ax.set_xlabel('time')
    ax.grid()
    ax.legend(loc=3)

    # save plot, beautify
    f = fname + '_interface'
    plt_helper.savefig(f)

    assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'
    # assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'
    assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'

    return None
def main(cwd=''):
    """
    Main driver

    Args:
        cwd (str): current working directory (need this for testing)
    """
    # Loop over variants, exact and inexact solves
    results = {}
    # for variant in ['multi-implicit', 'semi-implicit', 'fully-implicit', 'semi-implicit_v2', 'multi-implicit_v2']:
    for variant in ['fully-implicit']:
        # for variant in ['semi-implicit']:
        # results[(variant, 'exact')] = run_SDC_variant(variant=variant, inexact=False)
        results[(variant, 'inexact')] = run_SDC_variant(variant=variant, inexact=True)

    # dump result
    # NOTE(review): result pickling and visualisation are currently disabled;
    # re-enable the lines below (and show_results) to persist and plot runs.
    # fname = 'data/results_SDC_variants_AllenCahn_1E-03'
    # file = open(cwd + fname + '.pkl', 'wb')
    # dill.dump(results, file)
    # file.close()
    # assert os.path.isfile(cwd + fname + '.pkl'), 'ERROR: dill did not create file'

    # visualize
    # show_results(fname, cwd=cwd)
# Script entry point: run the driver when executed directly.
if __name__ == "__main__":
    main()
|
{
"content_hash": "a1d1f3414693cacc5b509ff5fca725c0",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 118,
"avg_line_length": 36.43195266272189,
"alnum_prop": 0.6391911645281793,
"repo_name": "Parallel-in-Time/pySDC",
"id": "2977b2afe487a041b2cac2657237b9bf36f27080",
"size": "12314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pySDC/playgrounds/Allen_Cahn/AllenCahn_Bayreuth_front_1D.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4264000"
},
{
"name": "Python",
"bytes": "2450453"
},
{
"name": "Shell",
"bytes": "18105"
}
],
"symlink_target": ""
}
|
import requests
import json
import hashlib
import hmac
import urllib
import uuid
import sys
import logging
import time
from random import randint
from tqdm import tqdm
from . import config
from .api_photo import configurePhoto
from .api_photo import uploadPhoto
from .api_photo import downloadPhoto
from .api_video import configureVideo
from .api_video import uploadVideo
from .api_search import fbUserSearch
from .api_search import searchUsers
from .api_search import searchUsername
from .api_search import searchTags
from .api_search import searchLocation
from .api_profile import removeProfilePicture
from .api_profile import setPrivateAccount
from .api_profile import setPublicAccount
from .api_profile import getProfileData
from .api_profile import editProfile
from .api_profile import setNameAndPhone
from .prepare import get_credentials
from .prepare import delete_credentials
# The urllib library was split into other modules from Python 2 to Python 3
if sys.version_info.major == 3:
import urllib.parse
class API(object):
    """Minimal Instagram private-API client.

    Wraps a ``requests.Session`` with the request-signing scheme used by the
    mobile app.  State accumulated across calls: ``LastResponse`` (raw
    ``requests`` response of the most recent call) and ``LastJson`` (its
    decoded body).  Thin delegator methods forward to the ``api_photo`` /
    ``api_video`` / ``api_search`` / ``api_profile`` helper modules.
    """
    def __init__(self):
        # Session state; ``session``, ``user_id``, ``rank_token`` and
        # ``token`` are only created by a successful login().
        self.isLoggedIn = False
        self.LastResponse = None
        self.total_requests = 0
        # handle logging
        self.logger = logging.getLogger('[instabot]')
        self.logger.setLevel(logging.DEBUG)
        # NOTE(review): basicConfig is global — constructing several API
        # instances reconfigures/duplicates handlers process-wide.
        logging.basicConfig(format='%(asctime)s %(message)s',
                            filename='instabot.log',
                            level=logging.INFO
                            )
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)
    def setUser(self, username, password):
        """Store credentials and mint a fresh per-login UUID."""
        self.username = username
        self.password = password
        self.uuid = self.generateUUID(True)
    def login(self, username=None, password=None, force=False, proxy=None):
        """Authenticate against the private API.

        With no password, credentials are pulled interactively via
        ``get_credentials``.  Returns True on success; on a failed login it
        deletes stored credentials and terminates the process (see note).
        """
        if password is None:
            username, password = get_credentials(username=username)
        # Device id is derived deterministically from the credential hash so
        # the same account always presents the same "device".
        m = hashlib.md5()
        m.update(username.encode('utf-8') + password.encode('utf-8'))
        self.proxy = proxy
        self.device_id = self.generateDeviceId(m.hexdigest())
        self.setUser(username, password)
        if (not self.isLoggedIn or force):
            self.session = requests.Session()
            if self.proxy is not None:
                proxies = {
                    'http': 'http://' + self.proxy,
                    'https': 'http://' + self.proxy,
                }
                self.session.proxies.update(proxies)
            # First request only fetches cookies (csrftoken) for the real login.
            if (
                self.SendRequest('si/fetch_headers/?challenge_type=signup&guid=' + self.generateUUID(False),
                                 None, True)):
                data = {'phone_id': self.generateUUID(True),
                        '_csrftoken': self.LastResponse.cookies['csrftoken'],
                        'username': self.username,
                        'guid': self.uuid,
                        'device_id': self.device_id,
                        'password': self.password,
                        'login_attempt_count': '0'}
                if self.SendRequest('accounts/login/', self.generateSignature(json.dumps(data)), True):
                    self.isLoggedIn = True
                    self.user_id = self.LastJson["logged_in_user"]["pk"]
                    self.rank_token = "%s_%s" % (self.user_id, self.uuid)
                    self.token = self.LastResponse.cookies["csrftoken"]
                    self.logger.info("Login success as %s!" % self.username)
                    return True
                else:
                    self.logger.info("Login or password is incorrect.")
                    delete_credentials()
                    # NOTE(review): exit() kills the whole interpreter from
                    # library code — returning False would be kinder to callers.
                    exit()
    def logout(self):
        """Log out; returns True when no session remains logged in."""
        if not self.isLoggedIn:
            return True
        self.isLoggedIn = not self.SendRequest('accounts/logout/')
        return not self.isLoggedIn
    def SendRequest(self, endpoint, post=None, login=False):
        """Issue one API call (POST when ``post`` is given, else GET).

        Returns True on HTTP 200 and stores the response in ``LastResponse``
        / ``LastJson``; returns False otherwise.  A 429 response sleeps for
        five minutes before returning.  Raises when not logged in unless
        ``login=True``.
        """
        if (not self.isLoggedIn and not login):
            self.logger.critical("Not logged in.")
            raise Exception("Not logged in!")
        self.session.headers.update({'Connection': 'close',
                                     'Accept': '*/*',
                                     'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
                                     'Cookie2': '$Version=1',
                                     'Accept-Language': 'en-US',
                                     'User-Agent': config.USER_AGENT})
        try:
            self.total_requests += 1
            if post is not None: # POST
                response = self.session.post(
                    config.API_URL + endpoint, data=post)
            else: # GET
                response = self.session.get(
                    config.API_URL + endpoint)
        except Exception as e:
            # Network-level failure: log and report failure to the caller.
            self.logger.warning(str(e))
            return False
        if response.status_code == 200:
            self.LastResponse = response
            self.LastJson = json.loads(response.text)
            return True
        else:
            self.logger.warning("Request return " +
                                str(response.status_code) + " error!")
            if response.status_code == 429:
                sleep_minutes = 5
                self.logger.warning("That means 'too many requests'. "
                                    "I'll go to sleep for %d minutes." % sleep_minutes)
                time.sleep(sleep_minutes * 60)
            # for debugging
            try:
                self.LastResponse = response
                self.LastJson = json.loads(response.text)
            except:
                # NOTE(review): bare except silently swallows everything,
                # including KeyboardInterrupt — best-effort debug capture only.
                pass
            return False
    def syncFeatures(self):
        """Send the client's experiment/feature sync handshake."""
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            'id': self.user_id,
            '_csrftoken': self.token,
            'experiments': config.EXPERIMENTS
        })
        return self.SendRequest('qe/sync/', self.generateSignature(data))
    def autoCompleteUserList(self):
        return self.SendRequest('friendships/autocomplete_user_list/')
    def getTimelineFeed(self):
        """ Returns 8 medias from timeline feed of logged user """
        return self.SendRequest('feed/timeline/')
    def megaphoneLog(self):
        return self.SendRequest('megaphone/log/')
    def expose(self):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            'id': self.user_id,
            '_csrftoken': self.token,
            'experiment': 'ig_android_profile_contextual_feed'
        })
        return self.SendRequest('qe/expose/', self.generateSignature(data))
    # --- media upload/download: delegated to api_photo / api_video ---------
    def uploadPhoto(self, photo, caption=None, upload_id=None):
        return uploadPhoto(self, photo, caption, upload_id)
    def downloadPhoto(self, media_id, filename, media=False, path='photos/'):
        return downloadPhoto(self, media_id, filename, media, path)
    def configurePhoto(self, upload_id, photo, caption=''):
        return configurePhoto(self, upload_id, photo, caption)
    def uploadVideo(self, photo, caption=None, upload_id=None):
        return uploadVideo(self, photo, caption, upload_id)
    def configureVideo(self, upload_id, video, thumbnail, caption=''):
        return configureVideo(self, upload_id, video, thumbnail, caption)
    # --- media actions -----------------------------------------------------
    def editMedia(self, mediaId, captionText=''):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            '_csrftoken': self.token,
            'caption_text': captionText
        })
        return self.SendRequest('media/' + str(mediaId) + '/edit_media/', self.generateSignature(data))
    def removeSelftag(self, mediaId):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            '_csrftoken': self.token
        })
        return self.SendRequest('media/' + str(mediaId) + '/remove/', self.generateSignature(data))
    def mediaInfo(self, mediaId):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            '_csrftoken': self.token,
            'media_id': mediaId
        })
        return self.SendRequest('media/' + str(mediaId) + '/info/', self.generateSignature(data))
    def archiveMedia(self, media, undo=False):
        """Archive (hide) a media item, or un-archive it when ``undo=True``.

        ``media`` must be a dict carrying at least ``id`` and ``media_type``.
        """
        action = 'only_me' if not undo else 'undo_only_me'
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            '_csrftoken': self.token,
            'media_id': media['id']
        })
        return self.SendRequest('media/' + str(media['id']) + '/' + str(action) + '/?media_type=' +
                                str(media['media_type']), self.generateSignature(data))
    def deleteMedia(self, mediaId):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            '_csrftoken': self.token,
            'media_id': mediaId
        })
        return self.SendRequest('media/' + str(mediaId) + '/delete/', self.generateSignature(data))
    def changePassword(self, newPassword):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            '_csrftoken': self.token,
            'old_password': self.password,
            'new_password1': newPassword,
            'new_password2': newPassword
        })
        return self.SendRequest('accounts/change_password/', self.generateSignature(data))
    def explore(self):
        return self.SendRequest('discover/explore/')
    def comment(self, mediaId, commentText):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            '_csrftoken': self.token,
            'comment_text': commentText
        })
        return self.SendRequest('media/' + str(mediaId) + '/comment/', self.generateSignature(data))
    def deleteComment(self, mediaId, commentId):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            '_csrftoken': self.token
        })
        return self.SendRequest('media/' + str(mediaId) + '/comment/' + str(commentId) + '/delete/',
                                self.generateSignature(data))
    # --- profile: delegated to api_profile ---------------------------------
    def removeProfilePicture(self):
        return removeProfilePicture(self)
    def setPrivateAccount(self):
        return setPrivateAccount(self)
    def setPublicAccount(self):
        return setPublicAccount(self)
    def getProfileData(self):
        return getProfileData(self)
    def editProfile(self, url, phone, first_name, biography, email, gender):
        return editProfile(self, url, phone, first_name, biography, email, gender)
    # --- feeds and lookups (results land in self.LastJson) -----------------
    def getUsernameInfo(self, usernameId):
        return self.SendRequest('users/' + str(usernameId) + '/info/')
    def getSelfUsernameInfo(self):
        return self.getUsernameInfo(self.user_id)
    def getRecentActivity(self):
        activity = self.SendRequest('news/inbox/?')
        return activity
    def getFollowingRecentActivity(self):
        activity = self.SendRequest('news/?')
        return activity
    def getv2Inbox(self):
        inbox = self.SendRequest('direct_v2/inbox/?')
        return inbox
    def getUserTags(self, usernameId):
        tags = self.SendRequest('usertags/' + str(usernameId) +
                                '/feed/?rank_token=' + str(self.rank_token) + '&ranked_content=true&')
        return tags
    def getSelfUserTags(self):
        return self.getUserTags(self.user_id)
    def tagFeed(self, tag):
        userFeed = self.SendRequest(
            'feed/tag/' + str(tag) + '/?rank_token=' + str(self.rank_token) + '&ranked_content=true&')
        return userFeed
    def getMediaLikers(self, media_id):
        likers = self.SendRequest('media/' + str(media_id) + '/likers/?')
        return likers
    def getGeoMedia(self, usernameId):
        locations = self.SendRequest('maps/user/' + str(usernameId) + '/')
        return locations
    def getSelfGeoMedia(self):
        return self.getGeoMedia(self.user_id)
    # --- search: delegated to api_search -----------------------------------
    def fbUserSearch(self, query):
        return fbUserSearch(self, query)
    def searchUsers(self, query):
        return searchUsers(self, query)
    def searchUsername(self, username):
        return searchUsername(self, username)
    def searchTags(self, query):
        return searchTags(self, query)
    def searchLocation(self, query='', lat=None, lng=None):
        return searchLocation(self, query, lat, lng)
    def syncFromAdressBook(self, contacts):
        # NOTE(review): "Adress" typo is part of the public API name; renaming
        # would break callers.
        return self.SendRequest('address_book/link/?include=extra_display_name,thumbnails',
                                "contacts=" + json.dumps(contacts))
    def getTimeline(self):
        query = self.SendRequest(
            'feed/timeline/?rank_token=' + str(self.rank_token) + '&ranked_content=true&')
        return query
    def getArchiveFeed(self):
        query = self.SendRequest(
            'feed/only_me_feed/?rank_token=' + str(self.rank_token) + '&ranked_content=true&')
        return query
    def getUserFeed(self, usernameId, maxid='', minTimestamp=None):
        query = self.SendRequest(
            'feed/user/' + str(usernameId) + '/?max_id=' + str(maxid) + '&min_timestamp=' + str(minTimestamp) +
            '&rank_token=' + str(self.rank_token) + '&ranked_content=true')
        return query
    def getSelfUserFeed(self, maxid='', minTimestamp=None):
        return self.getUserFeed(self.user_id, maxid, minTimestamp)
    def getHashtagFeed(self, hashtagString, maxid=''):
        return self.SendRequest('feed/tag/' + hashtagString + '/?max_id=' + str(
            maxid) + '&rank_token=' + self.rank_token + '&ranked_content=true&')
    def getLocationFeed(self, locationId, maxid=''):
        return self.SendRequest('feed/location/' + str(locationId) + '/?max_id=' + str(
            maxid) + '&rank_token=' + self.rank_token + '&ranked_content=true&')
    def getPopularFeed(self):
        popularFeed = self.SendRequest(
            'feed/popular/?people_teaser_supported=1&rank_token=' + str(self.rank_token) + '&ranked_content=true&')
        return popularFeed
    def getUserFollowings(self, usernameId, maxid=''):
        return self.SendRequest('friendships/' + str(usernameId) + '/following/?max_id=' + str(maxid) +
                                '&ig_sig_key_version=' + config.SIG_KEY_VERSION + '&rank_token=' + self.rank_token)
    def getSelfUsersFollowing(self):
        return self.getUserFollowings(self.user_id)
    def getUserFollowers(self, usernameId, maxid=''):
        if maxid == '':
            return self.SendRequest('friendships/' + str(usernameId) + '/followers/?rank_token=' + self.rank_token)
        else:
            return self.SendRequest(
                'friendships/' + str(usernameId) + '/followers/?rank_token=' + self.rank_token + '&max_id=' + str(
                    maxid))
    def getSelfUserFollowers(self):
        return self.getUserFollowers(self.user_id)
    def like(self, mediaId):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            '_csrftoken': self.token,
            'media_id': mediaId
        })
        return self.SendRequest('media/' + str(mediaId) + '/like/', self.generateSignature(data))
    def unlike(self, mediaId):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            '_csrftoken': self.token,
            'media_id': mediaId
        })
        return self.SendRequest('media/' + str(mediaId) + '/unlike/', self.generateSignature(data))
    def getMediaComments(self, mediaId):
        return self.SendRequest('media/' + str(mediaId) + '/comments/?')
    def setNameAndPhone(self, name='', phone=''):
        return setNameAndPhone(self, name, phone)
    def getDirectShare(self):
        return self.SendRequest('direct_share/inbox/?')
    # --- friendship actions ------------------------------------------------
    def follow(self, userId):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            'user_id': userId,
            '_csrftoken': self.token
        })
        return self.SendRequest('friendships/create/' + str(userId) + '/', self.generateSignature(data))
    def unfollow(self, userId):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            'user_id': userId,
            '_csrftoken': self.token
        })
        return self.SendRequest('friendships/destroy/' + str(userId) + '/', self.generateSignature(data))
    def block(self, userId):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            'user_id': userId,
            '_csrftoken': self.token
        })
        return self.SendRequest('friendships/block/' + str(userId) + '/', self.generateSignature(data))
    def unblock(self, userId):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            'user_id': userId,
            '_csrftoken': self.token
        })
        return self.SendRequest('friendships/unblock/' + str(userId) + '/', self.generateSignature(data))
    def userFriendship(self, userId):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.user_id,
            'user_id': userId,
            '_csrftoken': self.token
        })
        return self.SendRequest('friendships/show/' + str(userId) + '/', self.generateSignature(data))
    # --- crypto / id helpers -----------------------------------------------
    def generateSignature(self, data):
        """Return the signed-body form string: HMAC-SHA256 of ``data`` keyed
        with the app's signing key, followed by the URL-quoted payload."""
        try:
            parsedData = urllib.parse.quote(data)
        except AttributeError:
            # Python 2 fallback: quote lives directly on urllib.
            parsedData = urllib.quote(data)
        return 'ig_sig_key_version=' + config.SIG_KEY_VERSION + '&signed_body=' + hmac.new(
            config.IG_SIG_KEY.encode('utf-8'), data.encode('utf-8'), hashlib.sha256).hexdigest() + '.' + parsedData
    def generateDeviceId(self, seed):
        """Deterministic 'android-<16 hex>' device id derived from ``seed``."""
        volatile_seed = "12345"
        m = hashlib.md5()
        m.update(seed.encode('utf-8') + volatile_seed.encode('utf-8'))
        return 'android-' + m.hexdigest()[:16]
    def generateUUID(self, uuid_type):
        """Random UUID4; with dashes when ``uuid_type`` is truthy, else bare hex."""
        generated_uuid = str(uuid.uuid4())
        if (uuid_type):
            return generated_uuid
        else:
            return generated_uuid.replace('-', '')
    def getLikedMedia(self, maxid=''):
        return self.SendRequest('feed/liked/?max_id=' + str(maxid))
    # --- paginated aggregate fetchers --------------------------------------
    def getTotalFollowers(self, usernameId, amount=None):
        """Page through a user's followers and return up to ``amount`` of them
        (all of them when ``amount`` is None); False when the user lookup fails.

        Sleeps 2-3 minutes every ~20k fetched entries to avoid rate limits.
        """
        sleep_track = 0
        followers = []
        next_max_id = ''
        self.getUsernameInfo(usernameId)
        if "user" in self.LastJson:
            if amount:
                total_followers = amount
            else:
                total_followers = self.LastJson["user"]['follower_count']
            if total_followers > 200000:
                print("Consider temporarily saving the result of this big operation. This will take a while.\n")
        else:
            return False
        with tqdm(total=total_followers, desc="Getting followers", leave=False) as pbar:
            while True:
                self.getUserFollowers(usernameId, next_max_id)
                temp = self.LastJson
                try:
                    pbar.update(len(temp["users"]))
                    for item in temp["users"]:
                        followers.append(item)
                        sleep_track += 1
                        if sleep_track >= 20000:
                            sleep_time = randint(120, 180)
                            print("\nWaiting %.2f min. due to too many requests." % float(sleep_time / 60))
                            time.sleep(sleep_time)
                            sleep_track = 0
                    if len(temp["users"]) == 0 or len(followers) >= total_followers:
                        return followers[:total_followers]
                except:
                    # NOTE(review): bare except — a malformed page silently
                    # truncates the result instead of surfacing the error.
                    return followers[:total_followers]
                if temp["big_list"] is False:
                    return followers[:total_followers]
                next_max_id = temp["next_max_id"]
    def getTotalFollowings(self, usernameId, amount=None):
        """Same pagination scheme as getTotalFollowers, for followings."""
        sleep_track = 0
        following = []
        next_max_id = ''
        self.getUsernameInfo(usernameId)
        if "user" in self.LastJson:
            if amount:
                total_following = amount
            else:
                total_following = self.LastJson["user"]['following_count']
            if total_following > 200000:
                print("Consider temporarily saving the result of this big operation. This will take a while.\n")
        else:
            return False
        with tqdm(total=total_following, desc="Getting following", leave=False) as pbar:
            while True:
                self.getUserFollowings(usernameId, next_max_id)
                temp = self.LastJson
                try:
                    pbar.update(len(temp["users"]))
                    for item in temp["users"]:
                        following.append(item)
                        sleep_track += 1
                        if sleep_track >= 20000:
                            sleep_time = randint(120, 180)
                            print("\nWaiting %.2f min. due to too many requests." % float(sleep_time / 60))
                            time.sleep(sleep_time)
                            sleep_track = 0
                    if len(temp["users"]) == 0 or len(following) >= total_following:
                        return following[:total_following]
                except:
                    # NOTE(review): bare except, same truncation caveat as above.
                    return following[:total_following]
                if temp["big_list"] is False:
                    return following[:total_following]
                next_max_id = temp["next_max_id"]
    def getTotalUserFeed(self, usernameId, minTimestamp=None):
        """Collect every item from a user's feed; [] when the feed is hidden."""
        user_feed = []
        next_max_id = ''
        while 1:
            self.getUserFeed(usernameId, next_max_id, minTimestamp)
            temp = self.LastJson
            if "items" not in temp: # maybe user is private, (we have not access to posts)
                return []
            for item in temp["items"]:
                user_feed.append(item)
            if "more_available" not in temp or temp["more_available"] is False:
                return user_feed
            next_max_id = temp["next_max_id"]
    def getTotalSelfUserFeed(self, minTimestamp=None):
        return self.getTotalUserFeed(self.user_id, minTimestamp)
    def getTotalSelfFollowers(self):
        return self.getTotalFollowers(self.user_id)
    def getTotalSelfFollowings(self):
        return self.getTotalFollowings(self.user_id)
    def getTotalLikedMedia(self, scan_rate=1):
        """Collect liked media across ``scan_rate`` pages."""
        next_id = ''
        liked_items = []
        for _ in range(0, scan_rate):
            # NOTE(review): the first assignment is dead — getLikedMedia
            # returns a bool and temp is immediately overwritten by LastJson.
            # Also raises KeyError if the last page has no "next_max_id".
            temp = self.getLikedMedia(next_id)
            temp = self.LastJson
            next_id = temp["next_max_id"]
            for item in temp["items"]:
                liked_items.append(item)
        return liked_items
|
{
"content_hash": "4f4f69c84cda7ac20ef59473fedf0cf3",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 115,
"avg_line_length": 37.842622950819674,
"alnum_prop": 0.5609079882169468,
"repo_name": "Diapostrofo/instabot",
"id": "22133ad7bdc8f558ddbe6e0b82d27978a89581e6",
"size": "23084",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "instabot/api/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "101732"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build and return the shared felbar dessert tangible template.

    ``kernel`` is supplied by the template loader and unused here.
    """
    obj = Tangible()
    obj.template = "object/tangible/food/crafted/shared_dessert_felbar.iff"
    obj.attribute_template_id = 5
    obj.stfName("food_name","felbar")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return obj
|
{
"content_hash": "4799a8853c6a004a98cb1a919e053794",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 23.153846153846153,
"alnum_prop": 0.6976744186046512,
"repo_name": "anhstudios/swganh",
"id": "27709f2e02c86e6c2dcec3e3296f3db60b586ba1",
"size": "446",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/food/crafted/shared_dessert_felbar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import sensor, image, time
# OpenMV example: stream frames with horizontal mirroring enabled and
# print the achieved frame rate.
sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)     # Wait for settings take effect.
clock = time.clock()                # Create a clock object to track the FPS.
# Change this to False to undo the mirror.
sensor.set_hmirror(True)
while(True):
    clock.tick()                    # Update the FPS clock.
    img = sensor.snapshot()         # Take a picture and return the image.
    print(clock.fps())              # Note: OpenMV Cam runs about half as fast when connected
                                    # to the IDE. The FPS should increase once disconnected.
|
{
"content_hash": "39f4f4297f6f7b3f1188ab3c12c248b9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 93,
"avg_line_length": 50.125,
"alnum_prop": 0.628428927680798,
"repo_name": "kwagyeman/openmv",
"id": "467b462861e09518798cf219e327143d92cf4744",
"size": "936",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/examples/01-Camera/07-Sensor-Control/sensor_horizontal_mirror.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "569161"
},
{
"name": "C",
"bytes": "100505757"
},
{
"name": "C++",
"bytes": "97780"
},
{
"name": "CMake",
"bytes": "10214"
},
{
"name": "Dockerfile",
"bytes": "885"
},
{
"name": "Makefile",
"bytes": "73899"
},
{
"name": "Python",
"bytes": "877021"
},
{
"name": "Shell",
"bytes": "3220"
}
],
"symlink_target": ""
}
|
import argparse
from opensfm import commands
import data_generation
def run_command(command, args):
    """Parse *args* with the command's own argument spec, then execute it.

    ``command`` must expose ``add_arguments(parser)`` and ``run(parsed_args)``.
    """
    cli = argparse.ArgumentParser()
    command.add_arguments(cli)
    command.run(cli.parse_args(args))
def test_run_all(tmpdir):
    """End-to-end smoke test: run the full SfM pipeline on the Berlin set."""
    data = data_generation.create_berlin_test_folder(tmpdir)
    pipeline = (
        commands.extract_metadata,
        commands.detect_features,
        commands.match_features,
        commands.create_tracks,
        commands.reconstruct,
        commands.mesh,
    )
    for step in pipeline:
        run_command(step.Command(), [data.data_path])
    reconstruction = data.load_reconstruction()
    assert len(reconstruction[0].shots) == 3
    assert len(reconstruction[0].points) > 1000
|
{
"content_hash": "0a7bd4829fcecdde7a3f3ea14713c3b6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 60,
"avg_line_length": 25.5,
"alnum_prop": 0.6801470588235294,
"repo_name": "kkkarthik6/Lol",
"id": "00623fd1ab9d2039982908ac285672fd13274dda",
"size": "816",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "opensfm/test/test_commands.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "17111"
},
{
"name": "C++",
"bytes": "110789"
},
{
"name": "CMake",
"bytes": "108117"
},
{
"name": "HTML",
"bytes": "54471"
},
{
"name": "JavaScript",
"bytes": "968382"
},
{
"name": "Makefile",
"bytes": "294684"
},
{
"name": "Python",
"bytes": "609882"
},
{
"name": "Shell",
"bytes": "1767"
}
],
"symlink_target": ""
}
|
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
# Module-level fixture loaded once at import time.
# NOTE(review): iris appears unused in this portion of the file — presumably
# referenced by tests further down; confirm before removing.
iris = datasets.load_iris()
def toarray(a):
    """Densify *a* when it exposes ``toarray`` (sparse); pass through otherwise."""
    return a.toarray() if hasattr(a, "toarray") else a
def test_label_binarizer():
    """LabelBinarizer handles one-, two- and multi-class input, dense and sparse,
    and round-trips through inverse_transform."""
    # one-class case defaults to negative label
    # For dense case:
    inp = ["pos", "pos", "pos", "pos"]
    lb = LabelBinarizer(sparse_output=False)
    expected = np.array([[0, 0, 0, 0]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ["pos"])
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
    # For sparse case:
    lb = LabelBinarizer(sparse_output=True)
    got = lb.fit_transform(inp)
    assert_true(issparse(got))
    assert_array_equal(lb.classes_, ["pos"])
    assert_array_equal(expected, got.toarray())
    assert_array_equal(lb.inverse_transform(got.toarray()), inp)
    lb = LabelBinarizer(sparse_output=False)
    # two-class case
    inp = ["neg", "pos", "pos", "neg"]
    expected = np.array([[0, 1, 1, 0]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ["neg", "pos"])
    assert_array_equal(expected, got)
    to_invert = np.array([[1, 0],
                          [0, 1],
                          [0, 1],
                          [1, 0]])
    assert_array_equal(lb.inverse_transform(to_invert), inp)
    # multi-class case
    inp = ["spam", "ham", "eggs", "ham", "0"]
    expected = np.array([[0, 0, 0, 1],
                         [0, 0, 1, 0],
                         [0, 1, 0, 0],
                         [0, 0, 1, 0],
                         [1, 0, 0, 0]])
    got = lb.fit_transform(inp)
    # classes_ come back lexicographically sorted
    assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
    """Labels unseen during fit transform to all-zero indicator rows."""
    lb = LabelBinarizer()
    expected = np.array([[1, 0, 0],
                         [0, 1, 0],
                         [0, 0, 1]])
    got = lb.fit_transform(['b', 'd', 'e'])
    assert_array_equal(expected, got)
    # 'a', 'c' and 'f' were never fitted -> zero rows
    expected = np.array([[0, 0, 0],
                         [1, 0, 0],
                         [0, 0, 0],
                         [0, 1, 0],
                         [0, 0, 1],
                         [0, 0, 0]])
    got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
    assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
    """Custom neg_label/pos_label values are honored in the output encoding."""
    lb = LabelBinarizer(neg_label=-2, pos_label=0)
    # two-class case with pos_label=0
    inp = np.array([0, 1, 1, 0])
    expected = np.array([[-2, 0, 0, -2]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
    lb = LabelBinarizer(neg_label=-2, pos_label=2)
    # multi-class case
    inp = np.array([3, 2, 1, 2, 0])
    expected = np.array([[-2, -2, -2, +2],
                         [-2, -2, +2, -2],
                         [-2, +2, -2, -2],
                         [-2, -2, +2, -2],
                         [+2, -2, -2, -2]])
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
    """Invalid arguments and inconsistent inputs raise ValueError."""
    # Check that invalid arguments yield ValueError
    one_class = np.array([0, 0, 0, 0])
    lb = LabelBinarizer().fit(one_class)
    multi_label = [(2, 3), (0,), (0, 2)]
    assert_raises(ValueError, lb.transform, multi_label)
    lb = LabelBinarizer()
    assert_raises(ValueError, lb.transform, [])
    assert_raises(ValueError, lb.inverse_transform, [])
    # neg_label must be strictly smaller than pos_label
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
    # sparse output requires neg_label == 0
    assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
                  sparse_output=True)
    # Fail on y_type
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
                  classes=[1, 2], threshold=0)
    # Sequence of seq type should raise ValueError
    y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
    assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
    # Fail on the number of classes
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
                  classes=[1, 2, 3], threshold=0)
    # Fail on the dimension of 'binary'
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
                  classes=[1, 2, 3], threshold=0)
    # Fail on multioutput data
    assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
    assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
                  [1, 2, 3])
def test_label_encoder():
    """LabelEncoder round-trips transform/inverse_transform; classes_ sorted."""
    encoder = LabelEncoder()
    encoder.fit([1, 1, 4, 5, -1, 0])
    assert_array_equal(encoder.classes_, [-1, 0, 1, 4, 5])
    assert_array_equal(encoder.transform([0, 1, 4, 4, 5, -1, -1]),
                       [1, 2, 3, 3, 4, 0, 0])
    assert_array_equal(encoder.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
                       [0, 1, 4, 4, 5, -1, -1])
    # Labels outside the fitted set must be rejected.
    assert_raises(ValueError, encoder.transform, [0, 6])
    # A scalar (0-d) input is a shape error, not a value error message.
    encoder.fit(["apple", "orange"])
    msg = "bad input shape"
    assert_raise_message(ValueError, msg, encoder.transform, "apple")
def test_label_encoder_fit_transform():
    """fit_transform encodes labels by the sorted order of the classes."""
    for raw, encoded in [
        ([1, 1, 4, 5, -1, 0], [2, 2, 3, 4, 0, 1]),
        (["paris", "paris", "tokyo", "amsterdam"], [1, 1, 2, 0]),
    ]:
        assert_array_equal(LabelEncoder().fit_transform(raw), encoded)
def test_label_encoder_errors():
    """Empty inputs and unseen labels raise ValueError."""
    encoder = LabelEncoder()
    assert_raises(ValueError, encoder.transform, [])
    assert_raises(ValueError, encoder.inverse_transform, [])
    # Inverse-transforming codes never produced by fit must fail loudly.
    encoder = LabelEncoder()
    encoder.fit([1, 2, 3, -1, 1])
    msg = "contains previously unseen labels"
    for bad in ([-2], [-2, -3, -4]):
        assert_raise_message(ValueError, msg, encoder.inverse_transform, bad)
def test_sparse_output_multilabel_binarizer():
    """MultiLabelBinarizer yields the same indicator matrix for list/set/iterator
    inputs, in both sparse and dense output modes, and round-trips inverse."""
    # test input as iterable of iterables
    inputs = [
        lambda: [(2, 3), (1,), (1, 2)],
        lambda: (set([2, 3]), set([1]), set([1, 2])),
        lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    inverse = inputs[0]()
    for sparse_output in [True, False]:
        for inp in inputs:
            # With fit_transform
            mlb = MultiLabelBinarizer(sparse_output=sparse_output)
            got = mlb.fit_transform(inp())
            assert_equal(issparse(got), sparse_output)
            if sparse_output:
                # verify CSR assumption that indices and indptr have same dtype
                assert_equal(got.indices.dtype, got.indptr.dtype)
                got = got.toarray()
            assert_array_equal(indicator_mat, got)
            assert_array_equal([1, 2, 3], mlb.classes_)
            assert_equal(mlb.inverse_transform(got), inverse)
            # With fit
            mlb = MultiLabelBinarizer(sparse_output=sparse_output)
            got = mlb.fit(inp()).transform(inp())
            assert_equal(issparse(got), sparse_output)
            if sparse_output:
                # verify CSR assumption that indices and indptr have same dtype
                assert_equal(got.indices.dtype, got.indptr.dtype)
                got = got.toarray()
            assert_array_equal(indicator_mat, got)
            assert_array_equal([1, 2, 3], mlb.classes_)
            assert_equal(mlb.inverse_transform(got), inverse)
    # Non-binary sparse input to inverse_transform must be rejected.
    assert_raises(ValueError, mlb.inverse_transform,
                  csr_matrix(np.array([[0, 1, 1],
                                       [2, 0, 0],
                                       [1, 1, 0]])))
def test_multilabel_binarizer():
    """Dense MultiLabelBinarizer produces the same matrix for list/set/iterator
    inputs via both fit_transform and fit().transform()."""
    # test input as iterable of iterables
    inputs = [
        lambda: [(2, 3), (1,), (1, 2)],
        lambda: (set([2, 3]), set([1]), set([1, 2])),
        lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    inverse = inputs[0]()
    for inp in inputs:
        # With fit_transform
        mlb = MultiLabelBinarizer()
        got = mlb.fit_transform(inp())
        assert_array_equal(indicator_mat, got)
        assert_array_equal([1, 2, 3], mlb.classes_)
        assert_equal(mlb.inverse_transform(got), inverse)
        # With fit
        mlb = MultiLabelBinarizer()
        got = mlb.fit(inp()).transform(inp())
        assert_array_equal(indicator_mat, got)
        assert_array_equal([1, 2, 3], mlb.classes_)
        assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
    """A sample with no labels maps to an all-zero indicator row."""
    expected = np.array([[1, 1],
                         [1, 0],
                         [0, 0]])
    assert_array_equal(MultiLabelBinarizer().fit_transform([[1, 2], [1], []]),
                       expected)
def test_multilabel_binarizer_unknown_class():
    """Transforming a label outside the fitted classes raises KeyError."""
    binarizer = MultiLabelBinarizer()
    assert_raises(KeyError, binarizer.fit([[1, 2]]).transform, [[0]])
    binarizer = MultiLabelBinarizer(classes=[1, 2])
    assert_raises(KeyError, binarizer.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
    """An explicit ``classes`` argument fixes column order (and may add
    columns for labels absent from the data)."""
    inp = [(2, 3), (1,), (1, 2)]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 0, 1]])
    # fit_transform()
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit_transform(inp), indicator_mat)
    assert_array_equal(mlb.classes_, [1, 3, 2])
    # fit().transform()
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
    assert_array_equal(mlb.classes_, [1, 3, 2])
    # ensure works with extra class
    mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
    assert_array_equal(mlb.fit_transform(inp),
                       np.hstack(([[0], [0], [0]], indicator_mat)))
    assert_array_equal(mlb.classes_, [4, 1, 3, 2])
    # ensure fit is no-op as iterable is not consumed
    inp = iter(inp)
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
    """Equal-length label lists must still be treated as a list of label sets,
    not silently coerced into a 2-d indicator array."""
    # Ensure sequences of the same length are not interpreted as a 2-d array
    inp = [[1], [0], [2]]
    indicator_mat = np.array([[0, 1, 0],
                              [1, 0, 0],
                              [0, 0, 1]])
    # fit_transform()
    mlb = MultiLabelBinarizer()
    assert_array_equal(mlb.fit_transform(inp), indicator_mat)
    assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
    # fit().transform()
    mlb = MultiLabelBinarizer()
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
    assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
    """String and tuple labels binarize like integers; unhashable labels raise."""
    # Tuple classes must live in an object array: np.array would otherwise
    # broadcast the tuples into a 2-d array.
    tuple_classes = np.empty(3, dtype=object)
    tuple_classes[:] = [(1,), (2,), (3,)]
    inputs = [
        ([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
        ([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
        ([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    for inp, classes in inputs:
        # fit_transform()
        mlb = MultiLabelBinarizer()
        assert_array_equal(mlb.fit_transform(inp), indicator_mat)
        assert_array_equal(mlb.classes_, classes)
        assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
        # fit().transform()
        mlb = MultiLabelBinarizer()
        assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
        assert_array_equal(mlb.classes_, classes)
        assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
    # dicts are unhashable as labels
    mlb = MultiLabelBinarizer()
    assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
    # Repeated labels within one sample collapse to a single indicator column.
    labels = [(1, 1, 1, 0)]
    expected = np.array([[1, 1]])
    assert_array_equal(MultiLabelBinarizer().fit_transform(labels), expected)
def test_multilabel_binarizer_inverse_validation():
    # inverse_transform must validate both the values and the shape of its input.
    binarizer = MultiLabelBinarizer()
    binarizer.fit_transform([(1, 1, 1, 0)])

    # Non-binary entries are rejected.
    assert_raises(ValueError, binarizer.inverse_transform, np.array([[1, 3]]))

    # All strictly-binary rows are accepted.
    for row in ([[0, 0]], [[1, 1]], [[1, 0]]):
        binarizer.inverse_transform(np.array(row))

    # Column count must match the fitted number of classes (2 here).
    assert_raises(ValueError, binarizer.inverse_transform, np.array([[1]]))
    assert_raises(ValueError, binarizer.inverse_transform,
                  np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
    # Column order of the indicator matrix follows the `classes` argument,
    # not the sorted order of the labels.
    result = label_binarize([1, 6], classes=[1, 2, 4, 6])
    assert_array_equal(result, np.array([[1, 0, 0, 0], [0, 0, 0, 1]]))

    # Same labels, permuted class order -> permuted columns.
    result = label_binarize([1, 6], classes=[1, 6, 4, 2])
    assert_array_equal(result, np.array([[1, 0, 0, 0], [0, 1, 0, 0]]))

    result = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
    assert_array_equal(result, np.array([[0, 0, 1, 0],
                                         [0, 0, 0, 1],
                                         [0, 1, 0, 0],
                                         [1, 0, 0, 0]]))
def check_binarized_results(y, classes, pos_label, neg_label, expected):
    """Check label_binarize and LabelBinarizer agree, dense and sparse.

    Also round-trips the binarized output through the matching inverse
    helper and through LabelBinarizer.inverse_transform.
    """
    for sparse_output in [True, False]:
        if sparse_output and (pos_label == 0 or neg_label != 0):
            # Sparse output requires neg_label == 0 and pos_label != 0.
            assert_raises(ValueError, label_binarize, y, classes,
                          neg_label=neg_label, pos_label=pos_label,
                          sparse_output=sparse_output)
            continue

        # Functional interface: label_binarize().
        binarized = label_binarize(y, classes, neg_label=neg_label,
                                   pos_label=pos_label,
                                   sparse_output=sparse_output)
        assert_array_equal(toarray(binarized), expected)
        assert_equal(issparse(binarized), sparse_output)

        # Invert via the private helper matching the target type.
        y_type = type_of_target(y)
        if y_type == "multiclass":
            inversed = _inverse_binarize_multiclass(binarized,
                                                    classes=classes)
        else:
            threshold = (neg_label + pos_label) / 2.
            inversed = _inverse_binarize_thresholding(binarized,
                                                      output_type=y_type,
                                                      classes=classes,
                                                      threshold=threshold)
        assert_array_equal(toarray(inversed), toarray(y))

        # Estimator interface: LabelBinarizer.
        lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
                            sparse_output=sparse_output)
        binarized = lb.fit_transform(y)
        assert_array_equal(toarray(binarized), expected)
        assert_equal(issparse(binarized), sparse_output)
        inverse_output = lb.inverse_transform(binarized)
        assert_array_equal(toarray(inverse_output), toarray(y))
        assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
    # Binary target: output is a single column taken from the two-class
    # indicator matrix. Nose-style generator test: yields check functions.
    y = [0, 1, 0]
    classes = [0, 1]

    # Custom pos/neg labels; neg_label != 0 forbids sparse output.
    expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
    yield check_binarized_results, y, classes, 2, -1, expected

    # neg_label == 0 and pos_label != 0: sparse output is also allowed.
    expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
    yield check_binarized_results, y, classes, 3, 0, expected
def test_label_binarize_multiclass():
    # Multiclass target: one column per class, pos_label on the diagonal.
    y = [0, 1, 2]
    classes = [0, 1, 2]
    pos_label = 2
    expected = 2 * np.eye(3)
    yield check_binarized_results, y, classes, pos_label, 0, expected

    # Sparse output with a nonzero neg_label is invalid.
    assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
                  pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
    # Multilabel indicator input, dense and in every sparse format.
    y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
    classes = [0, 1, 2]
    pos_label = 2
    neg_label = 0
    expected = pos_label * y_ind

    sparse_formats = [coo_matrix, csc_matrix, csr_matrix,
                      dok_matrix, lil_matrix]
    y_sparse = [make_sparse(y_ind) for make_sparse in sparse_formats]

    for y in [y_ind] + y_sparse:
        yield (check_binarized_results, y, classes, pos_label, neg_label,
               expected)

    # Sparse output with a nonzero neg_label is invalid.
    assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
                  pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
    # pos_label and neg_label must differ meaningfully; this combination
    # (pos_label=0, neg_label=1) is rejected.
    assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
                  pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
    # Row-wise argmax over a sparse indicator: ties (all-zero last row)
    # resolve to the first class.
    indicator = csr_matrix([[0, 1, 0],
                            [-1, 0, -1],
                            [0, 0, 0]])
    result = _inverse_binarize_multiclass(indicator, np.arange(3))
    assert_array_equal(result, np.array([1, 1, 0]))
|
{
"content_hash": "2a43eba4a7b61f15ec2099737555bcb4",
"timestamp": "",
"source": "github",
"line_count": 515,
"max_line_length": 79,
"avg_line_length": 36.227184466019416,
"alnum_prop": 0.5568955351878652,
"repo_name": "zorroblue/scikit-learn",
"id": "4f64fc6b4638c20a99bfa183d3a8799861e85bf0",
"size": "18657",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "sklearn/preprocessing/tests/test_label.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7346700"
},
{
"name": "Shell",
"bytes": "20756"
}
],
"symlink_target": ""
}
|
from io import BytesIO
from threading import Condition


class FrameSplitter:
    """Accumulate an MJPEG byte stream and expose the latest complete frame.

    Each call to write() appends to an internal buffer. When a chunk opens
    with the JPEG start-of-image marker (0xFFD8), the bytes gathered so far
    form the previous frame: it is published as ``self.frame`` and waiting
    consumers are woken via ``self.condition``.
    """

    def __init__(self):
        self.frame = None            # most recently completed JPEG frame
        self.output = BytesIO()      # bytes of the frame currently arriving
        self.condition = Condition() # wait()/notify channel for consumers

    def write(self, buffer):
        starts_new_frame = buffer.startswith(b'\xff\xd8')
        if starts_new_frame:
            # Previous frame is complete: drop any stale tail beyond the
            # current position, publish a snapshot, and wake all waiters.
            self.output.truncate()
            with self.condition:
                self.frame = self.output.getvalue()
                self.condition.notify_all()
            # Rewind so the new frame overwrites the buffer from the start.
            self.output.seek(0)
        return self.output.write(buffer)
|
{
"content_hash": "8d35249def0d2c7362d6a543fcc181f1",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 27.043478260869566,
"alnum_prop": 0.5868167202572347,
"repo_name": "misalcedo/RapBot",
"id": "ada3649dd744c6617acfd3965636c7cde679d26f",
"size": "622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Vision/src/mjpeg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2331"
},
{
"name": "HTML",
"bytes": "18278"
},
{
"name": "Java",
"bytes": "6618"
},
{
"name": "JavaScript",
"bytes": "1625"
},
{
"name": "Jupyter Notebook",
"bytes": "4671932"
},
{
"name": "Python",
"bytes": "8925"
},
{
"name": "Scala",
"bytes": "32395"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.