"""The common front-end functionality."""
import abc
import locale
import logging
import multiprocessing
import os
import pdb
import signal
import sys
import threading
import traceback
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from dfvfs.volume import tsk_volume_system
from dfvfs.volume import vshadow_volume_system
import plaso
from plaso import parsers # pylint: disable=unused-import
from plaso.engine import engine
from plaso.engine import scanner
from plaso.engine import utils as engine_utils
from plaso.frontend import rpc_proxy
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import foreman
from plaso.lib import pfilter
from plaso.lib import putils
from plaso.lib import queue
from plaso.lib import storage
from plaso.lib import timelib
import pytz
class FrontendInputReader(object):
"""Class that implements the input reader interface for the engine."""
@abc.abstractmethod
def Read(self):
"""Reads a string from the input.
Returns:
A string containing the input.
"""
class FrontendOutputWriter(object):
"""Class that implements the output writer interface for the engine."""
@abc.abstractmethod
def Write(self, string):
"""Wtites a string to the output.
Args:
string: A string containing the output.
"""
class StdinFrontendInputReader(object):
"""Class that implements a stdin input reader."""
def Read(self):
"""Reads a string from the input.
Returns:
A string containing the input.
"""
return sys.stdin.readline()
class StdoutFrontendOutputWriter(object):
"""Class that implements a stdout output writer."""
ENCODING = u'utf-8'
def Write(self, string):
"""Writes a string to the output.
Args:
string: A string containing the output.
"""
try:
sys.stdout.write(string.encode(self.ENCODING))
except UnicodeEncodeError:
logging.error(
u'Unable to properly write output, line will be partially '
u'written out.')
sys.stdout.write(u'LINE ERROR')
sys.stdout.write(string.encode(self.ENCODING, 'ignore'))
class Frontend(object):
"""Class that implements a front-end."""
# The maximum length of the line in number of characters.
_LINE_LENGTH = 80
def __init__(self, input_reader, output_writer):
"""Initializes the front-end object.
Args:
input_reader: the input reader (instance of FrontendInputReader).
output_writer: the output writer (instance of FrontendOutputWriter).
"""
super(Frontend, self).__init__()
self._input_reader = input_reader
self._output_writer = output_writer
# TODO: add preferred_encoding support to the output writer.
self.preferred_encoding = locale.getpreferredencoding().lower()
def AddImageOptions(self, argument_group):
"""Adds the storage media image options to the argument group.
Args:
argument_group: The argparse argument group (instance of
argparse._ArgumentGroup).
"""
argument_group.add_argument(
'-o', '--offset', dest='image_offset', action='store', default=None,
type=int, help=(
u'The offset of the volume within the storage media image in '
u'number of sectors. A sector is 512 bytes in size by default; '
u'this can be overridden with the --sector_size option.'))
argument_group.add_argument(
'--sector_size', '--sector-size', dest='bytes_per_sector',
action='store', type=int, default=512, help=(
u'The number of bytes per sector, which is 512 by default.'))
argument_group.add_argument(
'--ob', '--offset_bytes', dest='image_offset_bytes',
action='store', default=None, type=int, help=(
u'The offset of the volume within the storage media image in '
u'number of bytes.'))
def AddVssProcessingOptions(self, argument_group):
"""Adds the VSS processing options to the argument group.
Args:
argument_group: The argparse argument group (instance of
argparse._ArgumentGroup).
"""
argument_group.add_argument(
'--vss_stores', '--vss-stores', dest='vss_stores', action='store',
type=str, default=None, help=(
u'Define Volume Shadow Snapshots (VSS) (or stores) that need to be '
u'processed. A range of stores can be defined as: \'3..5\'. '
u'Multiple stores can be defined as: \'1,3,5\' (a list of comma '
u'separated values). Ranges and lists can also be combined as: '
u'\'1,3..5\'. The first store is 1.'))
def PrintColumnValue(self, name, description, column_length=25):
"""Prints a value with a name and description aligned to the column length.
Args:
name: The name.
description: The description.
column_length: Optional column length. The default is 25.
"""
line_length = self._LINE_LENGTH - column_length - 3
# The format string of the first line of the column value.
primary_format_string = u'{{0:>{0:d}s}} : {{1:s}}\n'.format(column_length)
# The format string of successive lines of the column value.
secondary_format_string = u'{{0:<{0:d}s}}{{1:s}}\n'.format(
column_length + 3)
if len(description) < line_length:
self._output_writer.Write(primary_format_string.format(name, description))
return
# Split the description in words.
words = description.split()
current = 0
lines = []
word_buffer = []
for word in words:
current += len(word) + 1
if current >= line_length:
current = len(word)
lines.append(u' '.join(word_buffer))
word_buffer = [word]
else:
word_buffer.append(word)
lines.append(u' '.join(word_buffer))
# Print the column value on multiple lines.
self._output_writer.Write(primary_format_string.format(name, lines[0]))
for line in lines[1:]:
self._output_writer.Write(secondary_format_string.format(u'', line))
def PrintHeader(self, text, character='*'):
"""Prints the header as a line with centered text.
Args:
text: The header text.
character: Optional header line character. The default is '*'.
"""
self._output_writer.Write(u'\n')
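# Note: the expression below builds the inner format string from the fill
# character and the line length, e.g. character '*' and length 80 produce
# u'{0:*^80}', which centers the (space padded) text on a '*' filled line.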
format_string = u'{{0:{0:s}^{1:d}}}\n'.format(character, self._LINE_LENGTH)
header_string = format_string.format(u' {0:s} '.format(text))
self._output_writer.Write(header_string)
def PrintOptions(self, options, source_path):
"""Prints the options.
Args:
options: the command line arguments (instance of argparse.Namespace).
source_path: the source path.
"""
self._output_writer.Write(u'\n')
self._output_writer.Write(
u'Source path\t\t: {0:s}\n'.format(source_path))
if self._source_type == self._SOURCE_TYPE_STORAGE_MEDIA_IMAGE:
is_image = True
else:
is_image = False
self._output_writer.Write(
u'Is storage media image\t: {0!s}\n'.format(is_image))
if is_image:
image_offset_bytes = self._partition_offset
if isinstance(image_offset_bytes, basestring):
try:
image_offset_bytes = int(image_offset_bytes, 10)
except ValueError:
image_offset_bytes = 0
elif image_offset_bytes is None:
image_offset_bytes = 0
self._output_writer.Write(
u'Partition offset\t: {0:d} (0x{0:08x})\n'.format(image_offset_bytes))
vss_stores = getattr(options, 'vss_stores', None)
if vss_stores:
self._output_writer.Write(u'VSS stores\t\t: {0!s}\n'.format(vss_stores))
filter_file = getattr(options, 'file_filter', None)
if filter_file:
self._output_writer.Write(u'Filter file\t\t: {0:s}\n'.format(filter_file))
self._output_writer.Write(u'\n')
def PrintSeparatorLine(self):
"""Prints a separator line."""
self._output_writer.Write(u'{0:s}\n'.format(u'-' * self._LINE_LENGTH))
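# A minimal usage sketch of the printing helpers above (illustrative only,
# not part of the module API), assuming the reader and writer classes
# defined earlier in this module:
#
#   frontend = Frontend(
#       StdinFrontendInputReader(), StdoutFrontendOutputWriter())
#   frontend.PrintHeader(u'Example')
#   frontend.PrintColumnValue(
#       u'Name', u'A description that is wrapped when it exceeds the '
#       u'line length.')
#   frontend.PrintSeparatorLine()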
class ExtractionFrontend(Frontend):
"""Class that implements an extraction front-end."""
# The minimum number of processes.
MINIMUM_WORKERS = 2
# The maximum number of processes.
MAXIMUM_WORKERS = 15
# The number of bytes in a MiB.
_BYTES_IN_A_MIB = 1024 * 1024
_SOURCE_TYPE_DEVICE = 1
_SOURCE_TYPE_DIRECTORY = 2
_SOURCE_TYPE_FILE = 3
_SOURCE_TYPE_STORAGE_MEDIA_IMAGE = 4
def __init__(self, input_reader, output_writer):
"""Initializes the front-end object.
Args:
input_reader: the input reader (instance of FrontendInputReader).
output_writer: the output writer (instance of FrontendOutputWriter).
"""
super(ExtractionFrontend, self).__init__(input_reader, output_writer)
self._collection_process = None
self._collector = None
self._debug_mode = False
self._engine = None
self._file_system_scanner = scanner.FileSystemScanner()
self._filter_expression = None
self._filter_object = None
self._number_of_worker_processes = 0
self._parsers = None
self._partition_offset = None
self._preprocess = False
self._resolver_context = context.Context()
self._run_foreman = True
self._single_process_mode = False
self._show_worker_memory_information = False
self._source_path = None
self._source_path_spec = None
self._source_type = None
self._storage_file_path = None
self._storage_process = None
self._timezone = pytz.utc
self._vss_stores = None
# TODO: turn into a process pool.
self._worker_processes = {}
def _CheckStorageFile(self, storage_file_path):
"""Checks if the storage file path is valid.
Args:
storage_file_path: The path of the storage file.
Raises:
BadConfigOption: if the storage file path is invalid.
"""
if os.path.exists(storage_file_path):
if not os.path.isfile(storage_file_path):
raise errors.BadConfigOption(
u'Storage file: {0:s} already exists and is not a file.'.format(
storage_file_path))
logging.warning(u'Appending to an already existing storage file.')
dirname = os.path.dirname(storage_file_path)
if not dirname:
dirname = '.'
# TODO: add a more thorough check to see if the storage file really is
# a plaso storage file.
if not os.access(dirname, os.W_OK):
raise errors.BadConfigOption(
u'Unable to write to storage file: {0:s}'.format(storage_file_path))
def _CreateExtractionWorker(self, worker_number, options, pre_obj):
"""Creates an extraction worker object.
Args:
worker_number: number that identifies the worker.
options: the command line arguments (instance of argparse.Namespace).
pre_obj: The preprocessing object (instance of PreprocessObject).
Returns:
An extraction worker (instance of worker.ExtractionWorker).
"""
# Set up a simple XML RPC server for the worker for status indications.
# Since we don't know the worker's PID for now we'll set the initial port
# number to zero and then adjust it later.
proxy_server = rpc_proxy.StandardRpcProxyServer()
extraction_worker = self._engine.CreateExtractionWorker(
worker_number, pre_obj, self._parsers, rpc_proxy=proxy_server)
extraction_worker.SetDebugMode(self._debug_mode)
extraction_worker.SetSingleProcessMode(self._single_process_mode)
open_files = getattr(options, 'open_files', None)
extraction_worker.SetOpenFiles(open_files)
if getattr(options, 'os', None):
mount_path = getattr(options, 'filename', None)
extraction_worker.SetMountPath(mount_path)
filter_query = getattr(options, 'filter', None)
if filter_query:
filter_object = pfilter.GetMatcher(filter_query)
extraction_worker.SetFilterObject(filter_object)
text_prepend = getattr(options, 'text_prepend', None)
extraction_worker.SetTextPrepend(text_prepend)
return extraction_worker
def _DebugPrintCollector(self, options):
"""Prints debug information about the collector.
Args:
options: the command line arguments (instance of argparse.Namespace).
"""
filter_file = getattr(options, 'file_filter', None)
if self._source_type == self._SOURCE_TYPE_STORAGE_MEDIA_IMAGE:
if filter_file:
logging.debug(u'Starting a collection on image with filter.')
else:
logging.debug(u'Starting a collection on image.')
elif self._source_type == self._SOURCE_TYPE_DIRECTORY:
if filter_file:
logging.debug(u'Starting a collection on directory with filter.')
else:
logging.debug(u'Starting a collection on directory.')
elif self._source_type == self._SOURCE_TYPE_FILE:
logging.debug(u'Starting a collection on a single file.')
else:
logging.warning(u'Unsupported source type.')
def _GetPartitionIdentifierFromUser(self, volume_system, volume_identifiers):
"""Asks the user to provide the partitioned volume identifier.
Args:
volume_system: The volume system (instance of dfvfs.TSKVolumeSystem).
volume_identifiers: List of allowed volume identifiers.
Returns:
A string containing the selected partition identifier.
Raises:
FileSystemScannerError: if the source cannot be processed.
"""
self._output_writer.Write(
u'The following partitions were found:\n'
u'Identifier\tOffset (in bytes)\tSize (in bytes)\n')
for volume_identifier in volume_identifiers:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.FileSystemScannerError(
u'Volume missing for identifier: {0:s}.'.format(volume_identifier))
volume_extent = volume.extents[0]
self._output_writer.Write(
u'{0:s}\t\t{1:d} (0x{1:08x})\t{2:d}\n'.format(
volume.identifier, volume_extent.offset, volume_extent.size))
self._output_writer.Write(u'\n')
while True:
self._output_writer.Write(
u'Please specify the identifier of the partition that should '
u'be processed:\nNote that you can abort with Ctrl+C.\n')
selected_volume_identifier = self._input_reader.Read()
selected_volume_identifier = selected_volume_identifier.strip()
if selected_volume_identifier in volume_identifiers:
break
self._output_writer.Write(
u'\n'
u'Unsupported partition identifier, please try again or abort '
u'with Ctrl+C.\n'
u'\n')
return selected_volume_identifier
def _GetVolumeTSKPartition(
self, volume_system_path_spec, partition_number=None,
partition_offset=None):
"""Determines the volume path specification.
Args:
volume_system_path_spec: the volume system path specification (instance
of dfvfs.PathSpec).
partition_number: Optional preferred partition number. The default is
None.
partition_offset: Optional preferred partition byte offset. The default
is None.
Returns:
The TSK partition volume path specification (instance of dfvfs.PathSpec)
or None if no supported partition was found.
Raises:
FileSystemScannerError: if the format of or within the source
is not supported.
RuntimeError: if the volume for a specific identifier cannot be
retrieved.
"""
volume_system = tsk_volume_system.TSKVolumeSystem()
volume_system.Open(volume_system_path_spec)
volume_identifiers = self._file_system_scanner.GetVolumeIdentifiers(
volume_system)
if not volume_identifiers:
logging.info(u'No supported partitions found.')
return
if partition_number is not None and partition_number > 0:
# Plaso uses partition numbers starting with 1 while dfvfs expects
# the volume index to start with 0.
partition_number -= 1
volume = volume_system.GetVolumeByIndex(partition_number)
if volume:
volume_extent = volume.extents[0]
self._partition_offset = volume_extent.offset
return path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
start_offset=volume_extent.offset,
parent=volume_system_path_spec.parent)
logging.warning(u'No such partition: {0:d}.'.format(partition_number))
if partition_offset is not None:
for volume in volume_system.volumes:
volume_extent = volume.extents[0]
if volume_extent.offset == partition_offset:
self._partition_offset = volume_extent.offset
return path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
start_offset=volume_extent.offset,
parent=volume_system_path_spec.parent)
logging.warning(
u'No such partition with offset: {0:d} (0x{0:08x}).'.format(
partition_offset))
if len(volume_identifiers) == 1:
volume = volume_system.GetVolumeByIdentifier(volume_identifiers[0])
if not volume:
raise RuntimeError(
u'Unable to retrieve volume by identifier: {0:s}'.format(
volume_identifiers[0]))
volume_extent = volume.extents[0]
self._partition_offset = volume_extent.offset
return path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION, location=u'/p1',
parent=volume_system_path_spec.parent)
try:
selected_volume_identifier = self._GetPartitionIdentifierFromUser(
volume_system, volume_identifiers)
except KeyboardInterrupt:
raise errors.FileSystemScannerError(u'File system scan aborted.')
location = u'/{0:s}'.format(selected_volume_identifier)
volume = volume_system.GetVolumeByIdentifier(selected_volume_identifier)
if not volume:
raise RuntimeError(
u'Unable to retrieve volume by identifier: {0:s}'.format(
selected_volume_identifier))
volume_extent = volume.extents[0]
self._partition_offset = volume_extent.offset
return path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION, location=location,
parent=volume_system_path_spec.parent)
def _GetVolumeVssStoreIdentifiers(
self, volume_system_path_spec, vss_stores=None):
"""Determines the VSS store identifiers.
Args:
volume_system_path_spec: the volume system path specification (instance
of dfvfs.PathSpec).
vss_stores: Optional list of preferred VSS store identifiers. The
default is None.
Returns:
None to indicate no sub volume system was found.
Raises:
FileSystemScannerError: if the format of or within the source
is not supported.
"""
volume_system = vshadow_volume_system.VShadowVolumeSystem()
volume_system.Open(volume_system_path_spec)
volume_identifiers = self._file_system_scanner.GetVolumeIdentifiers(
volume_system)
if not volume_identifiers:
return
try:
self._vss_stores = self._GetVssStoreIdentifiersFromUser(
volume_system, volume_identifiers, vss_stores=vss_stores)
except KeyboardInterrupt:
raise errors.FileSystemScannerError(u'File system scan aborted.')
return
def _GetVssStoreIdentifiersFromUser(
self, volume_system, volume_identifiers, vss_stores=None):
"""Asks the user to provide the VSS store identifiers.
Args:
volume_system: The volume system (instance of dfvfs.VShadowVolumeSystem).
volume_identifiers: List of allowed volume identifiers.
vss_stores: Optional list of preferred VSS store identifiers. The
default is None.
Returns:
The list of selected VSS store identifiers or None.
Raises:
FileSystemScannerError: if the source cannot be processed.
"""
normalized_volume_identifiers = []
for volume_identifier in volume_identifiers:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.FileSystemScannerError(
u'Volume missing for identifier: {0:s}.'.format(volume_identifier))
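# Note: VShadow volume identifiers take the form 'vss1', 'vss2', etc.;
# stripping the first 3 characters yields the numeric store identifier.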
try:
volume_identifier = int(volume.identifier[3:], 10)
normalized_volume_identifiers.append(volume_identifier)
except ValueError:
pass
if vss_stores:
if not set(vss_stores).difference(
normalized_volume_identifiers):
return vss_stores
print_header = True
while True:
if print_header:
self._output_writer.Write(
u'The following Volume Shadow Snapshots (VSS) were found:\n'
u'Identifier\tVSS store identifier\tCreation Time\n')
for volume_identifier in volume_identifiers:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.FileSystemScannerError(
u'Volume missing for identifier: {0:s}.'.format(
volume_identifier))
vss_identifier = volume.GetAttribute('identifier')
vss_creation_time = volume.GetAttribute('creation_time')
vss_creation_time = timelib.Timestamp.FromFiletime(
vss_creation_time.value)
vss_creation_time = timelib.Timestamp.CopyToIsoFormat(
vss_creation_time)
self._output_writer.Write(u'{0:s}\t\t{1:s}\t{2:s}\n'.format(
volume.identifier, vss_identifier.value, vss_creation_time))
self._output_writer.Write(u'\n')
print_header = False
self._output_writer.Write(
u'Please specify the identifier(s) of the VSS that should be '
u'processed:\nNote that a range of stores can be defined as: 3..5. '
u'Multiple stores can\nbe defined as: 1,3,5 (a list of comma '
u'separated values). Ranges and lists can\nalso be combined '
u'as: 1,3..5. The first store is 1. If no stores are specified\n'
u'none will be processed. You can abort with Ctrl+C.\n')
selected_vss_stores = self._input_reader.Read()
selected_vss_stores = selected_vss_stores.strip()
if not selected_vss_stores:
break
try:
selected_vss_stores = self._file_system_scanner.ParseVssStores(
selected_vss_stores)
except errors.BadConfigOption:
selected_vss_stores = []
if not set(selected_vss_stores).difference(normalized_volume_identifiers):
break
self._output_writer.Write(
u'\n'
u'Unsupported VSS identifier(s), please try again or abort with '
u'Ctrl+C.\n'
u'\n')
return selected_vss_stores
# TODO: have the frontend fill collection information gradually
# and set it as the last step of preprocessing?
def _PreprocessSetCollectionInformation(self, options, pre_obj):
"""Sets the collection information as part of the preprocessing.
Args:
options: the command line arguments (instance of argparse.Namespace).
pre_obj: the preprocess object (instance of PreprocessObject).
"""
collection_information = {}
collection_information['version'] = plaso.GetVersion()
collection_information['configured_zone'] = self._timezone
collection_information['file_processed'] = self._source_path
collection_information['output_file'] = self._storage_file_path
collection_information['protobuf_size'] = self._buffer_size
collection_information['parser_selection'] = getattr(
options, 'parsers', '(no list set)')
collection_information['preferred_encoding'] = self.preferred_encoding
collection_information['time_of_run'] = timelib.Timestamp.GetNow()
collection_information['parsers'] = self._parser_names
collection_information['preprocess'] = self._preprocess
if self._source_type == self._SOURCE_TYPE_DIRECTORY:
recursive = True
else:
recursive = False
collection_information['recursive'] = recursive
collection_information['debug'] = self._debug_mode
collection_information['vss parsing'] = bool(self._vss_stores)
if self._filter_expression:
collection_information['filter'] = self._filter_expression
filter_file = getattr(options, 'file_filter', None)
if filter_file:
if os.path.isfile(filter_file):
filters = []
with open(filter_file, 'rb') as fh:
for line in fh:
filters.append(line.rstrip())
collection_information['file_filter'] = ', '.join(filters)
collection_information['os_detected'] = getattr(options, 'os', 'N/A')
if self._source_type == self._SOURCE_TYPE_STORAGE_MEDIA_IMAGE:
collection_information['method'] = 'image processed'
collection_information['image_offset'] = self._partition_offset
else:
collection_information['method'] = 'OS collection'
if self._single_process_mode:
collection_information['runtime'] = 'single process mode'
else:
collection_information['runtime'] = 'multi process mode'
collection_information['workers'] = self._number_of_worker_processes
pre_obj.collection_information = collection_information
def _PreprocessSetParserFilter(self, options, pre_obj):
"""Sets the parser filter as part of the preprocessing.
Args:
options: the command line arguments (instance of argparse.Namespace).
pre_obj: The previously created preprocessing object (instance of
PreprocessObject) or None.
"""
# TODO: Make this more sane. Currently we only check against one
# possible version of Windows and, if that check fails, assume we
# should default to Windows 7. The same goes for other operating
# systems: no real checks or assumptions are made there.
# Also, this is done by default with no way for the user to turn off
# this behavior; a frontend parameter is needed that takes care of
# overriding it.
# TODO: refactor putting the filter into the options object.
# See if it can be passed in another way.
if not getattr(options, 'filter', None):
options.filter = u''
parser_filter_string = u''
# If no parser filter is set, let's use our best guess of the OS
# to build that list.
if not getattr(options, 'parsers', ''):
if hasattr(pre_obj, 'osversion'):
os_version = pre_obj.osversion.lower()
# TODO: Improve this detection, this should be more 'intelligent', since
# there are quite a lot of versions out there that would benefit from
# loading up the set of 'winxp' parsers.
if 'windows xp' in os_version:
parser_filter_string = 'winxp'
elif 'windows server 2000' in os_version:
parser_filter_string = 'winxp'
elif 'windows server 2003' in os_version:
parser_filter_string = 'winxp'
else:
parser_filter_string = 'win7'
if getattr(pre_obj, 'guessed_os', None):
if pre_obj.guessed_os == 'MacOSX':
parser_filter_string = u'macosx'
elif pre_obj.guessed_os == 'Linux':
parser_filter_string = 'linux'
if parser_filter_string:
options.parsers = parser_filter_string
logging.info(u'Parser filter expression changed to: {0:s}'.format(
options.parsers))
def _PreprocessSetTimezone(self, options, pre_obj):
"""Sets the timezone as part of the preprocessing.
Args:
options: the command line arguments (instance of argparse.Namespace).
pre_obj: The previously created preprocessing object (instance of
PreprocessObject) or None.
"""
if hasattr(pre_obj, 'time_zone_str'):
logging.info(u'Setting timezone to: {0:s}'.format(pre_obj.time_zone_str))
try:
pre_obj.zone = pytz.timezone(pre_obj.time_zone_str)
except pytz.UnknownTimeZoneError:
if hasattr(options, 'zone'):
logging.warning((
u'Unable to automatically configure timezone, falling back '
u'to the user supplied one: {0:s}').format(self._timezone))
pre_obj.zone = self._timezone
else:
logging.warning(u'TimeZone was not properly set, defaulting to UTC')
pre_obj.zone = pytz.utc
else:
# TODO: shouldn't the user be able to always override the timezone
# detection? Or do we need an input sanitization function?
pre_obj.zone = self._timezone
if not getattr(pre_obj, 'zone', None):
pre_obj.zone = self._timezone
def _ProcessSourceMultiProcessMode(self, options):
"""Processes the source in a multiple process.
Muliprocessing is used to start up separate processes.
Args:
options: the command line arguments (instance of argparse.Namespace).
"""
# TODO: replace by an option.
start_collection_process = True
self._number_of_worker_processes = getattr(options, 'workers', 0)
if self._number_of_worker_processes < 1:
# One worker for each "available" CPU (minus other processes).
# The number three here is derived from the fact that the engine starts
# up:
# + A collector process.
# + A storage process.
# If we want to utilize all CPUs on the system we therefore need to start
# a number of workers equal to the total number of CPUs minus 3 (these
# two processes plus the main process).
cpus = multiprocessing.cpu_count() - 3
if cpus <= self.MINIMUM_WORKERS:
cpus = self.MINIMUM_WORKERS
elif cpus >= self.MAXIMUM_WORKERS:
# Let's have a maximum amount of workers.
cpus = self.MAXIMUM_WORKERS
self._number_of_worker_processes = cpus
logging.info(u'Starting extraction in multi process mode.')
collection_queue = queue.MultiThreadedQueue()
storage_queue = queue.MultiThreadedQueue()
self._engine = engine.Engine(collection_queue, storage_queue)
self._engine.SetSource(
self._source_path_spec, resolver_context=self._resolver_context)
logging.debug(u'Starting preprocessing.')
pre_obj = self.PreprocessSource(options)
# TODO: move FindAllParsers to engine as a class method?
filter_query = getattr(options, 'parsers', '')
self._parsers = putils.FindAllParsers(
pre_obj=pre_obj, config=options, parser_filter_string=filter_query)
self._parser_names = [parser.parser_name for parser in self._parsers['all']]
self._PreprocessSetCollectionInformation(options, pre_obj)
output_module = getattr(options, 'output_module', None)
if output_module:
storage_writer = storage.BypassStorageWriter(
storage_queue, self._storage_file_path,
output_module_string=output_module, pre_obj=pre_obj)
else:
storage_writer = storage.StorageFileWriter(
storage_queue, self._storage_file_path, self._buffer_size, pre_obj)
logging.debug(u'Preprocessing done.')
if 'filestat' in self._parser_names:
include_directory_stat = True
else:
include_directory_stat = False
filter_file = getattr(options, 'file_filter', None)
if filter_file:
filter_find_specs = engine_utils.BuildFindSpecsFromFile(
filter_file, pre_obj=pre_obj)
else:
filter_find_specs = None
if start_collection_process:
resolver_context = context.Context()
else:
resolver_context = self._resolver_context
engine_proxy = None
rpc_proxy_client = None
if self._run_foreman:
worker_foreman = foreman.Foreman(
show_memory_usage=self._show_worker_memory_information)
# Start a proxy server (only needed when a foreman is started).
engine_proxy = rpc_proxy.StandardRpcProxyServer(os.getpid())
try:
engine_proxy.Open()
engine_proxy.RegisterFunction(
'signal_end_of_collection', worker_foreman.SignalEndOfProcessing)
proxy_thread = threading.Thread(
name='rpc_proxy', target=engine_proxy.StartProxy)
proxy_thread.start()
rpc_proxy_client = rpc_proxy.StandardRpcProxyClient(
engine_proxy.listening_port)
except errors.ProxyFailedToStart as exception:
proxy_thread = None
logging.error((
u'Unable to set up an RPC server for the engine with error '
u'{0:s}').format(exception))
else:
worker_foreman = None
self._collector = self._engine.CreateCollector(
include_directory_stat, vss_stores=self._vss_stores,
filter_find_specs=filter_find_specs, resolver_context=resolver_context)
if rpc_proxy_client:
self._collector.SetProxy(rpc_proxy_client)
self._DebugPrintCollector(options)
logging.info(u'Starting storage process.')
self._storage_process = multiprocessing.Process(
name='StorageThread', target=storage_writer.WriteEventObjects)
self._storage_process.start()
if start_collection_process:
logging.info(u'Starting collection process.')
self._collection_process = multiprocessing.Process(
name='Collection', target=self._collector.Collect)
self._collection_process.start()
logging.info(u'Starting worker processes to extract events.')
for worker_nr in range(self._number_of_worker_processes):
extraction_worker = self._CreateExtractionWorker(
worker_nr, options, pre_obj)
logging.debug(u'Starting worker: {0:d} process'.format(worker_nr))
worker_name = u'Worker_{0:d}'.format(worker_nr)
# TODO: Test to see if a process pool can be a better choice.
self._worker_processes[worker_name] = multiprocessing.Process(
name=worker_name, target=extraction_worker.Run)
self._worker_processes[worker_name].start()
pid = self._worker_processes[worker_name].pid
if worker_foreman:
worker_foreman.MonitorWorker(pid=pid, name=worker_name)
logging.info(u'Collecting and processing files.')
if self._collection_process:
while self._collection_process.is_alive():
self._collection_process.join(10)
# Check the worker status regularly while collection is still ongoing.
if worker_foreman:
worker_foreman.CheckStatus()
# TODO: We get a signal when collection is done, which might happen
# before the collection thread joins. Look at the option of speeding
# up the process of the collector stopping by potentially killing it.
else:
self._collector.Collect()
logging.info(u'Collection is done, waiting for processing to complete.')
if worker_foreman:
worker_foreman.SignalEndOfProcessing()
# Close the RPC server since the collection thread is done.
if engine_proxy:
# Close the proxy, free up resources so we can shut down the thread.
engine_proxy.Close()
if proxy_thread and proxy_thread.isAlive():
proxy_thread.join()
# Run through the running workers, one by one.
# This will go through a list of all active worker processes and check their
# status. If a worker has completed it will be removed from the list.
# The process will not wait longer than five seconds for each worker to
# complete; if more time passes it will simply check the worker's status
# and move on. This ensures that each worker process is monitored and its
# status is updated.
while self._worker_processes:
for process_name, process_obj in sorted(self._worker_processes.items()):
if worker_foreman:
worker_label = worker_foreman.GetLabel(
name=process_name, pid=process_obj.pid)
else:
worker_label = None
if not worker_label:
if process_obj.is_alive():
logging.info((
u'Process {0:s} [{1:d}] is not monitored by the foreman. Most '
u'likely due to a worker having completed its processing '
u'while waiting for another worker to complete.').format(
process_name, process_obj.pid))
logging.info(
u'Waiting for worker {0:s} to complete.'.format(process_name))
process_obj.join()
logging.info(u'Worker: {0:s} [{1:d}] has completed.'.format(
process_name, process_obj.pid))
del self._worker_processes[process_name]
continue
if process_obj.is_alive():
# Check status of worker.
worker_foreman.CheckStatus(label=worker_label)
process_obj.join(5)
# Note that we explicitly must test against exitcode 0 here since
# process.exitcode will be None if there is no exitcode.
elif process_obj.exitcode != 0:
logging.warning((
u'Worker process: {0:s} already exited with code: '
u'{1:d}.').format(process_name, process_obj.exitcode))
process_obj.terminate()
worker_foreman.TerminateProcess(label=worker_label)
else:
# Process is no longer alive, no need to monitor.
worker_foreman.StopMonitoringWorker(label=worker_label)
# Remove it from our list of active workers.
del self._worker_processes[process_name]
logging.info(u'Processing is done, waiting for storage to complete.')
self._engine.SignalEndOfInputStorageQueue()
self._storage_process.join()
logging.info(u'Storage is done.')
def _ProcessSourceSingleProcessMode(self, options):
"""Processes the source in a single process.
Args:
options: the command line arguments (instance of argparse.Namespace).
"""
logging.info(u'Starting extraction in single process mode.')
try:
self._StartSingleThread(options)
except Exception as exception:
# The tool should generally not be run in single process mode
# for reasons other than debugging. Hence the general error
# catching.
logging.error(u'An uncaught exception occurred: {0:s}.\n{1:s}'.format(
exception, traceback.format_exc()))
if self._debug_mode:
pdb.post_mortem()
def _StartSingleThread(self, options):
"""Starts everything up in a single process.
This should not normally be used, since running the tool in a single
process buffers up everything into memory until the storage is called.
To be clear: this starts up the collection and completes it before
calling the worker that extracts all EventObjects and stores them
in memory. When that is all done, the storage function is called to
drain the buffer. Hence the tool's excessive use of memory in this
mode, and the reason why its use is discouraged except for debugging
(mostly to get into the debugger).
This is therefore mostly useful during debugging sessions for some
limited parsing.
Args:
options: the command line arguments (instance of argparse.Namespace).
"""
collection_queue = queue.SingleThreadedQueue()
storage_queue = queue.SingleThreadedQueue()
self._engine = engine.Engine(collection_queue, storage_queue)
self._engine.SetSource(
self._source_path_spec, resolver_context=self._resolver_context)
logging.debug(u'Starting preprocessing.')
pre_obj = self.PreprocessSource(options)
# TODO: move FindAllParsers to engine as a class method?
filter_query = getattr(options, 'parsers', '')
self._parsers = putils.FindAllParsers(
pre_obj=pre_obj, config=options, parser_filter_string=filter_query)
self._parser_names = [parser.parser_name for parser in self._parsers['all']]
self._PreprocessSetCollectionInformation(options, pre_obj)
logging.debug(u'Preprocessing done.')
if 'filestat' in self._parser_names:
include_directory_stat = True
else:
include_directory_stat = False
filter_file = getattr(options, 'file_filter', None)
if filter_file:
filter_find_specs = engine_utils.BuildFindSpecsFromFile(
filter_file, pre_obj=pre_obj)
else:
filter_find_specs = None
self._collector = self._engine.CreateCollector(
include_directory_stat, vss_stores=self._vss_stores,
filter_find_specs=filter_find_specs,
resolver_context=self._resolver_context)
self._DebugPrintCollector(options)
logging.debug(u'Starting collection.')
self._collector.Collect()
logging.debug(u'Collection done.')
extraction_worker = self._CreateExtractionWorker(0, options, pre_obj)
logging.debug(u'Starting extraction worker.')
extraction_worker.Run()
logging.debug(u'Extraction worker done.')
self._engine.SignalEndOfInputStorageQueue()
output_module = getattr(options, 'output_module', None)
if output_module:
storage_writer = storage.BypassStorageWriter(
storage_queue, self._storage_file_path,
output_module_string=output_module, pre_obj=pre_obj)
else:
storage_writer = storage.StorageFileWriter(
storage_queue, self._storage_file_path,
buffer_size=self._buffer_size, pre_obj=pre_obj)
logging.debug(u'Starting storage.')
storage_writer.WriteEventObjects()
logging.debug(u'Storage done.')
self._resolver_context.Empty()
# Note that this function is not called during normal termination.
def CleanUpAfterAbort(self):
"""Signals the tool to stop running nicely after an abort."""
if self._single_process_mode and self._debug_mode:
logging.warning(u'Running in debug mode, set up debugger.')
pdb.post_mortem()
return
logging.warning(u'Stopping collector.')
if self._collector:
self._collector.SignalEndOfInput()
logging.warning(u'Stopping storage.')
self._engine.SignalEndOfInputStorageQueue()
# Kill the collection process.
if self._collection_process:
logging.warning(u'Terminating the collection process.')
self._collection_process.terminate()
try:
logging.warning(u'Waiting for workers to complete.')
for worker_name, worker_process in self._worker_processes.iteritems():
pid = worker_process.pid
logging.warning(u'Waiting for worker: {0:s} [PID {1:d}]'.format(
worker_name, pid))
# Kill the process, using a method appropriate for the platform.
if sys.platform.startswith('win'):
import ctypes
process_terminate = 1
handle = ctypes.windll.kernel32.OpenProcess(
process_terminate, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
try:
os.kill(pid, signal.SIGKILL)
except OSError as exception:
logging.error(
u'Unable to kill process {0:d} with error: {1:s}'.format(
pid, exception))
logging.warning(u'Worker: {0:d} CLOSED'.format(pid))
logging.warning(u'Workers completed.')
if self._storage_process:
logging.warning(u'Waiting for storage.')
self._storage_process.join()
logging.warning(u'Storage ended.')
logging.info(u'Exiting the tool.')
# Sometimes the main process will be unresponsive.
if not sys.platform.startswith('win'):
os.kill(os.getpid(), signal.SIGKILL)
except KeyboardInterrupt:
logging.warning(u'Terminating all processes.')
for process in self._worker_processes.itervalues():
process.terminate()
logging.warning(u'Worker processes terminated.')
if self._storage_process:
self._storage_process.terminate()
logging.warning(u'Storage terminated.')
# Sometimes the main process will be unresponsive.
if not sys.platform.startswith('win'):
os.kill(os.getpid(), signal.SIGKILL)
def GetSourceFileSystemSearcher(self):
"""Retrieves the file system searcher of the source.
Returns:
The file system searcher object (instance of dfvfs.FileSystemSearcher).
"""
return self._engine.GetSourceFileSystemSearcher(
resolver_context=self._resolver_context)
def GetSourcePathSpec(self):
"""Retrieves the source path specification.
Returns:
The source path specification (instance of dfvfs.PathSpec).
"""
return self._source_path_spec
def ParseOptions(self, options, source_option):
"""Parses the options and initializes the front-end.
Args:
options: the command line arguments (instance of argparse.Namespace).
source_option: the name of the source option.
Raises:
BadConfigOption: if the options are invalid.
"""
if not options:
raise errors.BadConfigOption(u'Missing options.')
self._source_path = getattr(options, source_option, None)
if not self._source_path:
raise errors.BadConfigOption(u'Missing source path.')
try:
self._source_path = unicode(self._source_path)
except UnicodeDecodeError as exception:
raise errors.BadConfigOption(
u'Unable to convert source path to Unicode with error: {0:s}.'.format(
exception))
self._source_path = os.path.abspath(self._source_path)
self._buffer_size = getattr(options, 'buffer_size', 0)
if self._buffer_size:
# TODO: turn this into a generic function that supports more size
# suffixes, both MB and MiB, and that does not allow 'm' as a valid
# indicator for MiB since m represents milli, not mega.
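# A sketch of such a generic parser (illustrative only; the suffix table
# and helper name are assumptions, not part of this module):
#
#   _SIZE_SUFFIXES = {
#       u'kb': 1000, u'mb': 1000 ** 2,
#       u'kib': 1024, u'mib': 1024 ** 2}
#
#   def _ParseByteSize(string):
#     string = string.lower()
#     for suffix, multiplier in _SIZE_SUFFIXES.items():
#       if string.endswith(suffix):
#         return int(string[:-len(suffix)], 10) * multiplier
#     return int(string, 10)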
try:
if self._buffer_size[-1].lower() == 'm':
self._buffer_size = int(self._buffer_size[:-1], 10)
self._buffer_size *= self._BYTES_IN_A_MIB
else:
self._buffer_size = int(self._buffer_size, 10)
except ValueError:
raise errors.BadConfigOption(
u'Invalid buffer size: {0:s}.'.format(self._buffer_size))
self._filter_expression = getattr(options, 'filter', None)
if self._filter_expression:
self._filter_object = pfilter.GetMatcher(self._filter_expression)
if not self._filter_object:
raise errors.BadConfigOption(
u'Invalid filter expression: {0:s}'.format(self._filter_expression))
filter_file = getattr(options, 'file_filter', None)
if filter_file and not os.path.isfile(filter_file):
raise errors.BadConfigOption(
u'No such collection filter file: {0:s}.'.format(filter_file))
self._debug_mode = getattr(options, 'debug', False)
timezone_string = getattr(options, 'timezone', None)
if timezone_string:
self._timezone = pytz.timezone(timezone_string)
self._single_process_mode = getattr(
options, 'single_process', False)
def PreprocessSource(self, options):
"""Preprocesses the source.
Args:
options: the command line arguments (instance of argparse.Namespace).
Returns:
The preprocessing object (instance of PreprocessObject).
"""
pre_obj = None
old_preprocess = getattr(options, 'old_preprocess', False)
if old_preprocess and os.path.isfile(self._storage_file_path):
# Check if the storage file contains a preprocessing object.
try:
with storage.StorageFile(
self._storage_file_path, read_only=True) as store:
storage_information = store.GetStorageInformation()
if storage_information:
logging.info(u'Using preprocessing information from a prior run.')
pre_obj = storage_information[-1]
self._preprocess = False
except IOError:
logging.warning(u'Storage file does not exist, running preprocess.')
if not pre_obj:
pre_obj = event.PreprocessObject()
if self._preprocess and self._source_type in [
self._SOURCE_TYPE_DIRECTORY, self._SOURCE_TYPE_STORAGE_MEDIA_IMAGE]:
platform = getattr(options, 'os', None)
try:
self._engine.PreprocessSource(
pre_obj, platform, resolver_context=self._resolver_context)
except IOError as exception:
logging.error(u'Unable to preprocess with error: {0:s}'.format(
exception))
return
self._PreprocessSetTimezone(options, pre_obj)
self._PreprocessSetParserFilter(options, pre_obj)
return pre_obj
def ProcessSource(self, options):
"""Processes the source.
Args:
options: the command line arguments (instance of argparse.Namespace).
Raises:
BadConfigOption: if the options are incorrect or not supported.
"""
try:
# TODO: move scanner into engine.
self.ScanSource(options)
except errors.FileSystemScannerError as exception:
# TODO: make this a processing error.
raise errors.BadConfigOption((
u'Unable to scan for a supported filesystem with error: {0:s}.\n'
u'Most likely the image format is not supported by the '
u'tool.').format(exception))
self.PrintOptions(options, self._source_path)
if self._partition_offset is None:
self._preprocess = False
else:
# If we're dealing with a storage media image always run pre-processing.
self._preprocess = True
self._CheckStorageFile(self._storage_file_path)
# No need to multi process when we're only processing a single file.
if self._source_type == self._SOURCE_TYPE_FILE:
self._single_process_mode = True
if self._single_process_mode:
self._ProcessSourceSingleProcessMode(options)
else:
self._ProcessSourceMultiProcessMode(options)
def ScanSource(self, options):
"""Scans the source path for volume and file systems.
This function sets the internal source path specification and source
type values.
Args:
options: the command line arguments (instance of argparse.Namespace).
"""
partition_number = getattr(options, 'partition_number', None)
if (partition_number is not None and
isinstance(partition_number, basestring)):
try:
partition_number = int(partition_number, 10)
except ValueError:
logging.warning(u'Invalid partition number: {0:s}.'.format(
partition_number))
partition_number = None
partition_offset = getattr(options, 'image_offset_bytes', None)
if (partition_offset is not None and
isinstance(partition_offset, basestring)):
try:
partition_offset = int(partition_offset, 10)
except ValueError:
logging.warning(u'Invalid image offset bytes: {0:s}.'.format(
partition_offset))
partition_offset = None
if partition_offset is None and hasattr(options, 'image_offset'):
image_offset = getattr(options, 'image_offset')
bytes_per_sector = getattr(options, 'bytes_per_sector', 512)
if isinstance(image_offset, basestring):
try:
image_offset = int(image_offset, 10)
except ValueError:
logging.warning(u'Invalid image offset: {0:s}.'.format(image_offset))
image_offset = None
if isinstance(bytes_per_sector, basestring):
try:
bytes_per_sector = int(bytes_per_sector, 10)
except ValueError:
logging.warning(u'Invalid bytes per sector: {0:s}.'.format(
bytes_per_sector))
bytes_per_sector = 512
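# Convert the offset in sectors into an offset in bytes.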
if image_offset:
partition_offset = image_offset * bytes_per_sector
vss_stores = getattr(options, 'vss_stores', None)
if vss_stores:
vss_stores = self._file_system_scanner.ParseVssStores(vss_stores)
self._source_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=self._source_path)
# Note that os.path.isfile() can return false when source_path points
# to a device file.
if os.path.isdir(self._source_path):
self._source_type = self._SOURCE_TYPE_DIRECTORY
return
scan_path_spec = self._source_path_spec
path_spec = self._file_system_scanner.ScanForStorageMediaImage(
scan_path_spec)
if path_spec:
scan_path_spec = path_spec
# In case we did not find a storage media image type we keep looking
# since the RAW storage media image type is detected by its content.
while True:
path_spec = self._file_system_scanner.ScanForVolumeSystem(scan_path_spec)
if not path_spec:
break
scan_path_spec = path_spec
if scan_path_spec.type_indicator in [
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION]:
path_spec = self._GetVolumeTSKPartition(
scan_path_spec, partition_number=partition_number,
partition_offset=partition_offset)
if not path_spec:
break
scan_path_spec = path_spec
elif scan_path_spec.type_indicator in [
dfvfs_definitions.TYPE_INDICATOR_VSHADOW]:
path_spec = self._GetVolumeVssStoreIdentifiers(
scan_path_spec, vss_stores=vss_stores)
# Trace back to the parent volume path specification.
scan_path_spec = scan_path_spec.parent
break
# In case we did not find a volume system type we keep looking
# since we could be dealing with a storage media image that contains
# a single volume.
path_spec = self._file_system_scanner.ScanForFileSystem(scan_path_spec)
if path_spec:
self._source_path_spec = path_spec
self._source_type = self._SOURCE_TYPE_STORAGE_MEDIA_IMAGE
if self._partition_offset is None:
self._partition_offset = 0
else:
if scan_path_spec.type_indicator in [
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION]:
logging.warning(
u'Unsupported file system, falling back to single file mode.')
self._source_type = self._SOURCE_TYPE_FILE
def SetStorageFile(self, storage_file_path):
"""Sets the storage file path.
Args:
storage_file_path: The path of the storage file.
"""
self._storage_file_path = storage_file_path
def SetRunForeman(self, run_foreman=True):
"""Sets a flag indicating whether the frontend should monitor workers.
Args:
run_foreman: A boolean (defaults to true) that indicates whether or not
the frontend should start a foreman that monitors workers.
"""
self._run_foreman = run_foreman
def SetShowMemoryInformation(self, show_memory=True):
"""Sets a flag telling the worker monitor to show memory information.
Args:
show_memory: A boolean (defaults to True) that indicates whether or not
the foreman should include memory information as part of
the worker monitoring.
"""
self._show_worker_memory_information = show_memory
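# For reference, the VSS stores expression accepted by this front-end uses
# comma separated values and '..' ranges, e.g. u'1,3..5' selects stores
# [1, 3, 4, 5]. A minimal parsing sketch (illustrative only; the actual
# implementation lives in the file system scanner's ParseVssStores):
#
#   def _ParseStoresExpression(expression):
#     stores = set()
#     for part in expression.split(u','):
#       if u'..' in part:
#         first, last = part.split(u'..')
#         stores.update(range(int(first, 10), int(last, 10) + 1))
#       else:
#         stores.add(int(part, 10))
#     return sorted(stores)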
class AnalysisFrontend(Frontend):
"""Class that implements an analysis front-end."""
def __init__(self, input_reader, output_writer):
"""Initializes the front-end object.
Args:
input_reader: the input reader (instance of FrontendInputReader).
output_writer: the output writer (instance of FrontendOutputWriter).
"""
super(AnalysisFrontend, self).__init__(input_reader, output_writer)
self._storage_file_path = None
def AddStorageFileOptions(self, argument_group):
"""Adds the storage file options to the argument group.
Args:
argument_group: The argparse argument group (instance of
argparse._ArgumentGroup) or argument parser (instance of
argparse.ArgumentParser).
"""
argument_group.add_argument(
'storage_file', metavar='STORAGE_FILE', action='store', nargs='?',
type=unicode, default=None, help='The path of the storage file.')
def OpenStorageFile(self, read_only=True):
"""Opens the storage file.
Args:
read_only: Optional boolean value to indicate the storage file should
be opened in read-only mode. The default is True.
Returns:
The storage file object (instance of StorageFile).
"""
return storage.StorageFile(self._storage_file_path, read_only=read_only)
def ParseOptions(self, options):
"""Parses the options and initializes the front-end.
Args:
options: the command line arguments (instance of argparse.Namespace).
Raises:
BadConfigOption: if the options are invalid.
"""
if not options:
raise errors.BadConfigOption(u'Missing options.')
self._storage_file_path = getattr(options, 'storage_file', None)
if not self._storage_file_path:
raise errors.BadConfigOption(u'Missing storage file.')
if not os.path.isfile(self._storage_file_path):
raise errors.BadConfigOption(
u'No such storage file {0:s}.'.format(self._storage_file_path))
import os,re,copy
from waflib import Configure, TaskGen, Task, Logs, Errors
def configure(conf):
conf.env['CDS_LIB_PATH'] = conf.bldnode.make_node('cds.lib').abspath()
conf.env['CDS_HDLVAR_PATH'] = conf.bldnode.make_node('hdl.var').abspath()
conf.env['CDS_LIBS_FLAT'] = {}
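# Maps HDL file extensions to Cadence views for the hdl.var VIEW_MAP
# written later; the '+' entry maps the remaining files to 'module'.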
conf.env['CDS_HDL_VIEW_MAP'] = {'.vams': 'ams','+':'module'}
#@Configure.conf
def parse_cds_libs(tgen):
# Here, we check if all the libraries given in CDS_LIBS
# and all the include paths defined in CDS_LIB_INCLUDES
# exist and merge them into CDS_LIBS_FLAT.
found_absolute_path = False
try:
for key,value in tgen.bld.env['CDS_LIBS'].iteritems():
tgen.bld.env['CDS_LIBS_FLAT'][key] = value
if os.path.isabs(value):
found_absolute_path = True
if not tgen.bld.root.find_dir(value):
tgen.bld.fatal('Cadence library '+key+' not found in '+value+'.')
else:
if not tgen.path.find_dir(value):
tgen.bld.fatal('Cadence library '+key+' not found in '+value+'.')
Logs.info('Checking for environment variable CDS_LIBS...Found '+str(len(tgen.bld.env['CDS_LIBS_FLAT']))+' libraries.')
except AttributeError:
Logs.warn('Checking for environment variable CDS_LIBS...Found None')
if found_absolute_path:
Logs.warn('Defining absolute paths in conf.env.CDS_LIBS can lead to undefined behavior, especially when doing so for your worklib!')
try:
# copy cds_includes
my_includes = copy.copy(tgen.env['CDS_LIB_INCLUDES'])
# start processing stack
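# Note: popping from an empty stack raises IndexError, which is caught
# below and terminates the include processing.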
include = my_includes.pop()
while include:
if not os.path.exists(os.path.expandvars(include)):
tgen.bld.fatal('Cadence library include '+include+' does not exist.')
else:
with open(os.path.expandvars(include),'r') as include_file:
for line in include_file.readlines():
if re.match(r"^\s*\#",line):
continue
define = re.search('DEFINE\s+(\w+)\s+([\.\w\$\/]+)',line)
if define:
tgen.bld.env['CDS_LIBS_FLAT'][define.group(1)] = os.path.expandvars(define.group(2))
else:
inc = re.search('INCLUDE\s+([\.\w\$\/]+)',line)
if inc:
my_includes.append(inc.group(1))
include = my_includes.pop()
except AttributeError:
pass
except IndexError:
pass
# if CDS_LIBS has not been defined, define it
if len(tgen.bld.env['CDS_LIBS']) == 0:
tgen.bld.env['CDS_LIBS'] = {}
# if a worklib hasn't been specified, define the default one
if not 'CDS_WORKLIB' in tgen.bld.env:
tgen.bld.env['CDS_WORKLIB'] = 'worklib'
worklib = tgen.bld.path.get_bld().make_node('worklib')
if not os.path.isdir(worklib.abspath()):
worklib.mkdir()
if not 'worklib' in tgen.env['CDS_LIBS_FLAT']:
try:
tgen.bld.env['CDS_LIBS']['worklib'] = worklib.path_from(tgen.bld.path)
except TypeError:
tgen.bld.env['CDS_LIBS'] = {}
tgen.bld.env['CDS_LIBS']['worklib'] = worklib.path_from(tgen.bld.path)
tgen.bld.env['CDS_LIBS_FLAT']['worklib'] = worklib.path_from(tgen.bld.path)
elif not 'worklib' in tgen.bld.env['CDS_LIBS']:
try:
tgen.bld.env['CDS_LIBS']['worklib'] = worklib.path_from(tgen.bld.path)
except TypeError:
tgen.bld.env['CDS_LIBS'] = {}
tgen.bld.env['CDS_LIBS']['worklib'] = worklib.path_from(tgen.bld.path)
@TaskGen.taskgen_method
def get_cellview_path(self,libcellview,create_if_not_exists=False):
# get an instance of the root node (ugly)
up = "../"
for i in range(self.path.height()-1):
up += "../"
rootnode = self.path.find_dir(up)
m0 = re.search(r'(\w+)\.(\w+):(\w+)', libcellview)
if m0:
lib = m0.group(1)
cell = m0.group(2)
view = m0.group(3)
if not self.env.CDS_LIBS_FLAT:
Logs.error('Please specify the environment variable CDS_LIBS and make sure to include module cadence_base.')
return
try:
return_path = None
if os.path.isabs(self.env.CDS_LIBS_FLAT[lib]):
return_path = rootnode.find_dir(self.env.CDS_LIBS_FLAT[lib]+'/'+cell+'/'+view+'/')
else:
return_path = self.path.find_dir(self.env.CDS_LIBS_FLAT[lib]+'/'+cell+'/'+view+'/')
if not return_path:
if create_if_not_exists:
Logs.warn('Path for cellview \''+libcellview+'\' not found, creating it.')
if os.path.isabs(self.env.CDS_LIBS_FLAT[lib]):
return_path = rootnode.make_node(self.env.CDS_LIBS_FLAT[lib]+'/'+cell+'/'+view+'/')
return_path.mkdir()
else:
return_path = self.path.make_node(self.env.CDS_LIBS_FLAT[lib]+'/'+cell+'/'+view+'/')
return_path.mkdir()
else:
raise Errors.WafError('Path for cellview \''+libcellview+'\' not found in cadence_base.py')
return return_path
except TypeError:
Logs.error('Please specify the environment variable CDS_LIBS and make sure to include module cadence_base.')
def split_cellview(cellview):
if cellview.find('.') == -1 or cellview.find(':') == -1:
raise Errors.WafError('Please specify a cellview of the form Lib.Cell:View with the \'view\' attribute.')
lib = None
cell = None
view = None
(lib,rest) = cellview.split(".")
(cell,view) = rest.split(":")
return (lib,cell,view)
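# Example (illustrative): split_cellview('mylib.mycell:schematic')
# returns ('mylib', 'mycell', 'schematic').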
@TaskGen.taskgen_method
def get_cadence_lib_cell_view_from_cellview(self):
lib = None
cell = None
view = None
try:
(lib, cell, view) = split_cellview(self.cellview)
except AttributeError:
raise Errors.WafError('For taskgen "'+self.name+'", you need to specify a parameter "cellview" in the form of lib.cell:view')
return (lib,cell,view)
class cdsWriteCdsLibs(Task.Task):
def run(self):
cdslib = open(self.outputs[0].abspath(),'w')
libdefs = open(self.outputs[1].abspath(),'w')
try:
for key,value in self.generator.bld.env['CDS_LIBS'].iteritems():
if os.path.isabs(value):
cdslib.write('DEFINE '+key+' '+value+"\n")
libdefs.write('DEFINE '+key+' '+value+"\n")
else:
value = self.generator.path.find_dir(value)
cdslib.write('DEFINE '+key+' '+value.abspath()+"\n")
libdefs.write('DEFINE '+key+' '+value.abspath()+"\n")
for value in self.generator.bld.env['CDS_LIB_INCLUDES']:
if os.path.isabs(os.path.expandvars(value)):
cdslib.write('INCLUDE '+value+"\n")
libdefs.write('INCLUDE '+value+"\n")
else:
path = self.generator.path.find_node(os.path.expandvars(value)).abspath()
cdslib.write('INCLUDE '+path+"\n")
libdefs.write('INCLUDE '+path+"\n")
except AttributeError:
Logs.warn('Not writing anything to cds.lib because env[\'CDS_LIBS\'] is not defined.')
cdslib.close()
libdefs.close()
hdlvar = open(self.outputs[2].abspath(),'w')
hdlvar.write('DEFINE WORK '+self.env.CDS_WORKLIB+'\n')
hdlvar.write('DEFINE VIEW_MAP ( \\\n')
for k,v in self.generator.bld.env['CDS_HDL_VIEW_MAP'].iteritems():
hdlvar.write(k+' => '+v+', \\\n')
hdlvar.write(')\n')
hdlvar.close()
return 0
@TaskGen.taskgen_method
def cadence_get_cdslib_base(self):
return self.bld.bldnode
@TaskGen.feature("cds_write_libs")
def write_cds_lib(self):
parse_cds_libs(self)
# write cds.lib file to toplevel directory
cds_lib_path = self.cadence_get_cdslib_base().make_node('cds.lib')
lib_defs_path = self.cadence_get_cdslib_base().make_node('lib.defs')
hdl_var_path = self.cadence_get_cdslib_base().make_node('hdl.var')
# create a copy task
t = self.create_task('cdsWriteCdsLibs', None, [cds_lib_path, lib_defs_path, hdl_var_path])
@Configure.conf
def check_cds_libs(self,*k,**kw):
self.env['CDS_LIBS'] = {}
for key,value in kw.iteritems():
if isinstance(value, str):
# DEFINE
if os.path.isdir(value):
libpath = self.path.find_dir(value)
self.env['CDS_LIBS'][key] = libpath.path_from(self.path)
else:
self.fatal('Directory '+value+' not found.')
elif isinstance(value, list):
# INCLUDE
# TODO: implement
pass
def fix_verilog_name(name):
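# Verilog identifiers must not start with a digit; such names are rewritten
# using the escaped identifier syntax (leading backslash, trailing space),
# e.g. fix_verilog_name('2to1mux') returns '\2to1mux ' (a literal backslash
# followed by the name and a space).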
name = re.sub(r'^(\d)(.+)', r'\\\1\2 ',name)
return name
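# Illustrative example: Verilog identifiers must not start with a digit, so
# such names are turned into escaped identifiers (one leading backslash,
# one trailing space):
#   fix_verilog_name('2to1mux') -> '\\2to1mux ' (escaped)
#   fix_verilog_name('nand2')   -> 'nand2'      (unchanged)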
def fix_verilog_name_cellview(cellview):
ret = split_cellview(cellview)
return ret[0]+'.'+fix_verilog_name(ret[1])+':'+ret[2]
class cdsWorklibTask(Task.Task):
def run(self):
run_str = """mkdir -p ${TGT[0].parent.abspath()} && echo '<?xml version="1.0"?>
<Library DMSystem="oaDMFileSys">
<oaDMFileSys libReadOnly="No"
origFileSystem="Unix"/>
</Library>' >> ${TGT[0].abspath()}"""
(f, dvars) = Task.compile_fun(run_str, False)
return f(self)
@TaskGen.taskgen_method
def check_create_worklib_task(self,lib):
worklib = self.path.get_bld().find_node(lib+'/.oalib')
	if not worklib and not getattr(self,'worklib_task',None):
		worklib = self.path.get_bld().make_node(lib+'/.oalib')
		# remember the task on the task generator so repeated calls do not
		# create duplicate tasks
		self.worklib_task = self.create_task('cdsWorklibTask',None,worklib.get_bld())
		return self.worklib_task
else:
return None
# vim: noexpandtab
"""Abstractions for the head(s) of a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib import framework as framework_lib
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey as mkey
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import losses as losses_lib
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
class Head(object):
"""Interface for the head/top of a model.
Given logits (or output of a hidden layer), a Head knows how to compute
predictions, loss, default metric and export signature. It is meant to,
1) Simplify writing model_fn and to make model_fn more configurable
2) Support wide range of machine learning models. Since most heads can work
with logits, they can support DNN, RNN, Wide, Wide&Deep,
Global objectives, Gradient boosted trees and many other types
of machine learning models.
  3) To allow users to seamlessly switch between 1 to n heads for multi
objective learning (See _MultiHead implementation for more details)
Common usage:
Here is simplified model_fn to build a multiclass DNN model.
```python
def _my_dnn_model_fn(features, labels, mode, params, config=None):
# Optionally your callers can pass head to model_fn as a param.
head = tf.contrib.learn.multi_class_head(...)
input = tf.contrib.layers.input_from_feature_columns(features, ...)
last_hidden_layer_out = tf.contrib.layers.stack(
input, tf.contrib.layers.fully_connected, [1000, 500])
logits = tf.contrib.layers.fully_connected(
last_hidden_layer_out, head.logits_dimension, activation_fn=None)
def _train_op_fn(loss):
return optimizer.minimize(loss)
return head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_op_fn,
logits=logits,
scope=...)
```
Most heads also support logits_input which is typically the output of the last
hidden layer. Some heads (like heads responsible for candidate sampling or
hierarchical softmax) intrinsically will not support logits and you have
to pass logits_input. Here is a common usage,
```python
return head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_op_fn,
logits_input=last_hidden_layer_out,
scope=...)
  ```
  There are cases where computing and applying gradients cannot be
  meaningfully captured with the train_op_fn we support (for example, with a
  sync optimizer). In such cases, you can take the responsibility on your
  own. Here is a common use case,
```python
model_fn_ops = head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=tf.contrib.learn.no_op_train_fn,
logits=logits,
scope=...)
if mode == tf.contrib.learn.ModeKeys.TRAIN:
optimizer = ...
sync = tf.train.SyncReplicasOptimizer(opt=optimizer, ...)
update_op = tf.contrib.layers.optimize_loss(optimizer=sync,
loss=model_fn_ops.loss, ...)
hooks = [sync.make_session_run_hook(is_chief)]
  ... update train_op and hooks in ModelFnOps and return
```
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def logits_dimension(self):
"""Size of the last dimension of the logits `Tensor`.
Typically, logits is of shape `[batch_size, logits_dimension]`.
Returns:
The expected size of the `logits` tensor.
"""
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""Returns `ModelFnOps` that a model_fn can return.
Please note that,
+ Exactly one of `logits` and `logits_input` must be provided.
+ All args must be passed via name.
Args:
features: Input `dict` of `Tensor` objects.
mode: Estimator's `ModeKeys`.
labels: Labels `Tensor`, or `dict` of same.
train_op_fn: Function that takes a scalar loss `Tensor` and returns an op
to optimize the model with the loss. This is used in TRAIN mode and
must not be None. None is allowed in other modes. If you want to
optimize loss yourself you can pass `no_op_train_fn` and then use
ModeFnOps.loss to compute and apply gradients.
logits: logits `Tensor` to be used by the head.
logits_input: `Tensor` from which to build logits, often needed when you
don't want to compute the logits. Typically this is the activation of
the last hidden layer in a DNN. Some heads (like the ones responsible
for candidate sampling) intrinsically avoid computing full logits and
      only accept logits_input.
scope: Optional scope for `variable_scope`.
Returns:
An instance of `ModelFnOps`.
Raises:
ValueError: If `mode` is not recognized.
ValueError: If neither or both of `logits` and `logits_input` is provided.
"""
raise NotImplementedError("Calling an abstract method.")
def regression_head(label_name=None,
weight_column_name=None,
label_dimension=1,
enable_centered_bias=False,
head_name=None):
"""Creates a `Head` for linear regression.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be suffixed by `"/" + head_name` and the default variable scope
will be `head_name`.
Returns:
An instance of `Head` for linear regression.
"""
return _RegressionHead(
label_name=label_name,
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
loss_fn=_mean_squared_loss,
link_fn=array_ops.identity)
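# Illustrative sketch (added for exposition, not part of the original
# module): a minimal model_fn wired around `regression_head`. The feature
# key "x", the layer sizes and the optimizer choice are assumptions.
def _example_regression_model_fn(features, labels, mode, params):
  del params  # Unused in this sketch.
  head = regression_head(label_dimension=1)
  hidden = layers_lib.fully_connected(features["x"], 16)
  # The head dictates the size of the final linear layer.
  logits = layers_lib.fully_connected(
      hidden, head.logits_dimension, activation_fn=None)
  def _train_op_fn(loss):
    # A real model_fn would also pass global_step to minimize().
    return training.GradientDescentOptimizer(0.1).minimize(loss)
  return head.create_model_fn_ops(
      features=features, labels=labels, mode=mode,
      train_op_fn=_train_op_fn, logits=logits)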
def poisson_regression_head(label_name=None,
weight_column_name=None,
label_dimension=1,
enable_centered_bias=False,
head_name=None):
"""Creates a `Head` for poisson regression.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be suffixed by `"/" + head_name` and the default variable scope
will be `head_name`.
Returns:
An instance of `Head` for poisson regression.
"""
return _RegressionHead(
label_name=label_name,
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
loss_fn=_poisson_loss,
link_fn=math_ops.exp)
# TODO(zakaria): Consider adding a _RegressionHead for logistic_regression
def multi_class_head(n_classes,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,
metric_class_ids=None,
loss_fn=None,
label_keys=None):
"""Creates a `Head` for multi class single label classification.
The Head uses softmax cross entropy loss.
This head expects to be fed integer labels specifying the class index. But
if `label_keys` is specified, then labels must be strings from this
vocabulary, and the predicted classes will be strings from the same
vocabulary.
Args:
n_classes: Integer, number of classes, must be >= 2
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be suffixed by `"/" + head_name` and the default variable scope
will be `head_name`.
thresholds: thresholds for eval metrics, defaults to [.5]
metric_class_ids: List of class IDs for which we should report per-class
metrics. Must all be in the range `[0, n_classes)`. Invalid if
`n_classes` is 2.
loss_fn: Optional function that takes (`labels`, `logits`, `weights`) as
parameter and returns a weighted scalar loss. `weights` should be
optional. See `tf.losses`
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
Returns:
An instance of `Head` for multi class classification.
Raises:
ValueError: if `n_classes` is < 2.
ValueError: If `metric_class_ids` is provided when `n_classes` is 2.
ValueError: If `len(label_keys) != n_classes`.
"""
if (n_classes is None) or (n_classes < 2):
raise ValueError("n_classes must be > 1 for classification: %s." %
n_classes)
if loss_fn:
_verify_loss_fn_args(loss_fn)
loss_fn = _wrap_custom_loss_fn(loss_fn) if loss_fn else None
if n_classes == 2:
if metric_class_ids:
raise ValueError("metric_class_ids invalid for n_classes==2.")
if label_keys:
raise ValueError("label_keys is not supported for n_classes=2.")
return _BinaryLogisticHead(
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds,
loss_fn=loss_fn)
return _MultiClassHead(
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds,
metric_class_ids=metric_class_ids,
loss_fn=loss_fn,
label_keys=label_keys)
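# Illustrative sketch (added for exposition): a three-class head with a
# string label vocabulary. With `label_keys` set, labels fed to
# create_model_fn_ops must be strings from this vocabulary and predicted
# classes are strings from the same vocabulary. The class names are
# hypothetical.
def _example_multi_class_head():
  return multi_class_head(n_classes=3, label_keys=["cat", "dog", "bird"])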
def binary_svm_head(
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,):
"""Creates a `Head` for binary classification with SVMs.
The head uses binary hinge loss.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be suffixed by `"/" + head_name` and the default variable scope
will be `head_name`.
thresholds: thresholds for eval metrics, defaults to [.5]
Returns:
An instance of `Head` for binary classification with SVM.
"""
return _BinarySvmHead(
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds)
def multi_label_head(n_classes,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,
metric_class_ids=None,
loss_fn=None):
"""Creates a Head for multi label classification.
Multi-label classification handles the case where each example may have zero
or more associated labels, from a discrete set. This is distinct from
`multi_class_head` which has exactly one label from a discrete set.
This head by default uses sigmoid cross entropy loss, which expects as input
a multi-hot tensor of shape `(batch_size, num_classes)`.
Args:
n_classes: Integer, number of classes, must be >= 2
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be suffixed by `"/" + head_name` and the default variable scope
will be `head_name`.
thresholds: thresholds for eval metrics, defaults to [.5]
metric_class_ids: List of class IDs for which we should report per-class
metrics. Must all be in the range `[0, n_classes)`.
loss_fn: Optional function that takes (`labels`, `logits`, `weights`) as
parameter and returns a weighted scalar loss. `weights` should be
optional. See `tf.losses`
Returns:
An instance of `Head` for multi label classification.
Raises:
ValueError: If n_classes is < 2
ValueError: If loss_fn does not have expected signature.
"""
if n_classes < 2:
raise ValueError("n_classes must be > 1 for classification.")
if loss_fn:
_verify_loss_fn_args(loss_fn)
return _MultiLabelHead(
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds,
metric_class_ids=metric_class_ids,
loss_fn=_wrap_custom_loss_fn(loss_fn) if loss_fn else None)
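# Illustrative sketch (added for exposition): a multi-label head over four
# classes. Labels are multi-hot, e.g. [[1, 0, 1, 0]] marks classes 0 and 2
# as present for one example; a `SparseTensor` of class indices is also
# accepted and converted to this indicator form internally.
def _example_multi_label_head():
  return multi_label_head(n_classes=4)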
def loss_only_head(loss_fn, head_name=None):
"""Creates a Head that contains only loss terms.
Loss only head holds additional loss terms to be added to other heads and
usually represents additional regularization terms in the objective function.
Args:
loss_fn: a function that takes no argument and returns a list of
scalar tensors.
head_name: a name for the head.
Returns:
An instance of `Head` to hold the additional losses.
"""
return _LossOnlyHead(loss_fn, head_name=head_name)
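# Illustrative sketch (added for exposition): a head contributing only an L2
# regularization term, meant to be combined with other heads via
# `multi_head`. Walking the trainable-variables collection and the 0.01
# scale are assumptions.
def _example_regularization_head():
  def _l2_loss_fn():
    return [0.01 * nn.l2_loss(v)
            for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)]
  return loss_only_head(_l2_loss_fn, head_name="l2_regularizer")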
def multi_head(heads, loss_weights=None):
"""Creates a MultiHead stemming from same logits/hidden layer.
Args:
heads: list of Head objects.
loss_weights: optional list of weights to be used to merge losses from
each head. All losses are weighted equally if not provided.
Returns:
    An instance of `Head` that merges multiple heads.
Raises:
ValueError: if heads and loss_weights have different size.
"""
if loss_weights:
if len(loss_weights) != len(heads):
raise ValueError("heads and loss_weights must have same size")
def _weighted_loss_merger(losses):
if loss_weights:
if len(losses) != len(loss_weights):
raise ValueError("losses and loss_weights must have same size")
weighted_losses = []
for loss, weight in zip(losses, loss_weights):
weighted_losses.append(math_ops.multiply(loss, weight))
return math_ops.add_n(weighted_losses)
else:
return math_ops.add_n(losses)
return _MultiHead(heads, loss_merger=_weighted_loss_merger)
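# Illustrative sketch (added for exposition): two named single heads merged
# with unequal loss weights; the merged loss is 0.7 * loss("price") +
# 0.3 * loss("category"). The head names are assumptions.
def _example_multi_head():
  return multi_head(
      [regression_head(head_name="price"),
       multi_class_head(n_classes=3, head_name="category")],
      loss_weights=[0.7, 0.3])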
def no_op_train_fn(loss):
del loss
return control_flow_ops.no_op()
class _SingleHead(Head):
"""Interface for a single head/top of a model."""
__metaclass__ = abc.ABCMeta
def __init__(
self, problem_type, logits_dimension, label_name=None,
weight_column_name=None, head_name=None):
if problem_type is None:
raise ValueError("Invalid problem_type %s." % problem_type)
if logits_dimension is None or logits_dimension < 1:
raise ValueError("Invalid logits_dimension %s." % logits_dimension)
self._problem_type = problem_type
self._logits_dimension = logits_dimension
self._label_name = label_name
self._weight_column_name = weight_column_name
self._head_name = head_name
@property
def logits_dimension(self):
return self._logits_dimension
@property
def label_name(self):
return self._label_name
@property
def weight_column_name(self):
return self._weight_column_name
@property
def head_name(self):
return self._head_name
def _create_output_alternatives(self, predictions):
"""Creates output alternative for the Head.
Args:
predictions: a dict of {tensor_name: Tensor}, where 'tensor_name' is a
symbolic name for an output Tensor possibly but not necessarily taken
from `PredictionKey`, and 'Tensor' is the corresponding output Tensor
itself.
Returns:
`dict` of {submodel_name: (problem_type, {tensor_name: Tensor})}, where
'submodel_name' is a submodel identifier that should be consistent across
the pipeline (here likely taken from the head_name),
'problem_type' is a `ProblemType`,
'tensor_name' is a symbolic name for an output Tensor possibly but not
necessarily taken from `PredictionKey`, and
'Tensor' is the corresponding output Tensor itself.
"""
return {self._head_name: (self._problem_type, predictions)}
# TODO(zakaria): use contrib losses.
def _mean_squared_loss(labels, logits, weights=None):
with ops.name_scope(None, "mean_squared_loss", (logits, labels)) as name:
logits = ops.convert_to_tensor(logits)
labels = ops.convert_to_tensor(labels)
# To prevent broadcasting inside "-".
if len(labels.get_shape()) == 1:
labels = array_ops.expand_dims(labels, dim=(1,))
# TODO(zakaria): make sure it does not recreate the broadcast bug.
if len(logits.get_shape()) == 1:
logits = array_ops.expand_dims(logits, dim=(1,))
logits.get_shape().assert_is_compatible_with(labels.get_shape())
loss = math_ops.square(logits - math_ops.to_float(labels), name=name)
return _compute_weighted_loss(loss, weights)
def _poisson_loss(labels, logits, weights=None):
"""Computes poisson loss from logits."""
with ops.name_scope(None, "_poisson_loss", (logits, labels)) as name:
logits = ops.convert_to_tensor(logits)
labels = ops.convert_to_tensor(labels)
# To prevent broadcasting inside "-".
if len(labels.get_shape()) == 1:
labels = array_ops.expand_dims(labels, dim=(1,))
# TODO(zakaria): make sure it does not recreate the broadcast bug.
if len(logits.get_shape()) == 1:
logits = array_ops.expand_dims(logits, dim=(1,))
logits.get_shape().assert_is_compatible_with(labels.get_shape())
loss = nn.log_poisson_loss(labels, logits, compute_full_loss=True,
name=name)
return _compute_weighted_loss(loss, weights)
def _logits(logits_input, logits, logits_dimension):
"""Validate logits args, and create `logits` if necessary.
Exactly one of `logits_input` and `logits` must be provided.
Args:
logits_input: `Tensor` input to `logits`.
logits: `Tensor` output.
logits_dimension: Integer, last dimension of `logits`. This is used to
create `logits` from `logits_input` if `logits` is `None`; otherwise, it's
used to validate `logits`.
Returns:
`logits` `Tensor`.
Raises:
ValueError: if neither or both of `logits` and `logits_input` are supplied.
"""
if (logits_dimension is None) or (logits_dimension < 1):
raise ValueError("Invalid logits_dimension %s." % logits_dimension)
# If not provided, create logits.
if logits is None:
if logits_input is None:
raise ValueError("Neither logits nor logits_input supplied.")
return layers_lib.linear(logits_input, logits_dimension, scope="logits")
if logits_input is not None:
raise ValueError("Both logits and logits_input supplied.")
logits = ops.convert_to_tensor(logits, name="logits")
logits_dims = logits.get_shape().dims
if logits_dims is not None:
logits_dims[-1].assert_is_compatible_with(logits_dimension)
return logits
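# Illustrative example (added for exposition): with logits_dimension=3,
#   _logits(logits_input=hidden, logits=None, logits_dimension=3)
# builds a linear "logits" layer on top of `hidden`, while
#   _logits(logits_input=None, logits=precomputed, logits_dimension=3)
# merely validates that `precomputed` has last dimension 3. Supplying both
# or neither raises ValueError.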
def _create_model_fn_ops(features,
mode,
loss_fn,
logits_to_predictions_fn,
metrics_fn,
create_output_alternatives_fn,
labels=None,
train_op_fn=None,
logits=None,
logits_dimension=None,
head_name=None,
weight_column_name=None,
enable_centered_bias=False):
"""Returns a `ModelFnOps` object."""
_check_mode_valid(mode)
centered_bias = None
if enable_centered_bias:
centered_bias = _centered_bias(logits_dimension, head_name)
logits = nn.bias_add(logits, centered_bias)
predictions = logits_to_predictions_fn(logits)
loss = None
train_op = None
eval_metric_ops = None
if (mode != model_fn.ModeKeys.INFER) and (labels is not None):
weight_tensor = _weight_tensor(features, weight_column_name)
loss, weighted_average_loss = loss_fn(labels, logits, weight_tensor)
# Uses the deprecated API to set the tag explicitly.
# Without it, training and eval losses will show up in different graphs.
logging_ops.scalar_summary(
_summary_key(head_name, mkey.LOSS), weighted_average_loss)
if mode == model_fn.ModeKeys.TRAIN:
if train_op_fn is None:
raise ValueError("train_op_fn can not be None in TRAIN mode")
batch_size = array_ops.shape(logits)[0]
train_op = _train_op(loss, labels, train_op_fn, centered_bias,
batch_size, loss_fn, weight_tensor)
eval_metric_ops = metrics_fn(
weighted_average_loss, predictions, labels, weight_tensor)
return model_fn.ModelFnOps(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
output_alternatives=create_output_alternatives_fn(predictions))
class _RegressionHead(_SingleHead):
"""`Head` for regression with a generalized linear model."""
def __init__(self,
label_dimension,
loss_fn,
link_fn,
logits_dimension=None,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None):
"""`Head` for regression.
Args:
label_dimension: Number of regression labels per example. This is the
size of the last dimension of the labels `Tensor` (typically, this has
shape `[batch_size, label_dimension]`).
loss_fn: Loss function, takes logits and labels and returns loss.
link_fn: Link function, takes a logits tensor and returns the output.
logits_dimension: Number of logits per example. This is the
size of the last dimension of the logits `Tensor` (typically, this has
shape `[batch_size, label_dimension]`).
Default value: `label_dimension`.
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. Predictions, summary and metrics keys are
suffixed by `"/" + head_name` and the default variable scope is
`head_name`.
"""
super(_RegressionHead, self).__init__(
problem_type=constants.ProblemType.LINEAR_REGRESSION,
logits_dimension=(logits_dimension if logits_dimension is not None
else label_dimension),
label_name=label_name,
weight_column_name=weight_column_name,
head_name=head_name)
self._loss_fn = loss_fn
self._link_fn = link_fn
self._enable_centered_bias = enable_centered_bias
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `Head`."""
with variable_scope.variable_scope(
scope,
default_name=self.head_name or "regression_head",
values=(tuple(six.itervalues(features)) +
(labels, logits, logits_input))):
labels = self._transform_labels(mode=mode, labels=labels)
logits = _logits(logits_input, logits, self.logits_dimension)
return _create_model_fn_ops(
features=features,
mode=mode,
loss_fn=self._loss_fn,
logits_to_predictions_fn=self._logits_to_predictions,
metrics_fn=self._metrics,
create_output_alternatives_fn=self._create_output_alternatives,
labels=labels,
train_op_fn=train_op_fn,
logits=logits,
logits_dimension=self.logits_dimension,
head_name=self.head_name,
weight_column_name=self.weight_column_name,
enable_centered_bias=self._enable_centered_bias)
def _transform_labels(self, mode, labels):
"""Applies transformations to labels tensor."""
if (mode == model_fn.ModeKeys.INFER) or (labels is None):
return None
labels_tensor = _to_labels_tensor(labels, self._label_name)
_check_no_sparse_tensor(labels_tensor)
return labels_tensor
def _logits_to_predictions(self, logits):
"""Returns a dict of predictions.
Args:
logits: logits `Tensor` after applying possible centered bias.
Returns:
Dict of prediction `Tensor` keyed by `PredictionKey`.
"""
key = prediction_key.PredictionKey.SCORES
with ops.name_scope(None, "predictions", (logits,)):
if self.logits_dimension == 1:
logits = array_ops.squeeze(logits, squeeze_dims=(1,), name=key)
return {key: self._link_fn(logits)}
def _metrics(self, eval_loss, predictions, labels, weights):
"""Returns a dict of metrics keyed by name."""
del predictions, labels, weights # Unused by this head.
with ops.name_scope("metrics", values=[eval_loss]):
return {
_summary_key(self.head_name, mkey.LOSS):
metrics_lib.mean(eval_loss)}
def _log_loss_with_two_classes(labels, logits, weights=None):
with ops.name_scope(None, "log_loss_with_two_classes",
(logits, labels)) as name:
logits = ops.convert_to_tensor(logits)
labels = math_ops.to_float(labels)
# TODO(ptucker): This will break for dynamic shapes.
# sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.
if len(labels.get_shape()) == 1:
labels = array_ops.expand_dims(labels, dim=(1,))
loss = nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits,
name=name)
return _compute_weighted_loss(loss, weights)
def _one_class_to_two_class_logits(logits):
return array_ops.concat((array_ops.zeros_like(logits), logits), 1)
class _BinaryLogisticHead(_SingleHead):
"""`Head` for binary classification with logistic regression."""
def __init__(self,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
loss_fn=None,
thresholds=None):
"""`Head` for binary classification with logistic regression.
Args:
label_name: String, name of the key in label dict. Can be `None` if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. Predictions, summary, metrics keys are
suffixed by `"/" + head_name` and the default variable scope is
`head_name`.
loss_fn: Loss function.
thresholds: thresholds for eval.
Raises:
ValueError: if n_classes is invalid.
"""
super(_BinaryLogisticHead, self).__init__(
problem_type=constants.ProblemType.LOGISTIC_REGRESSION,
logits_dimension=1,
label_name=label_name,
weight_column_name=weight_column_name,
head_name=head_name)
self._thresholds = thresholds if thresholds else (.5,)
self._loss_fn = loss_fn if loss_fn else _log_loss_with_two_classes
self._enable_centered_bias = enable_centered_bias
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `Head`."""
with variable_scope.variable_scope(
scope,
default_name=self.head_name or "binary_logistic_head",
values=(tuple(six.itervalues(features)) +
(labels, logits, logits_input))):
labels = self._transform_labels(mode=mode, labels=labels)
logits = _logits(logits_input, logits, self.logits_dimension)
return _create_model_fn_ops(
features=features,
mode=mode,
loss_fn=self._loss_fn,
logits_to_predictions_fn=self._logits_to_predictions,
metrics_fn=self._metrics,
create_output_alternatives_fn=_classification_output_alternatives(
self.head_name, self._problem_type),
labels=labels,
train_op_fn=train_op_fn,
logits=logits,
logits_dimension=self.logits_dimension,
head_name=self.head_name,
weight_column_name=self.weight_column_name,
enable_centered_bias=self._enable_centered_bias)
def _transform_labels(self, mode, labels):
"""Applies transformations to labels tensor."""
if (mode == model_fn.ModeKeys.INFER) or (labels is None):
return None
labels_tensor = _to_labels_tensor(labels, self._label_name)
_check_no_sparse_tensor(labels_tensor)
return labels_tensor
def _logits_to_predictions(self, logits):
"""Returns a dict of predictions.
Args:
logits: logits `Output` after applying possible centered bias.
Returns:
Dict of prediction `Output` keyed by `PredictionKey`.
"""
with ops.name_scope(None, "predictions", (logits,)):
two_class_logits = _one_class_to_two_class_logits(logits)
return {
prediction_key.PredictionKey.LOGITS:
logits,
prediction_key.PredictionKey.LOGISTIC:
math_ops.sigmoid(
logits, name=prediction_key.PredictionKey.LOGISTIC),
prediction_key.PredictionKey.PROBABILITIES:
nn.softmax(
two_class_logits,
name=prediction_key.PredictionKey.PROBABILITIES),
prediction_key.PredictionKey.CLASSES:
math_ops.argmax(
two_class_logits,
1,
name=prediction_key.PredictionKey.CLASSES)
}
def _metrics(self, eval_loss, predictions, labels, weights):
"""Returns a dict of metrics keyed by name."""
with ops.name_scope("metrics", values=(
[eval_loss, labels, weights] + list(six.itervalues(predictions)))):
classes = predictions[prediction_key.PredictionKey.CLASSES]
logistic = predictions[prediction_key.PredictionKey.LOGISTIC]
metrics = {_summary_key(self.head_name, mkey.LOSS):
metrics_lib.mean(eval_loss)}
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
metrics_lib.accuracy(labels, classes, weights))
metrics[_summary_key(self.head_name, mkey.PREDICTION_MEAN)] = (
_predictions_streaming_mean(logistic, weights))
metrics[_summary_key(self.head_name, mkey.LABEL_MEAN)] = (
_indicator_labels_streaming_mean(labels, weights))
# Also include the streaming mean of the label as an accuracy baseline, as
# a reminder to users.
metrics[_summary_key(self.head_name, mkey.ACCURACY_BASELINE)] = (
_indicator_labels_streaming_mean(labels, weights))
metrics[_summary_key(self.head_name, mkey.AUC)] = (
_streaming_auc(logistic, labels, weights))
metrics[_summary_key(self.head_name, mkey.AUC_PR)] = (
_streaming_auc(logistic, labels, weights, curve="PR"))
for threshold in self._thresholds:
metrics[_summary_key(
self.head_name, mkey.ACCURACY_MEAN % threshold)] = (
_streaming_accuracy_at_threshold(logistic, labels, weights,
threshold))
# Precision for positive examples.
metrics[_summary_key(
self.head_name, mkey.PRECISION_MEAN % threshold)] = (
_streaming_precision_at_threshold(logistic, labels, weights,
threshold))
# Recall for positive examples.
metrics[_summary_key(
self.head_name, mkey.RECALL_MEAN % threshold)] = (
_streaming_recall_at_threshold(logistic, labels, weights,
threshold))
return metrics
def _softmax_cross_entropy_loss(labels, logits, weights=None):
with ops.name_scope(
None, "softmax_cross_entropy_loss", (logits, labels,)) as name:
labels = ops.convert_to_tensor(labels)
# Check that we got integer for classification.
if not labels.dtype.is_integer:
raise ValueError("Labels dtype should be integer "
"Instead got %s." % labels.dtype)
# sparse_softmax_cross_entropy_with_logits requires [batch_size] labels.
is_squeezed_labels = False
# TODO(ptucker): This will break for dynamic shapes.
if len(labels.get_shape()) == 2:
labels = array_ops.squeeze(labels, squeeze_dims=(1,))
is_squeezed_labels = True
loss = nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name=name)
# Restore squeezed dimension, if necessary, so loss matches weights shape.
if is_squeezed_labels:
loss = array_ops.expand_dims(loss, axis=(1,))
return _compute_weighted_loss(loss, weights)
class _MultiClassHead(_SingleHead):
"""'Head' for multi class classification."""
def __init__(self,
n_classes,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
loss_fn=None,
thresholds=None,
metric_class_ids=None,
label_keys=None):
"""'Head' for multi class classification.
This head expects to be fed integer labels specifying the class index. But
if `label_keys` is specified, then labels must be strings from this
vocabulary, and the predicted classes will be strings from the same
vocabulary.
Args:
n_classes: Number of classes, must be greater than 2 (for 2 classes, use
`_BinaryLogisticHead`).
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary, metrics
keys will be suffixed by `"/" + head_name` and the default variable
scope will be `head_name`.
loss_fn: Loss function. Defaults to softmax cross entropy loss.
thresholds: thresholds for eval.
metric_class_ids: List of class IDs for which we should report per-class
metrics. Must all be in the range `[0, n_classes)`.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary.
Raises:
ValueError: if `n_classes`, `metric_class_ids` or `label_keys` is invalid.
"""
super(_MultiClassHead, self).__init__(
problem_type=constants.ProblemType.CLASSIFICATION,
logits_dimension=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
head_name=head_name)
if (n_classes is None) or (n_classes <= 2):
raise ValueError("n_classes must be > 2: %s." % n_classes)
self._thresholds = thresholds if thresholds else (.5,)
self._loss_fn = loss_fn if loss_fn else _softmax_cross_entropy_loss
self._enable_centered_bias = enable_centered_bias
self._metric_class_ids = tuple([] if metric_class_ids is None else
metric_class_ids)
for class_id in self._metric_class_ids:
if (class_id < 0) or (class_id >= n_classes):
raise ValueError("Class ID %s not in [0, %s)." % (class_id, n_classes))
if label_keys and len(label_keys) != n_classes:
raise ValueError("Length of label_keys must equal n_classes.")
self._label_keys = label_keys
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `Head`."""
with variable_scope.variable_scope(
scope,
default_name=self.head_name or "multi_class_head",
values=(tuple(six.itervalues(features)) +
(labels, logits, logits_input))):
labels = self._transform_labels(mode=mode, labels=labels)
logits = _logits(logits_input, logits, self.logits_dimension)
return _create_model_fn_ops(
features=features,
mode=mode,
loss_fn=self._wrapped_loss_fn,
logits_to_predictions_fn=self._logits_to_predictions,
metrics_fn=self._metrics,
create_output_alternatives_fn=_classification_output_alternatives(
self.head_name, self._problem_type, self._label_keys),
labels=labels,
train_op_fn=train_op_fn,
logits=logits,
logits_dimension=self.logits_dimension,
head_name=self.head_name,
weight_column_name=self.weight_column_name,
enable_centered_bias=self._enable_centered_bias)
def _transform_labels(self, mode, labels):
"""Returns a dict that contains both the original labels and label IDs."""
if (mode == model_fn.ModeKeys.INFER) or (labels is None):
return None
labels_tensor = _to_labels_tensor(labels, self._label_name)
_check_no_sparse_tensor(labels_tensor)
if self._label_keys:
table = lookup_ops.index_table_from_tensor(
self._label_keys, name="label_id_lookup")
return {
"labels": labels_tensor,
"label_ids": table.lookup(labels_tensor),
}
return {
"labels": labels_tensor,
"label_ids": labels_tensor,
}
def _labels(self, labels_dict):
"""Returns labels `Tensor` of the same type as classes."""
return labels_dict["labels"]
def _label_ids(self, labels_dict):
"""Returns integer label ID `Tensor`."""
return labels_dict["label_ids"]
def _wrapped_loss_fn(self, labels, logits, weights=None):
return self._loss_fn(self._label_ids(labels), logits, weights=weights)
def _logits_to_predictions(self, logits):
"""Returns a dict of predictions.
Args:
logits: logits `Tensor` after applying possible centered bias.
Returns:
Dict of prediction `Tensor` keyed by `PredictionKey`.
"""
with ops.name_scope(None, "predictions", (logits,)):
class_ids = math_ops.argmax(
logits, 1, name=prediction_key.PredictionKey.CLASSES)
if self._label_keys:
table = lookup_ops.index_to_string_table_from_tensor(
self._label_keys, name="class_string_lookup")
classes = table.lookup(class_ids)
else:
classes = class_ids
return {
prediction_key.PredictionKey.LOGITS: logits,
prediction_key.PredictionKey.PROBABILITIES:
nn.softmax(
logits, name=prediction_key.PredictionKey.PROBABILITIES),
prediction_key.PredictionKey.CLASSES: classes
}
def _metrics(self, eval_loss, predictions, labels, weights):
"""Returns a dict of metrics keyed by name."""
with ops.name_scope(
"metrics",
values=((eval_loss, self._labels(labels), self._label_ids(labels),
weights) + tuple(six.itervalues(predictions)))):
logits = predictions[prediction_key.PredictionKey.LOGITS]
probabilities = predictions[prediction_key.PredictionKey.PROBABILITIES]
classes = predictions[prediction_key.PredictionKey.CLASSES]
metrics = {_summary_key(self.head_name, mkey.LOSS):
metrics_lib.mean(eval_loss)}
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
metrics_lib.accuracy(self._labels(labels), classes, weights))
if not self._label_keys:
# Classes are IDs. Add some metrics.
for class_id in self._metric_class_ids:
metrics[_summary_key(
self.head_name, mkey.CLASS_PREDICTION_MEAN % class_id)] = (
_class_predictions_streaming_mean(classes, weights, class_id))
# TODO(ptucker): Add per-class accuracy, precision, recall.
metrics[_summary_key(
self.head_name, mkey.CLASS_LABEL_MEAN % class_id)] = (
_class_labels_streaming_mean(
self._label_ids(labels), weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_PROBABILITY_MEAN % class_id)] = (
_predictions_streaming_mean(probabilities, weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_LOGITS_MEAN % class_id)] = (
_predictions_streaming_mean(logits, weights, class_id))
return metrics
def _to_labels_tensor(labels, label_name):
"""Returns label as a tensor.
Args:
labels: Label `Tensor` or `SparseTensor` or a dict containing labels.
label_name: Label name if labels is a dict.
Returns:
Label `Tensor` or `SparseTensor`.
"""
labels = labels[label_name] if isinstance(labels, dict) else labels
return framework_lib.convert_to_tensor_or_sparse_tensor(labels)
def _check_no_sparse_tensor(x):
"""Raises ValueError if the given tensor is `SparseTensor`."""
if isinstance(x, sparse_tensor.SparseTensor):
raise ValueError("SparseTensor is not supported.")
def _sparse_labels_to_indicator(labels, num_classes):
"""If labels is `SparseTensor`, converts it to indicator `Tensor`.
Args:
labels: Label `Tensor` or `SparseTensor`.
num_classes: Number of classes.
Returns:
Dense label `Tensor`.
Raises:
ValueError: If labels is `SparseTensor` and `num_classes` < 2.
"""
if isinstance(labels, sparse_tensor.SparseTensor):
if num_classes < 2:
raise ValueError("Must set num_classes >= 2 when passing labels as a "
"SparseTensor.")
return math_ops.to_int64(
sparse_ops.sparse_to_indicator(labels, num_classes))
return labels
def _assert_labels_rank(labels):
return control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(labels), 2),
("labels shape should be either [batch_size, 1] or [batch_size]",))
class _BinarySvmHead(_SingleHead):
"""`Head` for binary classification using SVM."""
def __init__(self, label_name, weight_column_name, enable_centered_bias,
head_name, thresholds):
def _loss_fn(labels, logits, weights=None):
with ops.name_scope(None, "hinge_loss", (logits, labels)) as name:
with ops.control_dependencies((_assert_labels_rank(labels),)):
labels = array_ops.reshape(labels, shape=(-1, 1))
loss = losses_lib.hinge_loss(labels=labels, logits=logits, scope=name,
reduction=losses_lib.Reduction.NONE)
return _compute_weighted_loss(loss, weights)
super(_BinarySvmHead, self).__init__(
problem_type=constants.ProblemType.LOGISTIC_REGRESSION,
logits_dimension=1,
label_name=label_name,
weight_column_name=weight_column_name,
head_name=head_name)
self._thresholds = thresholds if thresholds else (.5,)
self._loss_fn = _loss_fn
self._enable_centered_bias = enable_centered_bias
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `Head`."""
with variable_scope.variable_scope(
scope,
default_name=self.head_name or "binary_svm_head",
values=(tuple(six.itervalues(features)) +
(labels, logits, logits_input))):
labels = self._transform_labels(mode=mode, labels=labels)
logits = _logits(logits_input, logits, self.logits_dimension)
return _create_model_fn_ops(
features=features,
mode=mode,
loss_fn=self._loss_fn,
logits_to_predictions_fn=self._logits_to_predictions,
metrics_fn=self._metrics,
# TODO(zakaria): Handle labels for export.
create_output_alternatives_fn=self._create_output_alternatives,
labels=labels,
train_op_fn=train_op_fn,
logits=logits,
logits_dimension=self.logits_dimension,
head_name=self.head_name,
weight_column_name=self.weight_column_name,
enable_centered_bias=self._enable_centered_bias)
def _transform_labels(self, mode, labels):
"""Applies transformations to labels tensor."""
if (mode == model_fn.ModeKeys.INFER) or (labels is None):
return None
labels_tensor = _to_labels_tensor(labels, self._label_name)
_check_no_sparse_tensor(labels_tensor)
return labels_tensor
def _logits_to_predictions(self, logits):
"""See `_MultiClassHead`."""
with ops.name_scope(None, "predictions", (logits,)):
return {
prediction_key.PredictionKey.LOGITS:
logits,
prediction_key.PredictionKey.CLASSES:
math_ops.argmax(
_one_class_to_two_class_logits(logits),
1,
name=prediction_key.PredictionKey.CLASSES)
}
def _metrics(self, eval_loss, predictions, labels, weights):
"""See `_MultiClassHead`."""
with ops.name_scope("metrics", values=(
[eval_loss, labels, weights] + list(six.itervalues(predictions)))):
metrics = {_summary_key(self.head_name, mkey.LOSS):
metrics_lib.mean(eval_loss)}
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
classes = predictions[prediction_key.PredictionKey.CLASSES]
metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
metrics_lib.accuracy(labels, classes, weights))
# TODO(sibyl-vie3Poto): add more metrics relevant for svms.
return metrics
class _MultiLabelHead(_SingleHead):
"""`Head` for multi-label classification."""
# TODO(zakaria): add signature and metric for multilabel.
def __init__(self,
n_classes,
label_name,
weight_column_name,
enable_centered_bias,
head_name,
thresholds,
metric_class_ids=None,
loss_fn=None):
super(_MultiLabelHead, self).__init__(
problem_type=constants.ProblemType.CLASSIFICATION,
logits_dimension=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
head_name=head_name)
self._thresholds = thresholds if thresholds else (.5,)
self._loss_fn = loss_fn if loss_fn else _sigmoid_cross_entropy_loss
self._enable_centered_bias = enable_centered_bias
self._metric_class_ids = tuple([] if metric_class_ids is None else
metric_class_ids)
for class_id in self._metric_class_ids:
if (class_id < 0) or (class_id >= n_classes):
raise ValueError("Class ID %s not in [0, %s)." % (class_id, n_classes))
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `Head`."""
with variable_scope.variable_scope(
scope,
default_name=self.head_name or "multi_label_head",
values=(tuple(six.itervalues(features)) +
(labels, logits, logits_input))):
labels = self._transform_labels(mode=mode, labels=labels)
logits = _logits(logits_input, logits, self.logits_dimension)
return _create_model_fn_ops(
features=features,
mode=mode,
loss_fn=self._loss_fn,
logits_to_predictions_fn=self._logits_to_predictions,
metrics_fn=self._metrics,
create_output_alternatives_fn=_classification_output_alternatives(
self.head_name, self._problem_type),
labels=labels,
train_op_fn=train_op_fn,
logits=logits,
logits_dimension=self.logits_dimension,
head_name=self.head_name,
weight_column_name=self.weight_column_name,
enable_centered_bias=self._enable_centered_bias)
def _transform_labels(self, mode, labels):
"""Applies transformations to labels tensor."""
if (mode == model_fn.ModeKeys.INFER) or (labels is None):
return None
labels_tensor = _to_labels_tensor(labels, self._label_name)
labels_tensor = _sparse_labels_to_indicator(labels_tensor,
self._logits_dimension)
return labels_tensor
def _logits_to_predictions(self, logits):
"""See `_MultiClassHead`."""
with ops.name_scope(None, "predictions", (logits,)):
return {
prediction_key.PredictionKey.LOGITS:
logits,
prediction_key.PredictionKey.PROBABILITIES:
math_ops.sigmoid(
logits, name=prediction_key.PredictionKey.PROBABILITIES),
prediction_key.PredictionKey.CLASSES:
math_ops.to_int64(
math_ops.greater(logits, 0),
name=prediction_key.PredictionKey.CLASSES)
}
def _metrics(self, eval_loss, predictions, labels, weights):
"""Returns a dict of metrics keyed by name."""
with ops.name_scope("metrics", values=(
[eval_loss, labels, weights] + list(six.itervalues(predictions)))):
classes = predictions[prediction_key.PredictionKey.CLASSES]
probabilities = predictions[prediction_key.PredictionKey.PROBABILITIES]
logits = predictions[prediction_key.PredictionKey.LOGITS]
metrics = {_summary_key(self.head_name, mkey.LOSS):
metrics_lib.mean(eval_loss)}
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
metrics_lib.accuracy(labels, classes, weights))
metrics[_summary_key(self.head_name, mkey.AUC)] = _streaming_auc(
probabilities, labels, weights)
metrics[_summary_key(self.head_name, mkey.AUC_PR)] = _streaming_auc(
probabilities, labels, weights, curve="PR")
for class_id in self._metric_class_ids:
# TODO(ptucker): Add per-class accuracy, precision, recall.
metrics[_summary_key(
self.head_name, mkey.CLASS_PREDICTION_MEAN % class_id)] = (
_predictions_streaming_mean(classes, weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_LABEL_MEAN % class_id)] = (
_indicator_labels_streaming_mean(labels, weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_PROBABILITY_MEAN % class_id)] = (
_predictions_streaming_mean(probabilities, weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_LOGITS_MEAN % class_id)] = (
_predictions_streaming_mean(logits, weights, class_id))
metrics[_summary_key(self.head_name, mkey.CLASS_AUC % class_id)] = (
_streaming_auc(probabilities, labels, weights, class_id))
metrics[_summary_key(self.head_name, mkey.CLASS_AUC_PR % class_id)] = (
_streaming_auc(probabilities, labels, weights, class_id,
curve="PR"))
return metrics
class _LossOnlyHead(Head):
"""`Head` implementation for additional loss terms.
This class only holds loss terms unrelated to any other heads (labels),
e.g. regularization.
Common usage:
  This is often combined with other heads in a multi-head setup.
```python
head = multi_head([
head1, head2, loss_only_head('regularizer', regularizer)])
```
"""
def __init__(self, loss_fn, head_name=None):
self._loss_fn = loss_fn
self.head_name = head_name or "loss_only_head"
@property
def logits_dimension(self):
return 0
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `_Head.create_model_fn_ops`.
Args:
      features: Not used.
mode: Estimator's `ModeKeys`.
labels: Labels `Tensor`, or `dict` of same.
train_op_fn: Function that takes a scalar loss and returns an op to
optimize with the loss.
      logits: Not used.
      logits_input: Not used.
scope: Optional scope for variable_scope. If provided, will be passed to
all heads. Most users will want to set this to `None`, so each head
constructs a separate variable_scope according to its `head_name`.
Returns:
A `ModelFnOps` object.
Raises:
      ValueError: if `mode` is not recognized.
"""
_check_mode_valid(mode)
loss = None
train_op = None
if mode != model_fn.ModeKeys.INFER:
with variable_scope.variable_scope(scope, default_name=self.head_name):
loss = self._loss_fn()
if isinstance(loss, list):
loss = math_ops.add_n(loss)
logging_ops.scalar_summary(
_summary_key(self.head_name, mkey.LOSS), loss)
if mode == model_fn.ModeKeys.TRAIN:
if train_op_fn is None:
raise ValueError("train_op_fn can not be None in TRAIN mode")
with ops.name_scope(None, "train_op", (loss,)):
train_op = train_op_fn(loss)
return model_fn.ModelFnOps(
mode=mode,
loss=loss,
train_op=train_op,
predictions={},
eval_metric_ops={})
class _MultiHead(Head):
"""`Head` implementation for multi objective learning.
This class is responsible for using and merging the output of multiple
`Head` objects.
All heads stem from the same logits/logit_input tensor.
Common usage:
  For simple use cases you can pass the activation of a hidden layer like
  this from your model_fn,
```python
last_hidden_layer_activation = ... Build your model.
multi_head = ...
return multi_head.create_model_fn_ops(
..., logits_input=last_hidden_layer_activation, ...)
```
Or you can create a logits tensor of
[batch_size, multi_head.logits_dimension] shape. _MultiHead will split the
logits for you.
return multi_head.create_model_fn_ops(..., logits=logits, ...)
For more complex use cases like a multi-task/multi-tower model or when logits
for each head has to be created separately, you can pass a dict of logits
where the keys match the name of the single heads.
```python
logits = {"head1": logits1, "head2": logits2}
return multi_head.create_model_fn_ops(..., logits=logits, ...)
```
  Here is what this class does,
  + For training, merges the losses of all heads according to a function
    provided by the user, and calls the user-provided train_op_fn with this
    final loss.
+ For eval, merges metrics by adding head_name suffix to the keys in eval
metrics.
+ For inference, updates keys in prediction dict to a 2-tuple,
(head_name, prediction_key)
"""
def __init__(self, heads, loss_merger):
"""_Head to merges multiple _Head objects.
Args:
heads: list of _Head objects.
loss_merger: function that takes a list of loss tensors for the heads
and returns the final loss tensor for the multi head.
Raises:
ValueError: if any head does not have a name.
"""
self._logits_dimension = 0
for head in heads:
if not head.head_name:
raise ValueError("Members of MultiHead must have names.")
self._logits_dimension += head.logits_dimension
self._heads = heads
self._loss_merger = loss_merger
@property
def logits_dimension(self):
return self._logits_dimension
def create_model_fn_ops(self,
features,
mode,
labels=None,
train_op_fn=None,
logits=None,
logits_input=None,
scope=None):
"""See `_Head.create_model_fn_ops`.
Args:
features: Input `dict` of `Tensor` objects.
mode: Estimator's `ModeKeys`.
labels: Labels `Tensor`, or `dict` of same.
train_op_fn: Function that takes a scalar loss and returns an op to
optimize with the loss.
      logits: Concatenated logits for all heads or a dict of head name to
        logits tensor. If concatenated logits, it should have
        (batch_size, x) shape where x is the sum of `logits_dimension` of
        all the heads, i.e., same as `logits_dimension` of this class.
        create_model_fn_ops will split the logits tensor and pass logits of
        proper size to each head. This is useful if we want to be agnostic
        about whether you are creating a single head or a multi head.
        logits can also be a dict, for convenience, when you create the
        head-specific logits explicitly and don't want to concatenate them
        yourself.
logits_input: tensor to build logits from.
scope: Optional scope for variable_scope. If provided, will be passed to
all heads. Most users will want to set this to `None`, so each head
constructs a separate variable_scope according to its `head_name`.
Returns:
`ModelFnOps`.
Raises:
ValueError: if `mode` is not recognized, or neither or both of `logits`
and `logits_input` is provided.
"""
_check_mode_valid(mode)
all_model_fn_ops = []
if logits is None:
# Use logits_input.
for head in self._heads:
all_model_fn_ops.append(
head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=no_op_train_fn,
logits_input=logits_input,
scope=scope))
else:
head_logits_pairs = []
if isinstance(logits, dict):
for head in self._heads:
if isinstance(head, _LossOnlyHead):
head_logits_pairs.append((head, None))
else:
head_logits_pairs.append((head, logits[head.head_name]))
else:
# Split logits for each head.
head_logits_pairs = zip(self._heads, self._split_logits(logits))
for head, head_logits in head_logits_pairs:
all_model_fn_ops.append(
head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=no_op_train_fn,
logits=head_logits,
scope=scope))
if mode == model_fn.ModeKeys.TRAIN:
if train_op_fn is None:
raise ValueError("train_op_fn can not be None in TRAIN mode.")
return self._merge_train(all_model_fn_ops, train_op_fn)
if mode == model_fn.ModeKeys.INFER:
return self._merge_infer(all_model_fn_ops)
if mode == model_fn.ModeKeys.EVAL:
return self._merge_eval(all_model_fn_ops)
raise ValueError("mode=%s unrecognized" % str(mode))
def _split_logits(self, logits):
"""Splits logits for heads.
Args:
logits: the logits tensor.
Returns:
A list of logits for the individual heads.
"""
all_logits = []
begin = 0
for head in self._heads:
current_logits_size = head.logits_dimension
current_logits = array_ops.slice(logits, [0, begin],
[-1, current_logits_size])
all_logits.append(current_logits)
begin += current_logits_size
return all_logits
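  # Illustrative example (added for exposition): for heads with
  # logits_dimension 1 and 2, a logits tensor of shape [batch_size, 3] is
  # sliced into [batch_size, 1] and [batch_size, 2] pieces, in head order.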
def _merge_train(self, all_model_fn_ops, train_op_fn):
"""Merges list of ModelFnOps for training.
Args:
all_model_fn_ops: list of ModelFnOps for the individual heads.
train_op_fn: Function to create train op. See `create_model_fn_ops`
documentation for more details.
Returns:
ModelFnOps that merges all heads for TRAIN.
"""
losses = []
metrics = {}
additional_train_ops = []
for m in all_model_fn_ops:
losses.append(m.loss)
if m.eval_metric_ops is not None:
for k, v in six.iteritems(m.eval_metric_ops):
# metrics["%s/%s" % (k, head_name)] = v
metrics[k] = v
additional_train_ops.append(m.train_op)
loss = self._loss_merger(losses)
train_op = train_op_fn(loss)
train_op = control_flow_ops.group(train_op, *additional_train_ops)
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.TRAIN,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics)
def _merge_infer(self, all_model_fn_ops):
"""Merges list of ModelFnOps for inference.
Args:
all_model_fn_ops: list of ModelFnOps for the individual heads.
Returns:
      ModelFnOps that merges all the heads for INFER.
"""
predictions = {}
output_alternatives = {}
for head, m in zip(self._heads, all_model_fn_ops):
if isinstance(head, _LossOnlyHead):
continue
head_name = head.head_name
output_alternatives[head_name] = m.output_alternatives[head_name]
for k, v in m.predictions.items():
predictions[(head_name, k)] = v
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.INFER,
predictions=predictions,
output_alternatives=output_alternatives)
def _merge_eval(self, all_model_fn_ops):
"""Merges list of ModelFnOps for eval.
Args:
all_model_fn_ops: list of ModelFnOps for the individual heads.
Returns:
ModelFnOps that merges all the heads for EVAL.
"""
predictions = {}
metrics = {}
losses = []
for head, m in zip(self._heads, all_model_fn_ops):
losses.append(m.loss)
head_name = head.head_name
for k, v in m.predictions.items():
predictions[(head_name, k)] = v
for k, v in m.eval_metric_ops.items():
# metrics["%s/%s" % (k, head_name)] = v
metrics[k] = v
loss = self._loss_merger(losses)
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=loss,
eval_metric_ops=metrics)
def _weight_tensor(features, weight_column_name):
"""Returns weights as `Tensor` of rank 0, or at least 2."""
if not weight_column_name:
return None
if weight_column_name not in features:
raise ValueError("Weights {} missing from features.".format(
weight_column_name))
with ops.name_scope(None, "weight_tensor", tuple(six.itervalues(features))):
weight_tensor = math_ops.to_float(features[weight_column_name])
shape = weight_tensor.get_shape()
rank = shape.ndims
    # We don't bother with expanding dims of non-statically shaped tensors or
    # scalars, and >1d is already in a good format.
if rank == 1:
logging.warning("Weights {} has shape {}, expanding to make it 2d.".
format(weight_column_name, shape))
return (
sparse_ops.sparse_reshape(weight_tensor, (-1, 1))
if isinstance(weight_tensor, sparse_tensor.SparseTensor) else
array_ops.reshape(weight_tensor, (-1, 1)))
return weight_tensor
# TODO(zakaria): This function is needed for backward compatibility and should
# be removed when we migrate to core.
def _compute_weighted_loss(loss_unweighted, weight, name="loss"):
"""Returns a tuple of (loss_train, loss_report).
loss is used for gradient descent while weighted_average_loss is used for
summaries to be backward compatible.
loss is different from the loss reported on the tensorboard as we
should respect the example weights when computing the gradient.
L = sum_{i} w_{i} * l_{i} / B
where B is the number of examples in the batch, l_{i}, w_{i} are individual
losses, and example weight.
Args:
loss_unweighted: Unweighted loss
weight: Weight tensor
name: Optional name
Returns:
A tuple of losses. First one for training and the second one for reporting.
"""
with ops.name_scope(name, values=(loss_unweighted, weight)) as name_scope:
if weight is None:
loss = math_ops.reduce_mean(loss_unweighted, name=name_scope)
return loss, loss
weight = weights_broadcast_ops.broadcast_weights(weight, loss_unweighted)
with ops.name_scope(None, "weighted_loss",
(loss_unweighted, weight)) as name:
weighted_loss = math_ops.multiply(loss_unweighted, weight, name=name)
weighted_loss_mean = math_ops.reduce_mean(weighted_loss, name=name_scope)
weighted_loss_normalized = math_ops.div(
math_ops.reduce_sum(weighted_loss),
math_ops.to_float(math_ops.reduce_sum(weight)),
name="weighted_average_loss")
return weighted_loss_mean, weighted_loss_normalized
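# Worked example (illustrative, not part of the original source): with
# loss_unweighted = [1.0, 3.0] and weight = [1.0, 0.5], the weighted losses
# are [1.0, 1.5]. The training loss is their mean over the batch,
# 2.5 / 2 = 1.25, while the reported weighted_average_loss normalizes by the
# weight sum instead: 2.5 / 1.5 ~= 1.667.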
def _wrap_custom_loss_fn(loss_fn):
def _wrapper(labels, logits, weights=None):
if weights is None:
loss = loss_fn(labels, logits)
else:
loss = loss_fn(labels, logits, weights)
return loss, loss
return _wrapper
def _check_mode_valid(mode):
"""Raises ValueError if the given mode is invalid."""
if (mode != model_fn.ModeKeys.TRAIN and mode != model_fn.ModeKeys.INFER and
mode != model_fn.ModeKeys.EVAL):
raise ValueError("mode=%s unrecognized." % str(mode))
def _get_arguments(func):
"""Returns a spec of given func."""
_, func = tf_decorator.unwrap(func)
if hasattr(func, "__code__"):
# Regular function.
return tf_inspect.getargspec(func)
elif hasattr(func, "__call__"):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, "func"):
# Partial function.
return _get_arguments(func.func)
def _verify_loss_fn_args(loss_fn):
args = _get_arguments(loss_fn).args
for arg_name in ["labels", "logits", "weights"]:
if arg_name not in args:
raise ValueError("Argument %s not found in loss_fn." % arg_name)
def _centered_bias(logits_dimension, head_name=None):
"""Returns centered_bias `Variable`.
Args:
logits_dimension: Last dimension of `logits`. Must be >= 1.
head_name: Optional name of the head.
Returns:
`Variable` with shape `[logits_dimension]`.
Raises:
ValueError: if `logits_dimension` is invalid.
"""
if (logits_dimension is None) or (logits_dimension < 1):
raise ValueError("Invalid logits_dimension %s." % logits_dimension)
# Do not create a variable with variable_scope.get_variable, because that may
# create a PartitionedVariable, which does not support indexing, so
# summary.scalar will not work.
centered_bias = variable_scope.variable(
name="centered_bias_weight",
initial_value=array_ops.zeros(shape=(logits_dimension,)),
trainable=True)
for dim in range(logits_dimension):
if head_name:
summary.scalar("centered_bias/bias_%d/%s" % (dim, head_name),
centered_bias[dim])
else:
summary.scalar("centered_bias/bias_%d" % dim, centered_bias[dim])
return centered_bias
def _centered_bias_step(centered_bias, batch_size, labels, loss_fn, weights):
"""Creates and returns training op for centered bias."""
with ops.name_scope(None, "centered_bias_step", (labels,)) as name:
logits_dimension = array_ops.shape(centered_bias)[0]
logits = array_ops.reshape(
array_ops.tile(centered_bias, (batch_size,)),
(batch_size, logits_dimension))
with ops.name_scope(None, "centered_bias", (labels, logits)):
centered_bias_loss = math_ops.reduce_mean(
loss_fn(labels, logits, weights), name="training_loss")
    # Learn central bias by an optimizer. 0.1 is a conservative lr for a
    # single variable.
return training.AdagradOptimizer(0.1).minimize(
centered_bias_loss, var_list=(centered_bias,), name=name)
def _summary_key(head_name, val):
return "%s/%s" % (val, head_name) if head_name else val
def _train_op(loss, labels, train_op_fn, centered_bias, batch_size, loss_fn,
weights):
"""Returns op for the training step."""
if centered_bias is not None:
centered_bias_step = _centered_bias_step(
centered_bias=centered_bias,
batch_size=batch_size,
labels=labels,
loss_fn=loss_fn,
weights=weights)
else:
centered_bias_step = None
with ops.name_scope(None, "train_op", (loss, labels)):
train_op = train_op_fn(loss)
if centered_bias_step is not None:
train_op = control_flow_ops.group(train_op, centered_bias_step)
return train_op
def _sigmoid_cross_entropy_loss(labels, logits, weights=None):
with ops.name_scope(None, "sigmoid_cross_entropy_loss",
(logits, labels)) as name:
# sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] labels.
loss = nn.sigmoid_cross_entropy_with_logits(
labels=math_ops.to_float(labels), logits=logits, name=name)
return _compute_weighted_loss(loss, weights)
def _float_weights_or_none(weights):
if weights is None:
return None
with ops.name_scope(None, "float_weights", (weights,)) as name:
return math_ops.to_float(weights, name=name)
def _indicator_labels_streaming_mean(labels, weights=None, class_id=None):
labels = math_ops.to_float(labels)
weights = _float_weights_or_none(weights)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
if class_id is not None:
if weights is not None:
weights = weights[:, class_id]
labels = labels[:, class_id]
return metrics_lib.mean(labels, weights)
def _predictions_streaming_mean(predictions,
weights=None,
class_id=None):
predictions = math_ops.to_float(predictions)
weights = _float_weights_or_none(weights)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
if class_id is not None:
if weights is not None:
weights = weights[:, class_id]
predictions = predictions[:, class_id]
return metrics_lib.mean(predictions, weights)
# TODO(ptucker): Add support for SparseTensor labels.
def _class_id_labels_to_indicator(labels, num_classes):
if (num_classes is None) or (num_classes < 2):
raise ValueError("Invalid num_classes %s." % num_classes)
with ops.control_dependencies((_assert_labels_rank(labels),)):
labels = array_ops.reshape(labels, (-1,))
return array_ops.one_hot(labels, depth=num_classes, axis=-1)
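# Illustrative example (not part of the original source): labels [1, 0, 2]
# with num_classes=3 yield the indicator matrix
# [[0., 1., 0.], [1., 0., 0.], [0., 0., 1.]].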
def _class_predictions_streaming_mean(predictions, weights, class_id):
return metrics_lib.mean(
array_ops.where(
math_ops.equal(
math_ops.to_int32(class_id), math_ops.to_int32(predictions)),
array_ops.ones_like(predictions),
array_ops.zeros_like(predictions)),
weights=weights)
def _class_labels_streaming_mean(labels, weights, class_id):
return metrics_lib.mean(
array_ops.where(
math_ops.equal(
math_ops.to_int32(class_id), math_ops.to_int32(labels)),
array_ops.ones_like(labels), array_ops.zeros_like(labels)),
weights=weights)
def _streaming_auc(predictions, labels, weights=None, class_id=None,
curve="ROC"):
# pylint: disable=missing-docstring
predictions = math_ops.to_float(predictions)
if labels.dtype.base_dtype != dtypes.bool:
logging.warning("Casting %s labels to bool.", labels.dtype)
labels = math_ops.cast(labels, dtypes.bool)
weights = _float_weights_or_none(weights)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
if class_id is not None:
if weights is not None:
weights = weights[:, class_id]
predictions = predictions[:, class_id]
labels = labels[:, class_id]
return metrics_lib.auc(labels, predictions, weights, curve=curve)
def _assert_class_id(class_id, num_classes=None):
"""Average label value for class `class_id`."""
if (class_id is None) or (class_id < 0):
raise ValueError("Invalid class_id %s." % class_id)
if num_classes is not None:
if num_classes < 2:
raise ValueError("Invalid num_classes %s." % num_classes)
if class_id >= num_classes:
raise ValueError("Invalid class_id %s." % class_id)
def _streaming_accuracy_at_threshold(predictions, labels, weights, threshold):
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
return metrics_lib.accuracy(labels, threshold_predictions, weights)
def _streaming_precision_at_threshold(predictions, labels, weights, threshold):
precision_tensor, update_op = metrics_lib.precision_at_thresholds(
labels, predictions, (threshold,), _float_weights_or_none(weights))
return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)
def _streaming_recall_at_threshold(predictions, labels, weights, threshold):
  recall_tensor, update_op = metrics_lib.recall_at_thresholds(
      labels, predictions, (threshold,), _float_weights_or_none(weights))
  return array_ops.squeeze(recall_tensor), array_ops.squeeze(update_op)
def _classification_output_alternatives(head_name, problem_type,
label_keys=None):
"""Creates a func to generate output alternatives for classification.
  Servo expects classes to be a string tensor, and to have the same dimensions
as the probabilities tensor. It should contain the labels of the corresponding
entries in probabilities. This function creates a new classes tensor that
satisfies these conditions and can be exported.
Args:
head_name: Name of the head.
problem_type: `ProblemType`
label_keys: Optional label keys
Returns:
A function to generate output alternatives.
"""
def _create_output_alternatives(predictions):
"""Creates output alternative for the Head.
Args:
predictions: a dict of {tensor_name: Tensor}, where 'tensor_name' is a
symbolic name for an output Tensor possibly but not necessarily taken
from `PredictionKey`, and 'Tensor' is the corresponding output Tensor
itself.
Returns:
`dict` of {submodel_name: (problem_type, {tensor_name: Tensor})}, where
'submodel_name' is a submodel identifier that should be consistent across
the pipeline (here likely taken from the head_name),
'problem_type' is a `ProblemType`,
'tensor_name' is a symbolic name for an output Tensor possibly but not
necessarily taken from `PredictionKey`, and
'Tensor' is the corresponding output Tensor itself.
Raises:
ValueError: if predictions does not have PredictionKey.PROBABILITIES key.
"""
probabilities = predictions.get(prediction_key.PredictionKey.PROBABILITIES)
if probabilities is None:
raise ValueError("%s missing in predictions" %
prediction_key.PredictionKey.PROBABILITIES)
with ops.name_scope(None, "_classification_output_alternatives",
(probabilities,)):
batch_size = array_ops.shape(probabilities)[0]
if label_keys:
classes = array_ops.tile(
input=array_ops.expand_dims(input=label_keys, axis=0),
multiples=[batch_size, 1],
name="classes_tensor")
else:
n = array_ops.shape(probabilities)[1]
classes = array_ops.tile(
input=array_ops.expand_dims(input=math_ops.range(n), axis=0),
multiples=[batch_size, 1])
classes = string_ops.as_string(classes, name="classes_tensor")
exported_predictions = {
prediction_key.PredictionKey.PROBABILITIES: probabilities,
prediction_key.PredictionKey.CLASSES: classes}
return {head_name: (problem_type, exported_predictions)}
return _create_output_alternatives
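# Illustrative sketch (not part of the original source): for a hypothetical
# head named "my_head" with label_keys=("a", "b") and a probabilities tensor
# of shape [2, 2], the generated output alternatives would look like:
#   {"my_head": (problem_type,
#                {"probabilities": <float tensor of shape [2, 2]>,
#                 "classes": [["a", "b"], ["a", "b"]]})}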
# Aliases
# TODO(zakaria): Remove these aliases, See b/34751732
_regression_head = regression_head
_poisson_regression_head = poisson_regression_head
_multi_class_head = multi_class_head
_binary_svm_head = binary_svm_head
_multi_label_head = multi_label_head
_multi_head = multi_head
_Head = Head
|
{
"content_hash": "2a4a6554776fee62720a9004fcb663ce",
"timestamp": "",
"source": "github",
"line_count": 2101,
"max_line_length": 89,
"avg_line_length": 39.00428367444074,
"alnum_prop": 0.6428710889832576,
"repo_name": "Mazecreator/tensorflow",
"id": "a67694d1c93c9d01bf63fc216b83d87ab390c456",
"size": "82637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/estimators/head.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "175403"
},
{
"name": "C++",
"bytes": "21737608"
},
{
"name": "CMake",
"bytes": "130644"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "786880"
},
{
"name": "HTML",
"bytes": "558790"
},
{
"name": "Java",
"bytes": "279355"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "36991"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64656"
},
{
"name": "Protocol Buffer",
"bytes": "199996"
},
{
"name": "Python",
"bytes": "17935555"
},
{
"name": "Shell",
"bytes": "320192"
},
{
"name": "TypeScript",
"bytes": "775401"
}
],
"symlink_target": ""
}
|
"""Trying to make a report that corresponds to
https://github.com/OpenTreeOfLife/germinator/wiki/Overview-of-repository-statistics
"""
import time
start_clock = time.time()
from peyotl.phylesystem.phylesystem_umbrella import Phylesystem
from peyotl.nexson_syntax import get_nexml_el
from peyotl import gen_otu_dict, iter_node
from peyotl.manip import iter_trees
import codecs
import json
import sys
out = codecs.getwriter('utf-8')(sys.stdout)
phy = Phylesystem()
# Start all of the properties for the report at 0
report_properties = ['reported_study_count',
'study_count',
'OTU_count',
'unmapped_OTU_count',
'unique_OTU_count',
'nominated_study_count',
'nominated_study_OTU_count',
'nominated_study_unique_OTU_count',
'nominated_study_unmapped_OTU_count',
'run_time']
reported_study_count = 0
study_count = 0
OTU_count = 0
unmapped_OTU_count = 0
unique_OTU_count = 0
nominated_study_count = 0
nominated_study_OTU_count = 0
nominated_study_unique_OTU_count = 0
nominated_study_unmapped_OTU_count = 0
run_time = 0
ott_id_set = set()
nominated_ott_id_set = set()
for study_id, n in phy.iter_study_objs():
reported_study_count += 1
otu_dict = gen_otu_dict(n)
if not bool(otu_dict):
continue
nex_obj = get_nexml_el(n)
study_count += 1
not_intended_for_synth = nex_obj.get('^ot:notIntendedForSynthesis')
intended_for_synth = (not_intended_for_synth is None) or (not_intended_for_synth is False)
if intended_for_synth:
nominated_study_count += 1
nominated_study_OTU_count += len(otu_dict)
OTU_count += len(otu_dict)
for oid, o in otu_dict.items():
ott_id = o.get('^ot:ottId')
if ott_id is None:
unmapped_OTU_count += 1
if intended_for_synth:
nominated_study_unmapped_OTU_count += 1
else:
ott_id_set.add(ott_id)
if intended_for_synth:
nominated_ott_id_set.add(ott_id)
unique_OTU_count = len(ott_id_set)
nominated_study_unique_OTU_count = len(nominated_ott_id_set)
end_clock = time.time()
run_time = end_clock - start_clock
#################################################
# Write the variables in the local scope out as a JSON blob.
report = {}
for prop in report_properties:
report[prop] = locals()[prop]
json.dump(report, out, sort_keys=True, indent=2, separators=(',', ': '))
out.write('\n')
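# For illustration only (not part of the original script): the emitted JSON
# blob is a flat object keyed by the names in report_properties, e.g.
#   {"OTU_count": 0, ..., "run_time": 0.0}
# with one integer (or float, for run_time) value per property.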
|
{
"content_hash": "3bfc14358e4c16d0a3d4abb52293f04b",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 94,
"avg_line_length": 33.064935064935064,
"alnum_prop": 0.6182246661429693,
"repo_name": "OpenTreeOfLife/peyotl",
"id": "0fd8436424d6ce7f141fc38e84a759947ba49e8f",
"size": "2568",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/phylesystem/ott_status_report.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1834266"
},
{
"name": "Python",
"bytes": "1010278"
},
{
"name": "Shell",
"bytes": "28989"
},
{
"name": "XSLT",
"bytes": "573"
}
],
"symlink_target": ""
}
|
from .workflow import * # noqa
from .share import * # noqa
from .feeds import * # noqa
|
{
"content_hash": "13b3dcc153381573a9d1465e175e56b7",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 31,
"avg_line_length": 30,
"alnum_prop": 0.6666666666666666,
"repo_name": "aaxelb/SHARE",
"id": "f9313083fb246582600bbfe4f6dae5b70ac49630",
"size": "90",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api/views/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3321"
},
{
"name": "Dockerfile",
"bytes": "1143"
},
{
"name": "Gherkin",
"bytes": "4346"
},
{
"name": "HTML",
"bytes": "4834"
},
{
"name": "Python",
"bytes": "1450482"
},
{
"name": "Shell",
"bytes": "408"
}
],
"symlink_target": ""
}
|
from recipe_engine import recipe_api
from . import android
from . import chromebook
from . import chromecast
from . import default
from . import ios
from . import valgrind
from . import win_ssh
"""Abstractions for running code on various platforms.
The methods in this module define how certain high-level functions should work.
Each flavor should correspond to a subclass of DefaultFlavor which may override
any of these functions as appropriate for that flavor.
For example, the AndroidFlavor will override the functions for copying files
between the host and Android device, as well as the 'step' function, so that
commands may be run through ADB.
"""
VERSION_FILE_LOTTIE = 'LOTTIE_VERSION'
VERSION_FILE_SK_IMAGE = 'SK_IMAGE_VERSION'
VERSION_FILE_SKP = 'SKP_VERSION'
VERSION_FILE_SVG = 'SVG_VERSION'
VERSION_NONE = -1
def is_android(vars_api):
return ('Android' in vars_api.extra_tokens or
'Android' in vars_api.builder_cfg.get('os', ''))
def is_chromecast(vars_api):
return ('Chromecast' in vars_api.extra_tokens or
'Chromecast' in vars_api.builder_cfg.get('os', ''))
def is_chromebook(vars_api):
return ('Chromebook' in vars_api.extra_tokens or
'ChromeOS' in vars_api.builder_cfg.get('os', ''))
def is_ios(vars_api):
return ('iOS' in vars_api.extra_tokens or
'iOS' == vars_api.builder_cfg.get('os', ''))
def is_test_skqp(vars_api):
return ('SKQP' in vars_api.extra_tokens and
vars_api.builder_name.startswith('Test'))
def is_valgrind(vars_api):
return 'Valgrind' in vars_api.extra_tokens
def is_win_ssh(vars_api):
return 'LenovoYogaC630' in vars_api.builder_cfg.get('model', '')
class SkiaFlavorApi(recipe_api.RecipeApi):
def get_flavor(self, vars_api):
"""Return a flavor utils object specific to the given builder."""
if is_chromecast(vars_api):
return chromecast.ChromecastFlavor(self)
if is_chromebook(vars_api):
return chromebook.ChromebookFlavor(self)
if is_android(vars_api) and not is_test_skqp(vars_api):
return android.AndroidFlavor(self)
elif is_ios(vars_api):
return ios.iOSFlavor(self)
elif is_valgrind(vars_api):
return valgrind.ValgrindFlavor(self)
elif is_win_ssh(vars_api):
return win_ssh.WinSSHFlavor(self)
else:
return default.DefaultFlavor(self)
def setup(self):
self._f = self.get_flavor(self.m.vars)
self.device_dirs = self._f.device_dirs
self.host_dirs = self._f.host_dirs
self._skia_dir = self.m.path['start_dir'].join('skia')
def step(self, name, cmd, **kwargs):
return self._f.step(name, cmd, **kwargs)
def device_path_join(self, *args):
return self._f.device_path_join(*args)
def copy_directory_contents_to_device(self, host_dir, device_dir):
return self._f.copy_directory_contents_to_device(host_dir, device_dir)
def copy_directory_contents_to_host(self, device_dir, host_dir):
return self._f.copy_directory_contents_to_host(device_dir, host_dir)
def copy_file_to_device(self, host_path, device_path):
return self._f.copy_file_to_device(host_path, device_path)
def create_clean_host_dir(self, path):
return self._f.create_clean_host_dir(path)
def create_clean_device_dir(self, path):
return self._f.create_clean_device_dir(path)
def read_file_on_device(self, path, **kwargs):
return self._f.read_file_on_device(path, **kwargs)
def remove_file_on_device(self, path):
return self._f.remove_file_on_device(path)
def install(self, skps=False, images=False, lotties=False, svgs=False,
resources=False):
self._f.install()
# TODO(borenet): Only copy files which have changed.
if resources:
self.copy_directory_contents_to_device(
self.m.path['start_dir'].join('skia', 'resources'),
self.device_dirs.resource_dir)
if skps:
self._copy_skps()
if images:
self._copy_images()
if lotties:
self._copy_lotties()
if svgs:
self._copy_svgs()
def cleanup_steps(self):
return self._f.cleanup_steps()
def _copy_dir(self, host_version, version_file, tmp_dir,
host_path, device_path):
actual_version_file = self.m.path.join(tmp_dir, version_file)
# Copy to device.
device_version_file = self.device_path_join(
self.device_dirs.tmp_dir, version_file)
if str(actual_version_file) != str(device_version_file):
device_version = self.read_file_on_device(device_version_file,
abort_on_failure=False,
fail_build_on_failure=False)
if not device_version:
device_version = VERSION_NONE
if device_version != host_version:
self.remove_file_on_device(device_version_file)
self.create_clean_device_dir(device_path)
self.copy_directory_contents_to_device(
host_path, device_path)
# Copy the new version file.
self.copy_file_to_device(actual_version_file, device_version_file)
def _copy_images(self):
"""Copy test images if needed."""
version = self.m.run.asset_version('skimage', self._skia_dir)
self.m.run.writefile(
self.m.path.join(self.m.vars.tmp_dir, VERSION_FILE_SK_IMAGE),
version)
self._copy_dir(
version,
VERSION_FILE_SK_IMAGE,
self.m.vars.tmp_dir,
self.host_dirs.images_dir,
self.device_dirs.images_dir)
return version
def _copy_lotties(self):
"""Copy test lotties if needed."""
version = self.m.run.asset_version('lottie-samples', self._skia_dir)
self.m.run.writefile(
self.m.path.join(self.m.vars.tmp_dir, VERSION_FILE_LOTTIE),
version)
self._copy_dir(
version,
VERSION_FILE_LOTTIE,
self.m.vars.tmp_dir,
self.host_dirs.lotties_dir,
self.device_dirs.lotties_dir)
return version
def _copy_skps(self):
"""Copy the SKPs if needed."""
version = self.m.run.asset_version('skp', self._skia_dir)
self.m.run.writefile(
self.m.path.join(self.m.vars.tmp_dir, VERSION_FILE_SKP),
version)
self._copy_dir(
version,
VERSION_FILE_SKP,
self.m.vars.tmp_dir,
self.host_dirs.skp_dir,
self.device_dirs.skp_dir)
return version
def _copy_svgs(self):
"""Copy the SVGs if needed."""
version = self.m.run.asset_version('svg', self._skia_dir)
self.m.run.writefile(
self.m.path.join(self.m.vars.tmp_dir, VERSION_FILE_SVG),
version)
self._copy_dir(
version,
VERSION_FILE_SVG,
self.m.vars.tmp_dir,
self.host_dirs.svg_dir,
self.device_dirs.svg_dir)
return version
|
{
"content_hash": "32934bbd93ba7dd1a68af777cf837e98",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 79,
"avg_line_length": 32.60679611650485,
"alnum_prop": 0.6543099598034837,
"repo_name": "rubenvb/skia",
"id": "4ba6ff7c8568effcbe4fcdc772e1af01c48c51ca",
"size": "6907",
"binary": false,
"copies": "1",
"ref": "refs/heads/m75",
"path": "infra/bots/recipe_modules/flavor/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1277297"
},
{
"name": "Batchfile",
"bytes": "23441"
},
{
"name": "C",
"bytes": "6644747"
},
{
"name": "C#",
"bytes": "4683"
},
{
"name": "C++",
"bytes": "29396937"
},
{
"name": "CMake",
"bytes": "47131"
},
{
"name": "CSS",
"bytes": "3078"
},
{
"name": "Cuda",
"bytes": "260404"
},
{
"name": "Dockerfile",
"bytes": "13955"
},
{
"name": "GLSL",
"bytes": "101705"
},
{
"name": "Go",
"bytes": "141452"
},
{
"name": "HTML",
"bytes": "1304215"
},
{
"name": "Java",
"bytes": "167581"
},
{
"name": "JavaScript",
"bytes": "359396"
},
{
"name": "Lex",
"bytes": "2521"
},
{
"name": "Lua",
"bytes": "70982"
},
{
"name": "Makefile",
"bytes": "17821"
},
{
"name": "Objective-C",
"bytes": "64844"
},
{
"name": "Objective-C++",
"bytes": "237053"
},
{
"name": "PHP",
"bytes": "128343"
},
{
"name": "Python",
"bytes": "965353"
},
{
"name": "Shell",
"bytes": "94903"
}
],
"symlink_target": ""
}
|
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
import webob
from werkzeug import wrappers
from blazar.api import context as api_context
from blazar import exceptions
from blazar import tests
class ContextTestCase(tests.TestCase):
def setUp(self):
super(ContextTestCase, self).setUp()
self.fake_headers = {'X-User-Id': uuidsentinel.user_id,
'X-Project-Id': uuidsentinel.project_id,
'X-Auth-Token': '111-111-111',
'X-User-Name': 'user_name',
'X-User-Domain-Name': 'user_domain_name',
'X-Project-Name': 'project_name',
'X-Project-Domain-Name': 'project_domain_name',
'X-Roles': 'user_name0, user_name1'}
self.catalog = jsonutils.dump_as_bytes({'nova': 'catalog'})
def test_ctx_from_headers_no_catalog(self):
self.assertRaises(
exceptions.ServiceCatalogNotFound,
api_context.ctx_from_headers,
self.fake_headers)
def test_ctx_from_headers_wrong_format(self):
catalog = ['etc']
self.fake_headers['X-Service-Catalog'] = catalog
self.assertRaises(
exceptions.WrongFormat,
api_context.ctx_from_headers,
self.fake_headers)
class ContextTestCaseV1(ContextTestCase):
def test_ctx_from_headers(self):
self.fake_headers['X-Service-Catalog'] = self.catalog
environ_base = {
'openstack.request_id': 'req-' + uuidsentinel.reqid,
'openstack.global_request_id': 'req-' + uuidsentinel.globalreqid}
req = wrappers.Request.from_values(
'/v1/leases',
headers=self.fake_headers,
environ_base=environ_base)
context = api_context.ctx_from_headers(req.headers)
expected = dict(
user_id=uuidsentinel.user_id,
roles=['user_name0',
'user_name1'],
project_name='project_name',
project_domain_name='project_domain_name',
auth_token='111-111-111',
service_catalog={'nova': 'catalog'},
project_id=uuidsentinel.project_id,
user_name='user_name',
user_domain_name='user_domain_name',
request_id='req-' + uuidsentinel.reqid,
global_request_id='req-' + uuidsentinel.globalreqid
)
for k, v in expected.items():
self.assertEqual(getattr(context, k, None), v)
class ContextTestCaseV2(ContextTestCase):
def test_ctx_from_headers(self):
self.fake_headers['X-Service-Catalog'] = self.catalog
req = webob.Request.blank('/v2/leases')
req.headers = self.fake_headers
context = api_context.ctx_from_headers(req.headers)
expected = dict(
user_id=uuidsentinel.user_id,
roles=['user_name0',
'user_name1'],
project_name='project_name',
project_domain_name='project_domain_name',
auth_token='111-111-111',
service_catalog={'nova': 'catalog'},
project_id=uuidsentinel.project_id,
user_name='user_name',
user_domain_name='user_domain_name'
)
for k, v in expected.items():
self.assertEqual(getattr(context, k, None), v)
|
{
"content_hash": "8d7f0bf22b8f884bf750086394e43973",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 77,
"avg_line_length": 37.01075268817204,
"alnum_prop": 0.5735037768739105,
"repo_name": "ChameleonCloud/blazar",
"id": "4206c6708bec62fc850833e13a6c957e417a535c",
"size": "4025",
"binary": false,
"copies": "2",
"ref": "refs/heads/chameleoncloud/xena",
"path": "blazar/tests/api/test_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1014"
},
{
"name": "Python",
"bytes": "1413248"
},
{
"name": "Shell",
"bytes": "11090"
}
],
"symlink_target": ""
}
|
import base64
import getopt
import httplib
import json
import re
import os
import os.path
import sys
import StringIO
import urlparse
import xml.dom.minidom
import zipfile
from ApigeePlatformTools import httptools, deploytools
def printUsage():
print 'Usage: deploynodeapp -n [name] -o [organization] -e [environment]'
print ' -d [directory name] -m [main script file]'
print ' -u [username] -p [password]'
print ' -b [base path] -l [apigee API url] -z [zip file] -i -h'
print ''
print '-o Apigee organization name'
print '-e Apigee environment name'
print '-n Apigee proxy name'
print '-d Apigee proxy directory'
print '-m Main script name: Should be at the top level of the directory'
print '-u Apigee user name'
print '-p Apigee password'
print '-b Base path (optional, defaults to /)'
print '-l Apigee API URL (optional, defaults to https://api.enterprise.apigee.com)'
print '-z ZIP file to save (optional for debugging)'
print '-i import only, do not deploy'
print '-x Virtual Host name (optional, defaults to "default")'
print '-h Print this message'
print ''
print 'Typically, the "default" virtual host listens on HTTP.'
print 'For an HTTPS-only app, use "-x secure".'
def run():
ApigeeURL = 'https://api.enterprise.apigee.com'
Username = None
Password = None
Directory = None
MainScript = None
Organization = None
Environment = None
Name = None
BasePath = '/'
ShouldDeploy = True
ZipFile = None
VirtualHost = 'default'
Options = 'o:e:x:n:d:m:u:p:b:l:z:ih'
opts = getopt.getopt(sys.argv[2:], Options)[0]
for o in opts:
if o[0] == '-o':
Organization = o[1]
elif o[0] == '-e':
      Environment = o[1]
elif o[0] == '-n':
Name = o[1]
elif o[0] == '-d':
      Directory = o[1]
elif o[0] == '-m':
MainScript = o[1]
elif o[0] == '-u':
Username = o[1]
elif o[0] == '-p':
Password = o[1]
elif o[0] == '-b':
BasePath = o[1]
elif o[0] == '-l':
ApigeeURL = o[1]
elif o[0] == '-z':
ZipFile = o[1]
elif o[0] == '-x':
VirtualHost = o[1]
elif o[0] == '-i':
ShouldDeploy = False
elif o[0] == '-h':
printUsage()
sys.exit(0)
BadUsage = False
  if Username is None:
    BadUsage = True
    print '-u is required'
  if Password is None:
    BadUsage = True
    print '-p is required'
  if Directory is None:
    BadUsage = True
    print '-d is required'
  if Environment is None:
    BadUsage = True
    print '-e is required'
  if Name is None:
    BadUsage = True
    print '-n is required'
  if Organization is None:
    BadUsage = True
    print '-o is required'
  if MainScript is None:
    BadUsage = True
    print '-m is required'
if BadUsage:
printUsage()
sys.exit(1)
httptools.setup(ApigeeURL, Username, Password)
def makeApplication():
return '<APIProxy name="%s"/>' % Name
def makeProxy():
return '<ProxyEndpoint name="default">\
<HTTPProxyConnection>\
<BasePath>%s</BasePath>\
<VirtualHost>%s</VirtualHost>\
</HTTPProxyConnection>\
<RouteRule name="default">\
<TargetEndpoint>default</TargetEndpoint>\
</RouteRule>\
</ProxyEndpoint>' % (BasePath, VirtualHost)
def makeTarget():
return '<TargetEndpoint name="default">\
<ScriptTarget>\
<ResourceURL>node://%s</ResourceURL>\
</ScriptTarget>\
</TargetEndpoint>' % MainScript
# Return TRUE if any component of the file path contains a directory name that
# starts with a "." like '.svn', but not '.' or '..'
def pathContainsDot(p):
    c = re.compile(r'\.\w+')
    for pc in p.split('/'):
      if c.match(pc) is not None:
return True
return False
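  # For illustration (not part of the original source):
  #   pathContainsDot('foo/.svn/bar') -> True
  #   pathContainsDot('foo/bar.js')   -> False  (only path components that
  #   start with '.' followed by word characters match)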
# ZIP a whole directory into a stream and return the result so that it
# can be nested into the top-level ZIP
def zipDirectory(dir, pfx):
ret = StringIO.StringIO()
tzip = zipfile.ZipFile(ret, 'w')
dirList = os.walk(dir)
for dirEntry in dirList:
for fileEntry in dirEntry[2]:
if not fileEntry.endswith('~'):
fn = os.path.join(dirEntry[0], fileEntry)
en = os.path.join(pfx, os.path.relpath(dirEntry[0], dir), fileEntry)
if (os.path.isfile(fn)):
tzip.write(fn, en)
tzip.close()
return ret.getvalue()
# Construct a ZIPped copy of the bundle in memory
tf = StringIO.StringIO()
zipout = zipfile.ZipFile(tf, 'w')
zipout.writestr('apiproxy/%s.xml' % Name, makeApplication())
zipout.writestr('apiproxy/proxies/default.xml', makeProxy())
zipout.writestr('apiproxy/targets/default.xml', makeTarget())
for topName in os.listdir(Directory):
if not pathContainsDot(topName):
fn = os.path.join(Directory, topName)
if (os.path.isdir(fn)):
contents = zipDirectory(fn, topName)
en = 'apiproxy/resources/node/%s.zip' % topName
zipout.writestr(en, contents)
else:
en = 'apiproxy/resources/node/%s' % topName
zipout.write(fn, en)
zipout.close()
  if ZipFile is not None:
tzf = open(ZipFile, 'w')
tzf.write(tf.getvalue())
tzf.close()
revision = deploytools.importBundle(Organization, Name, tf.getvalue())
if (revision < 0):
sys.exit(2)
print 'Imported new app revision %i' % revision
if ShouldDeploy:
status = deploytools.deployWithoutConflict(Organization, Environment, Name, '/', revision)
if status == False:
sys.exit(2)
response = httptools.httpCall('GET',
'/v1/o/%s/apis/%s/deployments' % (Organization, Name))
deps = deploytools.parseAppDeployments(Organization, response, Name)
deploytools.printDeployments(deps)
|
{
"content_hash": "89b0dce6e68920267a8eeea88a83c739",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 94,
"avg_line_length": 28.46766169154229,
"alnum_prop": 0.6186648025166026,
"repo_name": "r3mus/api-platform-tools",
"id": "ef73d715860d0ec3635bf536ed6739d93317d6e6",
"size": "5722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ApigeePlatformTools/deploynodeapp.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21860"
}
],
"symlink_target": ""
}
|
from nose.tools import eq_
from standup.tests import BaseTestCase
class LandingsViewsTestCase(BaseTestCase):
def test_help_view(self):
response = self.client.get('/help')
eq_(response.status_code, 200)
|
{
"content_hash": "4284668ff9f4aaa698d41efa73095386",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 43,
"avg_line_length": 28,
"alnum_prop": 0.7142857142857143,
"repo_name": "rehandalal/standup",
"id": "9ff44a95ed822ae3864341d5d884d98993ccc9e2",
"size": "224",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "standup/tests/test_landings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43292"
},
{
"name": "JavaScript",
"bytes": "2960"
},
{
"name": "Python",
"bytes": "140930"
},
{
"name": "Shell",
"bytes": "373"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta, abstractmethod
from datetime import datetime
from six import add_metaclass
from .base import LocatedDeployEntity
__author__ = 'y.gavenchuk'
@add_metaclass(ABCMeta)
class DataBase(LocatedDeployEntity):
DEFAULT_BACKUP_COUNT = 5
__slots__ = ('_host', '_user', '_port', '_db_name', '_password',
'_bkp_count', )
def __init__(self, path, name, user, password, host='localhost', port='',
backup_count=DEFAULT_BACKUP_COUNT):
super(DataBase, self).__init__(path)
self._host = host
self._user = user
self._db_name = name
self._port = port
self._password = password
self._bkp_count = int(backup_count)
assert self._bkp_count > 1, "Backups count can't be less than 2"
def _rotate_backups(self):
"""
Keep no more than `backup_count` dumps
"""
backups = self._api.run('ls "%s"' % self.path, quiet=True).split()
if not backups:
return
bkp_to_remove = sorted(backups)[:-self.backup_count]
if not bkp_to_remove:
return
cmd = 'rm -- %s' % ' '.join(map(lambda x: '"%s"' % x, bkp_to_remove))
with self._api.cd(self.path):
self._api.run(cmd)
@abstractmethod
def _do_backup(self):
pass
@property
def backup_count(self):
return self._bkp_count
def create_backup(self):
self._do_backup()
self._rotate_backups()
class Postgres(DataBase):
__slots__ = ()
def _do_backup(self):
now = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
dump_file = 'dump_sql_%s' % now
cmd_tpl = 'pg_dump --clean -h {host} -U {user} {dbname} ' \
'|gzip > {file}_db.sq.gz'
cmd = cmd_tpl.format(
host=self._host,
user=self._user,
dbname=self._db_name,
file=dump_file
)
with self._api.cd(self.path):
with self._cm.shell_env(PGPASSWORD=self._password):
self._api.run(cmd)
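# Illustrative sketch (not part of the original source): for a hypothetical
# Postgres(path='/backups', name='mydb', user='deploy', password='s3cret')
# instance, create_backup() runs roughly
#   pg_dump --clean -h localhost -U deploy mydb |gzip > dump_sql_<ts>_db.sq.gz
# inside /backups with PGPASSWORD set, then prunes all but the newest
# backup_count dumps.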
|
{
"content_hash": "c2992064c1d8558fe79e00317d1c0648",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 77,
"avg_line_length": 28.10810810810811,
"alnum_prop": 0.5413461538461538,
"repo_name": "ygavenchuk/web-deploy",
"id": "49ae1d9f7a46b9aff015be189cb901a4f84512a7",
"size": "2702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_deploy/db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47942"
}
],
"symlink_target": ""
}
|
from unittest import mock
from oslo_utils import timeutils
import pytz
from sqlalchemy.dialects.mysql import base as mysql_base
from sqlalchemy.dialects.sqlite import base as sqlite_base
from sqlalchemy import types
import testtools
from senlin.db.sqlalchemy import types as db_types
class DictTest(testtools.TestCase):
def setUp(self):
super(DictTest, self).setUp()
self.sqltype = db_types.Dict()
def test_load_dialect_impl(self):
dialect = mysql_base.MySQLDialect()
impl = self.sqltype.load_dialect_impl(dialect)
self.assertNotEqual(types.Text, type(impl))
dialect = sqlite_base.SQLiteDialect()
impl = self.sqltype.load_dialect_impl(dialect)
self.assertEqual(types.Text, type(impl))
def test_process_bind_param(self):
dialect = None
value = {'foo': 'bar'}
result = self.sqltype.process_bind_param(value, dialect)
self.assertEqual('{"foo": "bar"}', result)
def test_process_bind_param_null(self):
dialect = None
value = None
result = self.sqltype.process_bind_param(value, dialect)
self.assertEqual('null', result)
def test_process_result_value(self):
dialect = None
value = '{"foo": "bar"}'
result = self.sqltype.process_result_value(value, dialect)
self.assertEqual({'foo': 'bar'}, result)
def test_process_result_value_null(self):
dialect = None
value = None
result = self.sqltype.process_result_value(value, dialect)
self.assertIsNone(result)
class ListTest(testtools.TestCase):
def setUp(self):
super(ListTest, self).setUp()
self.sqltype = db_types.List()
def test_load_dialect_impl(self):
dialect = mysql_base.MySQLDialect()
impl = self.sqltype.load_dialect_impl(dialect)
self.assertNotEqual(types.Text, type(impl))
dialect = sqlite_base.SQLiteDialect()
impl = self.sqltype.load_dialect_impl(dialect)
self.assertEqual(types.Text, type(impl))
def test_process_bind_param(self):
dialect = None
value = ['foo', 'bar']
result = self.sqltype.process_bind_param(value, dialect)
self.assertEqual('["foo", "bar"]', result)
def test_process_bind_param_null(self):
dialect = None
value = None
result = self.sqltype.process_bind_param(value, dialect)
self.assertEqual('null', result)
def test_process_result_value(self):
dialect = None
value = '["foo", "bar"]'
result = self.sqltype.process_result_value(value, dialect)
self.assertEqual(['foo', 'bar'], result)
def test_process_result_value_null(self):
dialect = None
value = None
result = self.sqltype.process_result_value(value, dialect)
self.assertIsNone(result)
class TZAwareDateTimeTest(testtools.TestCase):
def setUp(self):
super(TZAwareDateTimeTest, self).setUp()
self.sqltype = db_types.TZAwareDateTime()
def test_process_bind_param(self):
dialect = mock.Mock()
dialect.name = 'nonmysql'
value = timeutils.utcnow(True)
result = self.sqltype.process_bind_param(value, dialect)
self.assertEqual(value, result)
def test_process_bind_param_mysql(self):
dialect = mock.Mock()
dialect.name = 'mysql'
value = timeutils.utcnow(True)
expected_value = timeutils.normalize_time(value)
result = self.sqltype.process_bind_param(value, dialect)
self.assertEqual(expected_value, result)
def test_process_bind_param_mysql_null(self):
dialect = mock.Mock()
dialect.name = 'mysql'
value = None
result = self.sqltype.process_bind_param(value, dialect)
self.assertIsNone(result)
def test_process_result_value(self):
dialect = None
value = timeutils.utcnow(False)
expected_value = value.replace(tzinfo=pytz.utc)
result = self.sqltype.process_result_value(value, dialect)
self.assertEqual(expected_value, result)
def test_process_result_value_null(self):
dialect = None
value = None
result = self.sqltype.process_result_value(value, dialect)
self.assertIsNone(result)
|
{
"content_hash": "1217e43805876e9712cdf8d522d9d8d6",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 66,
"avg_line_length": 33.06153846153846,
"alnum_prop": 0.6444858073522569,
"repo_name": "stackforge/senlin",
"id": "baac4653a98c29a67c4d4165f683bd0ad874c3ac",
"size": "4874",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "senlin/tests/unit/db/test_sqlalchemy_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2145946"
},
{
"name": "Shell",
"bytes": "18730"
}
],
"symlink_target": ""
}
|
"""
agentredrabbit - A general purpose transport agent between Redis and RabbitMQ
"""
__title__ = "agentredrabbit"
__version__ = "1.3"
__author__ = "Rohit Yadav"
__license__ = "MIT"
__copyright__ = "Copyright 2013 Rohit Yadav, Wingify"
|
{
"content_hash": "4be75e984254a7fe1966cc60c0109311",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 77,
"avg_line_length": 29.5,
"alnum_prop": 0.673728813559322,
"repo_name": "lucknerjb/agentredrabbit",
"id": "8471c92836513bc203201416ddb887880d821cdd",
"size": "260",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "agentredrabbit/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1031"
},
{
"name": "Python",
"bytes": "35987"
},
{
"name": "Shell",
"bytes": "1980"
}
],
"symlink_target": ""
}
|
import pickle
import os
import unittest
from ample.util import benchmark_util
@unittest.skip("not a regular test")
class Test(unittest.TestCase):
def test_benchmark(self):
pklfile = "/home/jmht/ample-dev1/examples/toxd-example/ROSETTA_MR_0/resultsd.pkl"
with open(pklfile) as f:
d = pickle.load(f)
bd = "/home/jmht/ample-dev1/python/foo"
if not os.path.isdir(bd):
os.mkdir(bd)
d['benchmark_dir'] = bd
benchmark_util.analyse(d)
|
{
"content_hash": "b175f73e9d5662d66d2731347552e157",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 89,
"avg_line_length": 29.705882352941178,
"alnum_prop": 0.6396039603960396,
"repo_name": "rigdenlab/ample",
"id": "609ec8a37a7ee853625a4ce09a73b1694fb14e6c",
"size": "505",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ample/util/tests/test_benchmark_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "45"
},
{
"name": "CMake",
"bytes": "426"
},
{
"name": "Fortran",
"bytes": "52396"
},
{
"name": "Python",
"bytes": "1088422"
},
{
"name": "Shell",
"bytes": "1022"
},
{
"name": "TeX",
"bytes": "10539"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) <2014> <Solkeera/mScotty>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import os,pycurl, json, zipfile, sys
from BeautifulSoup import BeautifulSoup as beatsop
from StringIO import StringIO
class Itasa:
ITASA={}
ITASA["HTTPS"]=True
ITASA["URL"]="//api.italiansubs.net/api/rest/"
ITASA["APIKEY"]=""
ITASA["JSON"]=True
ITASA["AGENT"]="curl (Python-Curl; Linux-x11)"
ITASA["AUTHCODE"]=""
ITASA["COOKIES"]=[]
ITASA["KEYS"]={'USER':"Itasa_Rest2_Server_Users",
'NEWS':"Itasa_Rest2_Server_News",
'SUBS':"Itasa_Rest2_Server_Subtitles"}
ITASA["USER"]=""
ITASA["PASS"]=""
ITASA['LOGIN']=0
ITASA['LAST_ACTION']=''
SUBTITLE={}
SUBTITLE['FILENAME']=''
SUBTITLE['VERSION']=''
SUBTITLE['NAME']=''
    ## used by __headerFunction; holds all of the headers returned
    ## by pycurl
HEADERS={}
    # curl resource
FP=""
def __init__(self):
URL=self.ITASA["URL"]
if self.ITASA["HTTPS"]==True: URL="https:"+URL
else: URL="http:"+URL
self.ITASA["URL"]=URL
def setHttps(self,boolean=True):
self.ITASA['HTTPS']=boolean
def setUrl(self,sUrl=''):
self.ITASA['URL']=sUrl
def setApiKey(self,sApikey=''):
self.ITASA['APIKEY']=sApikey
def setJson(self,boolean=True):
self.ITASA['JSON']=boolean
def setAgent(self,sAgent=''):
self.ITASA['AGENT']=sAgent
def setAuthCode(self,sAuthCode=''):
self.ITASA['AUTHCODE']=sAuthCode
def setUsername(self,sUsername=''):
self.ITASA['USER']=sUsername
def setPassword(self,sPassword=''):
self.ITASA['PASS']=sPassword
def setLogin(self,boolean=False):
self.ITASA['LOGIN']=boolean
def setLastAction(self,sAction):
        self.ITASA['LAST_ACTION']=sAction
def setSubInfo(self,dInfo):
sSubKey=self.ITASA['KEYS']['SUBS']
dInfo=dInfo[sSubKey]['single']['subtitle']
self.SUBTITLE['NAME']=dInfo['name']
self.SUBTITLE['FILENAME']=dInfo['filename']
self.SUBTITLE['VERSION']=dInfo['version']
def getUsername(self):
return self.ITASA['USER']
def getPassword(self):
return self.ITASA['PASS']
def getAuthCode(self):
return self.ITASA['AUTHCODE']
def getLogin(self):
return self.ITASA['LOGIN']
def getLastAction(self):
return self.ITASA['LAST_ACTION']
def getUserAgent(self):
return self.ITASA['AGENT']
def isLogged(self):
bLogin=self.getLogin()
if bLogin==True:
return True
return False
def __headerFunction(self,header_line):
header_line=header_line.decode('iso-8859-1')
if ':' not in header_line:
return
name, value=header_line.split(':',1)
name=name.strip()
value=value.strip()
name=name.lower()
self.HEADERS[self.__convertMe(name)]=self.__convertMe(value)
## curl write
def __write(self,sData,headers=''):
self.FP=pycurl.Curl()
self.FP.setopt(self.FP.USERAGENT,self.getUserAgent())
if sData=='':
return False
if "?" not in sData: sData=sData+"?"
else: sData=sData+"&"
sData+="apikey={sApikey}".format(sApikey=self.ITASA["APIKEY"])
if self.ITASA["JSON"]==True:
sData+="&format=json"
sData=self.ITASA["URL"]+sData
#self.ITASA['URL']
self.FP.setopt(self.FP.URL,sData)
if len(self.ITASA['COOKIES'])>0:
self.FP.setopt(pycurl.COOKIELIST, 'ALL')
self.FP.setopt(self.FP.COOKIE,'; '.join(x for x in self.ITASA['COOKIES']))
return self.__read()
## curl read
def __read(self):
oData = StringIO()
self.FP.setopt(self.FP.HEADERFUNCTION, self.__headerFunction)
self.FP.setopt(self.FP.SSL_VERIFYPEER, False)
self.FP.setopt(self.FP.WRITEFUNCTION,oData.write)
self.FP.setopt(self.FP.FOLLOWLOCATION,True)
self.FP.perform()
rVal=oData
if self.ITASA['LAST_ACTION']=='DOWNLOAD': self.ITASA['LAST_ACTION']=''
self.__close()
return rVal
    ## closes the curl handler
def __close(self):
self.FP.close()
def loginSite(self):
        ## the subs cannot be downloaded through the API alone,
        ## so to make things easier we rely on urllib2 and urllib
        ## note: the html parser imports BeautifulSoup
        ## !!! always log in via Itasa.login first, and then log in
        ## with Itasa.loginSite
import urllib2
import urllib
headers={}
headers['User-Agent']=self.ITASA['AGENT']
headers['Cookie']='; '.join(x for x in self.ITASA['COOKIES'])
url='http://www.italiansubs.net/'
req=urllib2.Request(url,headers=headers)
html_data=urllib2.urlopen(req)
        ## form dict with the data
dForm=self.__htmlParser(html_data.read().decode("utf-8"))
        ## the cookie is used again afterwards
self.ITASA['COOKIES'].append(html_data.info()['Set-Cookie'].split(';')[0])
headers['Cookie']='; '.join(x for x in self.ITASA['COOKIES'])
dForm['username']=self.getUsername()
dForm['passwd']=self.getPassword()
        ## site login using the old cookies
sData=urllib.urlencode(dForm)
request=urllib2.Request(url,sData,headers)
response=urllib2.urlopen(request)
if not response.info().has_key('Set-Cookie'):
return True
return False
def getNews(self,bSearch=False,nId=0,sQ='',nPage=0):
"""
        Returns a StringIO object or False
"""
bRet=False
sWrite=''
if bSearch==True and len(sQ)>0:
sWrite="news/search?q={sQuery}&page={nPage}".format(sQuery=sQ,nPage=nPage)
if not bSearch:
if nId==0:
sWrite="news"
else:
sWrite="news/{nId}".format(nId=nId)
bRet=self.__write(sWrite)
return bRet
def getShow(self,sAction='',dParams={}):
"""
        | Action               | URL                         | PARAMS                   |
        |----------------------+-----------------------------+--------------------------|
        | List of shows        | shows?...                   | none                     |
        |----------------------+-----------------------------+--------------------------|
        | Show details         | shows/{nId}?...             | nId -> id of the show    |
        |----------------------+-----------------------------+--------------------------|
        | Next episodes        | shows/nextepisodes?...      | none                     |
        |----------------------+-----------------------------+--------------------------|
        | Show folder image    | shows/{nId}/folderThumb?... | nId -> id of the show    |
        |----------------------+-----------------------------+--------------------------|
        | Search shows         | shows?search...             | q -> string to search    |
        |                      |                             | page -> page number,     |
        |                      |                             | optional                 |
        |----------------------+-----------------------------+--------------------------|
"""
lActions=['get','info','next_episode','image','search']
if sAction not in lActions:
return False
nId=0
if dParams.has_key("ID"):
nId=dParams["ID"]
if not dParams.has_key('PAGE'):
dParams['PAGE']=0
sWrite='shows'
if sAction=='info':
if nId > 0: sWrite+='/{nId}'.format(nId=nId)
elif sAction=='next_episode':
sWrite+='/nextepisodes'
elif sAction=='image':
if nId > 0: sWrite+='/{nId}/folderThumb'.format(nId=nId)
elif sAction=='search':
sWrite+='/search?q={sQuery}&page={nPage}'.format(sQuery=dParams['QUERY'],nPage=dParams['PAGE'])
bRet=self.__write(sWrite)
return bRet
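    ## Usage sketch (illustrative, not part of the original source; the id
    ## and query are hypothetical):
    ##   oItasa.getShow('info', {'ID': 24})
    ##   oItasa.getShow('search', {'QUERY': 'lost', 'PAGE': 0})
    ## both return the raw response as a StringIO object, or False for an
    ## unknown action.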
def getSubtitles(self,sAction='',dParams={}):
"""
        sAction string action to execute
        dParams dict of parameters to pass
        what it does: searches subtitles/details following the table below
        | Action           | URL                  | Params                                   |
        |------------------+----------------------+------------------------------------------|
        | Subs for a show  | subtitles?...        | show_id -> numeric id of the show        |
        |                  |                      | version -> string web-dl, 720p, optional |
        |                  |                      | page -> numeric page                     |
        |------------------+----------------------+------------------------------------------|
        | Sub details      | subtitles/{nId}?...  | nId -> numeric id of the show            |
        |------------------+----------------------+------------------------------------------|
        | Search subs      | subtitles/search?... | q -> show string to search for           |
        |                  |                      | show_id -> numeric id of the show        |
        |                  |                      | version -> string web-dl, 720p, optional |
        |                  |                      | page -> numeric page                     |
        |------------------+----------------------+------------------------------------------|
        returns boolean or dict data (see the itasa documentation)
"""
bRet=False
lActions=['get','info','search','download']
lVersions=["1080i","1080p","720p","bdrip","bluray","dvdrip","hdtv","hr","web-dl"]
if sAction not in lActions: return False
if dParams.has_key('VERSION'):
if dParams['VERSION'].lower() not in lVersions:
dParams['VERSION']="web-dl"
else:
dParams['VERSION']="web-dl"
nShowId=0
if dParams.has_key("ID"):
nShowId=dParams['ID']
sWrite='subtitles'
if sAction=='get':
sWrite+='?show_id={nShowId}'.format(nShowId=nShowId)
if dParams.has_key('VERSION'):
sWrite+="&version={sVersion}".format(sVersion=dParams['VERSION'])
if dParams.has_key('PAGE'):
sWrite+="&page={nPage}".format(nPage=dParams['PAGE'])
if sAction=='info':
sWrite+='/{nId}'.format(nId=nShowId)
if sAction=='search':
sWrite+="/search?q={sQuery}&show_id={nShowId}".format(sQuery=dParams['QUERY'],nShowId=nShowId)
if dParams.has_key('VERSION'):
sWrite+='&version={sVersion}'.format(sVersion=dParams['VERSION'])
if dParams.has_key('PAGE'):
sWrite+='&page={nPage}'.format(nPage=dParams['PAGE'])
if sAction=='download':
sWrite+=self.__download(nShowId)
bRet=self.__write(sWrite)
return bRet
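    ## Usage sketch (illustrative, not part of the original source):
    ##   oItasa.getSubtitles('search', {'QUERY': 'lost', 'ID': 24, 'PAGE': 0})
    ## returns the raw JSON response as a StringIO object; the 'download'
    ## action logs in first if needed and expects the subtitle id in
    ## dParams['ID'].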
def login(self,sUser='',sPass=''):
"""
        sUser string the user's username
        sPass string the user's password
        what it does: logs in to itasa and stores the authentication code in memory
        returns: boolean
"""
if len(sUser)==0:
sUser=self.getUsername().strip()
if len(sPass)==0:
sPass=self.getPassword().strip()
self.setUsername(sUser)
self.setPassword(sPass)
sWrite='users/login?username={username}&password={password}'.format(username=sUser,password=sPass)
        # False or a StringIO object
rVal=self.__write(sWrite)
self.ITASA["COOKIES"].append(self.HEADERS['set-cookie'].strip().split(';')[0])
if rVal:
dBuffer=json.loads(rVal.getvalue())
if dBuffer.has_key(self.ITASA['KEYS']['USER']):
self.setAuthCode(dBuffer[self.ITASA["KEYS"]['USER']]['login']['user']['authcode'])
self.setLogin(True)
else:
self.setLogin(False)
return self.isLogged()
    ## subtitle download
    ## the function performs the login if it has not been done yet
def __download(self,nIdSub):
if not self.isLogged():
bLogged=self.login()
dInfo=self.getSubtitles('info',{'ID':nIdSub})
dInfo=json.loads(dInfo.getvalue())
dInfo=self.__convertMe(dInfo)
self.setSubInfo(dInfo)
if self.isLogged():
if nIdSub > 0:
sWrite='/download?authcode={sAuthCode}&subtitle_id={nId}'.format(nId=nIdSub,sAuthCode=self.getAuthCode())
self.ITASA['LAST_ACTION']='DOWNLOAD'
return sWrite
return False
def __convertMe(self,data):
if isinstance(data,dict):
dictionary={}
for key,val in data.iteritems():
dictionary[self.__convertMe(key)]=self.__convertMe(val)
return dictionary
if isinstance(data,list):
            return [self.__convertMe(elem) for elem in data]
if isinstance(data,unicode):
return data.encode('utf-8')
else:
return data
def search(self,dSerie):
"""
        Search for a show
        returns the show id
"""
if len(dSerie)<1:
return False
dSearch={}
dNews=self.__convertMe(json.loads(self.getNews(bSearch=True,nId=0,sQ=dSerie['NOME'],nPage=0).getvalue()))
bKey=False
if dNews.has_key(self.ITASA['KEYS']['NEWS']):
dNews=dNews[self.ITASA['KEYS']['NEWS']]
if dNews.has_key('search'):
dNews=dNews['search']
if dNews.has_key('news'):
dNews=dNews['news']
bKey=True
if bKey:
dSearch=dNews
dNews=None
dSerie['ID']=self.__getValueFromNewsSearch('show_id',dSearch)
dSerie['CERCATO']=dSerie['NOME']
dSerie['NOME']=self.__getValueFromNewsSearch('show_name',dSearch)
dSerie['FOUND']=True
return dSerie
def getSubFromName(self,dSerie):
"""
"""
sNome=dSerie['NOME']+' '+dSerie['STAGIONE']+'x'+dSerie['EPISODIO']
dSerie['VQUALITA']=dSerie['QUALITA']
dSub=self.__getSubFromBuffer(self.__convertMe(json.loads(self.getSubtitles('get',{"ID":dSerie['ID'],"PAGE":0}).getvalue())))
if dSub:
nPage=0
bFound=False
nPages=int(dSub['pages'])
bEnd=False
while not bFound and not bEnd:
for key in dSub['subtitles']:
if dSub['subtitles'][key]['name']==sNome:
dSerie['QUALITA']=dSub['subtitles'][key]['version']
dSerie['SUBID']=dSub['subtitles'][key]['id']
dSerie['SUBFOUND']=True
bFound=True
if not bFound:
nPage=int(dSub['page'])+1
if nPage <= nPages:
dSub=self.__getSubFromBuffer(self.__convertMe(json.loads(self.getSubtitles('get',{"ID":dSerie['ID'],"PAGE":nPage}).getvalue())))
else:
bEnd=True
return dSerie
def __getSubFromBuffer(self,dSub):
bSubs=False
if dSub.has_key(self.ITASA['KEYS']['SUBS']):
dSub=dSub[self.ITASA['KEYS']['SUBS']]
if dSub.has_key('direct'):
dSub=dSub['direct']
else:
return False
return dSub
def __getValueFromNewsSearch(self,sValue,dSearch):
"""
        Takes the first show_id from the search results
"""
rVal=''
if isinstance(dSearch,dict):
if dSearch.has_key('key_0'):
rVal=dSearch['key_0'][sValue]
return rVal
#def __getShowInfo(self,dSearch):
# print json.loads(self.getShow('info',dSearch))
#def extractCookie(self):
#return self.HEADERS['set-cookie'].strip().split(';')[0]
    ## parse the minimal elements needed to perform a login
def __htmlParser(self,html_data):
html_proc = beatsop(html_data)
#form
txtinput = html_proc.findAll('form', {'name':'login'})
## look for hidden/submit input elements in the login form
listform = ["submit","hidden"]
rVal={}
for elem in txtinput[0].findAll('input',{'type':listform}):
rVal[self.__convertMe(elem['name'])]=self.__convertMe(elem['value'])
return rVal
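## Illustrative example (hypothetical field name): for a login form containing
## <input type="hidden" name="token" value="1"/> the parser returns {'token': '1'},
## plus one entry for every submit input in the form.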
def getFile(self,dSerie,data):
sPath=''
sZipPath='./'+dSerie['ORIGINAL']
sSubPath='./'+dSerie['ORIGINAL']+'.srt'
try:
fp=open(sZipPath,'wb')
fp.write(data)
fp.close()
except Exception, ex:
print 'Cannot Create File:', ex
try:
if os.path.isfile(sZipPath):
zf=zipfile.ZipFile(sZipPath,'r')
binData=zf.read(zf.namelist()[0])
os.remove(sZipPath)
fp=open(sSubPath,'wb')
if binData:
fp.write(binData)
else:
fp.write('FILE SUBS NOT FOUND')
fp.close()
binData=None
sPath=os.path.realpath(sSubPath)
except Exception, ex:
print 'Cannot find subs:', ex
return sPath
def GetName(sFileName=''):
import re
rVal=False
sName=sFileName
p=re.match(r"(?P<NOME>([a-zA-Z\.])+)(?P<STAGIONE>[Ss][0-9]{1,2})(?P<EPISODIO>[Ee][0-9]{1,2})\.(?P<QUALITA>720|1080)[p]",sName)
if p:
dApp={}
dApp['NOME']=re.sub(r"\.",' ',p.group('NOME'))[:-1]
dApp['STAGIONE']=str(int(p.group('STAGIONE')[1:])) # strip the leading 'S' and any leading zero
dApp['EPISODIO']=p.group('EPISODIO')[1:3]
dApp['QUALITA']=p.group('QUALITA')
dApp['SUBFOUND']=False
dApp['FOUND']=False
dApp['ORIGINAL']='.'.join(x for x in sFileName.split('.')[:-1])
rVal=dApp
return rVal
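## Illustrative example (file name invented for demonstration):
##   GetName('Some.Show.S01E05.720p.mkv')
##   -> {'NOME': 'Some Show', 'STAGIONE': '1', 'EPISODIO': '05',
##       'QUALITA': '720', 'SUBFOUND': False, 'FOUND': False,
##       'ORIGINAL': 'Some.Show.S01E05.720p'}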
if __name__=='__main__':
## USERNAME italiansubs.net
USERNAME='MyUser'
## PASSWORD italiansubs.net
PASSWORD='MyPass'
if len(sys.argv)>1:
try:
sName=u""+sys.argv[1]
dName=GetName(sName)
if not dName:
exit('Error Name')
except Exception:
exit('Name error')
else:
exit('Error: no name found')
oItasa=Itasa()
## returns the show id
dSearch=oItasa.search(dName)
dSub=oItasa.getSubFromName(dSearch)
data=''
if dSub['SUBFOUND']==True:
oItasa.login(USERNAME,PASSWORD)
oItasa.loginSite()
data=oItasa.getSubtitles('download',{'ID':dSub['SUBID']}).getvalue()
sPath=''
if data:
sPath=oItasa.getFile(dSearch,data)
print sPath
else:
print False
oItasa=None
|
{
"content_hash": "cbe04b8a4c229f0052bc7b3f8c310134",
"timestamp": "",
"source": "github",
"line_count": 592,
"max_line_length": 152,
"avg_line_length": 34.652027027027025,
"alnum_prop": 0.5075558155406065,
"repo_name": "mScotty/pyitasa",
"id": "c5dee81aad8fb2bd57f70f5f06816392246842a0",
"size": "20561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyitasa/Itasa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20561"
}
],
"symlink_target": ""
}
|
"""Remove year field from automobiles
Revision ID: 5ad1f9ba378e
Revises: 395cec770df2
Create Date: 2015-10-22 00:49:05.990505
"""
# revision identifiers, used by Alembic.
revision = '5ad1f9ba378e'
down_revision = '395cec770df2'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('automobiles', 'year')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('automobiles', sa.Column('year', sa.INTEGER(), autoincrement=False, nullable=False))
### end Alembic commands ###
|
{
"content_hash": "0d3b51ae6856cd179c07f4495c56141f",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 102,
"avg_line_length": 25.153846153846153,
"alnum_prop": 0.7018348623853211,
"repo_name": "richgieg/auto-store",
"id": "476395b8e1b1799d4145f72a2003efdb271677d6",
"size": "654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/5ad1f9ba378e_remove_year_field_from_automobiles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2692"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "10138"
}
],
"symlink_target": ""
}
|
"""A setuptools based setup module.
"""
from __future__ import print_function
import os
import fnmatch
import re
import sys
import subprocess
import yaml
# Always prefer setuptools over distutils
from setuptools import setup, Command
from setuptools_lint.setuptools_command import PylintCommand
from six import string_types
from six.moves import reload_module
from yamllint.config import YamlLintConfig
from yamllint.cli import Format
from yamllint import linter
def find_files(base_dir, exclude_dirs, include_dirs, file_regex):
''' find files matching file_regex '''
found = []
exclude_regex = ''
include_regex = ''
if exclude_dirs is not None:
exclude_regex = r'|'.join([fnmatch.translate(x) for x in exclude_dirs]) or r'$.'
if include_dirs is not None:
include_regex = r'|'.join([fnmatch.translate(x) for x in include_dirs]) or r'$.'
for root, dirs, files in os.walk(base_dir):
if exclude_dirs is not None:
# filter out excludes for dirs
dirs[:] = [d for d in dirs if not re.match(exclude_regex, d)]
if include_dirs is not None:
# filter for includes for dirs
dirs[:] = [d for d in dirs if re.match(include_regex, d)]
matches = [os.path.join(root, f) for f in files if re.search(file_regex, f) is not None]
found.extend(matches)
return found
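# Illustrative usage (directory names invented for demonstration): collect all
# Python files under the current tree, skipping .git and .tox directories:
#   find_files(os.getcwd(), ['.git', '.tox'], None, r'\.py$')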
class OpenShiftAnsibleYamlLint(Command):
''' Command to run yamllint '''
description = "Run yamllint tests"
user_options = [
('excludes=', 'e', 'directories to exclude'),
('config-file=', 'c', 'config file to use'),
('format=', 'f', 'format to use (standard, parsable)'),
]
def initialize_options(self):
''' initialize_options '''
# Reason: Defining these attributes as a part of initialize_options is
# consistent with upstream usage
# Status: permanently disabled
# pylint: disable=attribute-defined-outside-init
self.excludes = None
self.config_file = None
self.format = None
def finalize_options(self):
''' finalize_options '''
# Reason: These attributes are defined in initialize_options and this
# usage is consistent with upstream usage
# Status: permanently disabled
# pylint: disable=attribute-defined-outside-init
if isinstance(self.excludes, string_types):
self.excludes = self.excludes.split(',')
if self.format is None:
self.format = 'standard'
assert (self.format in ['standard', 'parsable']), (
'unknown format {0}.'.format(self.format))
if self.config_file is None:
self.config_file = '.yamllint'
assert os.path.isfile(self.config_file), (
'yamllint config file {0} does not exist.'.format(self.config_file))
def run(self):
''' run command '''
if self.excludes is not None:
print("Excludes:\n{0}".format(yaml.dump(self.excludes, default_flow_style=False)))
config = YamlLintConfig(file=self.config_file)
has_errors = False
has_warnings = False
if self.format == 'parsable':
format_method = Format.parsable
else:
format_method = Format.standard_color
for yaml_file in find_files(os.getcwd(), self.excludes, None, r'\.ya?ml$'):
first = True
with open(yaml_file, 'r') as contents:
for problem in linter.run(contents, config):
if first and self.format != 'parsable':
print('\n{0}:'.format(os.path.relpath(yaml_file)))
first = False
print(format_method(problem, yaml_file))
if problem.level == linter.PROBLEM_LEVELS[2]:
has_errors = True
elif problem.level == linter.PROBLEM_LEVELS[1]:
has_warnings = True
if has_errors or has_warnings:
print('yamllint issues found')
raise SystemExit(1)
class OpenShiftAnsiblePylint(PylintCommand):
''' Class to override the default behavior of PylintCommand '''
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def find_all_modules(self):
''' find all python files to test '''
exclude_dirs = ['.tox', 'utils', 'test', 'tests', 'git']
modules = []
for match in find_files(os.getcwd(), exclude_dirs, None, r'\.py$'):
package = os.path.basename(match).replace('.py', '')
modules.append(('openshift_ansible', package, match))
return modules
def get_finalized_command(self, cmd):
''' override get_finalized_command to ensure we use our
find_all_modules method '''
if cmd == 'build_py':
return self
# fall back to the default distutils lookup for every other command
return PylintCommand.get_finalized_command(self, cmd)
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def with_project_on_sys_path(self, func, func_args, func_kwargs):
''' override behavior, since we don't need to build '''
return func(*func_args, **func_kwargs)
class OpenShiftAnsibleGenerateValidation(Command):
''' Command to run generated module validation'''
description = "Run generated module validation"
user_options = []
def initialize_options(self):
''' initialize_options '''
pass
def finalize_options(self):
''' finalize_options '''
pass
# self isn't used but I believe is required when it is called.
# pylint: disable=no-self-use
def run(self):
''' run command '''
# find the files that call generate
generate_files = find_files('roles',
['inventory',
'test',
'playbooks',
'utils'],
None,
'generate.py$')
if len(generate_files) < 1:
print('Did not find any code generation. Please verify module code generation.') # noqa: E501
raise SystemExit(1)
errors = False
for gen in generate_files:
print('Checking generated module code: {0}'.format(gen))
try:
sys.path.insert(0, os.path.dirname(gen))
# we are importing dynamically. This isn't in
# the python path.
# pylint: disable=import-error
import generate
reload_module(generate)
generate.verify()
except generate.GenerateAnsibleException as gae:
print(gae.args)
errors = True
if errors:
print('Found errors while generating module code.')
raise SystemExit(1)
print('\nAll generate scripts passed.\n')
class OpenShiftAnsibleSyntaxCheck(Command):
''' Command to run Ansible syntax check'''
description = "Run Ansible syntax check"
user_options = []
# Colors
FAIL = '\033[91m' # Red
ENDC = '\033[0m' # Reset
def initialize_options(self):
''' initialize_options '''
pass
def finalize_options(self):
''' finalize_options '''
pass
def run(self):
''' run command '''
has_errors = False
for yaml_file in find_files(
os.path.join(os.getcwd(), 'playbooks', 'byo'),
None, None, r'\.ya?ml$'):
with open(yaml_file, 'r') as contents:
for line in contents:
# initialize_groups.yml is used to identify entry point playbooks
if re.search(r'initialize_groups\.yml', line):
print('-' * 60)
print('Syntax checking playbook: %s' % yaml_file)
try:
subprocess.check_output(
['ansible-playbook', '-i', 'localhost,',
'--syntax-check', yaml_file]
)
except subprocess.CalledProcessError as cpe:
print('{}Execution failed: {}{}'.format(
self.FAIL, cpe, self.ENDC))
has_errors = True
# Break for loop, no need to continue looping lines
break
if has_errors:
raise SystemExit(1)
class UnsupportedCommand(Command):
''' Basic Command to override unsupported commands '''
user_options = []
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def initialize_options(self):
''' initialize_options '''
pass
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def finalize_options(self):
''' initialize_options '''
pass
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def run(self):
''' run command '''
print("Unsupported command for openshift-ansible")
setup(
name='openshift-ansible',
license="Apache 2.0",
cmdclass={
'install': UnsupportedCommand,
'develop': UnsupportedCommand,
'build': UnsupportedCommand,
'build_py': UnsupportedCommand,
'build_ext': UnsupportedCommand,
'egg_info': UnsupportedCommand,
'sdist': UnsupportedCommand,
'lint': OpenShiftAnsiblePylint,
'yamllint': OpenShiftAnsibleYamlLint,
'generate_validation': OpenShiftAnsibleGenerateValidation,
'ansible_syntax': OpenShiftAnsibleSyntaxCheck,
},
packages=[],
)
|
{
"content_hash": "835560871b650c7cb7a5c7fbc62f31ca",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 107,
"avg_line_length": 34.81016949152542,
"alnum_prop": 0.574934268185802,
"repo_name": "git001/openshift-ansible",
"id": "c6a132ae20c11ba840985eb24e29ea2849a2a531",
"size": "10269",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "5005"
},
{
"name": "HTML",
"bytes": "14650"
},
{
"name": "Python",
"bytes": "3286422"
},
{
"name": "Roff",
"bytes": "5645"
},
{
"name": "Shell",
"bytes": "80999"
}
],
"symlink_target": ""
}
|
import os
from flask import Flask, request, Response
from flask import render_template, url_for, redirect, send_from_directory
from flask import send_file, make_response, abort
from angular_flask import app, api_manager, lm
# routing for API endpoints, generated from the models designated as API_MODELS
from angular_flask.models import *
for model_name in app.config['API_MODELS']:
model_class = app.config['API_MODELS'][model_name]
api_manager.create_api(model_class, methods=['GET', 'POST'])
session = api_manager.session
@lm.user_loader
def load_user(user_id):
return User.get(user_id)
# routing for basic pages (pass routing onto the Angular app)
@app.route('/')
@app.route('/about')
@app.route('/blog')
@app.route('/dashboard')
def basic_pages(**kwargs):
return make_response(open('angular_flask/templates/index.html').read())
# routing for CRUD-style endpoints
# passes routing onto the angular frontend if the requested resource exists
from sqlalchemy.sql import exists
crud_url_models = app.config['CRUD_URL_MODELS']
@app.route('/<model_name>/')
@app.route('/<model_name>/<item_id>')
def rest_pages(model_name, item_id=None):
if model_name in crud_url_models:
model_class = crud_url_models[model_name]
if item_id is None or session.query(exists().where(
model_class.id == item_id)).scalar():
return make_response(open(
'angular_flask/templates/index.html').read())
abort(404)
# special file handlers and error handlers
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'img/favicon.ico')
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
|
{
"content_hash": "2a68aa4da6ba58f37dcffee696c7261e",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 79,
"avg_line_length": 29.683333333333334,
"alnum_prop": 0.6911847276810781,
"repo_name": "ben-h-johnson/duelysttrack",
"id": "38c8a157c742e71df54ad895bbef95fa85ce46cc",
"size": "1781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angular_flask/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1570"
},
{
"name": "HTML",
"bytes": "11756"
},
{
"name": "Python",
"bytes": "6379"
}
],
"symlink_target": ""
}
|
import re
import config
from article_builder import ArticleBuilder
from keyword_extractor import KeywordExtractor
from product_searcher import ProductSearcher
from table_builder import TableBuilder
from wordpress_uploader import WordPressUploader
class Manager(object):
MIN_PRODUCTS = 5
def __init__(self):
self.product_searcher = ProductSearcher(config.CONFIG)
def _upload_article(self, keyword):
"""
Takes a keyword and uploads an article.
:param keyword:
"""
keyword, browse_node = re.findall(r'(.*)(?:\?bn=(\d+)?)', keyword)[0]
products = self.product_searcher.search(config.PRODUCT_GROUP, keyword, browse_node=browse_node)
if len(products) < self.MIN_PRODUCTS:
return
article_builder = ArticleBuilder(keyword, products)
title = article_builder.get_title()
table_builder = TableBuilder(title, config.URL, config.USER_NAME,
config.PASSWORD)
table_id = table_builder.build(products)
wordpress_uploader = WordPressUploader(title, config.URL,
config.USER_NAME, config.PASSWORD)
content = article_builder.build(table_id)
# Choose the size of the main image
main_image_url = products[0].get_img_url('LargeImage')
main_image_url = main_image_url if main_image_url != 'null' else products[0].get_img_url()
categories = products[0].get_categories()
wordpress_uploader.upload_article(content, main_image_url,
article_builder.get_tags(), categories)
def run(self):
"""
Loops through the keywords, and uploads an article for each.
"""
keywords = KeywordExtractor.extract(config.KEYWORDS_FILE_PATH)
for keyword in keywords:
self._upload_article(keyword)
if '__main__' == __name__:
manager = Manager()
manager.run()
|
{
"content_hash": "646f77e5d189ec1434b2d4e20c6dadbc",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 103,
"avg_line_length": 36.629629629629626,
"alnum_prop": 0.6228513650151668,
"repo_name": "netarachelhershko/amazon_aotumation",
"id": "e30589fb7059605ea57659b97775e7b041d72c9d",
"size": "1978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28648"
}
],
"symlink_target": ""
}
|
from functions import getSecret
def test_secrets():
access_token = getSecret('twitter-rob')
assert (len(access_token))
|
{
"content_hash": "bb1c425805303cab3b5819e67ef2725f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 43,
"avg_line_length": 19.571428571428573,
"alnum_prop": 0.6715328467153284,
"repo_name": "robertdenton/cascadiaquakes",
"id": "4c4aa03d7058d68cf558067e32158bebf3465e53",
"size": "201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/secrets_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6689"
}
],
"symlink_target": ""
}
|
import logging
import os
import re
import unicodedata
from collections import defaultdict
from .base import Word, Sentence, Corpus, CorpusDirectoryIterator
from ..utils.setup_logging import setup_logging
setup_logging()
logger = logging.getLogger(__name__)
def _get_word(line):
word_info = line.split()
is_main_lemma = len(word_info) == 5 and word_info[4] == u'lemma'
return Word(word_info[1], tag=word_info[3][:2], lemma=word_info[2], is_main_lemma=is_main_lemma)
def _filter_symbols(line):
word_info = line.split()
return len(word_info) > 3 and not word_info[3].startswith("F")
class SenSemCorpusDirectoryIterator(CorpusDirectoryIterator):
def __init__(self, corpus_dir, sense_filter=3):
super(SenSemCorpusDirectoryIterator, self).__init__(corpus_dir)
self._sense_filter = sense_filter
def __iter__(self):
for fname in sorted((fin for fin in os.listdir(self._corpus_dir) if fin != "lemmas")):
fpath = os.path.join(self._corpus_dir, fname)
lemma = self.lemmas[int(fname)]
logger.info(u"Getting corpus from lemma {}".format(lemma).encode("utf-8"))
yield SenSemCorpus(lemma, fpath, self._sense_filter)
class SenSemCorpus(Corpus):
def __init__(self, lemma, fpath, sense_filter=3):
assert isinstance(lemma, unicode)
super(SenSemCorpus, self).__init__(lemma)
self.senses = defaultdict(int)
logger.info(u"Reading sentences from file {}".format(fpath).encode("utf-8"))
with open(fpath, "r") as f:
raw_sentences = f.read().decode("utf-8")
raw_sentences = re.sub(r"\n\n\n+", "\n\n", raw_sentences.strip(), flags=re.UNICODE).split("\n\n")
logger.info(u"Parsing sentences from file {}".format(fpath).encode("utf-8"))
for sentence in raw_sentences:
sentence = unicodedata.normalize("NFC", sentence).split("\n")
sense_info = sentence.pop(0).split()
if len(sense_info) != 3:
logger.info(u"Ignoring sentence {} of lemma {} and sense {}"
.format(sense_info[0], self.lemma, sense_info[1]).encode("utf-8"))
continue
words = map(_get_word, filter(_filter_symbols, sentence))
try:
predicate_index = map(lambda w: w.is_main_lemma, words).index(True)
except ValueError:
logger.info(u"Ignoring sentence {} of lemma {} and sense {}"
.format(sense_info[0], self.lemma, sense_info[1]).encode("utf-8"))
continue
self._sentences.append(Sentence(words, predicate_index, sense_info[1]))
self.senses[sense_info[1]] += 1
if sense_filter > 1:
logger.info(u"Filtering senses with less than {} instances in file {}".format(
sense_filter, fpath).encode("utf-8")
)
self.senses = {sense: count for sense, count in self.senses.iteritems() if count >= sense_filter}
self._sentences = filter(lambda s: s.sense in self.senses, self._sentences)
logger.info(u"All sentences parsed in file {}".format(fpath).encode("utf-8"))
def has_multiple_senses(self):
return len(self.senses) > 1
def get_senses(self):
return ((sense, count) for sense, count in self.senses.iteritems())
|
{
"content_hash": "d7be68b8fcb8a850dbaed21b927791bd",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 109,
"avg_line_length": 36.344086021505376,
"alnum_prop": 0.6088757396449704,
"repo_name": "crscardellino/dnnwsd",
"id": "cb1432a0ecf4b7054c2d172933bc3baf196c7189",
"size": "3405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dnnwsd/corpus/sensem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "341700"
},
{
"name": "Perl",
"bytes": "5865"
},
{
"name": "Python",
"bytes": "161134"
},
{
"name": "R",
"bytes": "24361"
},
{
"name": "Shell",
"bytes": "2174"
}
],
"symlink_target": ""
}
|
"""Template config validator."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import (
DEVICE_CLASSES_SCHEMA as SENSOR_DEVICE_CLASSES_SCHEMA,
DOMAIN as SENSOR_DOMAIN,
)
from homeassistant.config import async_log_exception, config_without_domain
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_FRIENDLY_NAME,
CONF_FRIENDLY_NAME_TEMPLATE,
CONF_ICON,
CONF_ICON_TEMPLATE,
CONF_NAME,
CONF_SENSORS,
CONF_STATE,
CONF_UNIQUE_ID,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
)
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.trigger import async_validate_trigger_config
from .const import (
CONF_ATTRIBUTE_TEMPLATES,
CONF_ATTRIBUTES,
CONF_AVAILABILITY,
CONF_AVAILABILITY_TEMPLATE,
CONF_PICTURE,
CONF_TRIGGER,
DOMAIN,
)
from .sensor import SENSOR_SCHEMA as PLATFORM_SENSOR_SCHEMA
CONVERSION_PLATFORM = {
CONF_ICON_TEMPLATE: CONF_ICON,
CONF_ENTITY_PICTURE_TEMPLATE: CONF_PICTURE,
CONF_AVAILABILITY_TEMPLATE: CONF_AVAILABILITY,
CONF_ATTRIBUTE_TEMPLATES: CONF_ATTRIBUTES,
CONF_FRIENDLY_NAME_TEMPLATE: CONF_NAME,
CONF_FRIENDLY_NAME: CONF_NAME,
CONF_VALUE_TEMPLATE: CONF_STATE,
}
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.template,
vol.Required(CONF_STATE): cv.template,
vol.Optional(CONF_ICON): cv.template,
vol.Optional(CONF_PICTURE): cv.template,
vol.Optional(CONF_AVAILABILITY): cv.template,
vol.Optional(CONF_ATTRIBUTES): vol.Schema({cv.string: cv.template}),
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_DEVICE_CLASS): SENSOR_DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
)
TRIGGER_ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Required(CONF_TRIGGER): cv.TRIGGER_SCHEMA,
vol.Optional(SENSOR_DOMAIN): vol.All(cv.ensure_list, [SENSOR_SCHEMA]),
vol.Optional(CONF_SENSORS): cv.schema_with_slug_keys(PLATFORM_SENSOR_SCHEMA),
}
)
async def async_validate_config(hass, config):
"""Validate config."""
if DOMAIN not in config:
return config
trigger_entity_configs = []
for cfg in cv.ensure_list(config[DOMAIN]):
try:
cfg = TRIGGER_ENTITY_SCHEMA(cfg)
cfg[CONF_TRIGGER] = await async_validate_trigger_config(
hass, cfg[CONF_TRIGGER]
)
except vol.Invalid as err:
async_log_exception(err, DOMAIN, cfg, hass)
continue
if CONF_SENSORS not in cfg:
trigger_entity_configs.append(cfg)
continue
logging.getLogger(__name__).warning(
"The entity definition format under template: differs from the platform configuration format. See https://www.home-assistant.io/integrations/template#configuration-for-trigger-based-template-sensors"
)
sensor = list(cfg[SENSOR_DOMAIN]) if SENSOR_DOMAIN in cfg else []
for device_id, entity_cfg in cfg[CONF_SENSORS].items():
entity_cfg = {**entity_cfg}
for from_key, to_key in CONVERSION_PLATFORM.items():
if from_key not in entity_cfg or to_key in entity_cfg:
continue
val = entity_cfg.pop(from_key)
if isinstance(val, str):
val = template.Template(val)
entity_cfg[to_key] = val
if CONF_NAME not in entity_cfg:
entity_cfg[CONF_NAME] = template.Template(device_id)
sensor.append(entity_cfg)
cfg = {**cfg, "sensor": sensor}
trigger_entity_configs.append(cfg)
# Create a copy of the configuration with all config for current
# component removed and add validated config back in.
config = config_without_domain(config, DOMAIN)
config[DOMAIN] = trigger_entity_configs
return config
|
{
"content_hash": "bdfeabf2872e4c85e7d219dc9e2a810d",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 211,
"avg_line_length": 32.07936507936508,
"alnum_prop": 0.6556160316674914,
"repo_name": "adrienbrault/home-assistant",
"id": "edef5673f31c97a8380a2fcb7ed3537b40ae224c",
"size": "4042",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/template/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
from solution import Solution
inpt = ["aa", "aab"]
sol = Solution()
result = sol.longestCommonPrefix(inpt)
print(result)
|
{
"content_hash": "411133fe6582ab87bd30a36cb9b543e9",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 38,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.7295081967213115,
"repo_name": "zhlinh/leetcode",
"id": "02f3d91d04b3c51b13daaca256f4429db0386c1d",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0014.Longest Common Prefix/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "478111"
}
],
"symlink_target": ""
}
|
import praw
import time
import datetime
import sqlite3
'''USER CONFIGURATION'''
USERNAME = ""
#This is the bot's Username. In order to send mail, it must have some amount of Karma.
PASSWORD = ""
#This is the bot's Password.
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter Bot"
SUBREDDIT = "GoldTesting"
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
MAXPOSTS = 30
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 20
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
DELAY = 86400
#This is the time between Daily threads, IN SECONDS. 1h = 3600 || 12h = 43200 || 24h = 86400 || 144h = 518400
#Obviously daily means 24h, but it's always nice to have configurability
BODY = '[d]\n\nThis thread is for the days you draw a lucky number for which there is no matching OTN thread.\n\nPlease follow this commenting format^1\n\n>My lucky number: #####\n\n>Time until midnight\n\n>My OTN thread\n\n>The Prize I wish to win\n\nThis way other players can check if a traded pokemon has the number.\n\nMay the odds be ever in your favor.\n\n^1: ^The ^last ^five ^characters ^of ^your ^first ^line ^should ^be ^your ^OTN ^only. ^Do ^NOT ^bolden, ^italicize, ^or ^otherwise ^mark ^it. ^Do ^not ^add ^punctuation ^to ^it'
#This is the body of Dailythreads. It follows reddit's usual markdown syntax where \n\n starts a new line
NORESULTS = 'We have checked your lucky number against our database, but found no matches!'
#This is what the bot says to you if your comment in Daily doesn't match any databased OTNs
YESRESULTS = 'We have matched your lucky number against our database. Check it out!'
#This is what the bot says to you if your comment in Daily matches some results. The result chart comes immediately after this.
'''All done!'''
WAITS = str(WAIT)
try:
import bot #This is a file in my python library which contains my Bot's username and password. I can push code to Git without showing credentials
USERNAME = bot.getuG()
PASSWORD = bot.getpG()
USERAGENT = bot.getaG()
except ImportError:
pass
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS daily(id TEXT)')
print('Loaded previous Daily')
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(id TEXT, otn TEXT)')
print('Loaded OTN numbers')
cur.execute('CREATE TABLE IF NOT EXISTS oldcomments(id TEXT)')
print('Loaded old comments')
sql.commit()
r = praw.Reddit(USERAGENT)
Trying = True
while Trying:
try:
r.login(USERNAME, PASSWORD)
print('Successfully logged in')
Trying = False
except praw.errors.InvalidUserPass:
print('Wrong Username or Password')
quit()
except Exception as e:
print("%s" % e)
time.sleep(5)
def getTime(as_unix):
# the parameter is a flag; renamed so it does not shadow the builtin `bool`
timeNow = datetime.datetime.now(datetime.timezone.utc)
if not as_unix:
return timeNow
return timeNow.timestamp()
def generateReport(number):
print('Attempting to generate report for luckynumber ' + number)
cur.execute('SELECT * FROM oldposts')
f = cur.fetchall()
idlist = []
otnlist = []
results = []
for m in f:
idlist.append(m[0])
otnlist.append(m[1])
for m in range(len(otnlist)):
otn = otnlist[m]
mid = idlist[m]
if otn == number:
results.append(number + '|http://redd.it/' + mid)
elif otn[-4:] == number[-4:]:
results.append('*' + number[-4:] + '|http://redd.it/' + mid)
elif otn[-3:] == number[-3:]:
results.append('**' + number[-3:] + '|http://redd.it/' + mid)
elif otn[-2:] == number[-2:]:
results.append('***' + number[-2:] + '|http://redd.it/' + mid)
results = sorted(results)
if len(results) == 0:
print('\tNo results!')
s = NORESULTS
else:
print('\tSuccessfully generated report')
s = '\n'.join(results)
s = YESRESULTS + '\n\nMatch|Thread\n:-|:-\n' + s
return s
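# Illustrative report body (thread ids invented for demonstration):
#   We have matched your lucky number against our database. Check it out!
#
#   Match|Thread
#   :-|:-
#   12345|http://redd.it/abc123
#   *2345|http://redd.it/def456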
def scan():
print('Scanning ' + SUBREDDIT + ' for new OTN posts')
subreddit = r.get_subreddit(SUBREDDIT)
posts = subreddit.get_new(limit=MAXPOSTS)
for post in posts:
pid = post.id
if post.link_flair_text == 'OTN':
cur.execute('SELECT * FROM oldposts WHERE id=?', [pid])
if not cur.fetchone():
ptitle = post.title
try:
int(ptitle)
except ValueError:
ptitle = 'NULL'
print(pid + ': ' + ptitle)
cur.execute('INSERT INTO oldposts VALUES(?, ?)', (pid, ptitle))
sql.commit()
def daily():
print('Managing Dailypost')
nowtime = getTime(True)
nowdate = time.strftime("%d %B %Y")
print('It is ' + nowdate + ' || ' + str(nowtime))
cur.execute('SELECT * FROM daily')
f = cur.fetchone()
try:
previd = f[0]
prevpost = r.get_info(thing_id='t3_' + previd)
prevtime = prevpost.created_utc
prevtitle = prevpost.title
except TypeError:
print('Database does not have a Dailythread yet. Creating one')
previd = '0'
prevtime = 0
prevtitle = 'None'
print('Previous Daily: ' + previd + ' "' + prevtitle + '"')
difference = nowtime - prevtime
if difference >= DELAY or nowdate not in prevtitle:
print('Previous Dailypost is too old')
try:
prevpost.mark_as_nsfw()
except UnboundLocalError:
pass
title = 'Daily lucky number checking thread - ' + nowdate
newpost = r.submit(SUBREDDIT, title, text=BODY, captcha=None)
newid = newpost.id
print('Created new Dailypost with id ' + newid)
cur.execute('DELETE FROM daily WHERE id=?', [previd])
cur.execute('INSERT INTO daily VALUES(?)', [newid])
sql.commit()
else:
print('Scanning root comments')
comments = prevpost.comments
for comment in comments:
if comment.is_root:
cid = comment.id
cur.execute('SELECT * FROM oldcomments WHERE id=?', [cid])
if not cur.fetchone():
cbody = comment.body.lower()
cbodysplit = cbody.split('\n\n')
try:
int(cbodysplit[0][-5:])
number = cbodysplit[0][-5:] #Str
report = generateReport(number)
comment.reply(report)
except ValueError:
pass
cur.execute('INSERT INTO oldcomments VALUES(?)', [cid])
sql.commit()
while True:
try:
scan()
except Exception as e:
print('An error has occured during scan:', e)
print('')
try:
daily()
except Exception as e:
print('An error has occured during daily:', e)
print('Running again in ' + WAITS + ' seconds.\n')
sql.commit()
time.sleep(WAIT)
|
{
"content_hash": "2a52d516df241c8022e74cc40096252b",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 540,
"avg_line_length": 32.32663316582914,
"alnum_prop": 0.6817969842997047,
"repo_name": "tehp/reddit",
"id": "96a0d87f9cfbcd1cfbe005505739f0517d174a1b",
"size": "6450",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "LumioseLottery/lumiose.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1078988"
},
{
"name": "PostScript",
"bytes": "235598"
},
{
"name": "Python",
"bytes": "508351"
},
{
"name": "Shell",
"bytes": "289"
}
],
"symlink_target": ""
}
|
"""Eager-graph unified check numerics callback."""
import collections
import threading
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import op_callbacks_common
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import op_callbacks
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_debug_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# Many ops have benign NaN outputs, and running them with check_numerics
# on will create unwanted errors
# TODO(b/142497024): Replace this allowlist with function decorators in the ops
IGNORE_OP_OUTPUTS = (
# For FusedBatchNorm, if the input tensor is empty then batch_mean and
# batch_variance will be NaN. reserve_space holds intermediate values
# derived from batch_mean and batch_variance used for gradient calculation
(b"FusedBatchNorm", 1), # batch_mean
(b"FusedBatchNorm", 2), # batch_variance
(b"FusedBatchNorm", 3), # reserve_space_1
(b"FusedBatchNorm", 4), # reserve_space_2
# Same as above
(b"FusedBatchNormV2", 1), # batch_mean
(b"FusedBatchNormV2", 2), # batch_variance
(b"FusedBatchNormV2", 3), # reserve_space_1
(b"FusedBatchNormV2", 4), # reserve_space_2
# Same as above, but reserve_space_3 holds additional intermediate values
(b"FusedBatchNormV3", 1), # batch_mean
(b"FusedBatchNormV3", 2), # batch_variance
(b"FusedBatchNormV3", 3), # reserve_space_1
(b"FusedBatchNormV3", 4), # reserve_space_2
(b"FusedBatchNormV3", 5), # reserve_space_3
)
# Some frequently used ops are generally safe and we can skip them to reduce
# overhead. NOTE: This list is compiled by observing operations called by
# models in practice and is not a comprehensive list of safe operations.
SAFE_OPS = (
b"Concat",
b"ConcatV2",
b"ExpandDims",
b"Fill",
b"Gather",
b"Maximum",
b"Minimum",
b"Reshape",
b"Slice",
b"Squeeze",
b"Stack",
b"StridedSlice",
b"StridedSliceGrad",
b"TensorListConcatV2",
b"TensorListGather",
b"TensorListGetItem",
b"TensorListPopBack",
b"TensorListStack",
b"Transpose",
b"Unpack",
)
_state = threading.local()
_check_numerics_callback_create_counter = monitoring.Counter(
"/tensorflow/api/python/debugging/check_numerics_callback_create_counter",
"Counter for number of times the check_numerics op callback is created.")
def limit_string_length(string, max_len=50):
"""Limit the length of input string.
Args:
string: Input string.
max_len: (int or None) If int, the length limit. If None, no limit.
Returns:
Possibly length-limited string.
"""
if max_len is None or len(string) <= max_len:
return string
else:
return "..." + string[len(string) - max_len:]
# A dictionary that supports looking up the original input tensor names.
_CHECK_NUMERICS_INPUT_LOOKUP = collections.defaultdict(dict)
def _maybe_lookup_original_input_tensor(graph, tensor):
if (graph and
graph in _CHECK_NUMERICS_INPUT_LOOKUP and
tensor.name in _CHECK_NUMERICS_INPUT_LOOKUP[graph]):
return _CHECK_NUMERICS_INPUT_LOOKUP[graph][tensor.name]
else:
return tensor
def get_check_numerics_error_message(slot,
num_outputs,
op_type,
tensor,
inputs,
graph=None,
traceback=None,
stack_height_limit=30,
path_length_limit=50):
"""Create a meaningful and user-friendly error message about offending tensor.
The error message reveals the following info about the op that outputs
NaN/Infinity: dtype, shape (to the extent known at graph-construction time),
input tensors, stack trace for op creation (if is graph mode).
Args:
slot: (int) slot index of the tensor output.
num_outputs: (int) total number of outputs of the op.
op_type: (str) Type of the op that generates `tensor`.
tensor: (Tensor) the offending tensor, i.e., the tensor that contains
Infinities or NaNs.
inputs: (array of Tensor) inputs to the op that generates `tensor`.
graph: (tf.Graph) the graph object that `tensor` belongs to. Available only
under graph mode.
traceback: (list of trace frames) the stack trace of the op's creation.
Available only under graph mode.
stack_height_limit: (int or None) If int, limit to the height of the stack
trace printed in the error message. If None, no limit to the height.
path_length_limit: (int or None) Length limit for file paths included in the
formatted stack trace.
Returns:
(str) A formatted error message.
"""
eager_vs_graph_qualifier = "graph" if graph else "eagerly-executing"
message = "\n"
message += (
"\n!!! Detected Infinity or NaN in output %d of "
"%s op \"%s\" (# of outputs: %d) !!!\n" %
(slot, eager_vs_graph_qualifier, op_type, num_outputs))
message += " dtype: %s\n" % tensor.dtype
message += " shape: %s\n" % (tensor.shape,)
if not graph:
# This is an eager tensor. We can get its numpy value and count
# NaNs and Infs.
is_inf = np.isinf(tensor)
num_neg_inf = np.sum(np.logical_and(np.less(tensor, 0.), is_inf))
num_pos_inf = np.sum(np.logical_and(np.greater(tensor, 0.), is_inf))
num_nan = np.sum(np.isnan(tensor))
if num_neg_inf > 0:
message += " # of -Inf elements: %s\n" % num_neg_inf
if num_pos_inf > 0:
message += " # of +Inf elements: %s\n" % num_pos_inf
if num_nan:
message += " # of +NaN elements: %s\n" % num_nan
if len(inputs) > 1:
message += "\n Input tensors (%d):\n" % len(inputs)
for slot, input_tensor in enumerate(inputs):
message += " %d: %s\n" % (
slot, _maybe_lookup_original_input_tensor(graph, input_tensor))
elif len(inputs) == 1:
message += "\n Input tensor: %s\n" % (
_maybe_lookup_original_input_tensor(graph, inputs[0]))
if graph and hasattr(graph, "name") and graph.name:
message += " Graph name: \"%s\"\n" % graph.name
# Format the stack trace for the op's creation. We omit files that
# belong to tensorflow itself.
if graph and traceback:
message += (
"\n Stack trace of op's creation (\"->\": inferred user code):\n")
if stack_height_limit is not None and len(traceback) > stack_height_limit:
num_omitted_frames = len(traceback) - stack_height_limit
message += " + ... (Omitted %d frames)\n" % num_omitted_frames
for filepath, lineno, function_name, source_line in traceback[
-stack_height_limit:]:
user_code_indicator = " "
if not source_utils.guess_is_tensorflow_py_library(filepath):
user_code_indicator = " -> "
message += " + %s (L%d) %s\n" % (
limit_string_length(filepath, path_length_limit), lineno,
function_name)
if source_line is not None:
message += "%s| %s\n" % (user_code_indicator, source_line)
message += "\n"
return message
def _debug_summary(x):
return gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.REDUCE_INF_NAN_THREE_SLOTS))
class CheckNumericsCallback(object):
"""Wrapper for the numerics-checking callback for thread locality."""
def __init__(self, stack_height_limit, path_length_limit):
self._stack_height_limit = stack_height_limit
self._path_length_limit = path_length_limit
# A dict mapping Placeholder tensors to their instrumenting debug tensors.
# Used only under V1 graph mode, where we can't rely on auto control
# dependency to execute the debug tensors and hence need to attach the debug
# tensors as control dependencies of the ops that consume the Placeholder.
self._placeholder_to_debug_tensor = dict()
def callback(self,
op_type,
inputs,
attrs,
outputs,
op_name=None,
graph=None):
"""Eager-function unified callback for checking numerics."""
del attrs, op_name # Unused
op_type_bytes = compat.as_bytes(op_type)
is_v1_graph_mode = not ops.executing_eagerly_outside_functions()
if (op_type_bytes in op_callbacks_common.OP_CALLBACK_SKIP_OPS or
op_type_bytes in SAFE_OPS):
return None
if graph:
# Under graph mode. Insert check_numerics op.
instrumented_outputs = []
if is_v1_graph_mode:
for input_tensor in inputs:
if input_tensor in self._placeholder_to_debug_tensor and outputs:
outputs[0].op._add_control_input( # pylint: disable=protected-access
self._placeholder_to_debug_tensor[input_tensor].op)
for slot, output in enumerate(outputs):
if (output.dtype.is_floating and
(op_type_bytes, slot) not in IGNORE_OP_OUTPUTS):
checked_output = array_ops.check_numerics_v2(
# TF v2 has automatic control dependencies added to stateful async
# ops, which allows us to run check_numerics asynchronously.
# In the above case we use debug_summary to reduce all output
# tensors asynchronously from the op being checked and then
# process the tensor summary with check_numerics.
output if is_v1_graph_mode else _debug_summary(output),
get_check_numerics_error_message(
slot,
len(outputs),
op_type,
output,
inputs,
graph=graph,
traceback=output.op.traceback,
stack_height_limit=self._stack_height_limit,
path_length_limit=self._path_length_limit))
_CHECK_NUMERICS_INPUT_LOOKUP[graph][checked_output.name] = output
instrumented_outputs.append(self._get_output_tensor(
op_type_bytes, output, checked_output, is_v1_graph_mode))
else:
instrumented_outputs.append(output)
return instrumented_outputs
else:
if op_type_bytes == b"CheckNumericsV2":
# TODO(b/140334369): Remove this special casing logic once op_callback.
# automatically prevents infinite recursion in eager mode.
return None
# Under eager mode. Eagerly execute check_numerics op.
for slot, output in enumerate(outputs):
if (output.dtype.is_floating and
(op_type_bytes, slot) not in IGNORE_OP_OUTPUTS):
array_ops.check_numerics_v2(
output,
get_check_numerics_error_message(
slot, len(outputs), op_type, output, inputs,
stack_height_limit=self._stack_height_limit,
path_length_limit=self._path_length_limit))
def _get_output_tensor(self,
op_type,
tensor,
checked_tensor,
is_v1_graph_mode):
"""Determine what tensor to output from callback.
Args:
op_type: Type of the op that outputs the original symbolic tensor, as
`bytes`.
tensor: The original output symbolic tensor.
checked_tensor: The debugger-instrumented, numerics-checking tensor.
is_v1_graph_mode: Whether the debugged program is running under V1 graph
mode.
Returns:
A symbolic tensor to be returned by the dumping op_callback.
"""
if is_v1_graph_mode:
# Placeholders need special treatment under V1 graph mode. The
# callback can't simply override the Placeholder tensor to the debug
# tensor, as that would cause the Placeholder op to lack a value.
# The debug tensor is remembered and will be attached as control
# inputs to ops that consume the Placeholders later.
if op_type == b"Placeholder":
self._placeholder_to_debug_tensor[tensor] = checked_tensor
return tensor
else:
return checked_tensor
else:
# Under non-v1 graph mode, rely on auto control dependency to run the
# checked tensor.
return tensor
@tf_export("debugging.enable_check_numerics")
def enable_check_numerics(stack_height_limit=30,
path_length_limit=50):
r"""Enable tensor numerics checking in an eager/graph unified fashion.
The numerics checking mechanism will cause any TensorFlow eager execution or
graph execution to error out as soon as an op's output tensor contains
infinity or NaN.
This method is idempotent. Calling it multiple times has the same effect
as calling it once.
This method takes effect only on the thread in which it is called.
When an op's float-type output tensor contains any Infinity or NaN, a
`tf.errors.InvalidArgumentError` will be thrown, with an error message that
reveals the following information:
- The type of the op that generated the tensor with bad numerics.
- Data type (dtype) of the tensor.
- Shape of the tensor (to the extent known at the time of eager execution
or graph construction).
- Name of the containing graph (if available).
- (Graph mode only): The stack trace of the intra-graph op's creation,
with a stack-height limit and a path-length limit for visual clarity.
The stack frames that belong to the user's code (as opposed to
tensorflow's internal code) are highlighted with a text arrow ("->").
- (Eager mode only): How many of the offending tensor's elements are
`Infinity` and `NaN`, respectively.
Once enabled, the check-numerics mechanism can be disabled by using
`tf.debugging.disable_check_numerics()`.
Example usage:
1. Catching infinity during the execution of a `tf.function` graph:
```py
import tensorflow as tf
tf.debugging.enable_check_numerics()
@tf.function
def square_log_x_plus_1(x):
v = tf.math.log(x + 1)
return tf.math.square(v)
x = -1.0
# When the following line runs, a function graph will be compiled
# from the Python function `square_log_x_plus_1()`. Due to the
# `enable_check_numerics()` call above, the graph will contain
# numerics checking ops that will run during the function graph's
# execution. The function call generates an -infinity when the Log
# (logarithm) op operates on the output tensor of the Add op.
# The program errors out at this line, printing an error message.
y = square_log_x_plus_1(x)
z = -y
```
2. Catching NaN during eager execution:
```py
import numpy as np
import tensorflow as tf
tf.debugging.enable_check_numerics()
x = np.array([[0.0, -1.0], [4.0, 3.0]])
# The following line executes the Sqrt op eagerly. Due to the negative
# element in the input array, a NaN is generated. Due to the
# `enable_check_numerics()` call above, the program errors immediately
# at this line, printing an error message.
y = tf.math.sqrt(x)
z = tf.matmul(y, y)
```
NOTE: If your code is running on TPUs, be sure to call
`tf.config.set_soft_device_placement(True)` before calling
`tf.debugging.enable_check_numerics()` as this API uses automatic outside
compilation on TPUs. For example:
```py
tf.config.set_soft_device_placement(True)
tf.debugging.enable_check_numerics()
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
strategy = tf.distribute.TPUStrategy(resolver)
with strategy.scope():
# ...
```
Args:
stack_height_limit: Limit to the height of the printed stack trace.
Applicable only to ops in `tf.function`s (graphs).
path_length_limit: Limit to the file path included in the printed stack
trace. Applicable only to ops in `tf.function`s (graphs).
"""
if not hasattr(_state, "check_numerics_callback"):
_state.check_numerics_callback = CheckNumericsCallback(
stack_height_limit, path_length_limit)
op_callbacks.add_op_callback(_state.check_numerics_callback.callback)
logging.info(
"Enabled check-numerics callback in thread %s",
threading.current_thread().name)
_check_numerics_callback_create_counter.get_cell().increase_by(1)
@tf_export("debugging.disable_check_numerics")
def disable_check_numerics():
"""Disable the eager/graph unified numerics checking mechanism.
This method can be used after a call to `tf.debugging.enable_check_numerics()`
to disable the numerics-checking mechanism that catches infinity and NaN
values output by ops executed eagerly or in tf.function-compiled graphs.
This method is idempotent. Calling it multiple times has the same effect
as calling it once.
This method takes effect only on the thread in which it is called.
"""
if not hasattr(_state, "check_numerics_callback"):
return
try:
op_callbacks.remove_op_callback(_state.check_numerics_callback.callback)
delattr(_state, "check_numerics_callback")
logging.info(
"Disabled check-numerics callback in thread %s",
threading.current_thread().name)
except KeyError:
# Tolerate disabling the check numerics callback without
# enable_check_numerics() being called first.
pass
|
{
"content_hash": "fad177421d309cc505b432a8ebc18265",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 81,
"avg_line_length": 38.90286975717439,
"alnum_prop": 0.6572093287181524,
"repo_name": "tensorflow/tensorflow",
"id": "fe22b6b9f8f832c17deeb77981d33f4b04aeef87",
"size": "18312",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tensorflow/python/debug/lib/check_numerics_callback.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1400913"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "126099822"
},
{
"name": "CMake",
"bytes": "182430"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2129888"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11447433"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300213"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42782002"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621854"
},
{
"name": "Smarty",
"bytes": "89538"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7738020"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from .feeds import FoiRequestFeed, FoiRequestFeedAtom
urlpatterns = patterns("froide.foirequest.views",
url(r"^(?P<obj_id>\d+)$", 'shortlink', name="foirequest-notsolonglink"),
url(r"^(?P<obj_id>\d+)/auth/(?P<code>[0-9a-f]+)/$", 'auth', name="foirequest-longerauth"),
url(r"^(?P<slug>[-\w]+)/$", 'show', name="foirequest-show"),
url(r"^(?P<slug>[-\w]+)/suggest/public-body/$", 'suggest_public_body', name="foirequest-suggest_public_body"),
url(r"^(?P<slug>[-\w]+)/set/public-body/$", 'set_public_body', name="foirequest-set_public_body"),
url(r"^(?P<slug>[-\w]+)/set/status/$", 'set_status', name="foirequest-set_status"),
url(r"^(?P<slug>[-\w]+)/send/message/$", 'send_message', name="foirequest-send_message"),
url(r"^(?P<slug>[-\w]+)/escalation/message/$", 'escalation_message', name="foirequest-escalation_message"),
url(r"^(?P<slug>[-\w]+)/make/public/$", 'make_public', name="foirequest-make_public"),
url(r"^(?P<slug>[-\w]+)/set/law/$", 'set_law', name="foirequest-set_law"),
url(r"^(?P<slug>[-\w]+)/set/tags/$", 'set_tags', name="foirequest-set_tags"),
url(r"^(?P<slug>[-\w]+)/set/resolution/$", 'set_summary', name="foirequest-set_summary"),
url(r"^(?P<slug>[-\w]+)/add/postal-reply/$", 'add_postal_reply', name="foirequest-add_postal_reply"),
url(r"^(?P<slug>[-\w]+)/add/postal-reply/(?P<message_id>\d+)/$", 'add_postal_reply_attachment', name="foirequest-add_postal_reply_attachment"),
url(r"^(?P<slug>[-\w]+)/(?P<message_id>\d+)/set/public-body/$", 'set_message_sender', name="foirequest-set_message_sender"),
url(r"^(?P<slug>[-\w]+)/mark/not-foi/$", 'mark_not_foi', name="foirequest-mark_not_foi"),
url(r"^(?P<slug>[-\w]+)/mark/checked/$", 'mark_checked', name="foirequest-mark_checked"),
url(r"^(?P<slug>[-\w]+)/extend-deadline/$", 'extend_deadline', name="foirequest-extend_deadline"),
url(r"^(?P<slug>[-\w]+)/approve/(?P<attachment>\d+)/$", 'approve_attachment', name="foirequest-approve_attachment"),
url(r"^(?P<slug>[-\w]+)/approve/message/(?P<message>\d+)/$", 'approve_message', name="foirequest-approve_message"),
url(r"^(?P<slug>[-\w]+)/make-same/(?P<message_id>\d+)/$", 'make_same_request', name="foirequest-make_same_request"),
url(r"^(?P<slug>[-\w]+)/resend/$", 'resend_message', name="foirequest-resend_message"),
url(r"^(?P<slug>[-\w]+)/download/$", 'download_foirequest', name="foirequest-download"),
# Redaction
url(r"^(?P<slug>[-\w]+)/redact/(?P<attachment_id>\d+)/$", 'redact_attachment', name="foirequest-redact_attachment"),
)
# Feed
urlpatterns += patterns("",
url(r"^(?P<slug>[-\w]+)/feed/$", FoiRequestFeedAtom(), name="foirequest-feed_atom"),
url(r"^(?P<slug>[-\w]+)/rss/$", FoiRequestFeed(), name="foirequest-feed")
)
|
{
"content_hash": "59cd7ee1bdc97a8fa4c931f55ec5dc2c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 147,
"avg_line_length": 73.94736842105263,
"alnum_prop": 0.6185053380782918,
"repo_name": "catcosmo/froide",
"id": "bd6056c479279894e7813632c1f4b6674cbaf581",
"size": "2810",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "froide/foirequest/request_urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17829"
},
{
"name": "HTML",
"bytes": "162326"
},
{
"name": "Java",
"bytes": "1814475"
},
{
"name": "JavaScript",
"bytes": "52679"
},
{
"name": "Makefile",
"bytes": "329"
},
{
"name": "Python",
"bytes": "1614641"
},
{
"name": "Shell",
"bytes": "1621"
}
],
"symlink_target": ""
}
|
from graphql import build_schema, parse, validate
from ..fixtures import big_schema_sdl # noqa: F401
def test_validate_invalid_query(benchmark, big_schema_sdl): # noqa: F811
schema = build_schema(big_schema_sdl, assume_valid=True)
query_ast = parse(
"""
{
unknownField
... on unknownType {
anotherUnknownField
...unknownFragment
}
}
fragment TestFragment on anotherUnknownType {
yetAnotherUnknownField
}
"""
)
result = benchmark(lambda: validate(schema, query_ast))
assert result == [
{
"message": "Cannot query field 'unknownField' on type 'Query'.",
"locations": [(3, 11)],
},
{"message": "Unknown type 'unknownType'.", "locations": [(4, 18)]},
{"message": "Unknown fragment 'unknownFragment'.", "locations": [(6, 16)]},
{"message": "Unknown type 'anotherUnknownType'.", "locations": [(10, 34)]},
{"message": "Fragment 'TestFragment' is never used.", "locations": [(10, 9)]},
]
|
{
"content_hash": "1032b3774689d418e6db65df8e050412",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 86,
"avg_line_length": 33.15151515151515,
"alnum_prop": 0.5621572212065814,
"repo_name": "graphql-python/graphql-core",
"id": "dec9bbc6080f0d360e6e36ae016f3e33a7143e24",
"size": "1094",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/benchmarks/test_validate_invalid_gql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2235538"
}
],
"symlink_target": ""
}
|
"""
Russian Wikipedia
+++++++++++++++++
"""
from revscoring.features import wikitext
from revscoring.features.modifiers import max
from revscoring.languages import russian
from . import wikipedia
cn_templates = wikitext.revision.template_names_matching(
r"Нет[ _]АИ",
name="ruwiki.revision.cn_templates")
# Links
category_links = wikitext.revision.wikilink_titles_matching(
r"Category|Категория\:", name="revision.category_links")
image_links = wikitext.revision.wikilink_titles_matching(
r"File|Image|Файл\:", name="revision.image_links")
local_wiki = [
russian.stemmed.revision.stem_chars,
(russian.stemmed.revision.stem_chars /
max(wikitext.revision.content_chars, 1)),
image_links,
image_links / max(wikitext.revision.content_chars, 1),
category_links,
category_links / max(wikitext.revision.content_chars, 1),
russian.dictionary.revision.dict_words,
russian.dictionary.revision.dict_words / max(wikitext.revision.words, 1),
cn_templates,
cn_templates / max(wikitext.revision.content_chars, 1),
]
wp10 = local_wiki + wikipedia.article
|
{
"content_hash": "837bc7b8758797e751bc35a1d5c47ac2",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 77,
"avg_line_length": 30.72222222222222,
"alnum_prop": 0.7251356238698011,
"repo_name": "wiki-ai/wikiclass",
"id": "b10718752fd7902e44ef0ba2092a210ed5695226",
"size": "1124",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "articlequality/feature_lists/ruwiki.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "16134"
},
{
"name": "Python",
"bytes": "103576"
}
],
"symlink_target": ""
}
|
from random import Random
import scrapy
from scrapy.selector import Selector, HtmlXPathSelector
from selenium import webdriver
import time
from cwgooglelinkedin.items import GoogleLinkedIn
import urlparse
class GoogleLinkedInsBrowserSpider(scrapy.Spider):
name = "googlelinkedin_browser"
allowed_domains = ["google.com"]
start_urls = [
'http://www.google.com',
]
def __init__(self, name=None, **kwargs):
self.driver = webdriver.Firefox()
from cwgooglelinkedin.database_factory import DatabaseFactory, DatabaseTypes
self._cache_db = DatabaseFactory.get_database(DatabaseTypes.cache, kwargs['mongo_uri'])
self._history_db = DatabaseFactory.get_database(DatabaseTypes.history, kwargs['mongo_uri'])
from cwgooglelinkedin.parser.response_parser import ResponseParse
self._crawl_parser = ResponseParse()
super(GoogleLinkedInsBrowserSpider, self).__init__(name, **kwargs)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
return super(GoogleLinkedInsBrowserSpider, cls).from_crawler(crawler,
*args,
mongo_uri=crawler.settings.get('MONGODB_SERVER')
)
def spider_closed(self, spider):
self.driver.close()
def parse(self, response):
# def parsexxx(self, response):
hxs = HtmlXPathSelector(response)
links = hxs.select('//a[@class="card-click-target"]/@href').extract()
count = 0
for link in links:
appLink = urlparse.urljoin(response.url, link.strip())
count += 1
yield scrapy.Request(appLink, self.parse_detail)
|
{
"content_hash": "35b5a7556790c32219ad6fcc454d9f2c",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 105,
"avg_line_length": 33.77358490566038,
"alnum_prop": 0.6206703910614525,
"repo_name": "trujunzhang/djzhang-targets",
"id": "e2ac39640844bc61f0d168df1f49ceeec6f03799",
"size": "1814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cwgooglelinkedin/cwgooglelinkedin/spiders/googlelinkedin_browser_spider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7418804"
},
{
"name": "JavaScript",
"bytes": "936547"
},
{
"name": "PHP",
"bytes": "94539"
},
{
"name": "Python",
"bytes": "564898"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
}
|
from display.handlers.base import BaseHandler
class UiGeneralHandler(BaseHandler):
def get(self):
title = 'UiGeneralHandler'
self.render('ui/general.html', title=title, **self.render_dict)
|
{
"content_hash": "30a2ebffc100a01eabfc73a5cb627a84",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 73,
"avg_line_length": 35.166666666666664,
"alnum_prop": 0.7109004739336493,
"repo_name": "owlsn/h_crawl",
"id": "3edc0d6987e518f1cdd9b79e58b9b2a02daa190e",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "display/display/handlers/ui/general.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "111"
},
{
"name": "HTML",
"bytes": "275"
},
{
"name": "JavaScript",
"bytes": "3257"
},
{
"name": "Python",
"bytes": "6605"
}
],
"symlink_target": ""
}
|
match:
|
{
"content_hash": "7c0c08b1c677482a941b62c9220e12dd",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 6,
"avg_line_length": 7,
"alnum_prop": 0.7142857142857143,
"repo_name": "google/intellij-community",
"id": "fa72643e309c4eee43f220209f7f285859b6458d",
"size": "7",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "python/testData/psi/PatternMatchingRecoveryMatchWithColonParsedAsVariableTypeDeclaration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
__author__ = "Trevor Nielsen"
"""
Executes all of the macros in a given iterable at the same time. It waits for all of the specified
macros to finish if they aren't daemons.
"""
from grt.core import GRTMacro
class ConcurrentMacros(GRTMacro):
"""
Executes all macros concurrently.
"""
def __init__(self, macros, timeout=0, daemon=False):
super().__init__(timeout, daemon=daemon)
self.macros = macros
def initialize(self):
for m in self.macros:
m.reset()
print('starting concurrent macro')
m.run()
def perform(self):
if all((not m.running or m.daemon for m in self.macros)):
self.kill()
def die(self):
print('concurrent macro finished')
for m in self.macros:
m.kill()
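# A minimal usage sketch (hypothetical macro instances; assumes GRTMacro
# subclasses provide the reset()/run()/kill() interface used above):
#
#   group = ConcurrentMacros([drive_macro, shooter_macro], timeout=10)
#   group.run()  # perform() kills the group once all non-daemons finish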
|
{
"content_hash": "f29fc36eba11862eb1e8df8f82d36ea0",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 103,
"avg_line_length": 24.696969696969695,
"alnum_prop": 0.6,
"repo_name": "grt192/2012rebound-rumble",
"id": "546beed198ccf58ec750e4048a670b3d55b1e153",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/grt/macro/concurrent_macros.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "662654"
},
{
"name": "Shell",
"bytes": "1968"
}
],
"symlink_target": ""
}
|
import base64
import hashlib
import os
import random
import re
import time
import uuid
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from rhumba import RhumbaPlugin
from twisted.internet import defer, reactor
from twisted.enterprise import adbapi
from seed.xylem.pg_compat import psycopg2, errorcodes, DictCursor
class APIError(Exception):
"""
Custom exception to make API errors easier to work with.
"""
def __init__(self, err_msg):
super(APIError, self).__init__()
self.err_msg = err_msg
class Plugin(RhumbaPlugin):
# FIXME: Setup is asynchronous and there may be a race condition if we try
# to process a request before setup finishes.
def __init__(self, *args, **kw):
setup_db = kw.pop('setup_db', True)
super(Plugin, self).__init__(*args, **kw)
self.servers = self.config['servers']
# Details for Xylem's internal DB
self.db = self.config.get('db_name', 'xylem')
self.host = self.config.get('db_host', 'localhost')
self.port = self.config.get('db_port', 5432)
self.password = self.config.get('db_password', '')
self.username = self.config.get('db_username', 'postgres')
self.key = self.config['key']
if setup_db:
reactor.callWhenRunning(self._setup_db)
def _cipher(self, key_iv):
"""
Construct a Cipher object with suitable parameters.
The parameters used are compatible with the pycrypto code this
implementation replaced.
"""
key = hashlib.md5(self.key).hexdigest()
return Cipher(
algorithms.AES(key), modes.CFB8(key_iv), backend=default_backend())
def _encrypt(self, s):
key_iv = os.urandom(algorithms.AES.block_size / 8)
encryptor = self._cipher(key_iv).encryptor()
pwenc = encryptor.update(s) + encryptor.finalize()
return base64.b64encode(key_iv + pwenc)
def _decrypt(self, e):
block_size = algorithms.AES.block_size / 8
msg = base64.b64decode(e)
key_iv = msg[:block_size]
decryptor = self._cipher(key_iv).decryptor()
return decryptor.update(msg[block_size:]) + decryptor.finalize()
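# Round-trip sketch for the two helpers above (illustrative value only;
# assumes the Python 2 byte-string semantics used throughout this module):
#
#   token = plugin._encrypt('s3cret')  # base64(IV || AES-CFB8 ciphertext)
#   assert plugin._decrypt(token) == 's3cret'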
def _setup_db(self):
db_table = (
"CREATE TABLE databases (name varchar(66) UNIQUE, host"
" varchar(256), username varchar(256), password varchar(256));")
cur = self._get_xylem_db()
d = cur.runOperation(db_table)
ignore_pg_error(d, errorcodes.DUPLICATE_TABLE)
d.addBoth(cursor_closer(cur))
return d
def _create_password(self):
# Guaranteed random dice rolls
return base64.b64encode(
hashlib.sha1(uuid.uuid1().hex).hexdigest())[:24]
def _create_username(self, db):
return base64.b64encode("mydb" + str(
time.time()+random.random()*time.time())).strip('=').lower()
def _get_connection(self, db, host, port, user, password):
return adbapi.ConnectionPool(
'psycopg2',
database=db,
host=host,
port=port,
user=user,
password=password,
cp_min=1,
cp_max=2,
cp_openfun=self._fixdb,
cursor_factory=DictCursor)
def _get_xylem_db(self):
return self._get_connection(
db=self.db,
host=self.host,
port=self.port,
user=self.username,
password=self.password)
def _fixdb(self, conn):
conn.autocommit = True
def call_create_database(self, args):
cleanups = [] # Will be filled with callables to run afterwards
def cleanup_cb(r):
d = defer.succeed(None)
for f in reversed(cleanups):
d.addCallback(lambda _: f())
return d.addCallback(lambda _: r)
def api_error_eb(f):
f.trap(APIError)
return {"Err": f.value.err_msg}
d = self._call_create_database(args, cleanups.append)
d.addBoth(cleanup_cb)
d.addErrback(api_error_eb)
return d
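# Cleanups registered by _call_create_database (e.g. cursor closers) run
# in reverse registration order once the call settles, and cleanup_cb
# passes the original result through unchanged.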
def _build_db_response(self, row):
return {
"Err": None,
"name": row['name'],
"hostname": row['host'],
"user": row['username'],
"password": self._decrypt(row['password']),
}
@defer.inlineCallbacks
def _call_create_database(self, args, add_cleanup):
# TODO: Validate args properly.
name = args['name']
if not re.match(r'^\w+$', name):
raise APIError("Database name must be alphanumeric")
xylemdb = self._get_xylem_db()
add_cleanup(cursor_closer(xylemdb))
find_db = "SELECT name, host, username, password FROM databases"\
" WHERE name=%s"
rows = yield xylemdb.runQuery(find_db, (name,))
if rows:
defer.returnValue(self._build_db_response(rows[0]))
else:
server = random.choice(self.servers)
connect_addr = server.get('connect_addr', server['hostname'])
rdb = self._get_connection(
'postgres',
connect_addr,
int(server.get('port', 5432)),
server.get('username', 'postgres'),
server.get('password'))
add_cleanup(cursor_closer(rdb))
check = "SELECT * FROM pg_database WHERE datname=%s;"
r = yield rdb.runQuery(check, (name,))
if not r:
user = self._create_username(name)
password = self._create_password()
create_u = "CREATE USER %s WITH ENCRYPTED PASSWORD %%s;" % user
yield rdb.runOperation(create_u, (password,))
create_d = "CREATE DATABASE %s ENCODING 'UTF8' OWNER %s;" % (
name, user)
yield rdb.runOperation(create_d)
rows = yield xylemdb.runQuery(
("INSERT INTO databases (name, host, username, password)"
" VALUES (%s, %s, %s, %s) RETURNING *;"),
(name, server['hostname'], user, self._encrypt(password)))
defer.returnValue(self._build_db_response(rows[0]))
else:
raise APIError('Database exists but not known to xylem')
def ignore_pg_error(d, pgcode):
"""
Ignore a particular postgres error.
"""
def trap_err(f):
f.trap(psycopg2.ProgrammingError)
if f.value.pgcode != pgcode:
return f
return d.addErrback(trap_err)
def cursor_closer(cur):
"""
Construct a cursor closing function that can be used on its own or as a
passthrough callback.
"""
def close_cursor(r=None):
if cur.running:
cur.close()
return r
return close_cursor
|
{
"content_hash": "e85cec110a9392c979da72152d05a772",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 79,
"avg_line_length": 31.84862385321101,
"alnum_prop": 0.5719429641365404,
"repo_name": "praekeltfoundation/seed-xylem",
"id": "6998243b7c85b1716a2932a784201e6c15971994",
"size": "6943",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "seed/xylem/postgres.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33395"
},
{
"name": "Shell",
"bytes": "3679"
}
],
"symlink_target": ""
}
|
import sys
import unittest2
from stripe import StripeError
from stripe.test.helper import StripeUnitTestCase
class StripeErrorTests(StripeUnitTestCase):
def test_formatting(self):
err = StripeError(u'öre')
if sys.version_info > (3, 0):
assert str(err) == u'öre'
else:
assert str(err) == '\xc3\xb6re'
assert unicode(err) == u'öre'
def test_formatting_with_request_id(self):
err = StripeError(u'öre', headers={'request-id': '123'})
if sys.version_info > (3, 0):
assert str(err) == u'Request 123: öre'
else:
assert str(err) == 'Request 123: \xc3\xb6re'
assert unicode(err) == u'Request 123: öre'
def test_formatting_with_none(self):
err = StripeError(None, headers={'request-id': '123'})
if sys.version_info > (3, 0):
assert str(err) == u'Request 123: <empty message>'
else:
assert str(err) == 'Request 123: <empty message>'
assert unicode(err) == u'Request 123: <empty message>'
if __name__ == '__main__':
unittest2.main()
|
{
"content_hash": "e211f4f856ded01d2cfb5d141e71e522",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 66,
"avg_line_length": 31.25,
"alnum_prop": 0.5768888888888889,
"repo_name": "uploadcare/stripe-python",
"id": "24c86629f9801183f9e86358175ca72432bbdf17",
"size": "1155",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stripe/test/test_error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "192568"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.db import connection
_survey_sql = """
WITH most_recent_survey AS (
SELECT DISTINCT ON (survey.blockface_id) survey.*
FROM survey_survey AS survey
ORDER BY survey.blockface_id, survey.created_at DESC
)"""
def _get_count(sql, params=[]):
with connection.cursor() as cursor:
cursor.execute(sql, params)
row = cursor.fetchone()
return row[0]
def get_user_tree_count(user):
sql = _survey_sql + """
SELECT COUNT(*)
FROM survey_tree AS tree
JOIN most_recent_survey
ON tree.survey_id = most_recent_survey.id
WHERE most_recent_survey.user_id = %s"""
return _get_count(sql, [user.pk])
def get_group_tree_count(group):
sql = _survey_sql + """
SELECT COUNT(*)
FROM survey_tree AS tree
JOIN most_recent_survey
ON tree.survey_id = most_recent_survey.id
JOIN survey_territory AS turf
ON most_recent_survey.blockface_id = turf.blockface_id
WHERE turf.group_id = %s"""
return _get_count(sql, [group.pk])
def get_total_tree_count():
sql = _survey_sql + """
SELECT COUNT(*)
FROM survey_tree AS tree
JOIN most_recent_survey
ON tree.survey_id = most_recent_survey.id"""
return _get_count(sql)
def get_user_species_count(user):
sql = _survey_sql + """
SELECT COUNT(*) FROM (
SELECT DISTINCT ON (tree.species_id) tree.species_id
FROM survey_tree AS tree
JOIN most_recent_survey
ON tree.survey_id = most_recent_survey.id
WHERE most_recent_survey.user_id = %s
AND tree.species_id IS NOT NULL
) subquery"""
return _get_count(sql, [user.pk])
def get_total_species_count():
sql = _survey_sql + """
SELECT COUNT(*) FROM (
SELECT DISTINCT ON (tree.species_id) tree.species_id
FROM survey_tree AS tree
JOIN most_recent_survey
ON tree.survey_id = most_recent_survey.id
WHERE tree.species_id IS NOT NULL
) subquery"""
return _get_count(sql)
|
{
"content_hash": "7386f15d068d05e4079bdeb0f0574f46",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 64,
"avg_line_length": 27.26829268292683,
"alnum_prop": 0.6010733452593918,
"repo_name": "RickMohr/nyc-trees",
"id": "2c21b6c7c0a03b5007f545093e5b283234a6d159",
"size": "2260",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/nyc_trees/libs/sql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "171372"
},
{
"name": "CartoCSS",
"bytes": "878"
},
{
"name": "HTML",
"bytes": "157969"
},
{
"name": "JavaScript",
"bytes": "286316"
},
{
"name": "Makefile",
"bytes": "1524"
},
{
"name": "PLpgSQL",
"bytes": "3210"
},
{
"name": "Python",
"bytes": "404021"
},
{
"name": "Shell",
"bytes": "23399"
}
],
"symlink_target": ""
}
|
from ricky import __appname__, __version__
from setuptools import setup
long_description = ""
with open('requirements.txt') as f:
install_requires = [l for l in f.read().splitlines()
if not l.startswith('#')]
setup(
name=__appname__,
version=__version__,
scripts=[],
packages=[
'ricky',
],
install_requires=install_requires,
author="Paul Tagliamonte",
author_email="tag@pault.ag",
long_description=long_description,
description='tool for rebuilding packages using the Debile infrastructure',
license="Expat",
url="http://deb.io/",
platforms=['any'],
entry_points={
'console_scripts': [
'ricky-forge-changes = ricky.cli:forge_changes',
'ricky-upload = ricky.cli:upload_package',
],
}
)
|
{
"content_hash": "a65a28be664760ac3e9045d10ecec190",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 25.78125,
"alnum_prop": 0.5987878787878788,
"repo_name": "paultag/ricky",
"id": "4331a38a6c80f5233daac53ab4ed51001b84ec6c",
"size": "825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11729"
},
{
"name": "Shell",
"bytes": "1572"
}
],
"symlink_target": ""
}
|
from MafiaBot.MafiaRole import MafiaRole
from MafiaBot.MafiaAction import MafiaAction
from MafiaBot.Items.Probe import Probe
class Alien(MafiaRole):
def GetRolePM(self):
return 'You are an Alien. You may secretly probe other players during the night. Once you have probed all other remaining living players, you win the game. You may use !probes to see a list of probed players.'
@staticmethod
def GetRoleName():
return 'Alien'
@staticmethod
def GetRoleDescription():
return 'Aliens probe other players at night. Once they have probed all other living players, they win the game.'
def HandleCommand(self, command, param, mb, player):
if command == 'probes':
# get all other probed players
probed = [str(pl.name) for pl in mb.players.values() if (pl.IsProbed() and not pl.IsDead() and pl is not player)]
return 'The probed players are: '+', '.join(probed)
elif self.requiredaction:
if command == 'visit':
if not self.limiteduses == 0:
target = mb.GetPlayer(param)
if target is not None:
if not target.IsDead():
if target is player:
return 'You cannot give a probe to yourself!'
else:
mb.actionlist.append(MafiaAction(MafiaAction.SENDITEM, player, target, True, {'item': 'probe'}))
self.requiredaction = False
player.UpdateActions()
ret = 'You probe '+str(target)+' tonight.'
return ret
return 'Cannot find player '+param
return None
def BeginNightPhase(self, mb, player):
self.requiredaction = True
return 'Alien: You may probe another player tonight. Use !visit <player> to probe that player.'
def CheckSpecialWinCondition(self, mb):
# get a list of all alive players that are not probed
unprobed = [str(player.name) for player in mb.players.values() if (not player.IsProbed() and not player.IsDead())]
return not unprobed
def SpecialWin(self, winner, mb):
mb.Send(mb.mainchannel, 'The Alien wins this game! Congratulations to '+str(winner.name), max_messages=10)
mb.Send(mb.mainchannel, 'The roles this game were - '+mb.GetRoleList(), max_messages=10)
def StartGame(self, player, mafiabot):
# hand probe to self. Makes checking for victory a lot easier
player.items['probe1'] = Probe('probe1', 0)
|
{
"content_hash": "e366115a70489c46672a84469353fb5b",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 217,
"avg_line_length": 45.35,
"alnum_prop": 0.5927967658948916,
"repo_name": "LLCoolDave/MafiaBot",
"id": "87ae9ba494d8318328094631efd8fc849d1b96f3",
"size": "2721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MafiaBot/Roles/Alien.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "158720"
}
],
"symlink_target": ""
}
|
from helper import *
def doTest():
res = doCheck([' ', 'check', '-p', realpath('./_test.css')])
res = res.find('[ERROR] 1. should add @author in the head of')
equal(res, 0, 'check by cmd line is ok')
res = doFix([' ', 'fix', '-p', realpath('./_test.css')])
expect = '''.test {
width: 100px;
}'''
equal(res, expect, 'fix by cmd line is ok')
res = doCompress([' ', 'compress', '-p', realpath('./_test.css')])
expect = '''.test{width:100px}'''
equal(res, expect, 'compress by cmd line is ok')
res = doCompress([' ', 'compress', '-p', realpath('./_test_browsers.css')])
expect = '''.test{width:100px}.test[prop]{width:100px}'''
equal(res, expect, 'compress browsers by cmd line is ok')
res = doCompress([' ', 'compress', '-p', '--browsers=ie6', realpath('./_test_browsers.css')])
expect = '''.test{width:100px}'''
equal(res, expect, 'compress by cmd line ie6 is ok')
res = doCompress([' ', 'compress', '-p', '--browsers=ie7', realpath('./_test_browsers.css')])
expect = '''.test{width:100px}.test[prop]{width:100px}'''
equal(res, expect, 'compress by cmd line ie7 is ok')
|
{
"content_hash": "60e67d90f4422fe1ab0816b46d8afacc",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 97,
"avg_line_length": 39.3448275862069,
"alnum_prop": 0.5766871165644172,
"repo_name": "wangjeaf/CSSCheckStyle",
"id": "a6235fb5d449adbe95d73921d2cd16068eb06d18",
"size": "1141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/commandline/Try.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "373226"
},
{
"name": "Shell",
"bytes": "928"
},
{
"name": "VimL",
"bytes": "1871"
}
],
"symlink_target": ""
}
|
"""
This module creates a random graph of a specified size that has
characteristics of a simple social network. For the size given, that
many nodes will be created with random properties; a similar (slightly
smaller) number of relationships will also be created, the number
differing due to the use of CREATE UNIQUE and the chance of clashes.
Creation of nodes and relationships alternates.
The main aim of this module is to allow exploration of varying
transaction and process batch size to see its effect on overall load
time. Each run consists of a number of transactions of `tx_size`
statements, each of which will send a batch of statements to the
server for processing every `rq_size` statements. Therefore, for a
total size of 1_200_000 nodes, a transaction size of 20_000 and a
process interval of 1_000, 60 transactions will be committed, each
over 20 individual HTTP requests.
Once created, the data can be explored in the browser using a query
such as `MATCH (p:Person {user_id:1}) RETURN p`.
"""
from __future__ import division, print_function
import random
from time import time
from py2neo import Graph, GraphError
from py2neo.cypher import CreateNode
CONSONANTS = "bcdfghjklmnprstvwz"
VOWELS = "aeiou"
CREATE_UNIQUE_RELATIONSHIP = """\
MATCH (a:Person) WHERE a.user_id = {A}
MATCH (b:Person) WHERE b.user_id = {B}
CREATE UNIQUE (a)-[:FOLLOWS]->(b)
"""
def random_name_generator():
while True:
words = []
for n in range(2):
word = [random.choice(CONSONANTS).upper()]
for syllable in range(random.randint(1, 4)):
word.append(random.choice(VOWELS))
word.append(random.choice(CONSONANTS))
words.append("".join(word))
yield " ".join(words)
random_name = random_name_generator()
class CreatePerson(CreateNode):
def __init__(self, user_id):
CreateNode.__init__(self, "Person", user_id=user_id,
name=next(random_name), born=random.randint(1900, 1999))
class RandomGraphGenerator(object):
def __init__(self, graph):
self.graph = graph
try:
self.graph.schema.create_uniqueness_constraint("Person", "user_id")
except GraphError:
print("Finding highest user_id\r", end="", flush=True)
self.max_user_id = graph.cypher.execute_one("MATCH (p:Person) RETURN max(p.user_id)")
print("Highest user_id is %d" % self.max_user_id)
else:
self.max_user_id = 0
def create_nodes(self, count, process_every):
""" Create a number of nodes in a single Cypher transaction.
"""
tx = self.graph.cypher.begin()
for i in range(1, count + 1):
self.max_user_id += 1
tx.append(CreatePerson(self.max_user_id))
if i % process_every == 0:
if i < count:
tx.process()
print("Created %d nodes\r" % i, end="", flush=True)
tx.commit()
def create_unique_relationships(self, count, process_every):
""" Create a number of unique relationships in a single Cypher transaction.
"""
tx = self.graph.cypher.begin()
for i in range(1, count + 1):
start_user_id = random.randint(1, self.max_user_id)
end_user_id = start_user_id
while end_user_id == start_user_id:
end_user_id = random.randint(1, self.max_user_id)
parameters = {
"A": start_user_id,
"B": end_user_id,
}
tx.append(CREATE_UNIQUE_RELATIONSHIP, parameters)
if i % process_every == 0:
if i < count:
tx.process()
print("Created %d unique relationships\r" % i, end="", flush=True)
tx.commit()
def main():
total_size = 1200000 # divisible in lots of ways
tx_size = 6000 # commit frequency (balance between no of commits and commit size)
rq_size = 1000 # process frequency (balance between no of requests and request size)
graph = Graph()
print("Creating %d nodes and %d unique relationships in transactions of %d "
"and processing every %d" % (total_size, total_size, tx_size, rq_size))
generator = RandomGraphGenerator(graph)
t0 = time()
for i in range(total_size // tx_size):
# Create nodes
t1 = time()
generator.create_nodes(tx_size, rq_size)
t2 = time()
print("Created %d nodes in %f seconds" % (tx_size, t2 - t1))
# Create unique relationships
t3 = time()
generator.create_unique_relationships(tx_size, rq_size)
t4 = time()
print("Created %d unique relationships in %f seconds" % (tx_size, t4 - t3))
t5 = time()
print("Entire bulk import took %f seconds" % (t5 - t0))
if __name__ == "__main__":
main()
|
{
"content_hash": "441f7a239e53bdbc370a983fe4f1c500",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 97,
"avg_line_length": 35.955882352941174,
"alnum_prop": 0.6165644171779141,
"repo_name": "nicolewhite/py2neo",
"id": "e8d07c8a2d144cf732766c599c3aaa8d7b36a88f",
"size": "5522",
"binary": false,
"copies": "2",
"ref": "refs/heads/release/2.0.8",
"path": "examples/bulk_import.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3840"
},
{
"name": "Makefile",
"bytes": "6765"
},
{
"name": "Python",
"bytes": "879521"
},
{
"name": "Shell",
"bytes": "8124"
}
],
"symlink_target": ""
}
|
"""The arguments helper for a server configuration."""
from plaso.lib import errors
from plaso.cli.helpers import interface
class BaseServerConfigHelper(interface.ArgumentsHelper):
"""CLI arguments helper class for server configuration."""
NAME = u'server_config'
DESCRIPTION = u'Argument helper for a server configuration.'
_DEFAULT_SERVER = u'127.0.0.1'
_DEFAULT_PORT = 80
@classmethod
def AddArguments(cls, argument_group):
"""Add command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group: the argparse group (instance of argparse._ArgumentGroup
or argparse.ArgumentParser).
"""
argument_group.add_argument(
u'--server', dest=u'server', type=unicode, action=u'store',
default=None, metavar=u'HOSTNAME', help=(
u'The hostname or server IP address of the server.'))
argument_group.add_argument(
u'--port', dest=u'port', type=int, action=u'store', default=None,
metavar=u'PORT', help=u'The port number of the server.')
@classmethod
def ParseOptions(cls, options, output_module):
"""Parses and validates options.
Args:
options: the parser option object (instance of argparse.Namespace).
output_module: an output module (instance of OutputModule).
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
if not hasattr(output_module, u'SetServerInformation'):
raise errors.BadConfigObject(u'Unable to set server information.')
server = getattr(options, u'server', None)
if not server:
server = cls._DEFAULT_SERVER
port = getattr(options, u'port', None)
if port and not isinstance(port, (int, long)):
raise errors.BadConfigOption(u'Invalid port value: not an integer.')
if not port:
port = cls._DEFAULT_PORT
output_module.SetServerInformation(server, port)
|
{
"content_hash": "d1084b696f221b8d449a288ec85f2b12",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 80,
"avg_line_length": 35,
"alnum_prop": 0.6960187353629976,
"repo_name": "jorik041/plaso",
"id": "394df447dea81bdb971a7d9a39dd0e135079a563",
"size": "2159",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "plaso/cli/helpers/server_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1276"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Protocol Buffer",
"bytes": "13905"
},
{
"name": "Python",
"bytes": "3032632"
},
{
"name": "Shell",
"bytes": "45900"
}
],
"symlink_target": ""
}
|
import numpy as np
import SimpleITK as sitk
from ContinuousRegistration.Source.util import warp_image, warp_point_set, compose_displacement_fields
from ContinuousRegistration.Source.util import logging
def tre(superelastix, point_sets, deformation_field_file_names):
try:
point_set_fixed0_to_moving1 = warp_point_set(superelastix, point_sets[0], deformation_field_file_names[0])
point_set_fixed1_to_moving0 = warp_point_set(superelastix, point_sets[1], deformation_field_file_names[1])
except Exception:
logging.error('Failed to compute tre for image pair (%s, %s).' % deformation_field_file_names)
return (
{'1. TRE': np.NaN},
{'1. TRE': np.NaN}
)
return (
{'1. TRE': np.nanmean(np.sqrt(np.nansum((point_set_fixed0_to_moving1 - point_sets[1]) ** 2, -1)))},
{'1. TRE': np.nanmean(np.sqrt(np.nansum((point_set_fixed1_to_moving0 - point_sets[0]) ** 2, -1)))}
)
def hausdorff(superelastix, point_sets, deformation_field_file_names):
try:
point_set_fixed0_to_moving1 = warp_point_set(superelastix, point_sets[0], deformation_field_file_names[0])
point_set_fixed1_to_moving0 = warp_point_set(superelastix, point_sets[1], deformation_field_file_names[1])
except Exception:
logging.error('Failed to compute hausdorff for image pair (%s, %s).' % deformation_field_file_names)
return (
{'2. Hausdorff': np.NaN},
{'2. Hausdorff': np.NaN}
)
return (
{'2. Hausdorff': np.nanmax(np.sqrt(np.nansum((point_set_fixed0_to_moving1 - point_sets[1]) ** 2, -1)))},
{'2. Hausdorff': np.nanmax(np.sqrt(np.nansum((point_set_fixed1_to_moving0 - point_sets[0]) ** 2, -1)))}
)
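# tre() and hausdorff() reduce the same per-point Euclidean distances
# between warped and reference point sets: TRE takes the mean, while this
# Hausdorff variant takes the maximum over corresponding points.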
def inverse_consistency(superelastix, displacement_field_file_names, mask_file_names):
try:
composed_0 = sitk.GetArrayFromImage(
sitk.ReadImage(
compose_displacement_fields(
superelastix, displacement_field_file_names[0], displacement_field_file_names[1])))
composed_1 = sitk.GetArrayFromImage(
sitk.ReadImage(
compose_displacement_fields(
superelastix, displacement_field_file_names[1], displacement_field_file_names[0])))
except Exception:
return (
{'3. InverseConsistency': np.NaN},
{'3. InverseConsistency': np.NaN}
)
mask_0 = sitk.GetArrayFromImage(sitk.ReadImage(mask_file_names[0])) > 0
norm_0 = np.linalg.norm(composed_0[mask_0], axis=-1).flatten()
mask_1 = sitk.GetArrayFromImage(sitk.ReadImage(mask_file_names[1])) > 0
norm_1 = np.linalg.norm(composed_1[mask_1], axis=-1).flatten()
return (
{'3. InverseConsistency': float(np.nanmean(norm_0))},
{'3. InverseConsistency': float(np.nanmean(norm_1))}
)
def label_overlap(superelastix, label_file_names, deformation_field_file_names):
labelOverlapMeasurer = sitk.LabelOverlapMeasuresImageFilter()
labelOverlapMeasurer.SetGlobalDefaultCoordinateTolerance(1.0)
label_image_0_to_1_file_name = warp_image(superelastix, label_file_names[0], deformation_field_file_names[1], 'dsc_label_0_to_1')
try:
label_image_1 = sitk.ReadImage(label_file_names[1])
label_image_0_to_1 = sitk.Cast(sitk.ReadImage(label_image_0_to_1_file_name), label_image_1.GetPixelID())
labelOverlapMeasurer.Execute(label_image_1, label_image_0_to_1)
dsc_0 = labelOverlapMeasurer.GetDiceCoefficient()
jaccard_0 = labelOverlapMeasurer.GetJaccardCoefficient()
except Exception as e:
logging.error('Failed to compute DSC for %s: %s' % (label_file_names[0], e))
dsc_0 = jaccard_0 = np.NaN
label_image_1_to_0_file_name = warp_image(superelastix, label_file_names[1], deformation_field_file_names[0], 'dsc_label_1_to_0')
try:
label_image_0 = sitk.ReadImage(label_file_names[0])
label_image_1_to_0 = sitk.Cast(sitk.ReadImage(label_image_1_to_0_file_name), label_image_0.GetPixelID())
labelOverlapMeasurer.Execute(label_image_0, label_image_1_to_0)
dsc_1 = labelOverlapMeasurer.GetDiceCoefficient()
jaccard_1 = labelOverlapMeasurer.GetJaccardCoefficient()
except Exception as e:
logging.error('Failed to compute DSC for %s: %s' % (label_file_names[1], e))
dsc_1 = jaccard_1 = np.NaN
return (
{'1. Dice Similarity Coefficient': dsc_0, '2. Jaccard Coefficient': jaccard_0},
{'1. Dice Similarity Coefficient': dsc_1, '2. Jaccard Coefficient': jaccard_1}
)
|
{
"content_hash": "992499aaeeccefac8da5e273e914c10f",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 133,
"avg_line_length": 46.292929292929294,
"alnum_prop": 0.6543748636264456,
"repo_name": "SuperElastix/SuperElastix",
"id": "2c4526d247fb656975aa742575fa31ed9ec06f14",
"size": "4583",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "ContinuousRegistration/Source/metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4146"
},
{
"name": "C",
"bytes": "6741"
},
{
"name": "C++",
"bytes": "1497868"
},
{
"name": "CMake",
"bytes": "116969"
},
{
"name": "Python",
"bytes": "131224"
},
{
"name": "Shell",
"bytes": "5603"
}
],
"symlink_target": ""
}
|
"""Platform to retrieve Jewish calendar information for Home Assistant."""
from __future__ import annotations
from datetime import date as Date, datetime
import logging
from typing import Any
from hdate import HDate
from hdate.zmanim import Zmanim
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.const import DEVICE_CLASS_TIMESTAMP, SUN_EVENT_SUNSET
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.sun import get_astral_event_date
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType, StateType
import homeassistant.util.dt as dt_util
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
DATA_SENSORS = (
SensorEntityDescription(
key="date",
name="Date",
icon="mdi:star-david",
),
SensorEntityDescription(
key="weekly_portion",
name="Parshat Hashavua",
icon="mdi:book-open-variant",
),
SensorEntityDescription(
key="holiday",
name="Holiday",
icon="mdi:calendar-star",
),
SensorEntityDescription(
key="omer_count",
name="Day of the Omer",
icon="mdi:counter",
),
SensorEntityDescription(
key="daf_yomi",
name="Daf Yomi",
icon="mdi:book-open-variant",
),
)
TIME_SENSORS = (
SensorEntityDescription(
key="first_light",
name="Alot Hashachar",
icon="mdi:weather-sunset-up",
),
SensorEntityDescription(
key="talit",
name="Talit and Tefillin",
icon="mdi:calendar-clock",
),
SensorEntityDescription(
key="gra_end_shma",
name='Latest time for Shma Gr"a',
icon="mdi:calendar-clock",
),
SensorEntityDescription(
key="mga_end_shma",
name='Latest time for Shma MG"A',
icon="mdi:calendar-clock",
),
SensorEntityDescription(
key="gra_end_tfila",
name='Latest time for Tefilla Gr"a',
icon="mdi:calendar-clock",
),
SensorEntityDescription(
key="mga_end_tfila",
name='Latest time for Tefilla MG"A',
icon="mdi:calendar-clock",
),
SensorEntityDescription(
key="big_mincha",
name="Mincha Gedola",
icon="mdi:calendar-clock",
),
SensorEntityDescription(
key="small_mincha",
name="Mincha Ketana",
icon="mdi:calendar-clock",
),
SensorEntityDescription(
key="plag_mincha",
name="Plag Hamincha",
icon="mdi:weather-sunset-down",
),
SensorEntityDescription(
key="sunset",
name="Shkia",
icon="mdi:weather-sunset",
),
SensorEntityDescription(
key="first_stars",
name="T'set Hakochavim",
icon="mdi:weather-night",
),
SensorEntityDescription(
key="upcoming_shabbat_candle_lighting",
name="Upcoming Shabbat Candle Lighting",
icon="mdi:candle",
),
SensorEntityDescription(
key="upcoming_shabbat_havdalah",
name="Upcoming Shabbat Havdalah",
icon="mdi:weather-night",
),
SensorEntityDescription(
key="upcoming_candle_lighting",
name="Upcoming Candle Lighting",
icon="mdi:candle",
),
SensorEntityDescription(
key="upcoming_havdalah",
name="Upcoming Havdalah",
icon="mdi:weather-night",
),
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Jewish calendar sensor platform."""
if discovery_info is None:
return
sensors = [
JewishCalendarSensor(hass.data[DOMAIN], description)
for description in DATA_SENSORS
]
sensors.extend(
JewishCalendarTimeSensor(hass.data[DOMAIN], description)
for description in TIME_SENSORS
)
async_add_entities(sensors)
class JewishCalendarSensor(SensorEntity):
"""Representation of an Jewish calendar sensor."""
def __init__(
self,
data: dict[str, str | bool | int | float],
description: SensorEntityDescription,
) -> None:
"""Initialize the Jewish calendar sensor."""
self.entity_description = description
self._attr_name = f"{data['name']} {description.name}"
self._attr_unique_id = f"{data['prefix']}_{description.key}"
self._location = data["location"]
self._hebrew = data["language"] == "hebrew"
self._candle_lighting_offset = data["candle_lighting_offset"]
self._havdalah_offset = data["havdalah_offset"]
self._diaspora = data["diaspora"]
self._state: datetime | None = None
self._holiday_attrs: dict[str, str] = {}
@property
def native_value(self) -> datetime | StateType:
"""Return the state of the sensor."""
return self._state
async def async_update(self) -> None:
"""Update the state of the sensor."""
now = dt_util.now()
_LOGGER.debug("Now: %s Location: %r", now, self._location)
today = now.date()
event_date = get_astral_event_date(self.hass, SUN_EVENT_SUNSET, today)
if event_date is None:
_LOGGER.error("Can't get sunset event date for %s", today)
return
sunset = dt_util.as_local(event_date)
_LOGGER.debug("Now: %s Sunset: %s", now, sunset)
daytime_date = HDate(today, diaspora=self._diaspora, hebrew=self._hebrew)
# The Jewish day starts after darkness (called "tzais") and finishes at
# sunset ("shkia"). The time in between is a gray area (aka "Bein
# Hashmashot" - literally: "in between the sun and the moon").
# For some sensors, it is more interesting to consider the date to be
# tomorrow based on sunset ("shkia"), for others based on "tzais".
# Hence the following variables.
after_tzais_date = after_shkia_date = daytime_date
today_times = self.make_zmanim(today)
if now > sunset:
after_shkia_date = daytime_date.next_day
if today_times.havdalah and now > today_times.havdalah:
after_tzais_date = daytime_date.next_day
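# Illustrative example (hypothetical times): with sunset at 19:30 and
# havdalah at 20:10, a reading at 19:45 already uses tomorrow's date
# for after_shkia_date but still today's date for after_tzais_date.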
self._state = self.get_state(daytime_date, after_shkia_date, after_tzais_date)
_LOGGER.debug("New value for %s: %s", self.entity_description.key, self._state)
def make_zmanim(self, date: Date) -> Zmanim:
"""Create a Zmanim object."""
return Zmanim(
date=date,
location=self._location,
candle_lighting_offset=self._candle_lighting_offset,
havdalah_offset=self._havdalah_offset,
hebrew=self._hebrew,
)
@property
def extra_state_attributes(self) -> dict[str, str]:
"""Return the state attributes."""
if self.entity_description.key != "holiday":
return {}
return self._holiday_attrs
def get_state(
self, daytime_date: HDate, after_shkia_date: HDate, after_tzais_date: HDate
) -> Any | None:
"""For a given type of sensor, return the state."""
# Terminology note: by convention in py-libhdate library, "upcoming"
# refers to "current" or "upcoming" dates.
if self.entity_description.key == "date":
return after_shkia_date.hebrew_date
if self.entity_description.key == "weekly_portion":
# Compute the weekly portion based on the upcoming shabbat.
return after_tzais_date.upcoming_shabbat.parasha
if self.entity_description.key == "holiday":
self._holiday_attrs = {
"id": after_shkia_date.holiday_name,
"type": after_shkia_date.holiday_type.name,
"type_id": after_shkia_date.holiday_type.value,
}
return after_shkia_date.holiday_description
if self.entity_description.key == "omer_count":
return after_shkia_date.omer_day
if self.entity_description.key == "daf_yomi":
return daytime_date.daf_yomi
return None
class JewishCalendarTimeSensor(JewishCalendarSensor):
"""Implement attrbutes for sensors returning times."""
_attr_device_class = DEVICE_CLASS_TIMESTAMP
@property
def native_value(self) -> datetime | None:
"""Return the state of the sensor."""
if self._state is None:
return None
return dt_util.as_utc(self._state)
def get_state(
self, daytime_date: HDate, after_shkia_date: HDate, after_tzais_date: HDate
) -> Any | None:
"""For a given type of sensor, return the state."""
if self.entity_description.key == "upcoming_shabbat_candle_lighting":
times = self.make_zmanim(
after_tzais_date.upcoming_shabbat.previous_day.gdate
)
return times.candle_lighting
if self.entity_description.key == "upcoming_candle_lighting":
times = self.make_zmanim(
after_tzais_date.upcoming_shabbat_or_yom_tov.first_day.previous_day.gdate
)
return times.candle_lighting
if self.entity_description.key == "upcoming_shabbat_havdalah":
times = self.make_zmanim(after_tzais_date.upcoming_shabbat.gdate)
return times.havdalah
if self.entity_description.key == "upcoming_havdalah":
times = self.make_zmanim(
after_tzais_date.upcoming_shabbat_or_yom_tov.last_day.gdate
)
return times.havdalah
times = self.make_zmanim(dt_util.now()).zmanim
return times[self.entity_description.key]
|
{
"content_hash": "9b1e52685eaa4955b06a675558b490ed",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 89,
"avg_line_length": 33.42662116040955,
"alnum_prop": 0.6185419644680417,
"repo_name": "jawilson/home-assistant",
"id": "6db8003661428dbf466c4f8cf7adf663318580f5",
"size": "9794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/jewish_calendar/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
from ..ant import Ant
from ..world import World, Edge, Node
from ..solver import Solver
import unittest
from unittest import mock
class SolverTest(unittest.TestCase):
def setUp(self):
self.nodes = [Node(name='a'), Node(name='b')]
self.world = mock.Mock(World)
self.world.nodes = self.nodes[:]
def test_random_ants_when_even_and_ant_count_less_than_node_count(self):
freq = 2
node_count = len(self.nodes)
ant_count = node_count // freq
solver = Solver(self.world, ant_count=ant_count)
ants = solver.random_ants(even=True)
ants_on = dict.fromkeys(self.nodes, 0)
for a in ants:
self.assertFalse(a.start is None,
"At least one ant was not placed on a node to start"
)
ants_on[a.start] += 1
self.assertFalse(any(c > 1 for c in ants_on.values()),
"More than one ant was placed on the same node to start"
)
def test_random_ants_when_even_and_ant_count_exceeds_node_count(self):
freq = 2
node_count = len(self.nodes)
ant_count = freq * node_count
solver = Solver(self.world, ant_count=ant_count)
ants = solver.random_ants(even=True)
ants_on = dict.fromkeys(self.nodes, 0)
for a in ants:
self.assertFalse(a.start is None,
"At least one ant was not placed on a node to start"
)
ants_on[a.start] += 1
self.assertTrue(all(c == freq for c in ants_on.values()),
"Ants were not placed on nodes to start evenly")
self.assertEqual(sum(ants_on.values()), ant_count,
"Not all ants were placed on a node to start")
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "589a7742df2ef2443e0f07d8ccdcce6b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 34.49090909090909,
"alnum_prop": 0.5482340537691092,
"repo_name": "fmcardoso/aco-scs",
"id": "99b178b351e909b203bb6523cdee376a921f0189",
"size": "1897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pantspath/test/test_solver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "199584"
}
],
"symlink_target": ""
}
|
"""Script to ensure a configuration file exists."""
import argparse
import logging
import os
from collections import OrderedDict
from glob import glob
from platform import system
from unittest.mock import patch
from typing import Dict, List, Sequence
import homeassistant.bootstrap as bootstrap
import homeassistant.config as config_util
import homeassistant.loader as loader
import homeassistant.util.yaml as yaml
from homeassistant.exceptions import HomeAssistantError
REQUIREMENTS = ('colorlog>2.1,<3',)
if system() == 'Windows': # Ensure colorama installed for colorlog on Windows
REQUIREMENTS += ('colorama<=1',)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=protected-access
MOCKS = {
'load': ("homeassistant.util.yaml.load_yaml", yaml.load_yaml),
'load*': ("homeassistant.config.load_yaml", yaml.load_yaml),
'get': ("homeassistant.loader.get_component", loader.get_component),
'secrets': ("homeassistant.util.yaml._secret_yaml", yaml._secret_yaml),
'except': ("homeassistant.bootstrap.async_log_exception",
bootstrap.async_log_exception),
'package_error': ("homeassistant.config._log_pkg_error",
config_util._log_pkg_error),
}
SILENCE = (
'homeassistant.bootstrap.clear_secret_cache',
'homeassistant.bootstrap.async_register_signal_handling',
'homeassistant.core._LOGGER.info',
'homeassistant.loader._LOGGER.info',
'homeassistant.bootstrap._LOGGER.info',
'homeassistant.bootstrap._LOGGER.warning',
'homeassistant.util.yaml._LOGGER.debug',
)
PATCHES = {}
C_HEAD = 'bold'
ERROR_STR = 'General Errors'
def color(the_color, *args, reset=None):
"""Color helper."""
from colorlog.escape_codes import escape_codes, parse_colors
try:
if len(args) == 0:
assert reset is None, "You cannot reset if nothing is being printed"
return parse_colors(the_color)
return parse_colors(the_color) + ' '.join(args) + \
escape_codes[reset or 'reset']
except KeyError as k:
raise ValueError("Invalid color {} in {}".format(str(k), the_color))
def run(script_args: List) -> int:
"""Handle ensure config commandline script."""
parser = argparse.ArgumentParser(
description=("Check Home Assistant configuration."))
parser.add_argument(
'--script', choices=['check_config'])
parser.add_argument(
'-c', '--config',
default=config_util.get_default_config_dir(),
help="Directory that contains the Home Assistant configuration")
parser.add_argument(
'-i', '--info',
default=None,
help="Show a portion of the config")
parser.add_argument(
'-f', '--files',
action='store_true',
help="Show used configuration files")
parser.add_argument(
'-s', '--secrets',
action='store_true',
help="Show secret information")
args = parser.parse_args()
config_dir = os.path.join(os.getcwd(), args.config)
config_path = os.path.join(config_dir, 'configuration.yaml')
if not os.path.isfile(config_path):
print('Config does not exist:', config_path)
return 1
print(color('bold', "Testing configuration at", config_dir))
domain_info = []
if args.info:
domain_info = args.info.split(',')
res = check(config_path)
if args.files:
print(color(C_HEAD, 'yaml files'), '(used /',
color('red', 'not used') + ')')
# Python 3.5 has recursive glob, but 3.4 does not, hence the two patterns
for yfn in sorted(glob(os.path.join(config_dir, '*.yaml')) +
glob(os.path.join(config_dir, '*/*.yaml'))):
the_color = '' if yfn in res['yaml_files'] else 'red'
print(color(the_color, '-', yfn))
if len(res['except']) > 0:
print(color('bold_white', 'Failed config'))
for domain, config in res['except'].items():
domain_info.append(domain)
print(' ', color('bold_red', domain + ':'),
color('red', '', reset='red'))
dump_dict(config, reset='red')
print(color('reset'))
if domain_info:
if 'all' in domain_info:
print(color('bold_white', 'Successful config (all)'))
for domain, config in res['components'].items():
print(' ', color(C_HEAD, domain + ':'))
dump_dict(config)
else:
print(color('bold_white', 'Successful config (partial)'))
for domain in domain_info:
if domain == ERROR_STR:
continue
print(' ', color(C_HEAD, domain + ':'))
dump_dict(res['components'].get(domain, None))
if args.secrets:
flatsecret = {}
for sfn, sdict in res['secret_cache'].items():
sss = []
for skey, sval in sdict.items():
if skey in flatsecret:
_LOGGER.error('Duplicated secrets in files %s and %s',
flatsecret[skey], sfn)
flatsecret[skey] = sfn
sss.append(color('green', skey) if skey in res['secrets']
else skey)
print(color(C_HEAD, 'Secrets from', sfn + ':'), ', '.join(sss))
print(color(C_HEAD, 'Used Secrets:'))
for skey, sval in res['secrets'].items():
print(' -', skey + ':', sval, color('cyan', '[from:', flatsecret
.get(skey, 'keyring') + ']'))
return len(res['except'])
def check(config_path):
"""Perform a check by mocking hass load functions."""
res = {
'yaml_files': OrderedDict(), # yaml_files loaded
'secrets': OrderedDict(), # secret cache and secrets loaded
'except': OrderedDict(), # exceptions raised (with config)
'components': OrderedDict(), # successful components
'secret_cache': OrderedDict(),
}
# pylint: disable=unused-variable
def mock_load(filename):
"""Mock hass.util.load_yaml to save config files."""
res['yaml_files'][filename] = True
return MOCKS['load'][1](filename)
# pylint: disable=unused-variable
def mock_get(comp_name):
"""Mock hass.loader.get_component to replace setup & setup_platform."""
def mock_setup(*args):
"""Mock setup, only record the component name & config."""
assert comp_name not in res['components'], \
"Components should contain a list of platforms"
res['components'][comp_name] = args[1].get(comp_name)
return True
module = MOCKS['get'][1](comp_name)
if module is None:
# Ensure list
res['except'][ERROR_STR] = res['except'].get(ERROR_STR, [])
res['except'][ERROR_STR].append('{} not found: {}'.format(
'Platform' if '.' in comp_name else 'Component', comp_name))
return None
# Test if platform/component and overwrite setup
if '.' in comp_name:
module.setup_platform = mock_setup
if hasattr(module, 'async_setup_platform'):
del module.async_setup_platform
else:
module.setup = mock_setup
if hasattr(module, 'async_setup'):
del module.async_setup
return module
# pylint: disable=unused-variable
def mock_secrets(ldr, node):
"""Mock _get_secrets."""
try:
val = MOCKS['secrets'][1](ldr, node)
except HomeAssistantError:
val = None
res['secrets'][node.value] = val
return val
def mock_except(ex, domain, config, # pylint: disable=unused-variable
hass=None):
"""Mock bootstrap.log_exception."""
MOCKS['except'][1](ex, domain, config, hass)
res['except'][domain] = config.get(domain, config)
def mock_package_error( # pylint: disable=unused-variable
package, component, config, message):
"""Mock config_util._log_pkg_error."""
MOCKS['package_error'][1](package, component, config, message)
pkg_key = 'homeassistant.packages.{}'.format(package)
res['except'][pkg_key] = config.get('homeassistant', {}) \
.get('packages', {}).get(package)
# Patches to skip functions
for sil in SILENCE:
PATCHES[sil] = patch(sil)
# Patches with local mock functions
for key, val in MOCKS.items():
# The * in the key is removed to find the mock_function (side_effect)
# This allows us to use one side_effect to patch multiple locations
mock_function = locals()['mock_' + key.replace('*', '')]
PATCHES[key] = patch(val[0], side_effect=mock_function)
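# e.g. both 'load' and 'load*' resolve to mock_load, so one side_effect
# patches homeassistant.util.yaml.load_yaml and
# homeassistant.config.load_yaml alike.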
# Start all patches
for pat in PATCHES.values():
pat.start()
# Ensure !secrets point to the patched function
yaml.yaml.SafeLoader.add_constructor('!secret', yaml._secret_yaml)
try:
with patch('homeassistant.util.logging.AsyncHandler._process'):
bootstrap.from_config_file(config_path, skip_pip=True)
res['secret_cache'] = dict(yaml.__SECRET_CACHE)
except Exception as err: # pylint: disable=broad-except
print(color('red', 'Fatal error while loading config:'), str(err))
res['except'].setdefault(ERROR_STR, []).append(err)
finally:
# Stop all patches
for pat in PATCHES.values():
pat.stop()
# Ensure !secrets point to the original function
yaml.yaml.SafeLoader.add_constructor('!secret', yaml._secret_yaml)
bootstrap.clear_secret_cache()
return res
def line_info(obj, **kwargs):
"""Display line config source."""
if hasattr(obj, '__config_file__'):
return color('cyan', "[source {}:{}]"
.format(obj.__config_file__, obj.__line__ or '?'),
**kwargs)
return '?'
def dump_dict(layer, indent_count=3, listi=False, **kwargs):
"""Display a dict.
A friendly version of print(yaml.yaml.dump(config)).
"""
def sort_dict_key(val):
"""Return the dict key for sorting."""
key = str.lower(val[0])
return '0' if key == 'platform' else key
indent_str = indent_count * ' '
if listi or isinstance(layer, list):
indent_str = indent_str[:-1] + '-'
if isinstance(layer, Dict):
for key, value in sorted(layer.items(), key=sort_dict_key):
if isinstance(value, dict) or isinstance(value, list):
print(indent_str, key + ':', line_info(value, **kwargs))
dump_dict(value, indent_count + 2)
else:
print(indent_str, key + ':', value)
indent_str = indent_count * ' '
if isinstance(layer, Sequence):
for i in layer:
if isinstance(i, dict):
dump_dict(i, indent_count + 2, True)
else:
print(' ', indent_str, i)
|
{
"content_hash": "70774c9ace2f79892056c3135519bb2c",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 79,
"avg_line_length": 37.02693602693603,
"alnum_prop": 0.5819769027916705,
"repo_name": "Zac-HD/home-assistant",
"id": "154754c667a7722a7174c02ed2a3f8e6c5eab401",
"size": "10997",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/scripts/check_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1550595"
},
{
"name": "Python",
"bytes": "5315115"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14220"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from djblets.util.fields import JSONField
class ChangeDescription(models.Model):
"""
The recorded set of changes, containing optional description text
and fields that have changed.
This is a general model that can be used in applications for recording
changes however they see fit. A helper function, 'record_field_change',
can be used to record information in a standard way for most value types,
but the 'fields_changed' dictionary can be manipulated however the caller
chooses.
A ChangeDescription is not bound to a particular model. It is up to models
to establish relationships with a ChangeDescription.
Each field in 'fields_changed' represents a changed field.
For string fields, the following fields will be available:
* 'old': The old value of the field
* 'new': The new value of the field
For list and set fields, the following fields will be available:
* 'removed': The fields that were removed, if any.
* 'added': The fields that were added, if any.
"""
timestamp = models.DateTimeField(_('timestamp'), default=timezone.now)
public = models.BooleanField(_("public"), default=False)
text = models.TextField(_("change text"), blank=True)
fields_changed = JSONField(_("fields changed"))
def record_field_change(self, field, old_value, new_value,
name_field=None):
"""
Records a field change.
This will encode field changes following the rules in the overlying
'ChangeDescription' documentation.
'name_field' can be specified for lists or other iterables. When
specified, each list item will be a tuple in the form of
(object_name, object_url, object_id). Otherwise, it will be a
tuple in the form of (item,).
It is generally expected that fields with lists of model objects will
have 'name_field' set, whereas lists of numbers or some other
value type will not. Specifying a 'name_field' for non-objects will
cause an AttributeError.
"""
def serialize_changed_obj_list(items, name_field):
if name_field:
return [(getattr(item, name_field),
item.get_absolute_url(),
item.id)
for item in list(items)]
else:
return [(item,) for item in list(items)]
if (type(old_value) != type(new_value) and
not (isinstance(old_value, basestring) and
isinstance(new_value, basestring))):
raise ValueError("%s (%s) and %s (%s) are of two different value "
"types." % (old_value, type(old_value),
new_value, type(new_value)))
if hasattr(old_value, "__iter__"):
old_set = set(old_value)
new_set = set(new_value)
self.fields_changed[field] = {
'old': serialize_changed_obj_list(old_value, name_field),
'new': serialize_changed_obj_list(new_value, name_field),
'added': serialize_changed_obj_list(new_set - old_set,
name_field),
'removed': serialize_changed_obj_list(old_set - new_set,
name_field),
}
else:
self.fields_changed[field] = {
'old': (old_value,),
'new': (new_value,),
}
def __unicode__(self):
return self.text
class Meta:
ordering = ['-timestamp']
get_latest_by = "timestamp"
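# Minimal usage sketch (hypothetical field name and values; not part of
# the original model):
#
#   desc = ChangeDescription(text='Updated the summary')
#   desc.record_field_change('summary', u'old text', u'new text')
#   desc.fields_changed['summary']
#   # -> {'old': (u'old text',), 'new': (u'new text',)}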
|
{
"content_hash": "e57f0c156e9a0862aed947208e7575e9",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 78,
"avg_line_length": 40.23157894736842,
"alnum_prop": 0.587388801674516,
"repo_name": "atagar/ReviewBoard",
"id": "abb1074d104506a883e1f0a255e2a231841038f4",
"size": "3822",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "reviewboard/changedescs/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "685"
},
{
"name": "C#",
"bytes": "340"
},
{
"name": "Java",
"bytes": "340"
},
{
"name": "JavaScript",
"bytes": "313642"
},
{
"name": "Objective-C",
"bytes": "288"
},
{
"name": "PHP",
"bytes": "225"
},
{
"name": "Perl",
"bytes": "103"
},
{
"name": "Python",
"bytes": "1736555"
},
{
"name": "Ruby",
"bytes": "172"
},
{
"name": "Shell",
"bytes": "829"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
from distutils.version import StrictVersion
from django.db.models import F, Q, Exists, OuterRef
from api.base.exceptions import InvalidFilterOperator, InvalidFilterValue
from api.base.filters import ListFilterMixin
from api.base import utils
from osf.models import AbstractNode, NodeRelation, Node, Preprint
from osf.utils import permissions
class NodesFilterMixin(ListFilterMixin):
def param_queryset(self, query_params, default_queryset):
filters = self.parse_query_params(query_params)
auth_user = utils.get_user_auth(self.request)
if 'filter[preprint]' in query_params:
query = Preprint.objects.preprint_permissions_query(user=auth_user.user)
subquery = Preprint.objects.filter(query & Q(deleted__isnull=True) & Q(node=OuterRef('pk')))
queryset = default_queryset.annotate(preprints_exist=Exists(subquery))
else:
queryset = default_queryset
if filters:
for key, field_names in filters.items():
for field_name, operation in field_names.items():
if field_name == 'tags':
queryset = queryset.distinct('id')
# filter[parent]=null
if field_name == 'parent' and operation['op'] == 'eq' and not operation['value']:
queryset = queryset.get_roots()
query_params = deepcopy(query_params)
query_params.pop(key)
return super(NodesFilterMixin, self).param_queryset(query_params, queryset)
def build_query_from_field(self, field_name, operation):
if field_name == 'parent':
if operation['op'] == 'eq':
if operation['value']:
# filter[parent]=<nid>
parent = utils.get_object_or_error(AbstractNode, operation['value'], self.request, display_name='parent')
node_ids = NodeRelation.objects.filter(parent=parent, is_node_link=False).values_list('child_id', flat=True)
return Q(id__in=node_ids)
elif operation['op'] == 'ne':
if not operation['value']:
# filter[parent][ne]=null
return ~Q(root_id=F('id'))
# TODO: support this case in the future:
# else filter[parent][ne]=<nid>
raise InvalidFilterValue(detail='Only "null" is accepted as valid input to "filter[parent][ne]"')
else:
# filter[parent][gte]=''
raise InvalidFilterOperator(value=operation['op'], valid_operators=['eq', 'ne'])
if field_name == 'root':
if None in operation['value']:
raise InvalidFilterValue(value=operation['value'])
with_as_root_query = Q(root__guids___id__in=operation['value'])
return ~with_as_root_query if operation['op'] == 'ne' else with_as_root_query
if field_name == 'preprint':
            preprint_query = Q(preprints_exist=True)
return preprint_query if utils.is_truthy(operation['value']) else ~preprint_query
return super(NodesFilterMixin, self).build_query_from_field(field_name, operation)
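# Example query strings handled by the mixin above (a sketch; the node id
# is hypothetical):
#
#     GET /v2/nodes/?filter[parent]=null       -> root nodes only
#     GET /v2/nodes/?filter[parent]=abc12      -> children of node abc12
#     GET /v2/nodes/?filter[parent][ne]=null   -> non-root nodes
#     GET /v2/nodes/?filter[preprint]=true     -> nodes with preprints visible
#                                                 to the requesting user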
class UserNodesFilterMixin(NodesFilterMixin):
def build_query_from_field(self, field_name, operation):
if field_name == 'current_user_permissions':
if operation['value'] not in permissions.API_CONTRIBUTOR_PERMISSIONS:
raise InvalidFilterValue(value=operation['value'])
perm = operation['value']
# Filtering UserNodes on the requesting user's permissions to those nodes.
user = self.request.user
if user.is_anonymous:
                # In current versions anonymous users have no permissions on
                # the node; in older versions (< 2.11) they get read access
                # if the node is public.
return Q() if StrictVersion(self.request.version) < StrictVersion('2.11') and perm == permissions.READ else Q(id__in=[])
elif perm == permissions.READ:
return Q(id__in=self.build_node_list(user, permissions.READ_NODE))
elif perm == permissions.WRITE:
return Q(id__in=self.build_node_list(user, permissions.WRITE_NODE))
elif perm == permissions.ADMIN:
return Q(id__in=self.build_node_list(user, permissions.ADMIN_NODE))
return super(UserNodesFilterMixin, self).build_query_from_field(field_name, operation)
def build_node_list(self, user, perm, with_superuser=False):
return Node.objects.get_nodes_for_user(user, permission=perm).values_list('id', flat=True)
|
{
"content_hash": "0bd77bcbbad221bf9d141e1b64e04589",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 136,
"avg_line_length": 50.41489361702128,
"alnum_prop": 0.6100443131462334,
"repo_name": "CenterForOpenScience/osf.io",
"id": "e34733df1096ad334e661ae153e2cf862e759847",
"size": "4739",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api/nodes/filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93635"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "373895"
},
{
"name": "JavaScript",
"bytes": "1596130"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "679193"
},
{
"name": "Python",
"bytes": "11640855"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
import contextlib
import errno
import logging
import os
import stat
from plumbum.commands.base import shquote
from plumbum.commands.processes import ProcessLineTimedOut, iter_lines
from plumbum.machines.base import PopenAddons
from plumbum.machines.remote import BaseRemoteMachine
from plumbum.machines.session import ShellSession
from plumbum.path.local import LocalPath
from plumbum.path.remote import RemotePath, StatRes
try:
    # Sigh... we need to gracefully import paramiko for Sphinx builds, etc.
import paramiko
except ImportError:
class paramiko: # type: ignore[no-redef]
def __bool__(self):
return False
def __getattr__(self, name):
raise ImportError("No module named paramiko")
paramiko = paramiko() # type: ignore[operator]
logger = logging.getLogger("plumbum.paramiko")
class ParamikoPopen(PopenAddons):
def __init__(
self,
argv,
stdin,
stdout,
stderr,
encoding,
stdin_file=None,
stdout_file=None,
stderr_file=None,
):
self.argv = argv
self.channel = stdout.channel
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.custom_encoding = encoding
self.returncode = None
self.pid = None
self.stdin_file = stdin_file
self.stdout_file = stdout_file
self.stderr_file = stderr_file
def poll(self):
if self.returncode is None:
if self.channel.exit_status_ready():
return self.wait()
return self.returncode
def wait(self):
if self.returncode is None:
self.channel.recv_exit_status()
self.returncode = self.channel.exit_status
self.close()
return self.returncode
def close(self):
self.channel.shutdown_read()
self.channel.shutdown_write()
self.channel.close()
@staticmethod
def kill():
# possible way to obtain pid:
# "(cmd ; echo $?) & echo ?!"
# and then client.exec_command("kill -9 %s" % (pid,))
raise OSError("Cannot kill remote processes, we don't have their PIDs")
terminate = kill
def send_signal(self, sig):
raise NotImplementedError()
def communicate(self):
stdout = []
stderr = []
infile = self.stdin_file
sources = [
("1", stdout, self.stdout, self.stdout_file),
("2", stderr, self.stderr, self.stderr_file),
]
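        # Feed stdin line by line and drain stdout/stderr in a round-robin
        # fashion; an exhausted stream is removed from `sources`, so the
        # loop terminates once both output pipes hit EOF.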
i = 0
while sources:
if infile:
try:
line = infile.readline()
except (ValueError, OSError):
line = None
logger.debug("communicate: %r", line)
if not line:
infile.close()
infile = None
self.stdin.close()
else:
self.stdin.write(line)
self.stdin.flush()
i = (i + 1) % len(sources)
_name, coll, pipe, outfile = sources[i]
line = pipe.readline()
# logger.debug("%s> %r", name, line)
if not line:
del sources[i]
elif outfile:
outfile.write(line)
outfile.flush()
else:
coll.append(line)
self.wait()
stdout = "".join(s for s in stdout).encode(self.custom_encoding)
stderr = "".join(s for s in stderr).encode(self.custom_encoding)
return stdout, stderr
def iter_lines(self, timeout=None, **kwargs):
if timeout is not None:
raise NotImplementedError(
"The 'timeout' parameter is not supported with ParamikoMachine"
)
return iter_lines(self, _iter_lines=_iter_lines, **kwargs)
__iter__ = iter_lines
class ParamikoMachine(BaseRemoteMachine):
"""
An implementation of :class:`remote machine <plumbum.machines.remote.BaseRemoteMachine>`
    over Paramiko (a Python implementation of the SSH2 protocol). Invoking a remote command
translates to invoking it over SSH ::
with ParamikoMachine("yourhostname") as rem:
r_ls = rem["ls"]
# r_ls is the remote `ls`
# executing r_ls() is equivalent to `ssh yourhostname ls`, only without
# spawning a new ssh client
:param host: the host name to connect to (SSH server)
:param user: the user to connect as (if ``None``, the default will be used)
:param port: the server's port (if ``None``, the default will be used)
:param password: the user's password (if a password-based authentication is to be performed)
(if ``None``, key-based authentication will be used)
:param keyfile: the path to the identity file (if ``None``, the default will be used)
:param load_system_host_keys: whether or not to load the system's host keys (from ``/etc/ssh``
and ``~/.ssh``). The default is ``True``, which means Paramiko
behaves much like the ``ssh`` command-line client
:param missing_host_policy: the value passed to the underlying ``set_missing_host_key_policy``
of the client. The default is ``None``, which means
``set_missing_host_key_policy`` is not invoked and paramiko's
default behavior (reject) is employed
:param encoding: the remote machine's encoding (defaults to UTF8)
:param look_for_keys: set to False to disable searching for discoverable
private key files in ``~/.ssh``
:param connect_timeout: timeout for TCP connection
    .. note:: If Paramiko 1.15 or above is installed, GSS-API authentication can be used
:param bool gss_auth: ``True`` if you want to use GSS-API authentication
:param bool gss_kex: Perform GSS-API Key Exchange and user authentication
:param bool gss_deleg_creds: Delegate GSS-API client credentials or not
    :param str gss_host: The target's name in the Kerberos database. default: hostname
:param bool get_pty: Execute remote commands with allocated pseudo-tty. default: False
:param bool load_system_ssh_config: read system SSH config for ProxyCommand configuration. default: False
"""
class RemoteCommand(BaseRemoteMachine.RemoteCommand): # type: ignore[valid-type, misc]
def __or__(self, *_):
raise NotImplementedError("Not supported with ParamikoMachine")
def __gt__(self, *_):
raise NotImplementedError("Not supported with ParamikoMachine")
def __rshift__(self, *_):
raise NotImplementedError("Not supported with ParamikoMachine")
def __ge__(self, *_):
raise NotImplementedError("Not supported with ParamikoMachine")
def __lt__(self, *_):
raise NotImplementedError("Not supported with ParamikoMachine")
def __lshift__(self, *_):
raise NotImplementedError("Not supported with ParamikoMachine")
def __init__(
self,
host,
user=None,
port=None,
password=None,
keyfile=None,
load_system_host_keys=True,
missing_host_policy=None,
encoding="utf8",
look_for_keys=None,
connect_timeout=None,
keep_alive=0,
gss_auth=False,
gss_kex=None,
gss_deleg_creds=None,
gss_host=None,
get_pty=False,
load_system_ssh_config=False,
):
self.host = host
kwargs = {}
if user:
self._fqhost = f"{user}@{host}"
kwargs["username"] = user
else:
self._fqhost = host
self._client = paramiko.SSHClient()
if load_system_host_keys:
self._client.load_system_host_keys()
if port is not None:
kwargs["port"] = port
if keyfile is not None:
kwargs["key_filename"] = keyfile
if password is not None:
kwargs["password"] = password
if missing_host_policy is not None:
self._client.set_missing_host_key_policy(missing_host_policy)
if look_for_keys is not None:
kwargs["look_for_keys"] = look_for_keys
if connect_timeout is not None:
kwargs["timeout"] = connect_timeout
if gss_auth:
kwargs["gss_auth"] = gss_auth
kwargs["gss_kex"] = gss_kex
kwargs["gss_deleg_creds"] = gss_deleg_creds
if not gss_host:
gss_host = host
kwargs["gss_host"] = gss_host
if load_system_ssh_config:
ssh_config = paramiko.SSHConfig()
with open(os.path.expanduser("~/.ssh/config"), encoding="utf-8") as f:
ssh_config.parse(f)
with contextlib.suppress(KeyError):
hostConfig = ssh_config.lookup(host)
kwargs["sock"] = paramiko.ProxyCommand(hostConfig["proxycommand"])
self._client.connect(host, **kwargs)
self._keep_alive = keep_alive
self._sftp = None
self._get_pty = get_pty
BaseRemoteMachine.__init__(self, encoding, connect_timeout)
def __str__(self):
return f"paramiko://{self._fqhost}"
def close(self):
BaseRemoteMachine.close(self)
self._client.close()
@property
def sftp(self):
"""
Returns an SFTP client on top of the current SSH connection; it can be used to manipulate
files directly, much like an interactive FTP/SFTP session
"""
if not self._sftp:
self._sftp = self._client.open_sftp()
return self._sftp
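    # Usage sketch (paths are illustrative):
    #     mach.sftp.put('local.txt', '/tmp/remote.txt')
    #     mach.sftp.listdir('/tmp')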
def session(
self, isatty=False, term="vt100", width=80, height=24, *, new_session=False
):
# new_session is ignored for ParamikoMachine
trans = self._client.get_transport()
trans.set_keepalive(self._keep_alive)
chan = trans.open_session()
if isatty:
chan.get_pty(term, width, height)
chan.set_combine_stderr(True)
chan.invoke_shell()
stdin = chan.makefile("wb", -1)
stdout = chan.makefile("rb", -1)
stderr = chan.makefile_stderr("rb", -1)
proc = ParamikoPopen(["<shell>"], stdin, stdout, stderr, self.custom_encoding)
return ShellSession(proc, self.custom_encoding, isatty)
def popen(
self,
args,
stdin=None,
stdout=None,
stderr=None,
new_session=False, # pylint: disable=unused-argument
env=None,
cwd=None,
):
# new_session is ignored for ParamikoMachine
argv = []
envdelta = self.env.getdelta()
if env:
envdelta.update(env)
argv.extend(["cd", str(cwd or self.cwd), "&&"])
if envdelta:
argv.append("env")
argv.extend(f"{k}={shquote(v)}" for k, v in envdelta.items())
argv.extend(args.formulate())
cmdline = " ".join(argv)
logger.debug(cmdline)
si, so, se = self._client.exec_command(cmdline, 1, get_pty=self._get_pty)
return ParamikoPopen(
argv,
si,
so,
se,
self.custom_encoding,
stdin_file=stdin,
stdout_file=stdout,
stderr_file=stderr,
)
def download(self, src, dst):
if isinstance(src, LocalPath):
raise TypeError(f"src of download cannot be {src!r}")
if isinstance(src, RemotePath) and src.remote != self:
raise TypeError(f"src {src!r} points to a different remote machine")
if isinstance(dst, RemotePath):
raise TypeError(f"dst of download cannot be {dst!r}")
return self._download(
src if isinstance(src, RemotePath) else self.path(src),
dst if isinstance(dst, LocalPath) else LocalPath(dst),
)
def _download(self, src, dst):
if src.is_dir():
if not dst.exists():
self.sftp.mkdir(str(dst))
for fn in src:
self._download(fn, dst / fn.name)
elif dst.is_dir():
self.sftp.get(str(src), str(dst / src.name))
else:
self.sftp.get(str(src), str(dst))
def upload(self, src, dst):
if isinstance(src, RemotePath):
raise TypeError(f"src of upload cannot be {src!r}")
if isinstance(dst, LocalPath):
raise TypeError(f"dst of upload cannot be {dst!r}")
if isinstance(dst, RemotePath) and dst.remote != self:
raise TypeError(f"dst {dst!r} points to a different remote machine")
return self._upload(
src if isinstance(src, LocalPath) else LocalPath(src),
dst if isinstance(dst, RemotePath) else self.path(dst),
)
def _upload(self, src, dst):
if src.is_dir():
if not dst.exists():
self.sftp.mkdir(str(dst))
for fn in src:
self._upload(fn, dst / fn.name)
elif dst.is_dir():
self.sftp.put(str(src), str(dst / src.name))
else:
self.sftp.put(str(src), str(dst))
def connect_sock(self, dport, dhost="localhost", ipv6=False):
"""Returns a Paramiko ``Channel``, connected to dhost:dport on the remote machine.
The ``Channel`` behaves like a regular socket; you can ``send`` and ``recv`` on it
and the data will pass encrypted over SSH. Usage::
mach = ParamikoMachine("myhost")
sock = mach.connect_sock(12345)
data = sock.recv(100)
sock.send("foobar")
sock.close()
"""
if ipv6 and dhost == "localhost":
dhost = "::1"
srcaddr = ("::1", 0, 0, 0) if ipv6 else ("127.0.0.1", 0)
trans = self._client.get_transport()
trans.set_keepalive(self._keep_alive)
chan = trans.open_channel("direct-tcpip", (dhost, dport), srcaddr)
return SocketCompatibleChannel(chan)
#
# Path implementation
#
def _path_listdir(self, fn):
return self.sftp.listdir(str(fn))
def _path_read(self, fn):
f = self.sftp.open(str(fn), "rb")
data = f.read()
f.close()
return data
def _path_write(self, fn, data):
if self.custom_encoding and isinstance(data, str):
data = data.encode(self.custom_encoding)
f = self.sftp.open(str(fn), "wb")
f.write(data)
f.close()
def _path_stat(self, fn):
try:
st = self.sftp.stat(str(fn))
except OSError as e:
if e.errno == errno.ENOENT:
return None
raise
res = StatRes(
(
st.st_mode,
0,
0,
0,
st.st_uid,
st.st_gid,
st.st_size,
st.st_atime,
st.st_mtime,
0,
)
)
if stat.S_ISDIR(st.st_mode):
res.text_mode = "directory"
if stat.S_ISREG(st.st_mode):
res.text_mode = "regular file"
return res
def daemonic_popen(self, command, cwd="/", stdout=None, stderr=None, append=True):
raise NotImplementedError("This is not implemented on ParamikoMachine!")
###################################################################################################
# Make paramiko.Channel adhere to the socket protocol, namely, send and recv should fail
# when the socket has been closed
###################################################################################################
class SocketCompatibleChannel:
def __init__(self, chan):
self._chan = chan
def __getattr__(self, name):
return getattr(self._chan, name)
def send(self, s):
if self._chan.closed:
raise OSError(errno.EBADF, "Bad file descriptor")
return self._chan.send(s)
def recv(self, count):
if self._chan.closed:
raise OSError(errno.EBADF, "Bad file descriptor")
return self._chan.recv(count)
###################################################################################################
# Custom iter_lines for paramiko.Channel
###################################################################################################
def _iter_lines(
proc,
decode, # pylint: disable=unused-argument
linesize,
line_timeout=None,
):
from selectors import EVENT_READ, DefaultSelector
    # selectors-based implementation (stdlib 'selectors', Python 3.4+)
def selector():
sel = DefaultSelector()
sel.register(proc.stdout.channel, EVENT_READ)
while True:
ready = sel.select(line_timeout)
if not ready and line_timeout:
raise ProcessLineTimedOut(
"popen line timeout expired",
getattr(proc, "argv", None),
getattr(proc, "machine", None),
)
for _key, _mask in ready:
yield
for _ in selector():
if proc.stdout.channel.recv_ready():
yield 0, proc.stdout.readline(linesize)
if proc.stdout.channel.recv_stderr_ready():
yield 1, proc.stderr.readline(linesize)
if proc.poll() is not None:
break
for line in proc.stdout:
yield 0, line
for line in proc.stderr:
yield 1, line
|
{
"content_hash": "98e44d3ed8e9249bc55cf67f22bcbd51",
"timestamp": "",
"source": "github",
"line_count": 515,
"max_line_length": 109,
"avg_line_length": 34.19223300970874,
"alnum_prop": 0.5530694531205633,
"repo_name": "tomerfiliba/plumbum",
"id": "6b16d17c36ef6a534a66b71631946c7cc2ee13c2",
"size": "17609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plumbum/machines/paramiko_machine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "456351"
},
{
"name": "Shell",
"bytes": "193"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
class StdTest(integration.ModuleCase):
'''
Test standard client calls
'''
def test_cli(self):
'''
Test cli function
'''
cmd_iter = self.client.cmd_cli(
'minion',
'test.kwarg',
['foo=bar', 'baz=quo'],
kwarg={'qux': 'quux'}
)
for ret in cmd_iter:
data = ret['minion']['ret']
self.assertIn('foo', data)
self.assertIn('baz', data)
self.assertIn('qux', data)
self.assertEqual(data['foo'], 'bar')
self.assertEqual(data['baz'], 'quo')
self.assertEqual(data['qux'], 'quux')
def test_iter(self):
'''
test cmd_iter
'''
cmd_iter = self.client.cmd_iter(
'minion',
'test.kwarg',
['foo=bar', 'baz=quo'],
kwarg={'qux': 'quux'}
)
for ret in cmd_iter:
data = ret['minion']['ret']
self.assertIn('foo', data)
self.assertIn('baz', data)
self.assertIn('qux', data)
self.assertEqual(data['foo'], 'bar')
self.assertEqual(data['baz'], 'quo')
self.assertEqual(data['qux'], 'quux')
def test_iter_no_block(self):
'''
test cmd_iter_no_block
'''
cmd_iter = self.client.cmd_iter_no_block(
'minion',
'test.kwarg',
['foo=bar', 'baz=quo'],
kwarg={'qux': 'quux'}
)
for ret in cmd_iter:
if ret is None:
continue
data = ret['minion']['ret']
self.assertIn('foo', data)
self.assertIn('baz', data)
self.assertIn('qux', data)
self.assertEqual(data['foo'], 'bar')
self.assertEqual(data['baz'], 'quo')
self.assertEqual(data['qux'], 'quux')
def test_full_returns(self):
'''
        test cmd_full_return
'''
ret = self.client.cmd_full_return(
'minion',
'test.kwarg',
['foo=bar', 'baz=quo'],
kwarg={'qux': 'quux'}
)
data = ret['minion']
self.assertIn('foo', data['ret'])
self.assertIn('baz', data['ret'])
self.assertIn('qux', data['ret'])
self.assertEqual(data['ret']['foo'], 'bar')
self.assertEqual(data['ret']['baz'], 'quo')
self.assertEqual(data['ret']['qux'], 'quux')
def test_kwarg_type(self):
'''
Test that kwargs end up on the client as the same type
'''
terrible_yaml_string = 'foo: ""\n# \''
ret = self.client.cmd_full_return(
'minion',
'test.arg_type',
['a', 1],
kwarg={'outer': {'a': terrible_yaml_string},
'inner': 'value'}
)
data = ret['minion']['ret']
self.assertIn('str', data['args'][0])
self.assertIn('int', data['args'][1])
self.assertIn('dict', data['kwargs']['outer'])
self.assertIn('str', data['kwargs']['inner'])
if __name__ == '__main__':
from integration import run_tests
run_tests(StdTest)
|
{
"content_hash": "891649808817b103f477cfb4ee802df2",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 62,
"avg_line_length": 30.778761061946902,
"alnum_prop": 0.46204715353651526,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "22a43c8935d9d86f26be7ad13f3247e6adb01865",
"size": "3524",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.3/tests/integration/client/kwarg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
from tools import *
from default_record import *
from xen.xend import uuid
from xen.xend import XendDomain, XendNode
from xen.xend import BNVMAPI, BNStorageAPI
from xen.xend.server.netif import randomMAC
from xen.xend.ConfigUtil import getConfigVar
from xen.xend.XendAPIConstants import *
from xen.xend.XendAuthSessions import instance as auth_manager
from xen.xend.XendLogging import log_unittest, init
init("/var/log/xen/unittest.log", "DEBUG", log_unittest)
log = log_unittest
MB = 1024 * 1024
XEND_NODE = XendNode.instance()
XEND_DOMAIN = XendDomain.instance()
VMAPI = BNVMAPI.instance()
STORAGEAPI = BNStorageAPI.instance()
SESSION = "SessionForTest"
# SESSION = VMAPI.session_login_with_password('root', 'onceas').get('Value')
SR_TYPE = 'ocfs2'
ISO_SR_TYPE = 'gpfs_iso'
VM_VDI_MAP = {}
if getConfigVar('compute', 'VM', 'disk_limit'):
DISK_LIMIT = int(getConfigVar('compute', 'VM', 'disk_limit'))
else:
DISK_LIMIT = 6
if getConfigVar('compute', 'VM', 'interface_limit'):
INTERFACE_LIMIT = int(getConfigVar('compute', 'VM', 'interface_limit'))
else:
INTERFACE_LIMIT = 6
def _get_ocfs2_SR():
sr = XEND_NODE.get_sr_by_type(SR_TYPE)
if not sr:
raise Exception("We need ocfs2 SR_ref here!")
else:
return sr[0]
SR_ref = _get_ocfs2_SR()
log.debug(">>>>>>>>>>>SR is: %s" % SR_ref)
def login_session():
return "SessionForTest"
def negative_session():
return "NegativeSession"
def negative_host():
return "NegativeHost"
def logout_session(session):
auth_manager().logout(session)
def destroy_VM_and_VDI(vm_ref, hard_shutdown_before_delete=False):
if VM_VDI_MAP:
vdi_ref = VM_VDI_MAP.get(vm_ref)
log.debug("destroy_VM_and_VDI, vdi_ref: %s" % vdi_ref)
if not vdi_ref:
vdi_ref = vm_ref
XEND_NODE.srs[SR_ref].destroy_vdi(vdi_ref, True, True)
if hard_shutdown_before_delete:
XEND_DOMAIN.domain_destroy(vm_ref)
XEND_DOMAIN.domain_delete(vm_ref, True)
def destroy_VDI(vdi_ref):
sr = XEND_NODE.get_sr_by_vdi(vdi_ref)
XEND_NODE.srs[sr].destroy_vdi(vdi_ref, True, True)
def start_VM(vm_ref, start_paused=False, force_start=True):
try:
log.debug(">>>>>>>>>>>start_VM")
VMAPI._VM_start(SESSION, vm_ref, start_paused, force_start)
power_state = VMAPI._VM_get_power_state(vm_ref).get('Value')
log.debug(">>>>>>>>>>>>>VM power state: %s<<<<<<<<<<<<<<" % power_state)
if cmp(power_state, XEN_API_VM_POWER_STATE[XEN_API_VM_POWER_STATE_RUNNING]) == 0:
return True
else:
return False
except Exception, e:
log.exception("<<<<<<<<<<<<start_VM failed! VM: %s;Exception: %s" %(vm_ref, e))
raise e
def set_VM_is_a_template(vm_ref):
return VMAPI._VM_set_is_a_template(SESSION, vm_ref, True)
def create_bootable_VM_with_VDI(memory_size = 512, vcpu_num = 1, disk_size = 10):
log.debug(">>>>>>>>>>>create_running_VM_with_VDI")
memory_size = memory_size * MB
vm_rec = dict(VM_default)
vm_rec['memory_static_max'] = memory_size
vm_rec['memory_dynamic_max'] = memory_size
vm_rec['VCPUs_max'] = vcpu_num
vm_rec['VCPUs_at_startup'] = vcpu_num
vm_ref = XEND_DOMAIN.create_domain(vm_rec)
try:
if vm_ref :
create_VBD_and_VDI(vm_ref, disk_size, True)
create_CD_attached_VM(vm_ref, "hdc", False)
create_console_attached_VM(vm_ref, "rfb")
return vm_ref
except Exception, e:
log.exception("<<<<<<<<<<<create_VM_with_VDI failed! VM: %s; Exception: %s" % (vm_ref, e))
XEND_DOMAIN.domain_delete(vm_ref, True)
raise e
def create_VM_with_VDI(memory_size = 512, vcpu_num = 1, disk_size = 10):
log.debug(">>>>>>>>>>>create_VM_with_VDI")
memory_size = memory_size * MB
vm_rec = dict(VM_default)
vm_rec['memory_static_max'] = memory_size
vm_rec['memory_dynamic_max'] = memory_size
vm_rec['VCPUs_max'] = vcpu_num
vm_rec['VCPUs_at_startup'] = vcpu_num
vm_ref = XEND_DOMAIN.create_domain(vm_rec)
try:
if vm_ref :
create_VBD_and_VDI(vm_ref, disk_size, True)
return vm_ref
except Exception, e:
log.exception("<<<<<<<<<<<create_VM_with_VDI failed! VM: %s; Exception: %s" % (vm_ref, e))
XEND_DOMAIN.domain_delete(vm_ref, True)
raise e
def create_VM(memory_size = 512, vcpu_num = 1):
try:
log.debug(">>>>>>>>>>>create VM")
memory_size = memory_size * MB
vm_rec = dict(VM_default)
vm_rec['memory_static_max'] = memory_size
vm_rec['memory_dynamic_max'] = memory_size
vm_rec['VCPUs_max'] = vcpu_num
vm_rec['VCPUs_at_startup'] = vcpu_num
vm_ref = XEND_DOMAIN.create_domain(vm_rec)
return vm_ref
except Exception, e:
log.exception("<<<<<<<<<<<create_VM failed! Exception: %s" % (e))
raise e
def create_VIF_attached_VM(attached_vm, mac, network):
try:
log.debug(">>>>>>>>>>>create_VIF_attached_VM")
vif_record = dict(vif_default)
vif_record['VM'] = attached_vm
vif_record['MTU'] = 1500
vif_record['MAC'] = mac
vif_record['network'] = network
response = VMAPI._VIF_create(SESSION, vif_record)
return response
except Exception, e:
log.exception("<<<<<<<<<<<create_VIF_attached_VM failed! VM: %s; Exception: %s" % (attached_vm, e))
raise e
def create_console_attached_VM(attached_vm, protocol):
try:
log.debug(">>>>>>>>>>create_console_attached_VM")
console_record = dict(console_default)
console_record['VM'] = attached_vm
console_record['protocol'] = protocol
response = VMAPI._console_create(SESSION, console_record)
return response
except Exception, e:
log.exception("<<<<<<<<<<<create_console_attached_VM failed! VM: %s; Exception: %s" % (attached_vm, e))
raise e
def create_CD_attached_VM(attached_vm, device, bootable):
try:
log.debug(">>>>>>>>>>create_CD_attached_VM")
vdi_uuid = _get_ISO_VDI()
vbd_record = dict(vbd_default)
vbd_record['VM'] = attached_vm
vbd_record['bootable'] = bootable
vbd_record['device'] = device
vbd_record['VDI'] = vdi_uuid
vbd_record['type'] = "CD"
response = VMAPI._VBD_create(SESSION, vbd_record)
return response
except Exception, e:
log.exception("<<<<<<<<<<<create_CD_attached_VM failed! VM: %s; Exception: %s" % (attached_vm, e))
raise e
def create_data_VBD_attached_VM(attached_vm, vdi_ref):
try:
return VMAPI._VM_create_data_VBD(SESSION, attached_vm, vdi_ref)
except Exception, e:
log.exception("<<<<<<<<<<<create_data_VBD_attached_VM failed! VM: %s; Exception: %s" % (attached_vm, e))
raise e
def get_first_VIF(vm_ref):
try:
        vifs = VMAPI._VM_get_VIFs(vm_ref).get('Value')
if vifs:
return vifs[0]
return None
except Exception, e:
log.exception("<<<<<<<<<<<get_first_VIF failed! VM: %s; Exception: %s" % (vm_ref, e))
raise e
def get_VIF_ovs_bridge(vif_ref):
try:
return XEND_DOMAIN.get_dev_property_by_uuid('vif', vif_ref, 'bridge')
except Exception, e:
log.exception("<<<<<<<<<<<get_VIF_ovs_bridge failed! VM: %s; Exception: %s" % (vm_ref, e))
raise e
def get_negative_VIF():
return "THIS_IS_NEGATIVE_VIF"
def _get_ISO_VDI():
srs_ref = XEND_NODE.get_sr_by_type(ISO_SR_TYPE)
if srs_ref:
sr = XEND_NODE.get_sr(srs_ref[0])
vdis = sr.get_vdis()
if vdis:
for vdi in vdis:
if cmp(sr.get_vdi_by_uuid(vdi).name_label, 'cd-rom') == 0:
continue
return vdi
        else:
            raise Exception("No ISO disk in system.")
    else:
        raise Exception("No ISO storage in system.")
def gen_randomMAC():
return randomMAC()
def gen_negativeMAC():
return "THIS_IS_NEGATIVE_MAC"
def _createUuid():
return uuid.uuidFactory()
def gen_regularUuid():
return uuid.toString(_createUuid())
def gen_negativeUuid():
return "THIS_IS_NEGATIVE_UUID"
def gen_negativeName():
return "THIS_IS_NEGATIVE_NAME_$%!"
def gen_regularSnapshotName(ref):
return "ss-%s" % ref
def gen_negativeSnapshotName():
return "ss-!&&!"
def vm_api_VM_create_on_from_template(session, host, template_vm, new_vm_name, param_dict, ping):
try:
return VMAPI.VM_create_on_from_template(session, host, template_vm, new_vm_name, param_dict, ping)
except Exception, e:
log.exception("<<<<<<<<<<<vm_api_VM_create_on_from_template failed! VM: %s; Exception: %s" % (new_vm_name, e))
raise e
def vm_api_VM_snapshot(session, vm_ref, snap_name):
try:
return VMAPI.VM_snapshot(session, vm_ref, snap_name)
except Exception, e:
log.exception("<<<<<<<<<<<vm_api_VM_snapshot failed! VM: %s; Exception: %s" % (vm_ref, e))
raise e
def vm_api_VM_get_system_VDI(session, vm_ref):
try:
return VMAPI._VM_get_system_VDI(session, vm_ref)
except Exception, e:
log.exception("<<<<<<<<<<<vm_api_VM_get_system_VDI failed! VM: %s; Exception: %s" % (vm_ref, e))
raise e
def vm_api_VM_rollback(session, vm_ref, snap_name):
try:
return VMAPI.VM_rollback(session, vm_ref, snap_name)
except Exception, e:
log.exception("<<<<<<<<<<<vm_api_VM_rollback failed! VM: %s; Exception: %s" % (vm_ref, e))
raise e
def storage_api_VDI_snapshot(session, vdi_ref, snap_name):
try:
return STORAGEAPI.VDI_snapshot(session, vdi_ref, snap_name)
except Exception, e:
log.exception("<<<<<<<<<<<storage_api_VDI_snapshot failed! VDI: %s; Exception: %s" % (vdi_ref, e))
raise e
def storage_api_VDI_rollback(session, vdi_ref, snap_name):
try:
return STORAGEAPI.VDI_rollback(session, vdi_ref, snap_name)
except Exception, e:
log.exception("<<<<<<<<<<<storage_api_VDI_rollback failed! VDI: %s; Exception: %s" % (vdi_ref, e))
raise e
def storage_api_VDI_destroy_snapshot(session, vdi_ref, snap_name):
try:
return STORAGEAPI.VDI_destroy_snapshot(session, vdi_ref, snap_name)
except Exception, e:
log.exception("<<<<<<<<<<<storage_api_VDI_destroy_snapshot failed! VDI: %s; Exception: %s" % (vdi_ref, e))
raise e
def create_data_VDI(disk_size=10):
try:
log.debug(">>>>>>>>>>>in create_data_VDI")
vdi_uuid = gen_regularUuid()
vdi_record = dict(vdi_default)
vdi_record['uuid'] = vdi_uuid
vdi_record['virtual_size'] = disk_size
vdi_record['type'] = 'metadata'
vdi_record['sharable'] = True
vdi_record = STORAGEAPI._VDI_select_SR(SESSION, vdi_record)
sr = vdi_record.get('SR')
vdi_ref = XEND_NODE.srs[sr].create_vdi(vdi_record, True)
return vdi_ref
except Exception, e:
log.exception("<<<<<<<<<<<create_data_VDI failed! Exception: %s" % (e))
raise e
def create_VBD_and_VDI(vm_ref, disk_size, is_system_vbd):
log.debug(">>>>>>>>>>>in create_VBD_and_VDI")
vdi_uuid = gen_regularUuid()
sr_instance = XEND_NODE.get_sr(SR_ref)
location = "tap:aio:"+sr_instance.get_location()+"/"+vdi_uuid+"/disk.vhd";
vdi_record = dict(vdi_default)
vdi_record['uuid'] = vdi_uuid
vdi_record['virtual_size'] = disk_size
if is_system_vbd:
vdi_record['type'] = 'user'
else:
vdi_record['type'] = 'metadata'
vdi_record['sharable'] = True
vdi_record['SR_ref'] = SR_ref
vdi_record['location'] = location
vbd_record = dict(vbd_default)
vbd_record['VM'] = vm_ref
if is_system_vbd:
vbd_record['bootable'] = True
else:
vbd_record['bootable'] = False
if is_system_vbd:
vbd_record['device'] = 'hda'
vbd_record['mode'] ='RW'
vbd_record['type'] ='Disk'
vdi_ref = XEND_NODE.srs[SR_ref].create_vdi(vdi_record, True)
try:
VM_VDI_MAP[vm_ref] = vdi_ref
vbd_record['VDI'] = vdi_ref
dominfo = XEND_DOMAIN.get_vm_by_uuid(vm_ref)
vbd_ref = dominfo.create_vbd_for_xenapi(vbd_record, location)
log.debug(">>>>>>>>>>>vbd ref: %s" % vbd_ref)
XEND_DOMAIN.managed_config_save(dominfo)
return vbd_ref
except Exception, e:
log.debug("<<<<<<<<<<<VBD create failed! Destroy attached VDI: %s. %s" % (vdi_ref, e))
destroy_VDI(vdi_ref)
raise e
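# Example test flow using the helpers above (a minimal sketch; assumes a
# configured ocfs2 SR and a valid VM template record in default_record):
#
#     vm_ref = create_bootable_VM_with_VDI(memory_size=512, vcpu_num=1, disk_size=10)
#     assert start_VM(vm_ref)
#     destroy_VM_and_VDI(vm_ref, hard_shutdown_before_delete=True)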
|
{
"content_hash": "855e0e5eb53ca805ddcfb498dbd6d0d3",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 118,
"avg_line_length": 36,
"alnum_prop": 0.5768876611418048,
"repo_name": "Hearen/OnceServer",
"id": "1c9b29b09de593d9ce8726ef6642790f5d69c318",
"size": "13032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pool_management/bn-xend-core/xend/tests/util/BNVMAPI_Util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "221692"
},
{
"name": "Java",
"bytes": "3750877"
},
{
"name": "Python",
"bytes": "4139846"
},
{
"name": "Shell",
"bytes": "18975"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-optimistic-lock',
version='1.0.1.dev0',
description='Offline optimistic locking for Django',
url='https://github.com/gavinwahl/django-optimistic-lock',
long_description=read('README.rst'),
license='BSD',
author='Gavin Wahl',
author_email='gavinwahl@gmail.com',
packages=['ool'],
install_requires=['django >= 1.11'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
|
{
"content_hash": "61471514eac70552ee162aa9268717aa",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 70,
"avg_line_length": 30.625,
"alnum_prop": 0.6122448979591837,
"repo_name": "gavinwahl/django-optimistic-lock",
"id": "67e9b77fafaa60c3e67a4a950df2be1017c13de7",
"size": "980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "128"
},
{
"name": "Makefile",
"bytes": "276"
},
{
"name": "Python",
"bytes": "18970"
}
],
"symlink_target": ""
}
|
from typing import Any, Dict, Optional
import sentry_sdk
from flask import Flask
from flask_compress import Compress
from flask_cors import CORS
from sentry_sdk.integrations.flask import FlaskIntegration
from werkzeug.middleware.proxy_fix import ProxyFix
from alerta.database.base import Database, QueryBuilder
from alerta.exceptions import ExceptionHandlers
from alerta.models.alarms import AlarmModel
from alerta.models.enums import Scope
from alerta.utils.audit import AuditTrail
from alerta.utils.config import Config
from alerta.utils.hooks import HookTrigger
from alerta.utils.key import ApiKeyHelper
from alerta.utils.logging import Logger
from alerta.utils.mailer import Mailer
from alerta.utils.plugin import Plugins
from alerta.utils.tracing import Tracing
from alerta.utils.webhook import CustomWebhooks
from alerta.version import __version__
# Sentry will read the DSN from the SENTRY_DSN environment variable.
sentry_sdk.init(integrations=[FlaskIntegration()], release=__version__)
config = Config()
tracing = Tracing()
logger = Logger()
hooks = HookTrigger()
audit = AuditTrail()
alarm_model = AlarmModel()
cors = CORS()
compress = Compress()
handlers = ExceptionHandlers()
key_helper = ApiKeyHelper()
db = Database()
qb = QueryBuilder()
mailer = Mailer()
plugins = Plugins()
custom_webhooks = CustomWebhooks()
def create_app(config_override: Optional[Dict[str, Any]] = None, environment: Optional[str] = None) -> Flask:
app = Flask(__name__)
app.config['ENVIRONMENT'] = environment
config.init_app(app)
app.config.update(config_override or {})
tracing.setup_tracing(app)
logger.setup_logging(app)
if app.config['USE_PROXYFIX']:
app.wsgi_app = ProxyFix(app.wsgi_app) # type: ignore
hooks.init_app(app)
audit.init_app(app)
alarm_model.init_app(app)
Scope.init_app(app)
cors.init_app(app)
compress.init_app(app)
handlers.register(app)
key_helper.init_app(app)
db.init_db(app)
qb.init_app(app)
mailer.register(app)
plugins.register(app)
custom_webhooks.register(app)
from alerta.utils.format import CustomJSONEncoder
app.json_encoder = CustomJSONEncoder
from alerta.views import api
app.register_blueprint(api)
from alerta.webhooks import webhooks
app.register_blueprint(webhooks)
from alerta.auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint)
from alerta.management import mgmt
app.register_blueprint(mgmt)
return app
try:
from celery import Celery
except ImportError:
pass
def create_celery_app(app: Flask = None) -> 'Celery':
from alerta.utils.format import register_custom_serializer
register_custom_serializer()
app = app or create_app()
celery = Celery(
app.name,
backend=app.config['CELERY_RESULT_BACKEND'],
broker=app.config['CELERY_BROKER_URL']
)
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase): # type: ignore
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
return celery
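# A minimal usage sketch (assumes a reachable database and default settings;
# the host/port values are illustrative):
#
#     from alerta.app import create_app
#
#     app = create_app(environment='development')
#     app.run(host='0.0.0.0', port=8080)
#
# The ContextTask subclass above runs every Celery task inside the Flask
# application context, so tasks can use `current_app`, the database, etc.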
|
{
"content_hash": "d9cf9df6ea303aba7060791df9be9e7a",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 89,
"avg_line_length": 26.21311475409836,
"alnum_prop": 0.7204502814258912,
"repo_name": "guardian/alerta",
"id": "986189aea66a40dc22198dcfcf2887b0ec82fe75",
"size": "3198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alerta/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5143"
},
{
"name": "JavaScript",
"bytes": "2971"
},
{
"name": "Makefile",
"bytes": "842"
},
{
"name": "Python",
"bytes": "355607"
},
{
"name": "Shell",
"bytes": "2090"
}
],
"symlink_target": ""
}
|
import logging
import optparse
import os
import sys
from StringIO import StringIO
import re
import unittest
# Run build_server so that files needed by tests are copied to the local
# third_party directory.
import build_server
build_server.main()
BASE_PATH = None
EXPLICIT_TEST_FILES = None
from fake_fetchers import ConfigureFakeFetchers
ConfigureFakeFetchers(os.path.join(sys.path[0], os.pardir))
# Import Handler later because it immediately makes a request to github. We need
# the fake urlfetch to be in place first.
from handler import Handler
class _MockResponse(object):
def __init__(self):
self.status = 200
self.out = StringIO()
self.headers = {}
def set_status(self, status):
self.status = status
class _MockRequest(object):
def __init__(self, path):
self.headers = {}
self.path = path
self.url = 'http://localhost' + path
class IntegrationTest(unittest.TestCase):
def _TestSamplesLocales(self, sample_path, failures):
# Use US English, Spanish, and Arabic.
for lang in ['en-US', 'es', 'ar']:
request = _MockRequest(sample_path)
request.headers['Accept-Language'] = lang + ';q=0.8'
response = _MockResponse()
try:
Handler(request, response, local_path=BASE_PATH).get()
if 200 != response.status:
failures.append(
'Samples page with language %s does not have 200 status.'
' Status was %d.' % (lang, response.status))
if not response.out.getvalue():
failures.append(
'Rendering samples page with language %s produced no output.' %
lang)
except Exception as e:
failures.append('Error rendering samples page with language %s: %s' %
(lang, e))
def _RunPublicTemplatesTest(self):
base_path = os.path.join(BASE_PATH, 'docs', 'templates', 'public')
if EXPLICIT_TEST_FILES is None:
test_files = []
for path, dirs, files in os.walk(base_path):
        # Prune hidden directories in place so os.walk skips them; removing
        # items from a list while iterating over it would skip entries.
        dirs[:] = [d for d in dirs if not d.startswith('.')]
for name in files:
if name.startswith('.') or name == '404.html':
continue
test_files.append(os.path.join(path, name)[len(base_path + os.sep):])
else:
test_files = EXPLICIT_TEST_FILES
test_files = [f.replace(os.sep, '/') for f in test_files]
failures = []
for filename in test_files:
request = _MockRequest(filename)
response = _MockResponse()
try:
Handler(request, response, local_path=BASE_PATH).get()
if 200 != response.status:
failures.append('%s does not have 200 status. Status was %d.' %
(filename, response.status))
if not response.out.getvalue():
failures.append('Rendering %s produced no output.' % filename)
if filename.endswith('samples.html'):
self._TestSamplesLocales(filename, failures)
except Exception as e:
failures.append('Error rendering %s: %s' % (filename, e))
if failures:
self.fail('\n'.join(failures))
def testAllPublicTemplates(self):
logging.getLogger().setLevel(logging.ERROR)
logging_error = logging.error
try:
logging.error = self.fail
self._RunPublicTemplatesTest()
finally:
logging.error = logging_error
def testNonexistentFile(self):
logging.getLogger().setLevel(logging.CRITICAL)
request = _MockRequest('extensions/junk.html')
bad_response = _MockResponse()
Handler(request, bad_response, local_path=BASE_PATH).get()
self.assertEqual(404, bad_response.status)
request_404 = _MockRequest('404.html')
response_404 = _MockResponse()
Handler(request_404, response_404, local_path=BASE_PATH).get()
self.assertEqual(200, response_404.status)
self.assertEqual(response_404.out.getvalue(), bad_response.out.getvalue())
def testCron(self):
if EXPLICIT_TEST_FILES is not None:
return
logging_error = logging.error
try:
logging.error = self.fail
request = _MockRequest('/cron/trunk')
response = _MockResponse()
Handler(request, response, local_path=BASE_PATH).get()
self.assertEqual(200, response.status)
self.assertEqual('Success', response.out.getvalue())
finally:
logging.error = logging_error
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-p',
'--path',
default=os.path.join(
os.path.abspath(os.path.dirname(__file__)),
os.pardir,
os.pardir))
parser.add_option('-a',
'--all',
action='store_true',
default=False)
(opts, args) = parser.parse_args()
if not opts.all:
EXPLICIT_TEST_FILES = args
BASE_PATH = opts.path
suite = unittest.TestSuite(tests=[
IntegrationTest('testNonexistentFile'),
IntegrationTest('testCron'),
IntegrationTest('testAllPublicTemplates')
])
result = unittest.TestResult()
suite.run(result)
if result.failures:
print('*----------------------------------*')
print('| integration_test.py has failures |')
print('*----------------------------------*')
for test, failure in result.failures:
print(test)
print(failure)
exit(1)
exit(0)
|
{
"content_hash": "087a8ae6f2b6fa5fde59f98c93db3e0e",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 80,
"avg_line_length": 33.425,
"alnum_prop": 0.6196709050112191,
"repo_name": "junmin-zhu/chromium-rivertrail",
"id": "da02e75296e397a519d168d9d3cc63a4d0370531",
"size": "5537",
"binary": false,
"copies": "7",
"ref": "refs/heads/v8-binding",
"path": "chrome/common/extensions/docs/server2/integration_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1172794"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "75806807"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "145161929"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "1546515"
},
{
"name": "JavaScript",
"bytes": "18675242"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "Matlab",
"bytes": "5234"
},
{
"name": "Objective-C",
"bytes": "6981387"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "926245"
},
{
"name": "Python",
"bytes": "8088373"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3239"
},
{
"name": "Shell",
"bytes": "1513486"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XML",
"bytes": "13493"
}
],
"symlink_target": ""
}
|
import wx
import wx.calendar
from wx.lib.masked import TimeCtrl
from wx.lib.agw import hypertreelist as HTL
from datetime import datetime, time
from lib import Task, DATA, PRIORITIES, DEFAULT_PRIORITY
from decorators import requires_selection
ID_ADD_TASK = 1000
ID_ADD_SUBTASK = 1010
ID_COLLAPSE = 1020
ID_EXPAND = 1030
HIDE_COMPLETE = False
class TaskList(HTL.HyperTreeList):
"""
This is the widget that houses the tasks
"""
def __init__(self, parent):
self.parent = parent
style = wx.SUNKEN_BORDER | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.TR_HIDE_ROOT | wx.TR_FULL_ROW_HIGHLIGHT | wx.TR_ROW_LINES | wx.TR_EDIT_LABELS #| wx.TR_COLUMN_LINES | HTL.TR_AUTO_CHECK_PARENT
HTL.HyperTreeList.__init__(self, parent, -1, style=style)
self.AddColumn('%')
self.AddColumn('!')
self.AddColumn('Task')
self.AddColumn('Due')
self.SetMainColumn(2)
self.root = self.AddRoot('Tasks')
self.GetMainWindow().Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
self.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.OnEndEdit)
self.Bind(HTL.EVT_TREE_ITEM_CHECKED, self.OnItemToggled)
def EvaluateCompleteness(self, item=None):
"""Determines how complete various task trees are"""
pass
def OnEndEdit(self, evt):
print 'Save task?', evt.GetLabel(), evt.GetItem()
task = evt.GetItem().GetData()
if task:
task.summary = evt.GetLabel()
def OnLeftDClick(self, evt):
pt = evt.GetPosition()
item, flags, column = self.HitTest(pt)
if item and (flags & wx.TREE_HITTEST_ONITEMLABEL):
#self.EditLabel(item)
self.parent.EditTask(item)
evt.Skip()
def OnItemToggled(self, evt):
item = evt.GetItem()
task = item.GetData()
if task:
task.is_complete = item.IsChecked()
if HIDE_COMPLETE:
item.Hide(task.is_complete)
self.EvaluateCompleteness()
def SetTasks(self, tasks):
for task in tasks:
self.AddTask(task, refresh=False)
self.Refresh()
self.ExpandAll()
def AddTask(self, task, parent=None, refresh=True):
if parent is None:
parent = self.root
task.parent = parent
item = self.AppendItem(parent, task.summary, ct_type=1)
item.SetData(task)
for child in task.children:
self.AddTask(child, item, refresh=refresh)
if refresh:
self.Refresh()
def Refresh(self, erase=True, rect=None, parent=None):
"""Refreshes the tree when a task has changed"""
if parent is None:
parent = self.root
for child in parent.GetChildren():
task = child.GetData()
if task:
self.SetItemText(child, '0%', 0)
self.SetItemText(child, str(task._priority), 1)
self.SetItemText(child, task.summary, 2)
child.Check(task.is_complete)
if HIDE_COMPLETE:
child.Hide(task.is_complete)
if task.due_date:
self.SetItemText(child, task.due_date.strftime('%H:%M %m/%d/%y'), 3)
else:
self.SetItemText(child, '', 3)
self.Refresh(parent=child)
super(TaskList, self).Refresh()
class TaskInfoDialog(wx.Dialog):
def __init__(self, *args, **kwds):
self.task = kwds.pop('task', None)
kwds['style'] = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER | wx.THICK_FRAME
wx.Dialog.__init__(self, *args, **kwds)
self.panel = wx.Panel(self, -1)
self.txtSummary = wx.TextCtrl(self.panel, -1, "")
self.lblNotes = wx.StaticText(self.panel, -1, _('Notes:'), style=wx.ALIGN_RIGHT)
self.txtNotes = wx.TextCtrl(self.panel, -1, "", style=wx.TE_MULTILINE|wx.TE_RICH|wx.TE_WORDWRAP)
self.lblPriority = wx.StaticText(self.panel, -1, _('Priority:'), style=wx.ALIGN_RIGHT)
choices = [p[1] for p in sorted(PRIORITIES.items(), key=lambda p: p[0])]
self.cmbPriority = wx.ComboBox(self.panel, -1, choices=choices, style=wx.CB_DROPDOWN)
self.chkIsComplete = wx.CheckBox(self.panel, -1, _('Is Complete'))
self.lblDateDue = wx.StaticText(self.panel, -1, _('Due:'), style=wx.ALIGN_RIGHT)
self.chkIsDue = wx.CheckBox(self.panel, -1, _('Has due date'))
self.calDueDate = wx.calendar.CalendarCtrl(self.panel, -1)
self.txtTime = TimeCtrl(self.panel, id=-1,
value=datetime.now().strftime('%X'),
style=wx.TE_PROCESS_TAB,
validator=wx.DefaultValidator,
format='24HHMMSS',
fmt24hr=True,
displaySeconds=True,
)
self.__set_properties()
self.__do_layout()
self.chkIsDue.Bind(wx.EVT_CHECKBOX, self.ToggleDueDate)
self.txtSummary.SetFocus()
if self.task is not None:
self.SetTask(self.task)
def __set_properties(self):
self.SetTitle(_('Task Information'))
self.cmbPriority.SetValue(PRIORITIES[DEFAULT_PRIORITY])
self.calDueDate.Enable(False)
self.txtTime.Enable(False)
def __do_layout(self):
mainSizer = wx.BoxSizer(wx.VERTICAL)
sizer = wx.FlexGridSizer(5, 2, 5, 5)
lblSubject = wx.StaticText(self.panel, -1, _('Summary:'))
sizer.Add(lblSubject, 0, wx.EXPAND, 0)
sizer.Add(self.txtSummary, 0, wx.ALL|wx.EXPAND, 0)
sizer.Add(self.lblNotes, 0, wx.EXPAND, 0)
sizer.Add(self.txtNotes, 0, wx.EXPAND, 0)
sizer.Add(self.lblPriority, 0, wx.EXPAND, 0)
sizer.Add(self.cmbPriority, 0, wx.EXPAND, 0)
sizer.Add((20, 20), 0, 0, 0)
sizer.Add(self.chkIsComplete, 0, 0, 0)
sizer.Add(self.lblDateDue, 0, wx.ALIGN_RIGHT, 0)
sizer.Add(self.chkIsDue, 0, 0, 0)
sizer.Add((20, 20), 0, 0, 0)
sizer.Add(self.calDueDate, 0, 0, 0)
sizer.Add((20, 20), 0, 0, 0)
sizer.Add(self.txtTime, 0, 0, 0)
self.panel.SetSizer(sizer)
sizer.AddGrowableRow(1)
sizer.AddGrowableCol(1)
mainSizer.Add(self.panel, 1, wx.ALL|wx.EXPAND, 5)
mainSizer.AddF(self.CreateStdDialogButtonSizer(wx.OK|wx.CANCEL),
wx.SizerFlags(0).Expand().Border(wx.BOTTOM|wx.RIGHT, 5))
self.SetSizer(mainSizer)
mainSizer.Fit(self)
self.Layout()
self.Centre()
size = (290, 450)
self.SetMinSize(size)
self.SetSize(size)
def ToggleDueDate(self, evt):
en = self.chkIsDue.IsChecked()
self.calDueDate.Enable(en)
self.txtTime.Enable(en)
def GetTask(self):
if self.task is None:
self.task = Task()
if self.chkIsDue.IsChecked():
due = self.calDueDate.PyGetDate()
tm = self.txtTime.GetValue()
            try:
                tm = datetime.strptime(tm, '%H:%M:%S').time()
            except ValueError:
                tm = datetime.strptime(tm, '%H:%M').time()
due = datetime.combine(due, tm)
else:
due = None
self.task.summary = self.txtSummary.GetValue()
self.task.is_complete = self.chkIsComplete.IsChecked()
self.task.due_date = due
self.task.priority = self.cmbPriority.GetValue()
self.task.notes = self.txtNotes.GetValue()
return self.task
def SetTask(self, task):
self.txtSummary.SetValue(task.summary)
self.txtNotes.SetValue(task.notes)
self.cmbPriority.SetStringSelection(task.priority)
self.chkIsComplete.SetValue(task.is_complete)
if task.due_date is not None:
self.chkIsDue.SetValue(True)
self.calDueDate.PySetDate(task.due_date)
self.txtTime.SetValue(task.due_date.strftime('%X'))
self.task = task
class TreeDoFrame(wx.Frame):
"""
This is the main TreeDo window, where your tasks are laid out before you.
"""
def __init__(self):
wx.Frame.__init__(self, None, -1, title=_('TreeDo'), size=(350, 500))
self.SetMinSize((300, 300))
self.CenterOnParent()
self.toolbar = self.CreateToolBar(wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT)
self.toolbar.SetToolBitmapSize((24, 24))
save_img = wx.Bitmap('res/save.png', wx.BITMAP_TYPE_PNG)
add_img = wx.Bitmap('res/add.png', wx.BITMAP_TYPE_PNG)
add_sub_img = wx.Bitmap('res/add_subtask.png', wx.BITMAP_TYPE_PNG)
collapse_img = wx.Bitmap('res/collapse.png', wx.BITMAP_TYPE_PNG)
expand_img = wx.Bitmap('res/expand.png', wx.BITMAP_TYPE_PNG)
delete_img = wx.Bitmap('res/delete.png', wx.BITMAP_TYPE_PNG)
self.toolbar.AddSimpleTool(wx.ID_SAVE, save_img, _('Save Task List'), _('Save the task list to the hard drive'))
self.toolbar.AddSimpleTool(ID_ADD_TASK, add_img, _('Add Task'), _('Create a new task'))
self.toolbar.AddSimpleTool(ID_ADD_SUBTASK, add_sub_img, _('Add Sub-Task'), _('Create a new subtask'))
#self.toolbar.AddSimpleTool(ID_COLLAPSE, collapse_img, _('Collapse'), _('Collapse all tasks'))
self.toolbar.AddSimpleTool(ID_EXPAND, expand_img, _('Expand'), _('Expand all tasks'))
self.toolbar.AddSimpleTool(wx.ID_DELETE, delete_img, _('Delete'), _('Delete this task'))
self.Bind(wx.EVT_TOOL, self.OnToolClick)
self.toolbar.Realize()
sizer = wx.BoxSizer(wx.VERTICAL)
self.tree = TaskList(self)
sizer.Add(self.tree, 1, wx.EXPAND)
self.Bind(wx.EVT_SIZE, self.UpdateColumnWidths)
self.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.ToggleToolbarButtons)
self.tree.SetTasks(DATA.get_list())
self.ToggleToolbarButtons()
def UpdateColumnWidths(self, evt=None):
width, height = self.GetSize()
self.tree.SetColumnWidth(0, 40)
self.tree.SetColumnWidth(1, 20)
self.tree.SetColumnWidth(2, width - 180)
self.tree.SetColumnWidth(3, 100)
        if evt:
            evt.Skip()
def ToggleToolbarButtons(self, evt=None):
"""Enable or disable certain toolbar buttons based on the selection"""
enable_sub_btns = (self.tree.GetSelection() != self.tree.root)
self.toolbar.EnableTool(ID_ADD_SUBTASK, enable_sub_btns)
self.toolbar.EnableTool(wx.ID_DELETE, enable_sub_btns)
if evt:
evt.Skip()
def AddTask(self, parent=None):
"""Allows the user to add a new task"""
taskDlg = TaskInfoDialog(self, -1, _('Task Info'))
if taskDlg.ShowModal() == wx.ID_OK:
task = taskDlg.GetTask()
self.tree.AddTask(task, parent)
@requires_selection
def AddSubTask(self):
"""Allows the user to add a new task to the selected task"""
parent = self.tree.GetSelection()
return self.AddTask(parent)
@requires_selection
def EditSelectedTask(self):
"""Allows the user to edit the selected task"""
item = self.tree.GetSelection()
self.EditTask(item)
def EditTask(self, item):
"""Allows the user to edit a task's information"""
task = item.GetData()
taskDlg = TaskInfoDialog(self, -1, _('Task Info'), task=task)
if taskDlg.ShowModal() == wx.ID_OK:
task = taskDlg.GetTask()
item.SetData(task)
self.tree.Refresh()
@requires_selection
def DeleteSelectedTask(self):
"""Allows the user to delete the selected task"""
item = self.tree.GetSelection()
self.DeleteTask(item)
def DeleteTask(self, item):
"""Allows the user to delete a task"""
if item.HasChildren():
print 'Deleting item with children'
self.tree.DeleteChildren(item)
self.tree.Delete(item)
def OnToolClick(self, evt):
eid = evt.GetId()
if eid == ID_ADD_TASK:
self.AddTask()
elif eid == ID_ADD_SUBTASK:
self.AddSubTask()
elif eid == ID_COLLAPSE:
for item in self.tree.GetChildren():
item.Collapse(self.tree)
elif eid == ID_EXPAND:
self.tree.ExpandAll()
elif eid == wx.ID_SAVE:
self.Persist()
elif eid == wx.ID_DELETE:
self.DeleteSelectedTask()
def Persist(self):
"""Persists the task list to the filesystem"""
DATA.persist(self.tree.root)
|
{
"content_hash": "3674d00f717718321657f9dbad04d302",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 216,
"avg_line_length": 34.40871934604905,
"alnum_prop": 0.5882958504909724,
"repo_name": "codekoala/treedo",
"id": "8202f03672beb11ae1ea6c4b0e9b2b948fc1dcb5",
"size": "12628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treedo/gui.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "22867"
}
],
"symlink_target": ""
}
|
import allure
from allure.constants import AttachmentType
@allure.step("Test foo")
def test_foo():
prepare()
allure.attach("Response", "<div>bla-bla-bla</div>", AttachmentType.HTML)
pass
@allure.step("Prepare")
def prepare():
pass
|
{
"content_hash": "cc00a5d8ee7782be02a77395ef8e8375",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 76,
"avg_line_length": 20.666666666666668,
"alnum_prop": 0.6975806451612904,
"repo_name": "victorivanovspb/texzametki",
"id": "cfbffe5d587a345f4f1aca67a011429c389b00e5",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/examples/example_allure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gnuplot",
"bytes": "2217"
},
{
"name": "HTML",
"bytes": "379"
},
{
"name": "JavaScript",
"bytes": "19291"
},
{
"name": "Python",
"bytes": "1938"
},
{
"name": "Shell",
"bytes": "5408"
}
],
"symlink_target": ""
}
|
"""
This module provides some facilities for constructing and plotting trees. It
is mostly a wrapper around a very limited subset of functions from the R
`ape` package (Analyses of Phylogenetics and Evolution).
R, the `ape` R package, and the Python package ``rpy2`` must all be
installed, e.g.::
$ apt-get install r-base
$ pip install rpy2
$ R
> install.packages("ape")
See also the examples at:
- http://nbviewer.ipython.org/github/alimanfoo/anhima/blob/master/examples/tree.ipynb
""" # noqa
from __future__ import division, print_function, absolute_import
# standard library dependencies
import tempfile
import logging
# third party dependencies
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
logger = logging.getLogger(__name__)
debug = logging.debug
_r_initialised = False
r = None
ro = None
grdevices = None
ape = None
def _init_r():
"""Private function to initialise R, only executed when needed.
"""
global _r_initialised
global r
global ro
global grdevices
global ape
if not _r_initialised:
import rpy2.robjects as ro # noqa
from rpy2.robjects import r
from rpy2.robjects.packages import importr
import rpy2.robjects.numpy2ri as numpy2ri
numpy2ri.activate()
grdevices = importr('grDevices')
ape = importr(
'ape',
robject_translations={
'delta.plot': 'delta_dot_plot',
'dist.dna': 'dist_dot_dna',
'dist.nodes': 'dist_dot_nodes',
'node.depth': 'node_dot_depth',
'node.depth.edgelength': 'node_dot_depth_dot_edgelength',
'node.height': 'node_dot_height',
'node.height.clado': 'node_dot_height_dot_clado',
'prop.part': 'prop_dot_part',
}
)
# Define custom R functions to help with coloring tree edges by
# population. These functions were written by Jacob Almagro-Garcia
# <jg10@sanger.ac.uk> at the Wellcome Trust Sanger Institute.
r("""
library(ape)
######################################################################################################################
#' Computes the number of leaves of each group that hang from each branch.
#' @param phylotree A tree of class phylo.
#' @param labelgroups A vector with the group of the tip labels (named with the labels).
#' @return A named matrix with the membership counts for each interior edge of the tree.
######################################################################################################################
computeEdgeGroupCounts <- function(phylotree, labelgroups) {
labels <- phylotree$tip.label
num_tips <- length(labels)
edge_names <- unique(sort(c(phylotree$edge)))
# This matrix will keep track of the group counts for each edge.
edge_group_counts <- matrix(0, nrow=length(edge_names), ncol=length(unique(sort(labelgroups))))
rownames(edge_group_counts) <- edge_names
colnames(edge_group_counts) <- unique(labelgroups)
# Init the leaf branches.
sapply(1:num_tips, function(l) {
edge_group_counts[as.character(l), as.character(labelgroups[phylotree$tip.label[l]])] <<- 1
})
# Sort edges by the value of the descendent
# The first segment will contain the leaves whereas the second the branches (closer to leaves first).
# We need to do this because leaves are numbered 1:num_tips and the branches CLOSER to the leaves
# with higher numbers.
edges <- phylotree$edge[order(phylotree$edge[,2]),]
branches <- edges[num_tips:nrow(edges),]
edges[num_tips:nrow(edges),] <- branches[order(branches[,1],decreasing=T),]
invisible(apply(edges, 1, function(edge) {
# Check if we are connecting a leaf.
if(edge[2] <= num_tips) {
e <- as.character(edge[1])
g <- as.character(labelgroups[phylotree$tip.label[edge[2]]])
edge_group_counts[e,g] <<- edge_group_counts[e,g] + 1
}
else {
e1 <- as.character(edge[1])
e2 <- as.character(edge[2])
edge_group_counts[e1,] <<- edge_group_counts[e1,] + edge_group_counts[e2,]
}
}))
return(edge_group_counts)
}
######################################################################################################################
#' Assigns the color of the majority group (hanging from) each branch.
#' @param phylotree A tree of class phylo.
#' @param edge_group_counts A named matrix with the group counts for each branch.
#' @param groupcolors A named vector with the color of each group.
#' @param equality_color The color to be used if there is no majority group.
#' @return A vector with the colors to be used with the tree branches.
######################################################################################################################
assignMajorityGroupColorToEdges <- function(phylotree, edge_group_counts, groupcolors, equality_color="gray") {
edge_colors <- apply(phylotree$edge, 1, function(branch) {
e <- as.character(branch[2])
major_group_index <- which.max(edge_group_counts[e,])
if(all(edge_group_counts[e,] == edge_group_counts[e,major_group_index]))
return(equality_color)
else
return(groupcolors[colnames(edge_group_counts)[major_group_index]])
})
return(edge_colors)
}
""") # noqa
_r_initialised = True
def nj(dist_square, labels=None):
"""Wrapper for the `ape` ``nj`` function, which performs the
neighbor-joining tree estimation of Saitou and Nei (1987).
Parameters
----------
dist_square : array_like, shape (`n_samples`, `n_samples`)
A pairwise distance matrix in square form.
labels : sequence of strings, optional
A sequence of strings to label the tips of the tree. Must be in the
same order as rows of the distance matrix.
Returns
-------
An R object of class "phylo".
See Also
--------
anhima.dist.pairwise_distance
"""
# setup R
_init_r()
# normalise inputs
dist_square = np.asarray(dist_square)
assert dist_square.ndim == 2
assert dist_square.shape[0] == dist_square.shape[1]
# convert distance matrix to R
m = ro.vectors.Matrix(dist_square)
# assign row and column labels
if labels:
# map all strings to str
labels = [str(l) for l in labels]
s = ro.StrVector(labels)
m.rownames = s
m.colnames = s
# build the tree
tree = ape.nj(m)
return tree
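# Usage sketch (assumes R, the `ape` package and rpy2 are installed as
# described in the module docstring): build a neighbour-joining tree from a
# small random symmetric distance matrix.
def _demo_nj():
    d = np.random.rand(5, 5)
    d = (d + d.T) / 2  # symmetrise
    np.fill_diagonal(d, 0)  # zero self-distances
    return nj(d, labels=['a', 'b', 'c', 'd', 'e'])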
def bionj(dist_square, labels=None):
"""Wrapper for the `ape` ``bionj`` function, which performs the BIONJ
algorithm of Gascuel (1997).
Parameters
----------
dist_square : array_like, shape (`n_samples`, `n_samples`)
A pairwise distance matrix in square form.
labels : sequence of strings, optional
A sequence of strings to label the tips of the tree. Must be in the
same order as rows of the distance matrix.
Returns
-------
An R object of class "phylo".
See Also
--------
anhima.dist.pairwise_distance
"""
# setup R
_init_r()
# normalise inputs
dist_square = np.asarray(dist_square)
assert dist_square.ndim == 2
assert dist_square.shape[0] == dist_square.shape[1]
# convert distance matrix to R
m = ro.vectors.Matrix(dist_square)
# assign row and column labels
if labels:
# map all strings to str
labels = [str(l) for l in labels]
s = ro.StrVector(labels)
m.rownames = s
m.colnames = s
# build the tree
tree = ape.bionj(m)
return tree
def plot_phylo(tree, plot_kwargs=None, add_scale_bar=None,
filename=None, width=None, height=None, units=None, res=None,
pointsize=None, bg=None, ax=None, imshow_kwargs=None):
"""Wrapper for the `ape` ``plot.phylo`` function, which plots phylogenetic
trees. Plotting will use the R `png` graphics device.
Parameters
----------
tree : R object of class "phylo"
The tree to plot.
plot_kwargs : dict-like, optional
A dictionary of keyword arguments that will be passed through to the
`ape` function ``plot.phylo()``. See the documentation for the `ape`
package for a full list of supported arguments.
add_scale_bar : dict-like, optional
A dictionary of keyword arguments that will be passed through to the
`ape` function ``add.scale.bar()``. See the documentation for the
`ape` package for a full list of supported arguments.
filename : string, optional
File path for the generated PNG image. If None, a temporary file will
be used.
width : int or float, optional
Width of the plot in `units`.
height : int or float, optional
Height of the plot in `units`.
units : {'px', 'in', 'cm', 'mm'}, optional
The units in which 'height' and 'width' are given. Can be 'px' (pixels,
the default), 'in' (inches), 'cm' or 'mm'.
res : int, optional
The nominal resolution in ppi which will be recorded in the bitmap
file, if a positive integer. Also used for 'units' other than the
default, and to convert points to pixels.
pointsize : float, optional
The default pointsize of plotted text, interpreted as big points (
1/72 inch) at 'res' ppi.
bg : color, optional
The background color.
ax : axes, optional
The axes on which to draw. If not provided, a new figure will be
created.
imshow_kwargs : dict-like
Additional keyword arguments passed through to `imshow()`.
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
# setup R
_init_r()
# setup image file
if filename is None:
tmp = tempfile.NamedTemporaryFile(suffix='.png')
filename = tmp.name
# initialise PNG device
png_arg_names = 'width', 'height', 'units', 'res', 'pointsize', 'bg'
png_args = dict()
for n in png_arg_names:
v = locals()[n]
if v is not None:
png_args[n] = v
debug(filename)
debug(png_args)
grdevices.png(filename, **png_args)
# plot
if plot_kwargs is None:
plot_kwargs = dict()
# adapt values for certain properties
for k in 'tip.color', 'edge.color':
if k in plot_kwargs:
v = plot_kwargs[k]
if isinstance(v, (list, tuple, np.ndarray)):
plot_kwargs[k] = ro.StrVector(v)
debug(plot_kwargs)
ape.plot_phylo(tree, **plot_kwargs)
# add scale bar
if add_scale_bar is not None:
ape.add_scale_bar(**add_scale_bar)
# finalise PNG device
grdevices.dev_off()
# read in PNG for matplotlib plotting
png = mpimg.imread(filename)
# set up axes for matplotlib plotting
if ax is None:
# try to make the figure exactly the right size for image native
# resolution
        pxh, pxw = png.shape[:2]  # imread returns (rows, cols, bands)
dpi = plt.rcParams['savefig.dpi']
w, h = pxw/dpi, pxh/dpi
fig, ax = plt.subplots(figsize=(w, h))
# no margin
fig.subplots_adjust(0, 0, 1, 1, hspace=0, wspace=0)
if imshow_kwargs is None:
imshow_kwargs = dict()
imshow_kwargs.setdefault('aspect', 'equal')
imshow_kwargs.setdefault('interpolation', 'none')
ax.imshow(png, **imshow_kwargs)
ax.set_axis_off()
return ax
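# Usage sketch building on _demo_nj() above: render the tree as an unrooted
# plot; 'type' and 'cex' are standard plot.phylo arguments passed through
# plot_kwargs.
def _demo_plot_phylo():
    tree = _demo_nj()
    return plot_phylo(tree,
                      plot_kwargs={'type': 'unrooted', 'cex': 0.8},
                      width=600, height=600, units='px')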
def write_tree(tree, filename=None, **kwargs):
"""
Wrapper for the `ape` ``write.tree`` function, which writes in a file a
tree in parenthetic format using the Newick (also known as New Hampshire)
format.
Parameters
----------
tree : R object of class "phylo"
The tree to be written.
filename : string, optional
        The name of the file to write to. If omitted, write the tree to a
string and return it.
**kwargs : keyword arguments
All further keyword arguments are passed through to ``write.tree``.
Returns
-------
result : string
A string if `filename` is None, otherwise no return value.
"""
# setup R
_init_r()
# write the file
if filename is None:
kwargs['file'] = ''
else:
kwargs['file'] = filename
result = ape.write_tree(tree, **kwargs)
# handle the case where tree is written to stdout
if filename is None:
return result[0]
def read_tree(filename, **kwargs):
"""
Wrapper for the `ape` ``read.tree`` function, which reads a file which
contains one or several trees in parenthetic format known as the Newick
or New Hampshire format.
Parameters
----------
filename : string
Name of the file to read.
**kwargs : keyword arguments
All further keyword arguments are passed through to ``read.tree``.
Returns
-------
tree : R object of class "phylo"
If several trees are read in the file, the returned object is of
class "multiPhylo", and is a list of objects of class "phylo". The name
of each tree can be specified by tree.names, or can be read from the
file (see details).
"""
# setup R
_init_r()
kwargs['file'] = filename
return ape.read_tree(**kwargs)
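# Round-trip sketch: serialise a tree to Newick and parse it back. With
# filename=None, write_tree returns the Newick string instead of touching
# the filesystem.
def _demo_newick_roundtrip():
    tree = _demo_nj()
    newick = write_tree(tree)  # filename=None -> Newick string
    tmp = tempfile.NamedTemporaryFile(suffix='.nwk', delete=False)
    tmp.close()
    write_tree(tree, filename=tmp.name)
    return newick, read_tree(tmp.name)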
def color_edges_by_group_majority(tree, labels, groups,
colors,
equality_color='gray'):
"""
Color the edges of a tree according to the majority group membership of
the descendant tips.
Parameters
----------
tree : R object of class "phylo"
The tree containing the edges to be colored.
labels : sequence of strings
The tip labels.
groups : sequence of strings
A sequence of strings of the same length as `labels`, where each item
is an identifier for the group to which the corresponding tip belongs.
colors : dict-like
A dictionary mapping groups to colors.
equality_color : string, optional
The color to use in the event of a tie.
Returns
-------
edge_colors : list of strings
A list of colors for the edges of the tree, to be passed into
:func:`plot_phylo`.
"""
# setup R
_init_r()
r_groups = ro.StrVector([str(g) for g in groups])
r_groups.names = ro.StrVector([str(l) for l in labels])
counts = r.computeEdgeGroupCounts(tree, r_groups)
r_colors = ro.StrVector([str(v) for v in colors.values()])
r_colors.names = ro.StrVector([str(k) for k in colors.keys()])
edge_colors = r.assignMajorityGroupColorToEdges(
tree, counts, groupcolors=r_colors, equality_color=equality_color
)
return list(edge_colors)
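# Usage sketch: colour edges by majority group membership and pass the result
# straight into plot_phylo via the 'edge.color' argument.
def _demo_colored_tree():
    labels = ['a', 'b', 'c', 'd', 'e']
    groups = ['pop1', 'pop1', 'pop2', 'pop2', 'pop2']
    colors = {'pop1': 'red', 'pop2': 'blue'}
    tree = _demo_nj()
    edge_colors = color_edges_by_group_majority(tree, labels, groups, colors)
    return plot_phylo(tree, plot_kwargs={'edge.color': edge_colors})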
|
{
"content_hash": "abc9f98fa35db5667e490956a31cdbd7",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 118,
"avg_line_length": 30.3305954825462,
"alnum_prop": 0.6107237153882608,
"repo_name": "alimanfoo/anhima",
"id": "b9a920a7d9a2479ba51acf150590a3e729ba6a17",
"size": "14795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anhima/tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "78864"
},
{
"name": "Python",
"bytes": "242188"
},
{
"name": "Shell",
"bytes": "1061"
}
],
"symlink_target": ""
}
|
"""Convert the CFS grib data into something mimicing IEMRE.
This will allow for downstream usage by PSIMS/Drydown. Run from RUN_NOON.sh
"""
import os
import sys
import datetime
import numpy as np
from scipy.interpolate import NearestNDInterpolator
from tqdm import tqdm
import pygrib
from pyiem.util import utc, ncopen, logger
from pyiem import iemre
LOG = logger()
DEFAULTS = {"srad": 0.0, "high_tmpk": 100.0, "low_tmpk": 400.0, "p01d": 0.0}
MULTIPLIER = {"p01d": 6 * 3600.0}
AGGFUNC = {
"srad": np.add,
"high_tmpk": np.maximum,
"low_tmpk": np.minimum,
"p01d": np.add,
}
def merge(nc, valid, gribname, vname):
"""Merge in the grib data"""
fn = valid.strftime(
(
f"/mesonet/ARCHIVE/data/%Y/%m/%d/model/cfs/%H/{gribname}"
".01.%Y%m%d%H.daily.grib2"
)
)
if not os.path.isfile(fn):
LOG.info("Missing %s, aborting", fn)
sys.exit()
grbs = pygrib.open(fn)
lats = None
lons = None
xi, yi = np.meshgrid(iemre.XAXIS, iemre.YAXIS)
ncvar = nc.variables[vname]
for grib in tqdm(
grbs, total=grbs.messages, desc=vname, disable=not sys.stdout.isatty()
):
ftime = valid + datetime.timedelta(hours=grib.forecastTime)
        # shift back 7 hours so the forecast hour maps onto the proper CST date
cst = ftime - datetime.timedelta(hours=7)
if cst.year != valid.year:
continue
if lats is None:
lats, lons = grib.latlons()
vals = grib.values * MULTIPLIER.get(vname, 1)
nn = NearestNDInterpolator((lons.flat, lats.flat), vals.flat)
vals = nn(xi, yi)
tstep = iemre.daily_offset(cst.date())
current = ncvar[tstep, :, :]
if current.mask.all():
current[:, :] = DEFAULTS[vname]
ncvar[tstep, :, :] = AGGFUNC[vname](current, vals)
if vname != "srad":
return
    # HACK: above we summed the four 6-hourly solar radiation grids, so
    # divide by four to rectify the field back to an average W m-2
for tstep in range(nc.variables[vname].shape[0]):
nc.variables[vname][tstep] = nc.variables[vname][tstep] / 4.0
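# Illustration of the initialise-then-aggregate pattern used in merge() above
# (a standalone numpy sketch, not part of the original script): cells that
# have never been written start masked, get seeded with the variable's
# neutral DEFAULTS value, then each new grid is folded in with its AGGFUNC.
def _demo_aggregate():
    current = np.ma.masked_all((2, 2))
    if current.mask.all():
        current[:, :] = DEFAULTS["low_tmpk"]  # 400 K, neutral for np.minimum
    new_vals = np.full((2, 2), 285.0)
    return AGGFUNC["low_tmpk"](current, new_vals)  # -> all cells 285.0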
def create_netcdf(valid):
"""Create and return the netcdf file"""
ncfn = "/mesonet/data/iemre/temp_cfs_%s.nc" % (valid.strftime("%Y%m%d%H"),)
nc = ncopen(ncfn, "w")
nc.title = "IEM Regridded CFS Member 1 Forecast %s" % (valid.year,)
    nc.platform = "Gridded Forecast"
nc.description = "IEM Regridded CFS on 0.125 degree grid"
nc.institution = "Iowa State University, Ames, IA, USA"
nc.source = "Iowa Environmental Mesonet"
nc.project_id = "IEM"
nc.realization = 1
nc.Conventions = "CF-1.0"
nc.contact = "Daryl Herzmann, akrherz@iastate.edu, 515-294-5978"
nc.history = ("%s Generated") % (
datetime.datetime.now().strftime("%d %B %Y"),
)
nc.comment = "No comment at this time"
# Setup Dimensions
nc.createDimension("lat", iemre.NY)
nc.createDimension("lon", iemre.NX)
days = iemre.daily_offset(valid.replace(month=12, day=31)) + 1
nc.createDimension("time", int(days))
# Setup Coordinate Variables
lat = nc.createVariable("lat", float, ("lat",))
lat.units = "degrees_north"
lat.long_name = "Latitude"
lat.standard_name = "latitude"
lat.axis = "Y"
lat[:] = iemre.YAXIS
lon = nc.createVariable("lon", float, ("lon",))
lon.units = "degrees_east"
lon.long_name = "Longitude"
lon.standard_name = "longitude"
lon.axis = "X"
lon[:] = iemre.XAXIS
tm = nc.createVariable("time", float, ("time",))
tm.units = "Days since %s-01-01 00:00:0.0" % (valid.year,)
tm.long_name = "Time"
tm.standard_name = "time"
tm.axis = "T"
tm.calendar = "gregorian"
tm[:] = np.arange(0, int(days))
high = nc.createVariable(
"high_tmpk", np.uint16, ("time", "lat", "lon"), fill_value=65535
)
high.units = "K"
high.scale_factor = 0.01
high.long_name = "2m Air Temperature Daily High"
high.standard_name = "2m Air Temperature"
high.coordinates = "lon lat"
low = nc.createVariable(
"low_tmpk", np.uint16, ("time", "lat", "lon"), fill_value=65535
)
low.units = "K"
low.scale_factor = 0.01
low.long_name = "2m Air Temperature Daily Low"
low.standard_name = "2m Air Temperature"
low.coordinates = "lon lat"
p01d = nc.createVariable(
"p01d", np.uint16, ("time", "lat", "lon"), fill_value=65535
)
p01d.units = "mm"
p01d.scale_factor = 0.01
p01d.long_name = "Precipitation"
p01d.standard_name = "Precipitation"
p01d.coordinates = "lon lat"
p01d.description = "Precipitation accumulation for the day"
rsds = nc.createVariable(
"srad", np.uint16, ("time", "lat", "lon"), fill_value=65535
)
rsds.units = "W m-2"
rsds.scale_factor = 0.01
rsds.long_name = "surface_downwelling_shortwave_flux_in_air"
rsds.standard_name = "surface_downwelling_shortwave_flux_in_air"
rsds.coordinates = "lon lat"
rsds.description = "Global Shortwave Irradiance"
nc.close()
nc = ncopen(ncfn, "a")
return nc
def finalize(nc):
"""Cleanup after our work."""
filename = nc.filepath()
# Close the netcdf file
nc.close()
# Rename it
newfilename = filename.replace("temp_", "")
LOG.debug("Renaming %s to %s", filename, newfilename)
os.rename(filename, newfilename)
def main(argv):
"""Go Main Go"""
if len(argv) == 4:
today = datetime.date(int(argv[1]), int(argv[2]), int(argv[3]))
else:
# Run for 12z yesterday
today = datetime.date.today() - datetime.timedelta(days=1)
LOG.debug("running for today=%s", today)
for hour in [0, 6, 12, 18]:
valid = utc(today.year, today.month, today.day, hour)
# Create netcdf file
nc = create_netcdf(valid)
# merge in the data
for gribname, vname in zip(
["dswsfc", "tmax", "tmin", "prate"],
["srad", "high_tmpk", "low_tmpk", "p01d"],
):
merge(nc, valid, gribname, vname)
# profit
finalize(nc)
if __name__ == "__main__":
main(sys.argv)
|
{
"content_hash": "e9d2176e81521924344bbb235d3353b9",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 79,
"avg_line_length": 31.788659793814432,
"alnum_prop": 0.604021404248419,
"repo_name": "akrherz/iem",
"id": "4fa74b896c9f1fc155bd0171cb0ca54cd6bd15d5",
"size": "6167",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/yieldfx/cfs2iemre_netcdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16912"
},
{
"name": "HTML",
"bytes": "1092923"
},
{
"name": "Hack",
"bytes": "7078"
},
{
"name": "JavaScript",
"bytes": "244253"
},
{
"name": "PHP",
"bytes": "3492474"
},
{
"name": "Python",
"bytes": "3279270"
},
{
"name": "Rich Text Format",
"bytes": "30075"
},
{
"name": "Shell",
"bytes": "72284"
}
],
"symlink_target": ""
}
|
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
import os
import requests
from beer_search_v2.models import Brewery, UntappdStyle, Country, AlcoholCategory, ContainerType, Product
from collections import OrderedDict
def get_main_display():
"""
Returns a list of dictionaries containing the information needed to display and filter
the main table on the index page.
"""
# First we gather/define the requisite information
jog_name = "Sérpöntunarlisti Járns og Glers"
beer = AlcoholCategory.objects.get(name="beer")
gift_box = ContainerType.objects.get(name="Gjafaaskja")
keg = ContainerType.objects.get(name="Kútur")
products = Product.objects.select_related(
"container",
"product_type",
"product_type__country",
"product_type__alcohol_category",
"product_type__untappd_info",
"product_type__untappd_info__brewery",
"product_type__untappd_info__brewery__country",
"product_type__untappd_info__style__simplifies_to"
).filter(
Q(product_type__alcohol_category=beer) | Q(product_type__untappd_info__isnull=False),
).exclude(
container=gift_box
).exclude(
container=keg
).order_by("product_type__alias")
# Then we curate it
type_dict = OrderedDict()
for product in products.all():
pid = product.product_type_id
if pid not in type_dict: # Initialize with all data common among all products of the same type
type_dict[pid] = {
"name": str(product.product_type),
"productId": pid,
"containers": [product.container.name],
"abv": product.product_type.abv,
"minVolume": product.volume,
"maxVolume": product.volume,
"minPrice": product.price,
"maxPrice": product.price,
"stores": [status["store"] for status in product.atvr_stock],
"available": product.available_in_atvr or product.available_in_jog,
"firstSeenAt": product.first_seen_at
}
if product.available_in_jog:
type_dict[pid]["stores"].append(jog_name)
if product.product_type.country:
type_dict[pid]["country"] = product.product_type.country.name
if product.product_type.untappd_info:
u_info = product.product_type.untappd_info
if u_info.style and u_info.style.simplifies_to:
type_dict[pid]["style"] = u_info.style.simplifies_to.name
type_dict[pid]["style_url"] = u_info.style.simplifies_to.get_absolute_url()
if u_info.brewery:
type_dict[pid]["brewery"] = str(u_info.brewery)
if "country" not in type_dict[pid]: # Country info is shaky, stored with great redundancy
if u_info.brewery.country:
type_dict[pid]["country"] = u_info.brewery.country.name
elif u_info.brewery.country_name:
type_dict[pid]["country"] = u_info.brewery.country_name
if u_info.rating:
type_dict[pid]["untappdRating"] = u_info.rating
# Fillers for those entries with no known information
if "country" not in type_dict[pid]:
type_dict[pid]["country"] = "?"
if "brewery" not in type_dict[pid]:
type_dict[pid]["brewery"] = "?"
if "style" not in type_dict[pid]:
type_dict[pid]["style"] = "?"
else:
type_dict[pid]["maxVolume"] = max(type_dict[pid]["maxVolume"], product.volume)
type_dict[pid]["minVolume"] = min(type_dict[pid]["minVolume"], product.volume)
type_dict[pid]["maxPrice"] = max(type_dict[pid]["maxPrice"], product.price)
type_dict[pid]["minPrice"] = min(type_dict[pid]["minPrice"], product.price)
if product.container.name not in type_dict[pid]["containers"]:
type_dict[pid]["containers"].append(product.container.name)
type_dict[pid]["stores"].extend([status["store"] for status in product.atvr_stock if
status["store"] not in type_dict[pid]["stores"]])
if product.available_in_jog and jog_name not in type_dict[pid]["stores"]:
type_dict[pid]["stores"].append(jog_name)
type_dict[pid]["available"] = type_dict[pid][
"available"] or product.available_in_atvr or product.available_in_jog
if type_dict[pid]["firstSeenAt"] and product.first_seen_at: # These are sometimes None...
type_dict[pid]["firstSeenAt"] = min(type_dict[pid]["firstSeenAt"], product.first_seen_at)
elif product.first_seen_at:
type_dict[pid]["firstSeenAt"] = product.first_seen_at
return [item for item in type_dict.values()]
def get_main_display_v3():
# TODO: Use the SQL in mainquery.sql to replace get_main_display
pass
def update_untappd_item(untappd_entity, verbose=True):
url = "https://api.untappd.com/v4/beer/info/{0}/".format(untappd_entity.untappd_id)
payload = {
"client_id": os.environ.get("UNTAPPD_CLIENT"),
"client_secret": os.environ.get("UNTAPPD_SECRET"),
"compact": "true"
}
json_data = requests.get(url, params=payload).json()
try:
assert json_data["meta"]["code"] == 200
except AssertionError:
if verbose:
print("Update of entity {} for {} did not complete successfully".format(
untappd_entity.untappd_id, untappd_entity
))
return
old_rating = untappd_entity.rating
new_rating = json_data["response"]["beer"]["rating_score"]
untappd_entity.rating = new_rating
if untappd_entity.style is None:
style_name = json_data["response"]["beer"]["beer_style"]
style = get_untappd_style_instance(style_name)
untappd_entity.style = style
if verbose:
print("Added style {0} to {1}.".format(style_name, untappd_entity.untappd_name))
if untappd_entity.brewery is None:
untappd_id = json_data["response"]["beer"]["brewery"]["brewery_id"]
untappd_brewery_name = json_data["response"]["beer"]["brewery"]["brewery_name"]
country_name = json_data["response"]["beer"]["brewery"]["country_name"]
brewery = get_brewery_instance(untappd_id, untappd_brewery_name, country_name)
untappd_entity.brewery = brewery
if verbose:
print("Added brewery {0} to {1}.".format(untappd_brewery_name, untappd_entity.untappd_name))
if not untappd_entity.untappd_name:
untappd_entity.untappd_name = json_data["response"]["beer"]["beer_name"]
if verbose:
print("Added untappd API name {} to entity {}.".format(
untappd_entity.untappd_name,
untappd_entity.untappd_id)
)
if not untappd_entity.logo_url:
untappd_entity.logo_url = json_data["response"]["beer"]["beer_label"]
if verbose:
print("Added logo to {}.".format(untappd_entity.untappd_name))
old_abv = untappd_entity.abv
new_abv = json_data["response"]["beer"]["beer_abv"]
untappd_entity.abv = new_abv
old_ibu = untappd_entity.ibu
new_ibu = json_data["response"]["beer"]["beer_ibu"]
untappd_entity.ibu = new_ibu
untappd_entity.save()
if verbose:
print("Successfully updated rating {0} from {1} to {2}".format(
untappd_entity.untappd_name, old_rating, new_rating)
)
if old_abv != new_abv:
print("Successfully updated abv of {} from {} to {}".format(
untappd_entity.untappd_name, old_abv, new_abv
))
if old_ibu != new_ibu:
print("Successfully updated ibu of {} from {} to {}".format(
untappd_entity.untappd_name, old_ibu, new_ibu
))
def get_untappd_style_instance(style_name, verbose=True):
try:
style = UntappdStyle.objects.get(name=style_name)
except ObjectDoesNotExist:
style = UntappdStyle()
style.name = style_name
style.save()
if verbose:
print("Created new style: {0}".format(style_name))
return style
def get_brewery_instance(untappd_id, brewery_name, brewery_country, verbose=True):
    try:
        brewery = Brewery.objects.get(untappd_id=untappd_id)
    except ObjectDoesNotExist:
        brewery = Brewery()
        brewery.untappd_id = untappd_id
        brewery.name = brewery_name
        brewery.country = get_country_instance(brewery_country)
        brewery.save()
        if verbose:
            print("Created new brewery: {0}".format(brewery_name))
    return brewery
def get_country_instance(country_name):
if not country_name:
country_name = "Óþekkt"
try:
country = Country.objects.get(name__iexact=country_name)
except ObjectDoesNotExist:
country = Country()
country.name = country_name
country.save()
return country
def get_alcohol_category_instance(cat_name):
try:
category = AlcoholCategory.objects.get(name=cat_name)
except ObjectDoesNotExist:
category = AlcoholCategory()
category.name = cat_name
category.save()
return category
def get_container_instance(container_name):
try:
container = ContainerType.objects.get(name=container_name)
except ObjectDoesNotExist:
container = ContainerType()
container.name = container_name
container.save()
return container
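# Note: each get_*_instance helper above is a hand-rolled get-or-create. A
# minimal sketch of the same behaviour (minus the verbose printing) using
# Django's built-in, assuming no extra side effects are needed:
def get_container_instance_alt(container_name):
    container, _created = ContainerType.objects.get_or_create(name=container_name)
    return container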
|
{
"content_hash": "536986adcfe590427819c4a0f6cf96c0",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 115,
"avg_line_length": 40.525,
"alnum_prop": 0.5999383096853794,
"repo_name": "Ernir/bjorleitin",
"id": "cd25b830f78ed91e533f5966f31bac32aeaf002c",
"size": "9732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beer_search_v2/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2346"
},
{
"name": "HTML",
"bytes": "31637"
},
{
"name": "JavaScript",
"bytes": "12604"
},
{
"name": "Python",
"bytes": "98299"
},
{
"name": "Shell",
"bytes": "602"
}
],
"symlink_target": ""
}
|
import inspect
from pycket import values
from pycket.error import SchemeException
from rpython.rlib import jit, objectmodel, unroll
from rpython.rlib.objectmodel import specialize
class EndOfInput(Exception):
pass
class ArgParser(object):
_attrs_ = ['context', 'args', 'index']
_immutable_fields_ = ['context', 'args']
def __init__(self, context, args, start_at=0):
assert start_at >= 0
self.context = context
self.args = args
self.index = start_at
def __nonzero__(self):
return self.has_more()
def has_more(self):
return 0 <= self.index < len(self.args)
    def next(self):
        # Peek at the current argument without consuming it; expect()
        # advances the index only after successful validation.
        if not self.has_more():
            raise EndOfInput
        return self.args[self.index]
@specialize.arg(1)
def expect(self, *args):
val = self.next()
if validate_arg(val, *args):
self.index += 1
return val
raise SchemeException(
"%s: expected %s at argument %d got %s" %
(self.context, errorname(*args), self.index, val.tostring()))
@specialize.arg(1)
@jit.unroll_safe
def expect_many(self, *args):
length = len(self.args) - self.index
results = [None] * length
for i in range(length):
results[i] = self.expect(*args)
return results
def is_constant_class(cls):
return inspect.isclass(cls)
@specialize.arg(1)
def validate_arg(value, arg, *args):
if is_constant_class(arg):
if isinstance(value, arg):
return True
elif value is arg:
return True
return bool(args) and validate_arg(value, *args)
@specialize.arg(0)
def _errorname(arg, *args):
if is_constant_class(arg):
retval = arg.errorname
else:
retval = arg.tostring()
if not args:
return retval
else:
return retval + " " + _errorname(*args)
@specialize.arg(0)
def errorname(arg, *args):
if not args:
return _errorname(arg)
return "(or/c %s)" % _errorname(arg, *args)
from rpython.rtyper.extregistry import ExtRegistryEntry
class Entry(ExtRegistryEntry):
_about_ = is_constant_class
def compute_result_annotation(self, s_cls):
from rpython.annotator.model import SomeBool, SomePBC, SomeInstance
r = SomeBool()
assert s_cls.is_constant()
if isinstance(s_cls, SomePBC):
r.const = inspect.isclass(s_cls.const)
elif isinstance(s_cls, SomeInstance):
r.const = False
else:
assert False
return r
def specialize_call(self, hop):
from rpython.rtyper.lltypesystem import lltype
hop.exception_cannot_occur()
return hop.inputconst(lltype.Bool, hop.s_result.const)
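# Usage sketch with a hypothetical value class (the concrete pycket value
# classes are not shown in this module): expect() validates one argument and
# advances, expect_many() drains whatever remains.
class _DemoValue(object):
    errorname = "demo-value"
    def tostring(self):
        return "<demo-value>"
def _demo_parse():
    args = [_DemoValue(), _DemoValue(), _DemoValue()]
    parser = ArgParser("demo-proc", args)
    first = parser.expect(_DemoValue)      # isinstance check passes, index -> 1
    rest = parser.expect_many(_DemoValue)  # consumes the remaining two
    return first, rest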
|
{
"content_hash": "97303a1119ac980dc99db55ef22f9d9a",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 77,
"avg_line_length": 26.752380952380953,
"alnum_prop": 0.6023495906016376,
"repo_name": "samth/pycket",
"id": "ed3f8b9130d483b88e40342673d0ace40e3c286e",
"size": "2810",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pycket/argument_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "654"
},
{
"name": "Eagle",
"bytes": "1986"
},
{
"name": "KiCad",
"bytes": "76411"
},
{
"name": "Makefile",
"bytes": "2680"
},
{
"name": "Python",
"bytes": "1050245"
},
{
"name": "Racket",
"bytes": "694687"
},
{
"name": "Scheme",
"bytes": "215"
},
{
"name": "Shell",
"bytes": "8656"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.views.generic.base import TemplateView
from django.contrib import admin as default_admin
from .admin import admin, staff, user
urlpatterns = patterns('',
url(r'^admin/', include(default_admin.site.urls)),
url(r'^adminpanel/', include(admin.urls)),
url(r'^staffpanel/', include(staff.urls)),
url(r'^userpanel/', include(user.urls)),
#url(r'^', include('django.contrib.auth.urls')),
url(r'^$', TemplateView.as_view(template_name='index.html')),
)
|
{
"content_hash": "994b5e1f3c73401dbdbafc14e5804851",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 65,
"avg_line_length": 38.214285714285715,
"alnum_prop": 0.7009345794392523,
"repo_name": "barszczmm/django-wpadmin",
"id": "f0eb3504605bc781a3b6d42217580c4c6eb828cd",
"size": "535",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test_project/test_project/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51789"
},
{
"name": "HTML",
"bytes": "39814"
},
{
"name": "JavaScript",
"bytes": "5244"
},
{
"name": "Python",
"bytes": "45582"
},
{
"name": "Shell",
"bytes": "742"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from scimath.units.acceleration import *
|
{
"content_hash": "c8ae7d52ef858ac4f95f784045fda43f",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 40,
"avg_line_length": 40,
"alnum_prop": 0.8,
"repo_name": "enthought/etsproxy",
"id": "ed63c43de533f35fe8abd0a8921883b1a63acda8",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/units/acceleration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
from biicode.common.model.brl.brl_user import BRLUser
from biicode.common.utils.serializer import Serializer, DictDeserializer, SetDeserializer
from biicode.server.model.epoch.utc_datetime import UtcDatetime
from biicode.common.exception import InvalidNameException, ForbiddenException, BiiException
from biicode.server.exception import DuplicateBlockException
import hashlib
from biicode.server.model.permissions.permissions import Permissions
from biicode.common.model.brl.brl_block import BRLBlock
from biicode.common.model.id import ID
from biicode.server.utils.passlib_pbkdf2_sha512_wrapper import encrypt, verify
from biicode.server.model.social_account import SocialAccount
import time
from biicode.server.utils.encryption import AESEncryption
from biicode.server.conf import BII_AES_SECRET_KEY, BII_MAX_USER_WORKSPACE_SIZE
class User(object):
SERIAL_ID_KEY = '_id'
SERIAL_ENCRYPTED_PASSWORD = "p"
SERIAL_PASSWORD_TIMESTAMP = "pt"
# Profile data
SERIAL_FIRSTNAME = "f"
SERIAL_LASTNAME = "l"
SERIAL_COUNTRY = "c"
SERIAL_DESCRIPTION = "d"
SERIAL_EMAIL = "e"
SERIAL_VISIBLE_EMAIL = "v"
SERIAL_ALLOW_MAILING = "a"
SERIAL_ACTIVE = "u"
SERIAL_STAFF = "s"
SERIAL_JOINED_DATE = "j"
SERIAL_CONFIRMATION_DATE = "cd"
SERIAL_CONFIRMATION_TOKEN = "ct"
# Old user workspace fields
SERIAL_MOD_COUNTER = 'wc'
SERIAL_BLOCKS = 'wb'
SERIAL_HIVES = 'wh'
SERIAL_ADMINISTRATORS = 'wa'
SERIAL_NUMERIC_ID = 'n'
SERIAL_SOCIAL_ACCOUNTS = 'sa'
# Achievements
SERIAL_READ_API_COUNTER = 'ac1'
SERIAL_PUBLISH_COUNTER = 'ac2'
SERIAL_REUSE_COUNTER = 'ac3'
# Additional user profile fields
SERIAL_STREET1 = "ps1"
SERIAL_STREET2 = "ps2"
SERIAL_CITY = "pc"
SERIAL_POSTAL_CODE = "ppc"
SERIAL_REGION = "pr"
SERIAL_TAX_ID = "pti"
SERIAL_VAT = "pv"
# OAUTH CREDENTIALS
SERIAL_OAUTH_GITHUB_TOKEN = "oh"
SERIAL_OAUTH_GOOGLE_TOKEN = "og"
# MAX WORKSPACE SIZE
SERIAL_MAX_WORKSPACE_SIZE = "m"
EMAIL_REGEX = r"[^@]+@[^@]+\.[^@]+"
# Invited by
SERIAL_INVITED_BY = "in"
def __init__(self, brl_id):
self.ID = brl_id
self.password_timestamp = None
self._encrypted_password = None
# TODO: Confirmed email accounts? <= Only one at the moment, migrate active account
# Basic fields
self._email = None
self.firstname = None
self.lastname = None
self.country = None
self.description = None
self.visible_email = False
# Admin fields
self.staff = False
self.allow_mailing = True
# Activation fields
self.active = False
self.confirmation_token = None
self.joined_date = None
self.confirmation_date = None
# Workspace fields
self.block_counter = 0
self.blocks = {} # {BRLBlock => (set(TAGS), "description", bytes_size)}
self.administrators = Permissions()
# Dict of brl_user => Num grants (block, hive or administrators)
self.numeric_id = None
self.social_accounts = {}
# Achievements
self.read_api_counter = 0
self.publish_counter = 0
self.reuse_counter = 0
# Profile fields
self.street_1 = ""
self.street_2 = ""
self.city = ""
self.postal_code = ""
self.region = ""
self.tax_id = ""
self.vat = ""
# OAuth credentials
self.oauth_google_token = None
self.oauth_github_token = None
# Max workspace size
self.max_workspace_size = BII_MAX_USER_WORKSPACE_SIZE
# Invited by
self.invited_by = None
def delete_block(self, brl_block):
        # pop raises KeyError if the block is missing, so a bad delete fails loudly
self.blocks.pop(brl_block)
@property
def email(self):
return self._email
@email.setter
def email(self, theemail):
if theemail:
import re
if not re.match(User.EMAIL_REGEX, theemail):
raise InvalidNameException("Invalid email")
self._email = theemail
@property
def password(self):
return None
@password.setter
def password(self, new_plain_password):
if new_plain_password:
if len(new_plain_password) < 6: # TODO: Restrict with Regular expression?
raise InvalidNameException("Password must have at least 6 characters")
self._encrypted_password = encrypt(new_plain_password)
self.password_timestamp = time.time()
def valid_password(self, plain_password):
return verify(plain_password, self._encrypted_password)
@property
def gravatar_email_hash(self):
if self._email:
return hashlib.md5(self.email.lower()).hexdigest()
@property
def full_name(self):
return " ".join([self.firstname or "", self.lastname or ""])
# ######### OLD WORKSPACE METHODS ###########
def add_block(self, brl, tags=None, description=""):
'''Adds a block to the user workspace'''
tags = tags or set()
        if brl.owner != self.ID:
            raise ForbiddenException('Cannot add a block not owned by the user: %s %s' % (str(brl), self.ID))
        if brl in self.blocks:
            raise DuplicateBlockException('Block %s already exists for %s' % (brl, self.ID))
matching = [x for x in self.blocks if brl.lower() == x.lower()]
if matching:
raise BiiException("You're trying to publish block named %s. There is "
"already a block named %s among your blocks" % (brl, matching[0]))
# Add tags and description
self.blocks[brl] = [set(tags), description, 0] # 0 bytes
block_id = self.numeric_id + self.block_counter
self.block_counter += 1
return block_id
def add_block_tag(self, brl_block, tag):
self.blocks[brl_block][0].add(tag)
def remove_block_tag(self, brl_block, tag):
self.blocks[brl_block][0].remove(tag)
def get_block_tags(self, brl_block):
return self.blocks[brl_block][0]
def set_block_description(self, brl_block, new_description):
self.blocks[brl_block][1] = new_description
def get_block_description(self, brl_block):
return self.blocks[brl_block][1]
def add_block_size_bytes(self, brl_block, num_bytes):
self.blocks[brl_block][2] += num_bytes
def get_block_size_bytes(self, brl_block):
return self.blocks[brl_block][2]
@property
def blocks_bytes(self):
return sum(self.get_block_size_bytes(brl_block) for brl_block in self.blocks.keys())
def fill_user_oauth_token(self, provider, token):
if provider == "google":
self.oauth_google_token = token
elif provider == "github":
self.oauth_github_token = token
@property
def ga_client_id(self):
"""Analytics client id"""
aes_manager = AESEncryption(BII_AES_SECRET_KEY)
client_id = aes_manager.encrypt(self.ID)
return client_id
# END OLD WORKSPACE METHODS ###########
def serialize(self):
return Serializer().build(
(self.SERIAL_ID_KEY, self.ID),
(self.SERIAL_NUMERIC_ID, self.numeric_id),
(self.SERIAL_ENCRYPTED_PASSWORD, self._encrypted_password),
(self.SERIAL_PASSWORD_TIMESTAMP, self.password_timestamp),
(self.SERIAL_FIRSTNAME, self.firstname),
(self.SERIAL_LASTNAME, self.lastname),
(self.SERIAL_COUNTRY, self.country),
(self.SERIAL_DESCRIPTION, self.description),
(self.SERIAL_EMAIL, self.email),
(self.SERIAL_VISIBLE_EMAIL, self.visible_email),
(self.SERIAL_ALLOW_MAILING, self.allow_mailing),
(self.SERIAL_ACTIVE, self.active),
(self.SERIAL_STAFF, self.staff),
(self.SERIAL_JOINED_DATE, self.joined_date),
(self.SERIAL_CONFIRMATION_DATE, self.confirmation_date),
(self.SERIAL_CONFIRMATION_TOKEN, self.confirmation_token),
# old workspace fields
(self.SERIAL_MOD_COUNTER, self.block_counter),
(self.SERIAL_BLOCKS, self.blocks),
(self.SERIAL_ADMINISTRATORS, self.administrators),
(self.SERIAL_SOCIAL_ACCOUNTS, self.social_accounts),
(self.SERIAL_READ_API_COUNTER, self.read_api_counter),
(self.SERIAL_PUBLISH_COUNTER, self.publish_counter),
(self.SERIAL_REUSE_COUNTER, self.reuse_counter),
# Additional profile fields
(self.SERIAL_STREET1, self.street_1),
(self.SERIAL_STREET2, self.street_2),
(self.SERIAL_CITY, self.city),
(self.SERIAL_POSTAL_CODE, self.postal_code),
(self.SERIAL_REGION, self.region),
(self.SERIAL_TAX_ID, self.tax_id),
(self.SERIAL_VAT, self.vat),
(self.SERIAL_OAUTH_GITHUB_TOKEN, self.oauth_github_token),
(self.SERIAL_OAUTH_GOOGLE_TOKEN, self.oauth_google_token),
(self.SERIAL_MAX_WORKSPACE_SIZE, self.max_workspace_size),
# Invited by
(self.SERIAL_INVITED_BY, self.invited_by),
)
@staticmethod
def deserialize(doc):
brl = BRLUser(doc[User.SERIAL_ID_KEY])
user = User(brl)
user._encrypted_password = doc[User.SERIAL_ENCRYPTED_PASSWORD]
user.password_timestamp = doc.get(User.SERIAL_PASSWORD_TIMESTAMP, None)
if User.SERIAL_NUMERIC_ID in doc:
user.numeric_id = ID.deserialize(doc[User.SERIAL_NUMERIC_ID])
# Profile fields
user.firstname = doc.get(User.SERIAL_FIRSTNAME, None)
user.lastname = doc.get(User.SERIAL_LASTNAME, None)
user.country = doc.get(User.SERIAL_COUNTRY, None)
user.description = doc.get(User.SERIAL_DESCRIPTION, None)
user.email = doc.get(User.SERIAL_EMAIL, None)
user.visible_email = doc.get(User.SERIAL_VISIBLE_EMAIL, 0) == 1
user.allow_mailing = doc.get(User.SERIAL_ALLOW_MAILING, 0) == 1
user.active = doc.get(User.SERIAL_ACTIVE, 0) == 1
user.staff = doc.get(User.SERIAL_STAFF, 0) == 1
user.joined_date = UtcDatetime.deserialize(doc.get(User.SERIAL_JOINED_DATE, None))
user.confirmation_date = UtcDatetime.deserialize(doc.get(User.SERIAL_CONFIRMATION_DATE,
None))
user.confirmation_token = doc.get(User.SERIAL_CONFIRMATION_TOKEN, None)
# Old workspace methods
user.block_counter = doc.get(User.SERIAL_MOD_COUNTER, 0)
blocks_data = doc.get(User.SERIAL_BLOCKS, {})
user.blocks = DictDeserializer(BRLBlock, BlockMetaInfoDeserializer).deserialize(blocks_data)
user.administrators = Permissions.deserialize(doc.get(User.SERIAL_ADMINISTRATORS, {}))
social_accounts_doc = doc.get(User.SERIAL_SOCIAL_ACCOUNTS)
user.social_accounts = DictDeserializer(str, SocialAccount).deserialize(social_accounts_doc)
# Achievements
user.read_api_counter = doc.get(User.SERIAL_READ_API_COUNTER, 0)
user.publish_counter = doc.get(User.SERIAL_PUBLISH_COUNTER, 0)
user.reuse_counter = doc.get(User.SERIAL_REUSE_COUNTER, 0)
# Additional profile fields
user.street_1 = doc.get(User.SERIAL_STREET1, "")
user.street_2 = doc.get(User.SERIAL_STREET2, "")
user.city = doc.get(User.SERIAL_CITY, "")
user.postal_code = doc.get(User.SERIAL_POSTAL_CODE, "")
user.region = doc.get(User.SERIAL_REGION, "")
user.tax_id = doc.get(User.SERIAL_TAX_ID, "")
user.vat = doc.get(User.SERIAL_VAT, "")
# OAuth
user.oauth_github_token = doc.get(User.SERIAL_OAUTH_GITHUB_TOKEN, None)
user.oauth_google_token = doc.get(User.SERIAL_OAUTH_GOOGLE_TOKEN, None)
# Max workspace size, default BII_MAX_USER_WORKSPACE_SIZE
user.max_workspace_size = doc.get(User.SERIAL_MAX_WORKSPACE_SIZE,
BII_MAX_USER_WORKSPACE_SIZE)
# Invited by
user.invited_by = doc.get(User.SERIAL_INVITED_BY, None)
return user
class BlockMetaInfoDeserializer(object):
'''Deserializes the tuple of values of each brlblock metainfo.
(tags, description)'''
@staticmethod
def deserialize(data):
if len(data) == 2: # In case migration failed we insert 0 bytes to size
data.append(0)
return [SetDeserializer(unicode).deserialize(data[0]), unicode(data[1]), data[2]]
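# Round-trip sketch (assumes a configured biicode server environment, since
# this module reads conf values at import time, and that the epoch and
# permissions deserializers tolerate the fresh-user defaults used here):
def _demo_user_roundtrip():
    user = User(BRLUser("demo_user"))
    user.email = "demo@example.com"
    user.password = "secret123"  # setter encrypts; plain text is never stored
    doc = user.serialize()
    clone = User.deserialize(doc)
    assert clone.valid_password("secret123")
    return clone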
|
{
"content_hash": "0555e385eae91f9a8e0aca435694703e",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 100,
"avg_line_length": 38.54545454545455,
"alnum_prop": 0.6158018867924528,
"repo_name": "biicode/bii-server",
"id": "5aa227dfd2618e9e02000438161e646f9f92c702",
"size": "12720",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "model/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "400132"
}
],
"symlink_target": ""
}
|
import sys
import os
import serial
import time
from os.path import join
def send_data( c, data ):
    c.read( 100 )  # drain any pending input before starting
    length = len( data )
    c.write( "%d " % length )
    print "Sending: %d bytes" % length
    buffersize = 512
    sent = 0
    while sent < length:
        print "S"
        chunk = min( buffersize, length - sent )
        print sent, sent + chunk
        c.write( data[sent:sent + chunk] )
        sent += chunk
        print length, "|", sent
        # wait for the single ack byte before sending the next chunk
        t = c.read( 1 )
        sys.stdout.write( t )
def install_binary( c, sourcepath ):
filename = os.path.basename( sourcepath )
targetpath = "C:\\sys\\bin\\" + filename
f = open( sourcepath,'rb' ); data = f.read(); f.close()
cmd = "put %s\n" % targetpath
print cmd
c.write( cmd )
send_data(c, data )
def install_resource( c, sourcepath ):
targetpath = "C:\\" + sourcepath
sourcepath = join( "GCCE", "LogMan", sourcepath )
f = open( sourcepath,'rb' ); data = f.read(); f.close()
cmd = "put %s\n" % targetpath
#print cmd
c.write( cmd )
send_data(c, data )
def install( c, source, target ):
sourcepath = join( "GCCE", "LogMan", source )
f = open( sourcepath,'rb' ); data = f.read(); f.close()
cmd = "put %s\n" % target
print cmd
c.write( cmd )
send_data(c, data )
def start():
# Program parameter definitions
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-p", "--port", dest="port",
help="Used COM port number",
default = None )
(options, args) = parser.parse_args()
portnumber = options.port
if portnumber is None:
portnumber = raw_input("Give serial port number: ")
port = "//./COM%s" % portnumber
print "Using port", port
c = serial.Serial( port, 115200, timeout = 1 )
#c.write( "cp c:\\data\\logman.exe c:\\sys\\bin\n" )
c.write( "put c:\\sys\\bin\\LogMan.exe\n")
install_binary( c, join( "GCCE", "LogMan", "sys", "bin", "LogMan.exe" ) )
install( c, join( "private", "10003a3f", "import", "apps", "LogManGui.rsc" ),
join( "C:\\", "resource", "apps", "LogManGui.rsc" ) )
#install_resource( c, join( "private", "10003a3f", "import", "apps", "LogManGui_reg.rsc" ) )
    t = " "
    while t != "":
        t = c.read( 1 )
        sys.stdout.write( t )
    c.close( )
if __name__ == "__main__":
    start()
|
{
"content_hash": "e81ec28e05111465e9e2dce9b55acc75",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 96,
"avg_line_length": 26.44859813084112,
"alnum_prop": 0.5095406360424029,
"repo_name": "SymbiSoft/logman-for-symbian",
"id": "bb6cc3096041801b6322c553212afcd161ee10da",
"size": "2830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "install.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3694"
},
{
"name": "C++",
"bytes": "156020"
},
{
"name": "Makefile",
"bytes": "604"
},
{
"name": "Pan",
"bytes": "383"
},
{
"name": "Python",
"bytes": "44197"
}
],
"symlink_target": ""
}
|
from unittest.mock import MagicMock
import pytest
from allauth.socialaccount.models import SocialAccount, SocialLogin
from pontoon.base.adapter import PontoonSocialAdapter
# We have to support a customized adapter during the transition of accounts
# between providers.
def _get_sociallogin(user, provider):
"""
    Returns a ready SocialLogin object for the given auth provider.
"""
socialaccount = SocialAccount(
user=user,
uid="1234",
provider=provider,
)
socialaccount.extra_data = {"email": user.email}
sociallogin = SocialLogin()
sociallogin.account = socialaccount
return sociallogin
@pytest.fixture
def social_adapter0(request, user_a):
log_mock = MagicMock()
adapter = PontoonSocialAdapter()
sociallogin = _get_sociallogin(user_a, "fxa")
return user_a, adapter, sociallogin, log_mock
@pytest.mark.django_db
def test_adapter_base_get_connect_normal_auth_account(social_adapter0):
user, adapter, sociallogin, log_mock = social_adapter0
log_mock.return_value = False
adapter.pre_social_login(
MagicMock(),
sociallogin,
)
assert sociallogin.account.pk
assert sociallogin.user == user
@pytest.mark.django_db
def test_adapter_base_connect_existing_account(social_adapter0):
user, adapter, sociallogin, log_mock = social_adapter0
adapter.pre_social_login(
MagicMock(),
sociallogin,
)
assert sociallogin.account.pk
assert sociallogin.user == user
|
{
"content_hash": "d3d8c0fcdc1609e0709fdc0e8543d88e",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 73,
"avg_line_length": 25.559322033898304,
"alnum_prop": 0.7095490716180372,
"repo_name": "mozilla/pontoon",
"id": "77286696dd6e5bccfd7011e6dbc5667dde3ef57c",
"size": "1508",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pontoon/base/tests/test_adapter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "238788"
},
{
"name": "Dockerfile",
"bytes": "1772"
},
{
"name": "Fluent",
"bytes": "39684"
},
{
"name": "HTML",
"bytes": "181601"
},
{
"name": "JavaScript",
"bytes": "1163971"
},
{
"name": "Jinja",
"bytes": "1894"
},
{
"name": "Makefile",
"bytes": "5635"
},
{
"name": "Procfile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "1488341"
},
{
"name": "Shell",
"bytes": "6245"
},
{
"name": "TypeScript",
"bytes": "457667"
}
],
"symlink_target": ""
}
|
"""
A modification of updating_plot3.py.
Three of the plots are now oriented vertically, but the dataspace of all
6 plots is still linked. Panning along the X axis of a vertical plot
will move the Y axis of one of the horizontally-oriented plots, and vice
versa.
"""
# Major library imports
from numpy import arange
from scipy.special import jn
# Enthought library imports
from enable.api import Window
from enable.example_support import DemoFrame, demo_main
from traits.api import HasTraits
from pyface.timer.api import Timer
# Chaco imports
from chaco.api import create_line_plot, OverlayPlotContainer, ArrayDataSource
from chaco.tools.api import MoveTool, PanTool, ZoomTool
COLOR_PALETTE = ("mediumslateblue", "maroon", "darkgreen", "goldenrod",
"purple", "indianred")
PLOT_SIZE = 250
class AnimatedPlot(HasTraits):
def __init__(self, x, y, color="blue", bgcolor="white", orientation="h"):
self.y_values = y[:]
        if isinstance(x, ArrayDataSource):
self.x_values = x.get_data()[:]
plot = create_line_plot((x, self.y_values), color=color,
bgcolor=bgcolor, add_grid=True,
add_axis=True, orientation=orientation)
else:
self.x_values = x[:]
plot = create_line_plot((self.x_values,self.y_values), color=color,
bgcolor=bgcolor, add_grid=True,
add_axis=True, orientation=orientation)
plot.resizable = ""
plot.bounds = [PLOT_SIZE, PLOT_SIZE]
plot.unified_draw = True
plot.tools.append(PanTool(plot, drag_button="right"))
plot.tools.append(MoveTool(plot))
plot.overlays.append(ZoomTool(plot, tool_mode="box", always_on=False))
self.plot = plot
self.numpoints = len(self.x_values)
self.current_index = self.numpoints/2
self.increment = 2
def timer_tick(self):
if self.current_index <= self.numpoints/3:
self.increment = 2
elif self.current_index == self.numpoints:
self.increment = -2
self.current_index += self.increment
if self.current_index > self.numpoints:
self.current_index = self.numpoints
self.plot.index.set_data(self.x_values[:self.current_index])
self.plot.value.set_data(self.y_values[:self.current_index])
self.plot.request_redraw()
class PlotFrame(DemoFrame):
def _create_window(self):
numpoints = 50
low = -5
high = 15.0
x = arange(low, high, (high-low)/numpoints)
container = OverlayPlotContainer(bgcolor="lightgray")
common_index = None
index_range = None
value_range = None
self.animated_plots = []
for i, color in enumerate(COLOR_PALETTE):
if not common_index:
animated_plot = AnimatedPlot(x, jn(i,x), color)
plot = animated_plot.plot
common_index = plot.index
index_range = plot.index_mapper.range
value_range = plot.value_mapper.range
else:
if i % 2 == 1:
orientation = "v"
else:
orientation = "h"
animated_plot = AnimatedPlot(common_index, jn(i,x), color,
orientation=orientation)
plot = animated_plot.plot
plot.index_mapper.range = index_range
plot.value_mapper.range = value_range
container.add(plot)
self.animated_plots.append(animated_plot)
for i, a_plot in enumerate(self.animated_plots):
a_plot.plot.position = [50 + (i%3)*(PLOT_SIZE+50),
50 + (i//3)*(PLOT_SIZE+50)]
self.timer = Timer(100.0, self.onTimer)
self.container = container
return Window(self, -1, component=container)
def onTimer(self, *args):
for plot in self.animated_plots:
plot.timer_tick()
return
if __name__ == "__main__":
# Save demo so that it doesn't get garbage collected when run within
# existing event loop (i.e. from ipython).
demo = demo_main(PlotFrame, size=(950, 650), title="Updating line plot")
|
{
"content_hash": "32a29b771db7bbbd33a308f49f85093e",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 79,
"avg_line_length": 35.60655737704918,
"alnum_prop": 0.5844843462246777,
"repo_name": "burnpanck/chaco",
"id": "30c64d89795c45064948c46f48cdf4076dad415a",
"size": "4366",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/demo/updating_plot/updating_plot4.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "57089"
},
{
"name": "C++",
"bytes": "9881"
},
{
"name": "Gnuplot",
"bytes": "611"
},
{
"name": "Python",
"bytes": "1761203"
}
],
"symlink_target": ""
}
|
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from rpcore.pluginbase.base_plugin import BasePlugin
from .volumetrics_stage import VolumetricsStage
class Plugin(BasePlugin):
name = "Volumetric Lighting"
author = "tobspr <tobias.springer1@gmail.com>"
    description = ("This plugin adds support for volumetric lighting")
version = "0.1 alpha (!)"
required_plugins = ("pssm",)
def on_stage_setup(self):
self.stage = self.create_stage(VolumetricsStage)
self.stage.required_inputs.append("PSSMSceneSunShadowMVP")
self.stage.required_pipes.append("PSSMSceneSunShadowMapPCF")
self.stage.enable_volumetric_shadows = self.get_setting("enable_volumetric_shadows")
|
{
"content_hash": "74434811a204aacb43572df7ab6e9e3c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 92,
"avg_line_length": 40.40909090909091,
"alnum_prop": 0.7705286839145107,
"repo_name": "eswartz/RenderPipeline",
"id": "bd86761d8fa0362083e3314e3d86204ad645344e",
"size": "1778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpplugins/volumetrics/plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1241"
},
{
"name": "C",
"bytes": "21397"
},
{
"name": "C++",
"bytes": "160537"
},
{
"name": "GLSL",
"bytes": "712004"
},
{
"name": "Groff",
"bytes": "114"
},
{
"name": "Python",
"bytes": "1374140"
}
],
"symlink_target": ""
}
|
import argparse
import io
import os
import pstats
import sys
from collections import Counter
from itertools import chain
def get_lines(dir_path):
rval = []
for name in sorted(os.listdir(dir_path)):
path = os.path.join(dir_path, name)
if not os.path.isdir(path):
with io.open(path, "rt") as f:
for line in f:
rval.append(line.rstrip())
return rval
def check_output(items, exp_items):
if len(items) != len(exp_items):
raise RuntimeError("n. output items = %d (expected: %d)" % (
len(items), len(exp_items)
))
for i, (it, exp_it) in enumerate(zip(items, exp_items)):
if it != exp_it:
raise RuntimeError("wrong output item #%d: %r (expected: %r)" % (
i, it, exp_it
))
def check_counters(counter, exp_counter):
return check_output(sorted(counter.items()), sorted(exp_counter.items()))
def word_count(lines):
return Counter(chain(*(_.split() for _ in lines)))
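# Quick pure-Python sanity check for word_count (no Hadoop run involved):
def _demo_word_count():
    assert word_count(["a b b", "b c"]) == Counter({"a": 1, "b": 3, "c": 1})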
def check_map_only(in_dir, out_dir):
uc_lines = [_.upper() for _ in get_lines(in_dir)]
out_values = [_.split("\t", 1)[1] for _ in get_lines(out_dir)]
check_output(out_values, uc_lines)
def check_map_reduce(in_dir, out_dir):
wc = word_count(get_lines(in_dir))
out_pairs = (_.split("\t", 1) for _ in get_lines(out_dir))
out_wc = {k: int(v) for k, v in out_pairs}
check_counters(out_wc, wc)
def check_pstats(pstats_dir):
pstats_names = os.listdir(pstats_dir)
try:
bn = pstats_names[0]
except IndexError:
raise RuntimeError("%r is empty" % (pstats_dir,))
pstats.Stats(os.path.join(pstats_dir, bn))
CHECKS = {
"map_only_java_writer": check_map_only,
"map_only_python_writer": check_map_only,
"map_reduce_combiner": check_map_reduce,
"map_reduce_java_rw": check_map_reduce,
"map_reduce_java_rw_pstats": check_map_reduce,
"map_reduce_python_partitioner": check_map_reduce,
"map_reduce_python_reader": check_map_reduce,
"map_reduce_python_writer": check_map_reduce,
"map_reduce_raw_io": check_map_reduce,
"map_reduce_slow_java_rw": check_map_reduce,
"map_reduce_slow_python_rw": check_map_reduce,
}
if __name__ == "__main__":
choices = sorted(CHECKS)
parser = argparse.ArgumentParser()
parser.add_argument("name", metavar="NAME", choices=choices,
help="one of: %s" % "; ".join(choices))
parser.add_argument("mr_in", metavar="IN_DIR", help="MapReduce in dir")
parser.add_argument("mr_out", metavar="OUT_DIR", help="MapReduce out dir")
args = parser.parse_args(sys.argv[1:])
check = CHECKS[args.name]
check(args.mr_in, args.mr_out)
if "pstats" in args.name:
check_pstats("%s.stats" % args.mr_out)
sys.stdout.write("OK\n")
|
{
"content_hash": "a7e4f4dedc077d4239f37a33dac5b00b",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 78,
"avg_line_length": 31.42222222222222,
"alnum_prop": 0.6113861386138614,
"repo_name": "simleo/pydoop",
"id": "3ce32d7d894a48cd1a6cfd4d1760b396e5e752ae",
"size": "3460",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "int_test/mapred_submitter/check.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "202110"
},
{
"name": "C++",
"bytes": "101371"
},
{
"name": "Dockerfile",
"bytes": "9590"
},
{
"name": "Emacs Lisp",
"bytes": "153"
},
{
"name": "Java",
"bytes": "177920"
},
{
"name": "Python",
"bytes": "400609"
},
{
"name": "Shell",
"bytes": "29222"
}
],
"symlink_target": ""
}
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy import log
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip\
as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class JustVitaminsSpider(BaseSpider):
name = 'justvitamins.co.uk-merckgroup'
allowed_domains = ['www.justvitamins.co.uk', 'justvitamins.co.uk']
start_urls = ('http://www.justvitamins.co.uk/A-Z-Product-Listing.aspx',)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# getting product links from A-Z product list
links = hxs.select('//div[@class="Product"]/a/@href').extract()
for prod_url in links:
url = urljoin_rfc(get_base_url(response), prod_url)
yield Request(url)
# products
for product in self.parse_product(response):
yield product
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
name = hxs.select('//td[@class="ProductDetails"]/h1/text()').extract()
        name = name[0].strip() if name else ''
url = response.url
url = urljoin_rfc(get_base_url(response), url)
items = hxs.select('//div[@class="Item"]')
for item in items:
loader = ProductLoader(item=Product(), selector=item)
loader.add_value('url', url)
#loader.add_value('name', name[0])
sku = ''.join(item.select('./text()').extract())
n = name
if sku:
n += ' ' + sku.strip()
loader.add_value('name', n)
loader.add_xpath('price', './/span[@class="price"]/text()')
loader.add_xpath('price', './div[@class="price"]/span/text()')
yield loader.load_item()
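# Typical invocation (assuming the usual product_spiders project layout):
#   scrapy crawl justvitamins.co.uk-merckgroup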
|
{
"content_hash": "1f5a0ba02994f37fc7750fcef912aa8f",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 78,
"avg_line_length": 33.76119402985075,
"alnum_prop": 0.5972590627763041,
"repo_name": "ddy88958620/lib",
"id": "a4cd1db499dafd42dbb1e67fa7c5bbc8349b116e",
"size": "2262",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/scrapy/merckgroup/justvitamins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from twitterpandas import TwitterPandas
from examples.keys import TWITTER_OAUTH_SECRET, TWITTER_OAUTH_TOKEN, TWITTER_CONSUMER_SECRET, TWITTER_CONSUMER_KEY
__author__ = 'willmcginnis'
def friendship_option():
# get our own user id
user_id = tp.api_id
    # use it to look up friendship status with everyone who follows us
df = tp.followers_friendships(id_=user_id, rich=True)
total_followers = df.shape[0]
# filter the df down to only those who don't follow us back
    df = df[df['source_follows_target'] == False]
# print out the info:
print('I don\'t follow a total of %d of those who follow me on twitter.' % (df.shape[0], ))
print('...that\'s about %4.2f%% of all of my followers.\n' % ((float(df.shape[0]) / total_followers) * 100, ))
print(df['target_user_screen_name'].values.tolist())
def user_method_option():
# get our own user id
user_id = tp.api_id
    # use it to fetch everyone who follows us
df = tp.followers(id_=user_id)
total_followers = df.shape[0]
# filter the df down to only those who don't follow us back
    df = df[df['following'] == False]
# print out the info:
print('I don\'t follow a total of %d of those who follow me on twitter.' % (df.shape[0], ))
print('...that\'s about %4.2f%% of all of my followers.\n' % ((float(df.shape[0]) / total_followers) * 100, ))
print(df['screen_name'].values.tolist())
if __name__ == '__main__':
# create a twitter pandas client object
tp = TwitterPandas(
TWITTER_OAUTH_TOKEN,
TWITTER_OAUTH_SECRET,
TWITTER_CONSUMER_KEY,
TWITTER_CONSUMER_SECRET
)
friendship_option()
# user_method_option()
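    # Note on the masks above: pandas needs a boolean Series, as in
    # df[df['following'] == False]; a bare expression such as
    # df['following' == False] evaluates to df[False] and fails.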
|
{
"content_hash": "9bf35e356a58c142e4c0930b97d98103",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 114,
"avg_line_length": 33.86,
"alnum_prop": 0.6414648552864737,
"repo_name": "wdm0006/twitter-pandas",
"id": "3bdcc05503bfcb3a97465dad14bc6fbb472d0e73",
"size": "1693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/analysis/followers_who_arent_friends.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "44717"
},
{
"name": "Shell",
"bytes": "449"
}
],
"symlink_target": ""
}
|
"""
Tests for ``admin.release``.
"""
import json
import os
from gzip import GzipFile
from StringIO import StringIO
import tempfile
from textwrap import dedent
from unittest import skipUnless
from setuptools import __version__ as setuptools_version
from effect import sync_perform, ComposedDispatcher, base_dispatcher
from git import GitCommandError, Repo
from requests.exceptions import HTTPError
from twisted.python.filepath import FilePath
from twisted.python.procutils import which
from twisted.python.usage import UsageError
from twisted.trial.unittest import SynchronousTestCase
from .. import release
from ..release import (
upload_python_packages, upload_packages, update_repo,
publish_docs, Environments,
DocumentationRelease, DOCUMENTATION_CONFIGURATIONS, NotTagged, NotARelease,
calculate_base_branch, create_release_branch,
CreateReleaseBranchOptions, BranchExists, TagExists,
MissingPreRelease, NoPreRelease,
UploadOptions, create_pip_index, upload_pip_index,
IncorrectSetuptoolsVersion,
publish_homebrew_recipe, PushFailed,
publish_vagrant_metadata
)
from ..packaging import Distribution
from ..aws import FakeAWS, CreateCloudFrontInvalidation
from ..yum import FakeYum, yum_dispatcher
from hashlib import sha256
FLOCKER_PATH = FilePath(__file__).parent().parent().parent()
def hard_linking_possible():
"""
Return True if hard linking is possible in the current directory, else
return False.
"""
scratch_directory = FilePath(tempfile.mkdtemp())
file = scratch_directory.child('src')
file.touch()
try:
os.link(file.path, scratch_directory.child('dst').path)
return True
    except OSError:
return False
finally:
scratch_directory.remove()
class PublishDocsTests(SynchronousTestCase):
"""
Tests for :func:``publish_docs``.
"""
def publish_docs(self, aws,
flocker_version, doc_version, environment):
"""
Call :func:``publish_docs``, interacting with a fake AWS.
:param FakeAWS aws: Fake AWS to interact with.
:param flocker_version: See :py:func:`publish_docs`.
:param doc_version: See :py:func:`publish_docs`.
        :param environment: See :py:func:`publish_docs`.
"""
sync_perform(
ComposedDispatcher([aws.get_dispatcher(), base_dispatcher]),
publish_docs(flocker_version, doc_version,
environment=environment))
def test_copies_documentation(self):
"""
Calling :func:`publish_docs` copies documentation from
``s3://clusterhq-dev-docs/<flocker_version>/`` to
``s3://clusterhq-staging-docs/en/<doc_version>/``.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
},
'clusterhq-dev-docs': {
'0.3.0-444-gf05215b/index.html': 'index-content',
'0.3.0-444-gf05215b/sub/index.html': 'sub-index-content',
'0.3.0-444-gf05215b/other.html': 'other-content',
'0.3.0-392-gd50b558/index.html': 'bad-index',
'0.3.0-392-gd50b558/sub/index.html': 'bad-sub-index',
'0.3.0-392-gd50b558/other.html': 'bad-other',
},
})
self.publish_docs(aws, '0.3.0-444-gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.s3_buckets['clusterhq-staging-docs'], {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': 'index-content',
'en/0.3.1/sub/index.html': 'sub-index-content',
'en/0.3.1/other.html': 'other-content',
})
def test_copies_documentation_production(self):
"""
Calling :func:`publish_docs` in production copies documentation from
``s3://clusterhq-dev-docs/<flocker_version>/`` to
``s3://clusterhq-docs/en/<doc_version>/``.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
},
'clusterhq-dev-docs': {
'0.3.1/index.html': 'index-content',
'0.3.1/sub/index.html': 'sub-index-content',
'0.3.1/other.html': 'other-content',
'0.3.0-392-gd50b558/index.html': 'bad-index',
'0.3.0-392-gd50b558/sub/index.html': 'bad-sub-index',
'0.3.0-392-gd50b558/other.html': 'bad-other',
},
})
self.publish_docs(aws, '0.3.1', '0.3.1',
environment=Environments.PRODUCTION)
self.assertEqual(
aws.s3_buckets['clusterhq-docs'], {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': 'index-content',
'en/0.3.1/sub/index.html': 'sub-index-content',
'en/0.3.1/other.html': 'other-content',
})
def test_deletes_removed_documentation(self):
"""
Calling :func:`publish_docs` replaces documentation from
        ``s3://clusterhq-staging-docs/en/<doc_version>/``
        with documentation from ``s3://clusterhq-dev-docs/<flocker_version>/``.
In particular, files with changed content are updated, and removed
files are deleted.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': 'old-index-content',
'en/0.3.1/sub/index.html': 'old-sub-index-content',
'en/0.3.1/other.html': 'other-content',
},
'clusterhq-dev-docs': {
'0.3.0-444-gf05215b/index.html': 'index-content',
'0.3.0-444-gf05215b/sub/index.html': 'sub-index-content',
},
})
self.publish_docs(aws, '0.3.0-444-gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.s3_buckets['clusterhq-staging-docs'], {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': 'index-content',
'en/0.3.1/sub/index.html': 'sub-index-content',
})
def test_updates_redirects(self):
"""
Calling :func:`publish_docs` with a release version updates the
redirect for ``en/latest/*`` to point at ``en/<doc_version>/*``. Any
other redirects are left untouched.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
'en/devel/': 'en/0.3.1.dev4/',
},
},
s3_buckets={
'clusterhq-staging-docs': {},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.0-444-gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.routing_rules, {
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.1/',
'en/devel/': 'en/0.3.1.dev4/',
},
})
def test_updates_redirects_devel(self):
"""
Calling :func:`publish_docs` for a development version updates the
redirect for ``en/devel/*`` to point at ``en/<doc_version>/*``. Any
other redirects are left untouched.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
'en/devel/': 'en/0.3.1dev4/',
},
},
s3_buckets={
'clusterhq-staging-docs': {},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.0-444-gf01215b', '0.3.1dev5',
environment=Environments.STAGING)
self.assertEqual(
aws.routing_rules, {
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
'en/devel/': 'en/0.3.1dev5/',
},
})
def test_updates_redirects_production(self):
"""
Calling :func:`publish_docs` with a release or documentation version
and in production updates the redirect for the
``clusterhq-docs`` S3 bucket.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-docs': {
'en/latest/': 'en/0.3.0/',
'en/devel/': 'en/0.3.1.dev4/',
},
},
s3_buckets={
'clusterhq-docs': {},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.1', '0.3.1',
environment=Environments.PRODUCTION)
self.assertEqual(
aws.routing_rules, {
'clusterhq-docs': {
'en/latest/': 'en/0.3.1/',
'en/devel/': 'en/0.3.1.dev4/',
},
})
def test_creates_cloudfront_invalidation_new_files(self):
"""
Calling :func:`publish_docs` with a release or documentation version
creates an invalidation for
- en/latest/
- en/<doc_version>/
each for every path in the new documentation for <doc_version>.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': '',
'en/0.3.1/sub/index.html': '',
},
'clusterhq-dev-docs': {
'0.3.0-444-gf05215b/index.html': '',
'0.3.0-444-gf05215b/sub/index.html': '',
'0.3.0-444-gf05215b/sub/other.html': '',
},
})
self.publish_docs(aws, '0.3.0-444-gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/latest/',
'en/latest/index.html',
'en/latest/sub/',
'en/latest/sub/index.html',
'en/latest/sub/other.html',
'en/0.3.1/',
'en/0.3.1/index.html',
'en/0.3.1/sub/',
'en/0.3.1/sub/index.html',
'en/0.3.1/sub/other.html',
}),
])
def test_creates_cloudfront_invalidation_trailing_index(self):
"""
Calling :func:`publish_docs` with a release or documentation version
        doesn't create a directory invalidation for files whose names merely
        end in ``index.html``.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
},
'clusterhq-dev-docs': {
'0.3.0-444-gf05215b/sub_index.html': '',
},
})
self.publish_docs(aws, '0.3.0-444-gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/latest/',
'en/latest/sub_index.html',
'en/0.3.1/',
'en/0.3.1/sub_index.html',
}),
])
def test_creates_cloudfront_invalidation_removed_files(self):
"""
Calling :func:`publish_docs` with a release or documentation version
creates an invalidation for
- en/latest/
- en/<doc_version>/
each for every path in the old documentation for <doc_version>.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': '',
'en/0.3.1/sub/index.html': '',
},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.0-444-gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/latest/',
'en/latest/index.html',
'en/latest/sub/',
'en/latest/sub/index.html',
'en/0.3.1/',
'en/0.3.1/index.html',
'en/0.3.1/sub/',
'en/0.3.1/sub/index.html',
}),
])
def test_creates_cloudfront_invalidation_previous_version(self):
"""
Calling :func:`publish_docs` with a release or documentation version
creates an invalidation for
- en/latest/
- en/<doc_version>/
        each for every path in the documentation for the version that was
        previously `en/latest/`.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.0/index.html': '',
'en/0.3.0/sub/index.html': '',
},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.0-444-gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/latest/',
'en/latest/index.html',
'en/latest/sub/',
'en/latest/sub/index.html',
'en/0.3.1/',
'en/0.3.1/index.html',
'en/0.3.1/sub/',
'en/0.3.1/sub/index.html',
}),
])
def test_creates_cloudfront_invalidation_devel_new_files(self):
"""
Calling :func:`publish_docs` with a development version creates an
invalidation for
- en/devel/
- en/<doc_version>/
each for every path in the new documentation for <doc_version>.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/devel/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/devel/index.html': '',
'en/0.3.1dev1/index.html': '',
'en/0.3.1dev1/sub/index.html': '',
},
'clusterhq-dev-docs': {
'0.3.0-444-gf05215b/index.html': '',
'0.3.0-444-gf05215b/sub/index.html': '',
'0.3.0-444-gf05215b/sub/other.html': '',
},
})
self.publish_docs(aws, '0.3.0-444-gf05215b', '0.3.1dev1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/devel/',
'en/devel/index.html',
'en/devel/sub/',
'en/devel/sub/index.html',
'en/devel/sub/other.html',
'en/0.3.1dev1/',
'en/0.3.1dev1/index.html',
'en/0.3.1dev1/sub/',
'en/0.3.1dev1/sub/index.html',
'en/0.3.1dev1/sub/other.html',
}),
])
def test_creates_cloudfront_invalidation_devel_removed_files(self):
"""
Calling :func:`publish_docs` with a development version creates an
invalidation for
- en/devel/
- en/<doc_version>/
each for every path in the old documentation for <doc_version>.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/devel/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/devel/index.html': '',
'en/0.3.1dev1/index.html': '',
'en/0.3.1dev1/sub/index.html': '',
},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.0-444-gf05215b', '0.3.1dev1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/devel/',
'en/devel/index.html',
'en/devel/sub/',
'en/devel/sub/index.html',
'en/0.3.1dev1/',
'en/0.3.1dev1/index.html',
'en/0.3.1dev1/sub/',
'en/0.3.1dev1/sub/index.html',
}),
])
def test_creates_cloudfront_invalidation_devel_previous_version(self):
"""
Calling :func:`publish_docs` with a development version creates an
invalidation for
- en/devel/
- en/<doc_version>/
        each for every path in the documentation for the version that was
        previously `en/devel/`.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/devel/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/devel/index.html': '',
'en/0.3.0/index.html': '',
'en/0.3.0/sub/index.html': '',
},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.0-444-gf05215b', '0.3.1dev1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/devel/',
'en/devel/index.html',
'en/devel/sub/',
'en/devel/sub/index.html',
'en/0.3.1dev1/',
'en/0.3.1dev1/index.html',
'en/0.3.1dev1/sub/',
'en/0.3.1dev1/sub/index.html',
}),
])
def test_creates_cloudfront_invalidation_production(self):
"""
Calling :func:`publish_docs` in production creates an invalidation for
``docs.clusterhq.com``.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': '',
'en/0.3.1/sub/index.html': '',
},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.1', '0.3.1',
environment=Environments.PRODUCTION)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.clusterhq.com',
paths={
'en/latest/',
'en/latest/index.html',
'en/latest/sub/',
'en/latest/sub/index.html',
'en/0.3.1/',
'en/0.3.1/index.html',
'en/0.3.1/sub/',
'en/0.3.1/sub/index.html',
}),
])
def test_production_gets_tagged_version(self):
"""
        Trying to publish to production when the version being pushed isn't
tagged raises an exception.
"""
aws = FakeAWS(routing_rules={}, s3_buckets={})
self.assertRaises(
NotTagged,
self.publish_docs,
aws, '0.3.0-444-gf05215b', '0.3.1dev1',
environment=Environments.PRODUCTION)
def test_publish_to_doc_version(self):
"""
Trying to publish to a documentation version in a staging environment
        publishes to the version being updated.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': '',
},
},
s3_buckets={
'clusterhq-staging-docs': {},
'clusterhq-dev-docs': {},
})
self.publish_docs(
aws, '0.3.1-444-gf05215b', '0.3.1+doc1',
environment=Environments.STAGING)
self.assertEqual(
aws.routing_rules, {
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.1/',
},
})
def test_production_can_publish_doc_version(self):
"""
Publishing a documentation version to the version of the latest full
release in production succeeds.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-docs': {},
'clusterhq-dev-docs': {},
})
# Does not raise:
self.publish_docs(
aws, '0.3.1+doc1', '0.3.1', environment=Environments.PRODUCTION)
def test_production_can_publish_prerelease(self):
"""
Publishing a pre-release succeeds.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-docs': {
'en/devel/': 'en/0.3.1.dev4/',
},
},
s3_buckets={
'clusterhq-docs': {},
'clusterhq-dev-docs': {},
})
# Does not raise:
self.publish_docs(
aws, '0.3.2pre1', '0.3.2pre1', environment=Environments.PRODUCTION)
def test_publish_non_release_fails(self):
"""
        Trying to publish to a version that isn't a release fails.
"""
aws = FakeAWS(routing_rules={}, s3_buckets={})
self.assertRaises(
NotARelease,
self.publish_docs,
aws, '0.3.0-444-gf05215b', '0.3.0-444-gf05215b',
environment=Environments.STAGING)
def assert_error_key_update(self, doc_version, environment, should_update):
"""
Call ``publish_docs`` and assert that only the expected buckets have an
updated error_key property.
:param unicode doc_version: The version of the documentation that is
being published.
:param NamedConstant environment: One of the ``NamedConstants`` in
``Environments``.
:param bool should_update: A flag indicating whether the error_key for
the bucket associated with ``environment`` is expected to be
updated.
:raises: ``FailTest`` if an error_key in any of the S3 buckets has been
updated unexpectedly.
"""
# Get a set of all target S3 buckets.
bucket_names = set()
for e in Environments.iterconstants():
bucket_names.add(
DOCUMENTATION_CONFIGURATIONS[e].documentation_bucket
)
# Pretend that both devel and latest aliases are currently pointing to
# an older version.
empty_routes = {
'en/devel/': 'en/0.0.0/',
'en/latest/': 'en/0.0.0/',
}
# In all the S3 buckets.
empty_routing_rules = {
bucket_name: empty_routes.copy()
for bucket_name in bucket_names
}
# And that all the buckets themselves are empty.
empty_buckets = {bucket_name: {} for bucket_name in bucket_names}
# Including the dev bucket
empty_buckets['clusterhq-dev-docs'] = {}
# And that all the buckets have an empty error_key
empty_error_keys = {bucket_name: b'' for bucket_name in bucket_names}
aws = FakeAWS(
routing_rules=empty_routing_rules,
s3_buckets=empty_buckets,
error_key=empty_error_keys
)
# The value of any updated error_key will include the version that's
# being published.
expected_error_path = 'en/{}/error_pages/404.html'.format(doc_version)
expected_updated_bucket = (
DOCUMENTATION_CONFIGURATIONS[environment].documentation_bucket
)
# Grab a copy of the current error_key before it gets mutated.
expected_error_keys = aws.error_key.copy()
if should_update:
# And if an error_key is expected to be updated we expect it to be
# for the bucket corresponding to the environment that we're
# publishing to.
expected_error_keys[expected_updated_bucket] = expected_error_path
self.publish_docs(
aws,
flocker_version=doc_version,
doc_version=doc_version,
environment=environment
)
self.assertEqual(expected_error_keys, aws.error_key)
def test_error_key_dev_staging(self):
"""
Publishing documentation for a development release to the staging
        bucket updates the error_key in that bucket only.
"""
self.assert_error_key_update(
doc_version='0.4.1dev1',
environment=Environments.STAGING,
should_update=True
)
def test_error_key_dev_production(self):
"""
Publishing documentation for a development release to the production
        bucket does not update the error_key in any of the buckets.
"""
self.assert_error_key_update(
doc_version='0.4.1dev1',
environment=Environments.PRODUCTION,
should_update=False
)
def test_error_key_pre_staging(self):
"""
Publishing documentation for a pre-release to the staging
        bucket updates the error_key in that bucket only.
"""
self.assert_error_key_update(
doc_version='0.4.1pre1',
environment=Environments.STAGING,
should_update=True
)
def test_error_key_pre_production(self):
"""
Publishing documentation for a pre-release to the production
        bucket does not update the error_key in any of the buckets.
"""
self.assert_error_key_update(
doc_version='0.4.1pre1',
environment=Environments.PRODUCTION,
should_update=False
)
def test_error_key_marketing_staging(self):
"""
Publishing documentation for a marketing release to the staging
        bucket updates the error_key in that bucket.
"""
self.assert_error_key_update(
doc_version='0.4.1',
environment=Environments.STAGING,
should_update=True
)
def test_error_key_marketing_production(self):
"""
Publishing documentation for a marketing release to the production
        bucket updates the error_key in that bucket.
"""
self.assert_error_key_update(
doc_version='0.4.1',
environment=Environments.PRODUCTION,
should_update=True
)
class UpdateRepoTests(SynchronousTestCase):
"""
Tests for :func:``update_repo``.
"""
def setUp(self):
self.target_bucket = 'test-target-bucket'
self.target_key = 'test/target/key'
self.package_directory = FilePath(self.mktemp())
self.packages = ['clusterhq-flocker-cli', 'clusterhq-flocker-node']
def update_repo(self, aws, yum,
package_directory, target_bucket, target_key, source_repo,
packages, flocker_version, distribution):
"""
Call :func:``update_repo``, interacting with a fake AWS and yum
utilities.
:param FakeAWS aws: Fake AWS to interact with.
:param FakeYum yum: Fake yum utilities to interact with.
See :py:func:`update_repo` for other parameter documentation.
"""
dispatchers = [aws.get_dispatcher(), yum.get_dispatcher(),
base_dispatcher]
sync_perform(
ComposedDispatcher(dispatchers),
update_repo(
package_directory=package_directory,
target_bucket=target_bucket,
target_key=target_key,
source_repo=source_repo,
packages=packages,
flocker_version=flocker_version,
distribution=distribution,
)
)
def test_fake_rpm(self):
"""
Calling :func:`update_repo` downloads the new RPMs, creates the
metadata, and uploads it to S3.
- Existing packages on S3 are preserved in the metadata.
- Other packages on the buildserver are not downloaded.
- Existing metadata files are left untouched.
"""
existing_s3_keys = {
os.path.join(self.target_key, 'existing_package.rpm'): '',
os.path.join(self.target_key,
'clusterhq-flocker-cli-0.3.3-0.dev.7.noarch.rpm'):
'existing-content-to-be-replaced', # noqa
os.path.join(self.target_key, 'repodata', 'repomod.xml'):
'<oldhash>-metadata.xml',
os.path.join(self.target_key, 'repodata',
'<oldhash>-metadata.xml'):
'metadata for: existing_package.rpm',
}
# Copy before passing to FakeAWS
expected_keys = existing_s3_keys.copy()
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: existing_s3_keys,
},
)
unspecified_package = 'unspecified-package-0.3.3-0.dev.7.noarch.rpm'
repo_contents = {
'clusterhq-flocker-cli-0.3.3-0.dev.7.noarch.rpm': 'cli-package',
'clusterhq-flocker-node-0.3.3-0.dev.7.noarch.rpm': 'node-package',
unspecified_package: 'unspecified-package-content',
}
self.update_repo(
aws=aws,
yum=FakeYum(),
package_directory=self.package_directory,
target_bucket=self.target_bucket,
target_key=self.target_key,
source_repo=create_fake_repository(self, files=repo_contents),
packages=self.packages,
flocker_version='0.3.3dev7',
distribution=Distribution(name='centos', version='7'),
)
# The expected files are the new files plus the package which already
# existed in S3.
expected_packages = {
'existing_package.rpm',
'clusterhq-flocker-cli-0.3.3-0.dev.7.noarch.rpm',
'clusterhq-flocker-node-0.3.3-0.dev.7.noarch.rpm',
}
expected_keys.update({
'test/target/key/clusterhq-flocker-cli-0.3.3-0.dev.7.noarch.rpm':
'cli-package',
'test/target/key/clusterhq-flocker-node-0.3.3-0.dev.7.noarch.rpm':
'node-package',
})
expected_keys.update({
os.path.join(self.target_key, 'repodata', 'repomod.xml'):
'<newhash>-metadata.xml',
os.path.join(self.target_key, 'repodata',
'<newhash>-metadata.xml'):
'metadata content for: ' + ','.join(expected_packages),
})
self.assertEqual(
expected_keys,
aws.s3_buckets[self.target_bucket])
def test_fake_deb(self):
"""
Calling :func:`update_repo` downloads the new DEBs, creates the
metadata, and uploads it to S3.
- Existing packages on S3 are preserved in the metadata.
- Other packages on the buildserver are not downloaded.
"""
existing_s3_keys = {
os.path.join(self.target_key, 'existing_package.deb'): '',
os.path.join(self.target_key,
'clusterhq-flocker-cli_0.3.3-0.dev.7_all.deb'):
'existing-content-to-be-replaced', # noqa
os.path.join(self.target_key, 'Packages.gz'):
'metadata for: existing_package.deb',
}
# Copy before passing to FakeAWS
expected_keys = existing_s3_keys.copy()
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: existing_s3_keys,
},
)
unspecified_package = 'unspecified-package_0.3.3-0.dev.7_all.deb'
repo_contents = {
'clusterhq-flocker-cli_0.3.3-0.dev.7_all.deb': 'cli-package',
'clusterhq-flocker-node_0.3.3-0.dev.7_all.deb': 'node-package',
unspecified_package: 'unspecified-package-content',
}
self.update_repo(
aws=aws,
yum=FakeYum(),
package_directory=self.package_directory,
target_bucket=self.target_bucket,
target_key=self.target_key,
source_repo=create_fake_repository(self, files=repo_contents),
packages=self.packages,
flocker_version='0.3.3dev7',
distribution=Distribution(name='ubuntu', version='14.04'),
)
# The expected files are the new files plus the package which already
# existed in S3.
expected_packages = {
'existing_package.deb',
'clusterhq-flocker-cli_0.3.3-0.dev.7_all.deb',
'clusterhq-flocker-node_0.3.3-0.dev.7_all.deb',
}
expected_keys.update({
'test/target/key/Release': 'Origin: ClusterHQ\n',
'test/target/key/clusterhq-flocker-cli_0.3.3-0.dev.7_all.deb':
'cli-package',
'test/target/key/clusterhq-flocker-node_0.3.3-0.dev.7_all.deb':
'node-package',
'test/target/key/Packages.gz':
'Packages.gz for: ' + ','.join(expected_packages),
})
self.assertEqual(
expected_keys,
aws.s3_buckets[self.target_bucket])
def test_package_not_available_exception(self):
"""
If a requested package is not available in the repository, a 404 error
is raised.
"""
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
with self.assertRaises(HTTPError) as exception:
self.update_repo(
aws=aws,
yum=FakeYum(),
package_directory=self.package_directory,
target_bucket=self.target_bucket,
target_key=self.target_key,
source_repo=create_fake_repository(
self, files={}),
packages=self.packages,
flocker_version='0.3.3dev7',
distribution=Distribution(name="centos", version="7"),
)
self.assertEqual(404, exception.exception.response.status_code)
@skipUnless(which('createrepo'),
"Tests require the ``createrepo`` command.")
def test_real_yum_utils(self):
"""
Calling :func:`update_repo` with real yum utilities creates a
repository in S3.
"""
source_repo = FilePath(self.mktemp())
source_repo.createDirectory()
FilePath(__file__).sibling('yum-repo').copyTo(source_repo)
repo_uri = 'file://' + source_repo.path
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
class RealYum(object):
def get_dispatcher(self):
return yum_dispatcher
self.update_repo(
aws=aws,
yum=RealYum(),
package_directory=self.package_directory,
target_bucket=self.target_bucket,
target_key=self.target_key,
source_repo=repo_uri,
packages=self.packages,
flocker_version='0.3.3dev7',
distribution=Distribution(name='centos', version='7'),
)
expected_files = {
os.path.join(self.target_key, file)
for file in [
'clusterhq-flocker-cli-0.3.3-0.dev.7.noarch.rpm',
'clusterhq-flocker-node-0.3.3-0.dev.7.noarch.rpm',
'repodata/repomd.xml',
]
}
files_on_s3 = aws.s3_buckets[self.target_bucket]
repodata_path = os.path.join(self.target_key, 'repodata')
# Yum repositories prefix metadata files with the sha256 hash
# of the file. Since these files contain timestamps, we calculate
# the hash from the file, to determine the expected file names.
for metadata_file in [
'other.sqlite.bz2',
'filelists.xml.gz',
'primary.xml.gz',
'filelists.sqlite.bz2',
'primary.sqlite.bz2',
'other.xml.gz',
]:
for key in files_on_s3:
if (key.endswith(metadata_file)
and key.startswith(repodata_path)):
expected_files.add(
os.path.join(
repodata_path,
sha256(files_on_s3[key]).hexdigest()
+ '-' + metadata_file)
)
break
else:
expected_files.add(
os.path.join(
repodata_path, '<missing>-' + metadata_file))
# The original source repository contains no metadata.
# This tests that CreateRepo creates the expected metadata files from
# given RPMs, not that any metadata files are copied.
self.assertEqual(expected_files, set(files_on_s3.keys()))
@skipUnless(which('dpkg-scanpackages'),
"Tests require the ``dpkg-scanpackages`` command.")
def test_real_dpkg_utils(self):
"""
Calling :func:`update_repo` with real dpkg utilities creates a
repository in S3.
The filenames in the repository metadata do not have the build
directory in them.
"""
source_repo = FilePath(self.mktemp())
source_repo.createDirectory()
FilePath(__file__).sibling('apt-repo').copyTo(source_repo)
repo_uri = 'file://' + source_repo.path
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
class RealYum(object):
def get_dispatcher(self):
return yum_dispatcher
self.update_repo(
aws=aws,
yum=RealYum(),
package_directory=self.package_directory,
target_bucket=self.target_bucket,
target_key=self.target_key,
source_repo=repo_uri,
packages=self.packages,
flocker_version='0.3.3dev7',
distribution=Distribution(name="ubuntu", version="14.04"),
)
expected_files = {
os.path.join(self.target_key, file)
for file in [
'clusterhq-flocker-cli_0.3.3-0.dev.7_all.deb',
'clusterhq-flocker-node_0.3.3-0.dev.7_all.deb',
'Packages.gz',
'Release',
]
}
files_on_s3 = aws.s3_buckets[self.target_bucket]
# The original source repository contains no metadata.
# This tests that CreateRepo creates the expected metadata files from
# given RPMs, not that any metadata files are copied.
self.assertEqual(expected_files, set(files_on_s3.keys()))
# The repository is built in self.packages_directory
# Ensure that that does not leak into the metadata.
packages_gz = files_on_s3[os.path.join(self.target_key, 'Packages.gz')]
with GzipFile(fileobj=StringIO(packages_gz), mode="r") as f:
packages_metadata = f.read()
self.assertNotIn(self.package_directory.path, packages_metadata)
class UploadPackagesTests(SynchronousTestCase):
"""
Tests for :func:``upload_packages``.
"""
def upload_packages(self, aws, yum,
scratch_directory, target_bucket, version,
build_server, top_level):
"""
Call :func:``upload_packages``, interacting with a fake AWS and yum
utilities.
:param FakeAWS aws: Fake AWS to interact with.
:param FakeYum yum: Fake yum utilities to interact with.
See :py:func:`upload_packages` for other parameter documentation.
"""
dispatchers = [aws.get_dispatcher(), yum.get_dispatcher(),
base_dispatcher]
sync_perform(
ComposedDispatcher(dispatchers),
upload_packages(
scratch_directory=scratch_directory,
target_bucket=target_bucket,
version=version,
build_server=build_server,
top_level=top_level,
),
)
def setUp(self):
self.scratch_directory = FilePath(self.mktemp())
self.scratch_directory.createDirectory()
self.target_bucket = 'test-target-bucket'
self.aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
self.build_server = 'http://test-build-server.example'
def test_repositories_created(self):
"""
Calling :func:`upload_packages` creates repositories for supported
distributions.
"""
repo_contents = {
'results/omnibus/0.3.3dev1/centos-7/clusterhq-flocker-cli-0.3.3-0.dev.1.noarch.rpm': '', # noqa
'results/omnibus/0.3.3dev1/centos-7/clusterhq-flocker-node-0.3.3-0.dev.1.noarch.rpm': '', # noqa
'results/omnibus/0.3.3dev1/centos-7/clusterhq-python-flocker-0.3.3-0.dev.1.x86_64.rpm': '', # noqa
'results/omnibus/0.3.3dev1/ubuntu-14.04/clusterhq-flocker-cli_0.3.3-0.dev.1_all.deb': '', # noqa
'results/omnibus/0.3.3dev1/ubuntu-14.04/clusterhq-flocker-node_0.3.3-0.dev.1_all.deb': '', # noqa
'results/omnibus/0.3.3dev1/ubuntu-14.04/clusterhq-python-flocker_0.3.3-0.dev.1_amd64.deb': '', # noqa
'results/omnibus/0.3.3dev1/ubuntu-15.04/clusterhq-flocker-cli_0.3.3-0.dev.1_all.deb': '', # noqa
'results/omnibus/0.3.3dev1/ubuntu-15.04/clusterhq-flocker-node_0.3.3-0.dev.1_all.deb': '', # noqa
'results/omnibus/0.3.3dev1/ubuntu-15.04/clusterhq-python-flocker_0.3.3-0.dev.1_amd64.deb': '', # noqa
}
self.upload_packages(
aws=self.aws,
yum=FakeYum(),
scratch_directory=self.scratch_directory,
target_bucket=self.target_bucket,
version='0.3.3dev1',
build_server=create_fake_repository(self, files=repo_contents),
top_level=FLOCKER_PATH,
)
expected_files = {
'centos-testing/7/x86_64/clusterhq-flocker-cli-0.3.3-0.dev.1.noarch.rpm', # noqa
'centos-testing/7/x86_64/clusterhq-flocker-node-0.3.3-0.dev.1.noarch.rpm', # noqa
'centos-testing/7/x86_64/clusterhq-python-flocker-0.3.3-0.dev.1.x86_64.rpm', # noqa
'centos-testing/7/x86_64/repodata/repomod.xml', # noqa
'centos-testing/7/x86_64/repodata/<newhash>-metadata.xml', # noqa
'ubuntu-testing/14.04/amd64/clusterhq-flocker-cli_0.3.3-0.dev.1_all.deb', # noqa
'ubuntu-testing/14.04/amd64/clusterhq-flocker-node_0.3.3-0.dev.1_all.deb', # noqa
'ubuntu-testing/14.04/amd64/clusterhq-python-flocker_0.3.3-0.dev.1_amd64.deb', # noqa
'ubuntu-testing/14.04/amd64/Packages.gz',
'ubuntu-testing/14.04/amd64/Release',
'ubuntu-testing/15.04/amd64/clusterhq-flocker-cli_0.3.3-0.dev.1_all.deb', # noqa
'ubuntu-testing/15.04/amd64/clusterhq-flocker-node_0.3.3-0.dev.1_all.deb', # noqa
'ubuntu-testing/15.04/amd64/clusterhq-python-flocker_0.3.3-0.dev.1_amd64.deb', # noqa
'ubuntu-testing/15.04/amd64/Packages.gz',
'ubuntu-testing/15.04/amd64/Release',
}
files_on_s3 = self.aws.s3_buckets[self.target_bucket].keys()
self.assertEqual(expected_files, set(files_on_s3))
def test_key_suffixes(self):
"""
        The OS part of the keys for created repositories has suffixes (or not)
appropriate for the release type. In particular there is no "-testing"
in keys created for a marketing release.
"""
repo_contents = {
'results/omnibus/0.3.3/centos-7/clusterhq-flocker-cli-0.3.3-1.noarch.rpm': '', # noqa
'results/omnibus/0.3.3/centos-7/clusterhq-flocker-node-0.3.3-1.noarch.rpm': '', # noqa
'results/omnibus/0.3.3/centos-7/clusterhq-python-flocker-0.3.3-1.x86_64.rpm': '', # noqa
'results/omnibus/0.3.3/ubuntu-14.04/clusterhq-flocker-cli_0.3.3-1_all.deb': '', # noqa
'results/omnibus/0.3.3/ubuntu-14.04/clusterhq-flocker-node_0.3.3-1_all.deb': '', # noqa
'results/omnibus/0.3.3/ubuntu-14.04/clusterhq-python-flocker_0.3.3-1_amd64.deb': '', # noqa
'results/omnibus/0.3.3/ubuntu-15.04/clusterhq-flocker-cli_0.3.3-1_all.deb': '', # noqa
'results/omnibus/0.3.3/ubuntu-15.04/clusterhq-flocker-node_0.3.3-1_all.deb': '', # noqa
'results/omnibus/0.3.3/ubuntu-15.04/clusterhq-python-flocker_0.3.3-1_amd64.deb': '', # noqa
}
self.upload_packages(
aws=self.aws,
yum=FakeYum(),
scratch_directory=self.scratch_directory,
target_bucket=self.target_bucket,
version='0.3.3',
build_server=create_fake_repository(self, files=repo_contents),
top_level=FLOCKER_PATH,
)
files_on_s3 = self.aws.s3_buckets[self.target_bucket].keys()
self.assertEqual(set(), {f for f in files_on_s3 if '-testing' in f})
def create_fake_repository(test_case, files):
"""
Create files in a directory to mimic a repository of packages.
:param TestCase test_case: The test case to use for creating a temporary
directory.
    :param dict files: Dictionary mapping names of files to create to
contents.
:return: FilePath of directory containing fake package files.
"""
source_repo = FilePath(test_case.mktemp())
    source_repo.createDirectory()
for key in files:
new_file = source_repo.preauthChild(key)
if not new_file.parent().exists():
new_file.parent().makedirs()
new_file.setContent(files[key])
return 'file://' + source_repo.path
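# Note that create_fake_repository returns a file:// URI rather than a
# FilePath, so it can be passed directly wherever update_repo or
# upload_packages expects a source repository location.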
class UploadPythonPackagesTests(SynchronousTestCase):
"""
Tests for :func:``upload_python_packages``.
"""
def setUp(self):
self.target_bucket = 'test-target-bucket'
self.scratch_directory = FilePath(self.mktemp())
self.top_level = FilePath(self.mktemp())
self.top_level.makedirs()
self.aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
})
def upload_python_packages(self):
"""
Call :func:``upload_python_packages``, discarding output.
        See :py:func:`upload_python_packages` for parameter documentation.
"""
dispatchers = [self.aws.get_dispatcher(), base_dispatcher]
with open(os.devnull, "w") as discard:
sync_perform(
ComposedDispatcher(dispatchers),
upload_python_packages(
scratch_directory=self.scratch_directory,
target_bucket=self.target_bucket,
top_level=self.top_level,
output=discard,
error=discard,
)
)
@skipUnless(setuptools_version == "3.6", "setuptools must be version 3.6")
@skipUnless(hard_linking_possible(),
"Hard linking is not possible in the current directory.")
def test_distributions_uploaded(self):
"""
Source and binary distributions of Flocker are uploaded to S3.
"""
self.top_level.child('setup.py').setContent(
dedent("""
from setuptools import setup
setup(
name="Flocker",
version="{package_version}",
py_modules=["Flocker"],
)
""").format(package_version='0.3.0')
)
self.upload_python_packages()
aws_keys = self.aws.s3_buckets[self.target_bucket].keys()
self.assertEqual(
sorted(aws_keys),
['python/Flocker-0.3.0-py2-none-any.whl',
'python/Flocker-0.3.0.tar.gz'])
def test_setuptools_version_requirement(self):
"""
When setuptools' version is not 3.6, an error is raised.
"""
self.patch(
release, 'setuptools_version', '15.1')
self.assertRaises(
IncorrectSetuptoolsVersion,
self.upload_python_packages)
class UploadOptionsTests(SynchronousTestCase):
"""
Tests for :class:`UploadOptions`.
"""
def test_must_be_release_version(self):
"""
Trying to upload artifacts for a version which is not a release
fails.
"""
options = UploadOptions()
self.assertRaises(
NotARelease,
options.parseOptions,
['--flocker-version', '0.3.0-444-gf05215b'])
def test_documentation_release_fails(self):
"""
Trying to upload artifacts for a documentation version fails.
"""
options = UploadOptions()
self.assertRaises(
DocumentationRelease,
options.parseOptions,
['--flocker-version', '0.3.0+doc1'])
class CreateReleaseBranchOptionsTests(SynchronousTestCase):
"""
Tests for :class:`CreateReleaseBranchOptions`.
"""
def test_flocker_version_required(self):
"""
The ``--flocker-version`` option is required.
"""
options = CreateReleaseBranchOptions()
self.assertRaises(
UsageError,
options.parseOptions, [])
def create_git_repository(test_case, bare=False):
"""
Create a git repository with a ``master`` branch and ``README``.
    :param test_case: The ``TestCase`` calling this.
    :param bool bare: If true, create a bare repository without a working
        tree.
    """
directory = FilePath(test_case.mktemp())
repository = Repo.init(path=directory.path, bare=bare)
if not bare:
        # Repo.init above created the repository directory, so just create
        # the README file inside it.
        directory.child('README').touch()
repository.index.add(['README'])
repository.index.commit('Initial commit')
repository.create_head('master')
return repository
class CreateReleaseBranchTests(SynchronousTestCase):
"""
Tests for :func:`create_release_branch`.
"""
def setUp(self):
self.repo = create_git_repository(test_case=self)
def test_branch_exists_fails(self):
"""
Trying to create a release when a branch already exists for the given
version fails.
"""
branch = self.repo.create_head('release/flocker-0.3.0')
self.assertRaises(
BranchExists,
create_release_branch, '0.3.0', base_branch=branch)
def test_active_branch(self):
"""
Creating a release branch changes the active branch on the given
branch's repository.
"""
branch = self.repo.create_head('release/flocker-0.3.0pre1')
create_release_branch(version='0.3.0', base_branch=branch)
self.assertEqual(
self.repo.active_branch.name,
"release/flocker-0.3.0")
def test_branch_created_from_base(self):
"""
The new branch is created from the given branch.
"""
master = self.repo.active_branch
branch = self.repo.create_head('release/flocker-0.3.0pre1')
branch.checkout()
FilePath(self.repo.working_dir).child('NEW_FILE').touch()
self.repo.index.add(['NEW_FILE'])
self.repo.index.commit('Add NEW_FILE')
master.checkout()
create_release_branch(version='0.3.0', base_branch=branch)
self.assertIn((u'NEW_FILE', 0), self.repo.index.entries)
class CreatePipIndexTests(SynchronousTestCase):
"""
Tests for :func:`create_pip_index`.
"""
def setUp(self):
self.scratch_directory = FilePath(self.mktemp())
self.scratch_directory.makedirs()
def test_index_created(self):
"""
A pip index file is created for all wheel files.
"""
index = create_pip_index(
scratch_directory=self.scratch_directory,
packages=[
'Flocker-0.3.0-py2-none-any.whl',
'Flocker-0.3.1-py2-none-any.whl'
]
)
expected = (
'<html>\nThis is an index for pip\n<div>'
'<a href="Flocker-0.3.0-py2-none-any.whl">'
'Flocker-0.3.0-py2-none-any.whl</a><br />\n</div><div>'
'<a href="Flocker-0.3.1-py2-none-any.whl">'
'Flocker-0.3.1-py2-none-any.whl</a><br />\n</div></html>'
)
self.assertEqual(expected, index.getContent())
def test_index_not_included(self):
"""
The pip index file does not reference itself.
"""
index = create_pip_index(
scratch_directory=self.scratch_directory,
packages=[
'Flocker-0.3.0-py2-none-any.whl',
'Flocker-0.3.1-py2-none-any.whl',
'index.html',
]
)
expected = (
'<html>\nThis is an index for pip\n<div>'
'<a href="Flocker-0.3.0-py2-none-any.whl">'
'Flocker-0.3.0-py2-none-any.whl</a><br />\n</div><div>'
'<a href="Flocker-0.3.1-py2-none-any.whl">'
'Flocker-0.3.1-py2-none-any.whl</a><br />\n</div></html>'
)
self.assertEqual(expected, index.getContent())
def test_quoted_destination(self):
"""
Destination links are quoted.
"""
index = create_pip_index(
scratch_directory=self.scratch_directory,
packages=[
'"Flocker-0.3.0-py2-none-any.whl',
]
)
expected = (
'<html>\nThis is an index for pip\n<div>'
'<a href=""Flocker-0.3.0-py2-none-any.whl">'
'"Flocker-0.3.0-py2-none-any.whl</a><br />\n</div></html>'
)
self.assertEqual(expected, index.getContent())
def test_escaped_title(self):
"""
Link titles are escaped.
"""
index = create_pip_index(
scratch_directory=self.scratch_directory,
packages=[
'>Flocker-0.3.0-py2-none-any.whl',
]
)
expected = (
'<html>\nThis is an index for pip\n<div>'
'<a href=">Flocker-0.3.0-py2-none-any.whl">'
'>Flocker-0.3.0-py2-none-any.whl</a><br />\n</div></html>'
)
self.assertEqual(expected, index.getContent())
class UploadPipIndexTests(SynchronousTestCase):
"""
Tests for :func:`upload_pip_index`.
"""
def test_index_uploaded(self):
"""
An index file is uploaded to S3.
"""
bucket = 'clusterhq-archive'
aws = FakeAWS(
routing_rules={},
s3_buckets={
bucket: {
'python/Flocker-0.3.1-py2-none-any.whl': '',
},
})
scratch_directory = FilePath(self.mktemp())
scratch_directory.makedirs()
sync_perform(
ComposedDispatcher([aws.get_dispatcher(), base_dispatcher]),
upload_pip_index(
scratch_directory=scratch_directory,
target_bucket=bucket))
self.assertEqual(
aws.s3_buckets[bucket]['python/index.html'],
(
'<html>\nThis is an index for pip\n<div>'
'<a href="Flocker-0.3.1-py2-none-any.whl">'
'Flocker-0.3.1-py2-none-any.whl</a><br />\n</div></html>'
))
class CalculateBaseBranchTests(SynchronousTestCase):
"""
Tests for :func:`calculate_base_branch`.
"""
def setUp(self):
self.repo = create_git_repository(test_case=self)
def calculate_base_branch(self, version):
return calculate_base_branch(
version=version, path=self.repo.working_dir)
def test_calculate_base_branch_for_non_release_fails(self):
"""
Calling :func:`calculate_base_branch` with a version that isn't a
release fails.
"""
self.assertRaises(
NotARelease,
self.calculate_base_branch, '0.3.0-444-gf05215b')
def test_weekly_release_base(self):
"""
A weekly release is created from the "master" branch.
"""
self.assertEqual(
self.calculate_base_branch(version='0.3.0dev1').name,
"master")
def test_doc_release_base(self):
"""
        A documentation release is created from the release whose
        documentation is being changed.
"""
self.repo.create_head('release/flocker-0.3.0')
self.assertEqual(
self.calculate_base_branch(version='0.3.0+doc1').name,
"release/flocker-0.3.0")
def test_first_pre_release(self):
"""
The first pre-release for a marketing release is created from the
"master" branch.
"""
self.assertEqual(
self.calculate_base_branch(version='0.3.0pre1').name,
"master")
def test_uses_previous_pre_release(self):
"""
The second pre-release for a marketing release is created from the
        previous pre-release's release branch.
"""
self.repo.create_head('release/flocker-0.3.0pre1')
self.repo.create_tag('0.3.0pre1')
self.repo.create_head('release/flocker-0.3.0pre2')
self.repo.create_tag('0.3.0pre2')
self.assertEqual(
self.calculate_base_branch(version='0.3.0pre3').name,
"release/flocker-0.3.0pre2")
def test_unparseable_tags(self):
"""
There is no error raised if the repository contains a tag which cannot
be parsed as a version.
"""
self.repo.create_head('release/flocker-0.3.0unparseable')
self.repo.create_tag('0.3.0unparseable')
self.repo.create_head('release/flocker-0.3.0pre2')
self.repo.create_tag('0.3.0pre2')
self.assertEqual(
self.calculate_base_branch(version='0.3.0pre3').name,
"release/flocker-0.3.0pre2")
def test_parent_repository_used(self):
"""
If a path is given as the repository path, the parents of that file
are searched until a Git repository is found.
"""
self.assertEqual(
calculate_base_branch(
version='0.3.0dev1',
path=FilePath(self.repo.working_dir).child('README').path,
).name,
"master")
def test_no_pre_releases_fails(self):
"""
Trying to release a marketing release when no pre-release exists for it
fails.
"""
self.assertRaises(
NoPreRelease,
self.calculate_base_branch, '0.3.0')
def test_missing_pre_release_fails(self):
"""
Trying to release a pre-release when the previous pre-release does not
exist fails.
"""
self.repo.create_head('release/flocker-0.3.0pre1')
self.repo.create_tag('0.3.0pre1')
self.assertRaises(
MissingPreRelease,
self.calculate_base_branch, '0.3.0pre3')
def test_base_branch_does_not_exist_fails(self):
"""
Trying to create a release when the base branch does not exist fails.
"""
self.repo.create_tag('0.3.0pre1')
self.assertRaises(
GitCommandError,
self.calculate_base_branch, '0.3.0')
def test_tag_exists_fails(self):
"""
Trying to create a release when a tag already exists for the given
version fails.
"""
self.repo.create_tag('0.3.0')
self.assertRaises(
TagExists,
self.calculate_base_branch, '0.3.0')
def test_branch_only_exists_remote(self):
"""
        If the branch does not exist locally, but does exist as a remote
        branch, a base branch can still be calculated.
"""
self.repo.create_head('release/flocker-0.3.0pre1')
self.repo.create_tag('0.3.0pre1')
directory = FilePath(self.mktemp())
clone = self.repo.clone(path=directory.path)
self.assertEqual(
calculate_base_branch(
version='0.3.0pre2',
path=clone.working_dir).name,
"release/flocker-0.3.0pre1")
class PublishVagrantMetadataTests(SynchronousTestCase):
"""
Tests for :func:`publish_vagrant_metadata`.
"""
def setUp(self):
self.target_bucket = 'clusterhq-archive'
self.metadata_key = 'vagrant/flocker-tutorial.json'
def metadata_version(self, version, box_filename, provider="virtualbox"):
"""
        Create a version section for Vagrant metadata, for a given box, with
        a single provider (virtualbox by default).
:param bytes version: The version of the box, normalised for Vagrant.
:param bytes box_filename: The filename of the box.
:param bytes provider: The provider for the box.
:return: Dictionary to be used as a version section in Vagrant
metadata.
"""
return {
"version": version,
"providers": [
{
"url": "https://example.com/" + box_filename,
"name": provider,
}
],
}
def tutorial_metadata(self, versions):
"""
Create example tutorial metadata.
:param list versions: List of dictionaries of version sections.
:return: Dictionary to be used as Vagrant metadata.
"""
return {
"description": "clusterhq/flocker-tutorial box.",
"name": "clusterhq/flocker-tutorial",
"versions": versions,
}
def publish_vagrant_metadata(self, aws, version):
"""
Call :func:``publish_vagrant_metadata``, interacting with a fake AWS.
:param FakeAWS aws: Fake AWS to interact with.
:param version: See :py:func:`publish_vagrant_metadata`.
"""
scratch_directory = FilePath(self.mktemp())
scratch_directory.makedirs()
box_url = "https://example.com/flocker-tutorial-{}.box".format(version)
box_name = 'flocker-tutorial'
sync_perform(
ComposedDispatcher([aws.get_dispatcher(), base_dispatcher]),
publish_vagrant_metadata(
version=version,
box_url=box_url,
box_name=box_name,
target_bucket=self.target_bucket,
scratch_directory=scratch_directory))
def test_no_metadata_exists(self):
"""
A metadata file is added when one does not exist.
"""
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
self.publish_vagrant_metadata(aws=aws, version='0.3.0')
expected_version = self.metadata_version(
version="0.3.0",
box_filename="flocker-tutorial-0.3.0.box",
)
self.assertEqual(
json.loads(aws.s3_buckets[self.target_bucket][self.metadata_key]),
self.tutorial_metadata(versions=[expected_version]),
)
def test_metadata_content_type(self):
"""
Vagrant requires a JSON metadata file to have a Content-Type of
application/json.
"""
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
self.publish_vagrant_metadata(aws=aws, version='0.3.0')
self.assertEqual(
aws.s3_buckets[self.target_bucket][self.metadata_key].content_type,
'application/json'
)
def test_version_added(self):
"""
A version is added to an existing metadata file.
"""
existing_old_version = self.metadata_version(
version="0.3.0",
box_filename="flocker-tutorial-0.3.0.box",
)
existing_metadata = json.dumps(
self.tutorial_metadata(versions=[existing_old_version])
)
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {
'vagrant/flocker-tutorial.json': existing_metadata,
},
},
)
expected_new_version = self.metadata_version(
version="0.4.0",
box_filename="flocker-tutorial-0.4.0.box",
)
expected_metadata = self.tutorial_metadata(
versions=[existing_old_version, expected_new_version])
self.publish_vagrant_metadata(aws=aws, version='0.4.0')
self.assertEqual(
json.loads(aws.s3_buckets[self.target_bucket][self.metadata_key]),
expected_metadata,
)
def test_version_normalised(self):
"""
The version given is converted to a version number acceptable to
Vagrant.
"""
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
self.publish_vagrant_metadata(aws=aws, version='0.3.0_1')
metadata = json.loads(
aws.s3_buckets[self.target_bucket][self.metadata_key])
# The underscore is converted to a period in the version.
self.assertEqual(metadata['versions'][0]['version'], "0.3.0.1")
def test_version_already_exists(self):
"""
If a version already exists then its data is overwritten by the new
metadata. This works even if the version is changed when being
normalised.
"""
existing_version = self.metadata_version(
version="0.4.0.2314.g941011b",
box_filename="old_filename",
provider="old_provider",
)
existing_metadata = json.dumps(
self.tutorial_metadata(versions=[existing_version])
)
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {
'vagrant/flocker-tutorial.json': existing_metadata,
},
},
)
expected_version = self.metadata_version(
version="0.4.0.2314.g941011b",
box_filename="flocker-tutorial-0.4.0-2314-g941011b.box",
provider="virtualbox",
)
self.publish_vagrant_metadata(aws=aws, version='0.4.0-2314-g941011b')
metadata_versions = json.loads(
aws.s3_buckets[self.target_bucket][self.metadata_key])['versions']
self.assertEqual(metadata_versions, [expected_version])
class PublishHomebrewRecipeTests(SynchronousTestCase):
"""
Tests for :func:`publish_homebrew_recipe`.
"""
def setUp(self):
self.source_repo = create_git_repository(test_case=self, bare=True)
# Making a recipe involves interacting with PyPI, this should be
# a parameter, not a patch. See:
# https://clusterhq.atlassian.net/browse/FLOC-1759
self.patch(release, 'make_recipe',
lambda version, sdist_url:
"Recipe for " + version + " at " + sdist_url)
def test_commit_message(self):
"""
The recipe is committed with a sensible message.
"""
publish_homebrew_recipe(
homebrew_repo_url=self.source_repo.git_dir,
version='0.3.0',
scratch_directory=FilePath(self.mktemp()),
source_bucket="archive",
)
self.assertEqual(
self.source_repo.head.commit.summary,
u'Add recipe for Flocker version 0.3.0')
def test_recipe_contents(self):
"""
The passed in contents are in the recipe.
"""
publish_homebrew_recipe(
homebrew_repo_url=self.source_repo.git_dir,
version='0.3.0',
scratch_directory=FilePath(self.mktemp()),
source_bucket="bucket-name",
)
recipe = self.source_repo.head.commit.tree['flocker-0.3.0.rb']
self.assertEqual(recipe.data_stream.read(),
'Recipe for 0.3.0 at https://bucket-name.s3.amazonaws.com/python/Flocker-0.3.0.tar.gz') # noqa
def test_push_fails(self):
"""
If the push fails, an error is raised.
"""
non_bare_repo = create_git_repository(test_case=self, bare=False)
self.assertRaises(
PushFailed,
publish_homebrew_recipe,
non_bare_repo.git_dir, '0.3.0', "archive", FilePath(self.mktemp()))
def test_recipe_already_exists(self):
"""
If a recipe already exists with the same name, it is overwritten.
"""
publish_homebrew_recipe(
homebrew_repo_url=self.source_repo.git_dir,
version='0.3.0',
scratch_directory=FilePath(self.mktemp()),
source_bucket="archive",
)
self.patch(release, 'make_recipe',
lambda version, sdist_url: "New content")
publish_homebrew_recipe(
homebrew_repo_url=self.source_repo.git_dir,
version='0.3.0',
scratch_directory=FilePath(self.mktemp()),
source_bucket="archive",
)
recipe = self.source_repo.head.commit.tree['flocker-0.3.0.rb']
self.assertEqual(recipe.data_stream.read(), 'New content')
|
{
"content_hash": "e6ef81353ea855a370f9c765f5177e14",
"timestamp": "",
"source": "github",
"line_count": 2058,
"max_line_length": 114,
"avg_line_length": 36.032069970845484,
"alnum_prop": 0.52370741969415,
"repo_name": "lukemarsden/flocker",
"id": "65ca4140cc2694380a3a2dc463b521aedda3e5cc",
"size": "74216",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "admin/test/test_release.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2366120"
},
{
"name": "Ruby",
"bytes": "6229"
},
{
"name": "Shell",
"bytes": "3418"
}
],
"symlink_target": ""
}
|
"""Small wrapper around `jax.random`."""
from typing import Optional, Union, overload
from etils.array_types import PRNGKey, ui32 # pylint: disable=g-multiple-import
import jax
from jax import numpy as jnp
import jax.random
import numpy as np
class RandomState:
"""Small stateful wrapper around `jax.random.split` to reduce boilerplate.
Usage:
```
rng = jax3d.RandomState(0)
jax.random.uniform(rng.next())
```
"""
def __init__(self, seed_or_rng: Union[int, PRNGKey]):
"""Constructor."""
if isinstance(seed_or_rng, (np.ndarray, jnp.ndarray)):
self.curr_key = seed_or_rng
elif isinstance(seed_or_rng, int):
self.curr_key = jax.random.PRNGKey(seed_or_rng)
else:
raise TypeError(f'Invalid seed or key: {seed_or_rng}')
@overload
def next(self, n: None = None) -> PRNGKey:
...
@overload
def next(self, n: int) -> ui32['n 2']:
...
def next(self, n=None):
"""Returns the next rng key."""
if n is None:
self.curr_key, key = jax.random.split(self.curr_key)
return key
else:
keys = jax.random.split(self.curr_key, n + 1)
self.curr_key = keys[0]
return keys[1:]
def fork(self) -> 'RandomState':
"""Returns another RandomState initialised with `.next()`."""
return RandomState(self.next()) # pylint: disable=not-callable
def fold_in(self, data: int) -> None:
"""Folds in delta into the random state."""
self.curr_key = jax.random.fold_in(self.curr_key, data)
def fold_in_stateless(self, data: int) -> 'RandomState':
"""Folds in delta into the random state.
This version is stateless, so do not modify the random state of the
instance. Instead, return a new `RandomState` instance with updated state.
Args:
data: Delta to fold-in.
Returns:
The new `RandomState`.
"""
return RandomState(jax.random.fold_in(self.curr_key, data))
def bind_to_host_device(self,
*,
bind_to: str,
axis_name: Optional[str] = None):
"""Binds this random state to the host/device we are on."""
self.curr_key = self.bind_to_host_device_stateless(
bind_to=bind_to, axis_name=axis_name).curr_key
def bind_to_host_device_stateless(
self,
*,
bind_to: str,
axis_name: Optional[str] = None,
) -> 'RandomState':
"""Binds the random state to the host/device we are on.
Must be called from within a pmapped function. Note that when binding to
"device", we also bind the rng to hosts, as we fold_in the rng with
axis_index which is unique for devices across all hosts.
Args:
bind_to: Must be 'host' or 'device'; any other value raises ValueError.
axis_name: The axis of the devices we are binding rng across.
Only required when bind_to = 'device'.
Returns:
jax.random.PRNGKey specialized to host/device.
"""
if bind_to == 'host':
return RandomState(jax.random.fold_in(self.curr_key, jax.process_index()))
elif bind_to == 'device':
return RandomState(jax.random.fold_in(self.curr_key,
jax.lax.axis_index(axis_name)))
else:
raise ValueError(
f"`bind_to` must be one of 'host' or 'device', got {bind_to!r}")
def uniform_polar_points_on_sphere(rng: RandomState, shape):
"""Return an array of points evenly distributed on a sphere."""
# Last dimension must equal 2 for [theta, phi]
assert shape[-1] == 2
u = jax.random.uniform(
rng.next(), shape[0:-1] + (1,), minval=-1.0, maxval=1.0)
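# Inverse-CDF sampling: the sphere's area element is sin(phi) dphi dtheta,
# so drawing u = cos(phi) uniformly from [-1, 1] yields an area-uniform
# distribution instead of clustering points at the poles.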
phi = jnp.arccos(u)
theta = jax.random.uniform(
rng.next(), shape[0:-1] + (1,), minval=0.0, maxval=2.0 * jnp.pi)
return jnp.concatenate([theta, phi], axis=-1)
def polar_to_cartesian(theta_phi):
"""Convert polar coordinates to cartesian coordinates."""
# Define theta & phi according to the "mathematical" coordinate system
# (https://en.wikipedia.org/wiki/Spherical_coordinate_system)
# Last dimension must equal 2 for [theta, phi]
assert theta_phi.shape[-1] == 2
sin_theta = jnp.sin(theta_phi[..., 0])
cos_theta = jnp.cos(theta_phi[..., 0])
sin_phi = jnp.sin(theta_phi[..., 1])
cos_phi = jnp.cos(theta_phi[..., 1])
return jnp.stack([sin_theta * sin_phi, cos_theta * sin_phi, cos_phi],
axis=-1)
def uniform_points_on_sphere(rng: RandomState, shape):
"""Return an array of shape of points evenly distributed on a sphere."""
# Last dimension must equal 3 for cartesian coordinates [i, j, k]
assert shape[-1] == 3
theta_phi = uniform_polar_points_on_sphere(rng, shape[0:-1] + (2,))
points = polar_to_cartesian(theta_phi)
return points
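# --- Editor's sketch (hypothetical helpers, not part of the original module):
# the first function exercises the RandomState API documented above, the
# second sanity-checks that sampled cartesian points have unit norm. Both are
# defined but never called, so importing the module stays side-effect free.
def _random_state_usage_sketch():
  """Minimal sketch of the stateful and stateless RandomState helpers."""
  rng = RandomState(0)
  sample = jax.random.uniform(rng.next())  # consumes one key from the stream
  keys = rng.next(4)                       # batch of four keys, shape (4, 2)
  child = rng.fork()                       # independent child stream
  rng.fold_in(7)                           # mutates this instance in place
  other = rng.fold_in_stateless(7)         # new instance; `rng` is unchanged
  return sample, keys, child, other
def _unit_sphere_sanity_check():
  """Checks that `uniform_points_on_sphere` yields unit-norm points."""
  rng = RandomState(0)
  points = uniform_points_on_sphere(rng, (128, 3))
  # The sin/cos identities in polar_to_cartesian guarantee unit norm up to
  # floating-point error.
  assert jnp.allclose(jnp.linalg.norm(points, axis=-1), 1.0, atol=1e-5)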
|
{
"content_hash": "d3e595c69cfcac9cf9c3bd50d17ebfaf",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 80,
"avg_line_length": 31.763513513513512,
"alnum_prop": 0.6309295894490534,
"repo_name": "google-research/jax3d",
"id": "6da503d5b07f2e71c9207188107389d488f78a97",
"size": "5284",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jax3d/utils/random.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "47972"
},
{
"name": "Python",
"bytes": "1239944"
}
],
"symlink_target": ""
}
|
from pycalendar.datetime import DateTime
from pycalendar.period import Period
from twext.python.clsprop import classproperty
import txweb2.dav.test.util
from txweb2.http_headers import MimeType
from txweb2.stream import MemoryStream
from twisted.internet.defer import inlineCallbacks, succeed, returnValue
from twistedcaldav import caldavxml
from twistedcaldav.ical import Component, normalize_iCalStr
from txdav.caldav.datastore.query.filter import Filter
from txdav.caldav.datastore.scheduling.cuaddress import calendarUserFromCalendarUserAddress
from txdav.caldav.datastore.scheduling.freebusy import FreebusyQuery
from txdav.caldav.datastore.scheduling.ischedule.localservers import ServersDB, Server
from txdav.caldav.datastore.sql import ManagedAttachment, AttachmentLink
from txdav.caldav.datastore.test.common import CaptureProtocol
from txdav.common.datastore.podding.conduit import PoddingConduit, \
FailedCrossPodRequestError
from txdav.common.datastore.podding.resource import ConduitResource
from txdav.common.datastore.podding.test.util import MultiStoreConduitTest, \
FakeConduitRequest
from txdav.common.datastore.sql_tables import _BIND_STATUS_ACCEPTED
from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
from txdav.common.icommondatastore import ObjectResourceNameAlreadyExistsError, \
ObjectResourceNameNotAllowedError
from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
class TestConduit (CommonCommonTests, txweb2.dav.test.util.TestCase):
class FakeConduit(object):
def recv_fake(self, j):
return succeed({
"back2u": j["echo"],
"more": "bits",
})
@inlineCallbacks
def setUp(self):
yield super(TestConduit, self).setUp()
serversDB = ServersDB()
serversDB.addServer(Server("A", "http://127.0.0.1", "A", True))
serversDB.addServer(Server("B", "http://127.0.0.2", "B", False))
yield self.buildStoreAndDirectory(serversDB=serversDB)
self.site.resource.putChild("conduit", ConduitResource(self.site.resource, self.storeUnderTest()))
yield self.populate()
@inlineCallbacks
def populate(self):
yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
self.notifierFactory.reset()
@classproperty(cache=False)
def requirements(cls): # @NoSelf
return {
"user01": {
"calendar_1": {
},
"inbox": {
},
},
"user02": {
"calendar_1": {
},
"inbox": {
},
},
"user03": {
"calendar_1": {
},
"inbox": {
},
},
}
@inlineCallbacks
def test_validRequest(self):
"""
conduit.validRequest() returns directory records for a valid cross-pod
owner/sharee pair, raises DirectoryRecordNotFoundError for unknown users,
and raises FailedCrossPodRequestError when both users are on the same pod.
"""
conduit = PoddingConduit(self.storeUnderTest())
r1, r2 = yield conduit.validRequest("user01", "puser02")
self.assertTrue(r1 is not None)
self.assertTrue(r2 is not None)
yield self.assertFailure(
conduit.validRequest("bogus01", "user02"),
DirectoryRecordNotFoundError
)
yield self.assertFailure(
conduit.validRequest("user01", "bogus02"),
DirectoryRecordNotFoundError
)
yield self.assertFailure(
conduit.validRequest("user01", "user02"),
FailedCrossPodRequestError
)
class TestConduitToConduit(MultiStoreConduitTest):
class FakeConduit(PoddingConduit):
@inlineCallbacks
def send_fake(self, txn, ownerUID, shareeUID):
_ignore_owner, sharee = yield self.validRequest(ownerUID, shareeUID)
action = {
"action": "fake",
"echo": "bravo"
}
result = yield self.sendRequest(txn, sharee, action)
returnValue(result)
def recv_fake(self, txn, j):
return succeed({
"back2u": j["echo"],
"more": "bits",
})
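# Round trip: send_fake() validates both calendar users, wraps the payload
# in an "action" dict and delivers it via sendRequest(); the receiving
# pod's recv_fake() then echoes the "echo" value back in its response.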
def makeConduit(self, store):
"""
Use our own variant.
"""
conduit = self.FakeConduit(store)
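# Route cross-pod requests through the in-memory fake request class
# rather than real HTTP between pods.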
conduit.conduitRequestClass = FakeConduitRequest
return conduit
@inlineCallbacks
def test_fake_action(self):
"""
Cross-pod request works when the conduit does support the action.
"""
store = self.theStoreUnderTest(0)
response = yield store.conduit.send_fake(self.theTransactionUnderTest(0), "user01", "puser01")
self.assertEqual(response, {"back2u": "bravo", "more": "bits"})
yield self.commitTransaction(0)
store = self.theStoreUnderTest(1)
response = yield store.conduit.send_fake(self.theTransactionUnderTest(1), "puser01", "user01")
self.assertEqual(response, {"back2u": "bravo", "more": "bits"})
yield self.commitTransaction(1)
class TestConduitAPI(MultiStoreConduitTest):
"""
Test that the conduit api works.
"""
nowYear = {"now": DateTime.getToday().getYear()}
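# iCalendar data requires CRLF line endings, hence the "\n" -> "\r\n"
# replacement on each literal below; the {now:04d} placeholder is filled
# with the current year so the recurring events fall in a queryable range.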
caldata1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid1
DTSTART:{now:04d}0102T140000Z
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
RRULE:FREQ=WEEKLY
SUMMARY:instance
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(**nowYear)
caldata1_changed = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid1
DTSTART:{now:04d}0102T150000Z
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
RRULE:FREQ=WEEKLY
SUMMARY:instance changed
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(**nowYear)
caldata2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid2
DTSTART:{now:04d}0102T160000Z
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
RRULE:FREQ=WEEKLY
SUMMARY:instance
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(**nowYear)
caldata3 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid3
DTSTART:{now:04d}0102T160000Z
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
RRULE:FREQ=WEEKLY
SUMMARY:instance
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(**nowYear)
@inlineCallbacks
def test_basic_share(self):
"""
Test that basic invite/uninvite works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
shared = yield calendar1.shareeView("puser01")
self.assertEqual(shared.shareStatus(), _BIND_STATUS_ACCEPTED)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
self.assertTrue(shared is not None)
self.assertTrue(shared.external())
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.uninviteUIDFromShare("puser01")
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
self.assertTrue(shared is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_countobjects(self):
"""
Test that action=countobjects works.
"""
yield self.createShare("user01", "puser01")
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
count = yield shared.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
count = yield calendar1.countObjectResources()
self.assertEqual(count, 1)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
count = yield shared.countObjectResources()
self.assertEqual(count, 1)
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
count = yield calendar1.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
count = yield shared.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(1)
@inlineCallbacks
def test_listobjects(self):
"""
Test that action=listobjects works.
"""
yield self.createShare("user01", "puser01")
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
objects = yield shared.listObjectResources()
self.assertEqual(set(objects), set())
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield calendar1.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
objects = yield calendar1.listObjectResources()
self.assertEqual(set(objects), set(("1.ics", "2.ics",)))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
objects = yield shared.listObjectResources()
self.assertEqual(set(objects), set(("1.ics", "2.ics",)))
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
objects = yield calendar1.listObjectResources()
self.assertEqual(set(objects), set(("2.ics",)))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
objects = yield shared.listObjectResources()
self.assertEqual(set(objects), set(("2.ics",)))
yield self.commitTransaction(1)
@inlineCallbacks
def test_synctoken(self):
"""
Test that action=synctoken works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_1 = yield calendar1.syncTokenRevision()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_1 = yield shared.syncTokenRevision()
yield self.commitTransaction(1)
self.assertEqual(token1_1, token2_1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_2 = yield calendar1.syncTokenRevision()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_2 = yield shared.syncTokenRevision()
yield self.commitTransaction(1)
self.assertNotEqual(token1_1, token1_2)
self.assertEqual(token1_2, token2_2)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
count = yield calendar1.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_3 = yield calendar1.syncTokenRevision()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_3 = yield shared.syncTokenRevision()
yield self.commitTransaction(1)
self.assertNotEqual(token1_1, token1_3)
self.assertNotEqual(token1_2, token1_3)
self.assertEqual(token1_3, token2_3)
@inlineCallbacks
def test_resourcenamessincerevision(self):
"""
Test that action=resourcenamessincerevision works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_1 = yield calendar1.syncToken()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_1 = yield shared.syncToken()
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_2 = yield calendar1.syncToken()
names1 = yield calendar1.resourceNamesSinceToken(token1_1)
self.assertEqual(names1, ([u"1.ics"], [], [],))
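# resourceNamesSinceToken returns a (changed, deleted, invalid) tuple of
# names; the newly created "1.ics" shows up in the changed list.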
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_2 = yield shared.syncToken()
names2 = yield shared.resourceNamesSinceToken(token2_1)
self.assertEqual(names2, ([u"1.ics"], [], [],))
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
count = yield calendar1.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_3 = yield calendar1.syncToken()
names1 = yield calendar1.resourceNamesSinceToken(token1_2)
self.assertEqual(names1, ([], [u"1.ics"], [],))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_3 = yield shared.syncToken()
names2 = yield shared.resourceNamesSinceToken(token2_2)
self.assertEqual(names2, ([], [u"1.ics"], [],))
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
names1 = yield calendar1.resourceNamesSinceToken(token1_3)
self.assertEqual(names1, ([], [], [],))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
names2 = yield shared.resourceNamesSinceToken(token2_3)
self.assertEqual(names2, ([], [], [],))
yield self.commitTransaction(1)
@inlineCallbacks
def test_resourceuidforname(self):
"""
Test that action=resourceuidforname works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
uid = yield calendar1.resourceUIDForName("1.ics")
self.assertEqual(uid, "uid1")
uid = yield calendar1.resourceUIDForName("2.ics")
self.assertTrue(uid is None)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
uid = yield shared.resourceUIDForName("1.ics")
self.assertEqual(uid, "uid1")
uid = yield shared.resourceUIDForName("2.ics")
self.assertTrue(uid is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_resourcenameforuid(self):
"""
Test that action=resourcenameforuid works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
name = yield calendar1.resourceNameForUID("uid1")
self.assertEqual(name, "1.ics")
name = yield calendar1.resourceNameForUID("uid2")
self.assertTrue(name is None)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
name = yield shared.resourceNameForUID("uid1")
self.assertEqual(name, "1.ics")
name = yield shared.resourceNameForUID("uid2")
self.assertTrue(name is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_search(self):
"""
Test that action=search works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
filter = caldavxml.Filter(
caldavxml.ComponentFilter(
*[caldavxml.ComponentFilter(
**{"name": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")}
)],
**{"name": "VCALENDAR"}
)
)
filter = Filter(filter)
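# The filter above matches any VEVENT, VFREEBUSY or VAVAILABILITY
# component inside a VCALENDAR, i.e. a typical CalDAV calendar-query.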
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
names = [item[0] for item in (yield calendar1.search(filter))]
self.assertEqual(names, ["1.ics", ])
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
names = [item[0] for item in (yield shared.search(filter))]
self.assertEqual(names, ["1.ics", ])
yield self.commitTransaction(1)
@inlineCallbacks
def test_loadallobjects(self):
"""
Test that action=loadallobjects works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
resource1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
resource_id1 = resource1.id()
resource2 = yield calendar1.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
resource_id2 = resource2.id()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resources = yield shared.objectResources()
byname = dict([(obj.name(), obj) for obj in resources])
byuid = dict([(obj.uid(), obj) for obj in resources])
self.assertEqual(len(resources), 2)
self.assertEqual(set([obj.name() for obj in resources]), set(("1.ics", "2.ics",)))
self.assertEqual(set([obj.uid() for obj in resources]), set(("uid1", "uid2",)))
self.assertEqual(set([obj.id() for obj in resources]), set((resource_id1, resource_id2,)))
resource = yield shared.objectResourceWithName("1.ics")
self.assertTrue(resource is byname["1.ics"])
resource = yield shared.objectResourceWithName("2.ics")
self.assertTrue(resource is byname["2.ics"])
resource = yield shared.objectResourceWithName("Missing.ics")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is byuid["uid1"])
resource = yield shared.objectResourceWithUID("uid2")
self.assertTrue(resource is byuid["uid2"])
resource = yield shared.objectResourceWithUID("uid-missing")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithID(resource_id1)
self.assertTrue(resource is byname["1.ics"])
resource = yield shared.objectResourceWithID(resource_id2)
self.assertTrue(resource is byname["2.ics"])
resource = yield shared.objectResourceWithID(0)
self.assertTrue(resource is None)
yield self.commitTransaction(1)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resources = yield shared.objectResources()
byname = dict([(obj.name(), obj) for obj in resources])
byuid = dict([(obj.uid(), obj) for obj in resources])
self.assertEqual(len(resources), 1)
self.assertEqual(set([obj.name() for obj in resources]), set(("2.ics",)))
self.assertEqual(set([obj.uid() for obj in resources]), set(("uid2",)))
self.assertEqual(set([obj.id() for obj in resources]), set((resource_id2,)))
resource = yield shared.objectResourceWithName("1.ics")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithName("2.ics")
self.assertTrue(resource is byname["2.ics"])
resource = yield shared.objectResourceWithName("Missing.ics")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithUID("uid2")
self.assertTrue(resource is byuid["uid2"])
resource = yield shared.objectResourceWithUID("uid-missing")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithID(resource_id1)
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithID(resource_id2)
self.assertTrue(resource is byname["2.ics"])
resource = yield shared.objectResourceWithID(0)
self.assertTrue(resource is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_loadallobjectswithnames(self):
"""
Test that action=loadallobjectswithnames works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
resource1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
resource_id1 = resource1.id()
yield calendar1.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
resource3 = yield calendar1.createCalendarObjectWithName("3.ics", Component.fromString(self.caldata3))
resource_id3 = resource3.id()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resources = yield shared.objectResources()
self.assertEqual(len(resources), 3)
yield self.commitTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resources = yield shared.objectResourcesWithNames(("1.ics", "3.ics",))
byname = dict([(obj.name(), obj) for obj in resources])
byuid = dict([(obj.uid(), obj) for obj in resources])
self.assertEqual(len(resources), 2)
self.assertEqual(set([obj.name() for obj in resources]), set(("1.ics", "3.ics",)))
self.assertEqual(set([obj.uid() for obj in resources]), set(("uid1", "uid3",)))
self.assertEqual(set([obj.id() for obj in resources]), set((resource_id1, resource_id3,)))
resource = yield shared.objectResourceWithName("1.ics")
self.assertTrue(resource is byname["1.ics"])
resource = yield shared.objectResourceWithName("3.ics")
self.assertTrue(resource is byname["3.ics"])
resource = yield shared.objectResourceWithName("Missing.ics")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is byuid["uid1"])
resource = yield shared.objectResourceWithUID("uid3")
self.assertTrue(resource is byuid["uid3"])
resource = yield shared.objectResourceWithUID("uid-missing")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithID(resource_id1)
self.assertTrue(resource is byname["1.ics"])
resource = yield shared.objectResourceWithID(resource_id3)
self.assertTrue(resource is byname["3.ics"])
resource = yield shared.objectResourceWithID(0)
self.assertTrue(resource is None)
yield self.commitTransaction(1)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resources = yield shared.objectResourcesWithNames(("1.ics", "3.ics",))
byname = dict([(obj.name(), obj) for obj in resources])
byuid = dict([(obj.uid(), obj) for obj in resources])
self.assertEqual(len(resources), 1)
self.assertEqual(set([obj.name() for obj in resources]), set(("3.ics",)))
self.assertEqual(set([obj.uid() for obj in resources]), set(("uid3",)))
self.assertEqual(set([obj.id() for obj in resources]), set((resource_id3,)))
resource = yield shared.objectResourceWithName("1.ics")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithName("3.ics")
self.assertTrue(resource is byname["3.ics"])
resource = yield shared.objectResourceWithName("Missing.ics")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithUID("uid3")
self.assertTrue(resource is byuid["uid3"])
resource = yield shared.objectResourceWithUID("uid-missing")
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithID(resource_id1)
self.assertTrue(resource is None)
resource = yield shared.objectResourceWithID(resource_id3)
self.assertTrue(resource is byname["3.ics"])
resource = yield shared.objectResourceWithID(0)
self.assertTrue(resource is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_objectwith(self):
"""
Test that action=objectwith works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
resource = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
resource_id = resource.id()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithName("1.ics")
self.assertTrue(resource is not None)
self.assertEqual(resource.name(), "1.ics")
self.assertEqual(resource.uid(), "uid1")
resource = yield shared.objectResourceWithName("2.ics")
self.assertTrue(resource is None)
yield self.commitTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is not None)
self.assertEqual(resource.name(), "1.ics")
self.assertEqual(resource.uid(), "uid1")
resource = yield shared.objectResourceWithUID("uid2")
self.assertTrue(resource is None)
yield self.commitTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithID(resource_id)
self.assertTrue(resource is not None)
self.assertEqual(resource.name(), "1.ics")
self.assertEqual(resource.uid(), "uid1")
resource = yield shared.objectResourceWithID(0)
self.assertTrue(resource is None)
yield self.commitTransaction(1)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithName("1.ics")
self.assertTrue(resource is None)
yield self.commitTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is None)
yield self.commitTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithID(resource_id)
self.assertTrue(resource is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_create(self):
"""
Test that action=create works.
"""
yield self.createShare("user01", "puser01")
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
resource_id = resource.id()
self.assertTrue(resource is not None)
self.assertEqual(resource.name(), "1.ics")
self.assertEqual(resource.uid(), "uid1")
self.assertFalse(resource._componentChanged)
yield self.commitTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
resource = yield shared.objectResourceWithUID("uid1")
self.assertTrue(resource is not None)
self.assertEqual(resource.name(), "1.ics")
self.assertEqual(resource.uid(), "uid1")
self.assertEqual(resource.id(), resource_id)
yield self.commitTransaction(1)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
self.assertTrue(object1 is not None)
self.assertEqual(object1.name(), "1.ics")
self.assertEqual(object1.uid(), "uid1")
self.assertEqual(object1.id(), resource_id)
yield self.commitTransaction(0)
@inlineCallbacks
def test_create_exception(self):
"""
Test that action=create fails when a duplicate name is used.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
yield self.failUnlessFailure(shared.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)), ObjectResourceNameAlreadyExistsError)
yield self.abortTransaction(1)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
yield self.failUnlessFailure(shared.createCalendarObjectWithName(".2.ics", Component.fromString(self.caldata2)), ObjectResourceNameNotAllowedError)
yield self.abortTransaction(1)
@inlineCallbacks
def test_setcomponent(self):
"""
Test that action=setcomponent works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
ical = yield shared_object.component()
self.assertTrue(isinstance(ical, Component))
self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1))
yield self.commitTransaction(1)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
changed = yield shared_object.setComponent(Component.fromString(self.caldata1_changed))
self.assertFalse(changed)
ical = yield shared_object.component()
self.assertTrue(isinstance(ical, Component))
self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1_changed))
yield self.commitTransaction(1)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
ical = yield object1.component()
self.assertTrue(isinstance(ical, Component))
self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1_changed))
yield self.commitTransaction(0)
@inlineCallbacks
def test_component(self):
"""
Test that action=component works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
ical = yield shared_object.component()
self.assertTrue(isinstance(ical, Component))
self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1))
yield self.commitTransaction(1)
@inlineCallbacks
def test_remove(self):
"""
Test that action=remove works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
yield shared_object.remove()
yield self.commitTransaction(1)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
self.assertTrue(shared_object is None)
yield self.commitTransaction(1)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
self.assertTrue(object1 is None)
yield self.commitTransaction(0)
@inlineCallbacks
def test_freebusy(self):
"""
Test that action=freebusy works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
fbstart = "{now:04d}0102T000000Z".format(**self.nowYear)
fbend = "{now:04d}0103T000000Z".format(**self.nowYear)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
fbinfo = FreebusyQuery.FBInfo([], [], [])
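# FBInfo groups the free-busy results into three period lists (busy,
# tentative and unavailable); the assertions below check each in turn.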
timerange = Period(DateTime.parseText(fbstart), DateTime.parseText(fbend))
organizer = recipient = (yield calendarUserFromCalendarUserAddress("mailto:puser01@example.com", self.theTransactionUnderTest(1)))
freebusy = FreebusyQuery(organizer=organizer, recipient=recipient, timerange=timerange)
matchtotal = (yield freebusy.generateFreeBusyInfo([shared, ], fbinfo))
self.assertEqual(matchtotal, 1)
self.assertEqual(fbinfo[0], [Period.parseText("{now:04d}0102T140000Z/PT1H".format(**self.nowYear)), ])
self.assertEqual(len(fbinfo[1]), 0)
self.assertEqual(len(fbinfo[2]), 0)
yield self.commitTransaction(1)
def attachmentToString(self, attachment):
"""
Convenience to convert an L{IAttachment} to a string.
@param attachment: an L{IAttachment} provider to convert into a string.
@return: a L{Deferred} that fires with the contents of the attachment.
@rtype: L{Deferred} firing C{bytes}
"""
capture = CaptureProtocol()
attachment.retrieve(capture)
return capture.deferred
@inlineCallbacks
def test_add_attachment(self):
"""
Test that action=add-attachment works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
resourceID = object1.id()
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
data = "Here is some text."
attachment, location = yield shared_object.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream(data))
managedID = attachment.managedID()
from txdav.caldav.datastore.sql_external import ManagedAttachmentExternal
self.assertTrue(isinstance(attachment, ManagedAttachmentExternal))
self.assertEqual(attachment.size(), len(data))
self.assertTrue("user01/dropbox/" in location)
yield self.commitTransaction(1)
cobjs = yield ManagedAttachment.referencesTo(self.theTransactionUnderTest(0), managedID)
self.assertEqual(cobjs, set((resourceID,)))
attachment = yield ManagedAttachment.load(self.theTransactionUnderTest(0), resourceID, managedID)
self.assertEqual(attachment.name(), "test.txt")
data = yield self.attachmentToString(attachment)
self.assertEqual(data, "Here is some text.")
yield self.commitTransaction(0)
@inlineCallbacks
def test_update_attachment(self):
"""
Test that action=update-attachment works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
resourceID = object1.id()
attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
managedID = attachment.managedID()
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
data = "Here is some more text."
attachment, location = yield shared_object.updateAttachment(managedID, MimeType.fromString("text/plain"), "test.txt", MemoryStream(data))
managedID = attachment.managedID()
from txdav.caldav.datastore.sql_external import ManagedAttachmentExternal
self.assertTrue(isinstance(attachment, ManagedAttachmentExternal))
self.assertEqual(attachment.size(), len(data))
self.assertTrue("user01/dropbox/" in location)
yield self.commitTransaction(1)
cobjs = yield ManagedAttachment.referencesTo(self.theTransactionUnderTest(0), managedID)
self.assertEqual(cobjs, set((resourceID,)))
attachment = yield ManagedAttachment.load(self.theTransactionUnderTest(0), resourceID, managedID)
self.assertEqual(attachment.name(), "test.txt")
data = yield self.attachmentToString(attachment)
self.assertEqual(data, "Here is some more text.")
yield self.commitTransaction(0)
@inlineCallbacks
def test_remove_attachment(self):
"""
Test that action=remove-attachment works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
resourceID = object1.id()
attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
managedID = attachment.managedID()
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
yield shared_object.removeAttachment(None, managedID)
yield self.commitTransaction(1)
cobjs = yield ManagedAttachment.referencesTo(self.theTransactionUnderTest(0), managedID)
self.assertEqual(cobjs, set())
attachment = yield ManagedAttachment.load(self.theTransactionUnderTest(0), resourceID, managedID)
self.assertTrue(attachment is None)
yield self.commitTransaction(0)
@inlineCallbacks
def test_get_all_attachments(self):
"""
Test that action=get-all-attachments works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
attachments = yield shared_object.ownerHome().getAllAttachments()
self.assertEqual(len(attachments), 1)
self.assertTrue(isinstance(attachments[0], ManagedAttachment))
self.assertEqual(attachments[0].contentType(), MimeType.fromString("text/plain"))
self.assertEqual(attachments[0].name(), "test.txt")
yield self.commitTransaction(1)
@inlineCallbacks
def test_get_attachment_data(self):
"""
Test that action=get-attachment-data works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
remote_id = attachment.id()
yield self.commitTransaction(0)
home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="puser01")
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
attachment = yield ManagedAttachment._create(self.theTransactionUnderTest(1), None, home1.id())
attachment._contentType = MimeType.fromString("text/plain")
attachment._name = "test.txt"
yield shared_object.ownerHome().readAttachmentData(remote_id, attachment)
yield self.commitTransaction(1)
@inlineCallbacks
def test_get_attachment_links(self):
"""
Test that action=get-attachment-links works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
cobj1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
calobjID = cobj1.id()
yield self.commitTransaction(0)
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
attID = attachment.id()
managedID = attachment.managedID()
yield self.commitTransaction(0)
shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
links = yield shared_object.ownerHome().getAttachmentLinks()
self.assertEqual(len(links), 1)
self.assertTrue(isinstance(links[0], AttachmentLink))
self.assertEqual(links[0]._attachmentID, attID)
self.assertEqual(links[0]._managedID, managedID)
self.assertEqual(links[0]._calendarObjectID, calobjID)
yield self.commitTransaction(1)
|
{
"content_hash": "d323eb75b3957fdc395e21b742adba3d",
"timestamp": "",
"source": "github",
"line_count": 1086,
"max_line_length": 158,
"avg_line_length": 45.705340699815835,
"alnum_prop": 0.6858530099121606,
"repo_name": "macosforge/ccs-calendarserver",
"id": "040cb43b42a679bae7e1cea30ce8644b92a7c6b6",
"size": "50243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txdav/common/datastore/podding/test/test_conduit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from tixi3 import tixi3wrapper
# define handles
tixi = tixi3wrapper.Tixi3()
xmlInputFilename = "MainFile.xml"
xmlOutputFilename = "Result_out.xml"
# Open a CPACS configuration file with TiXI to get a document handle.
tixi.open(xmlInputFilename, recursive=True)
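# With recursive=True, TiXI also loads the documents referenced via
# xi:include, so the edits below can address nodes that live in external
# satellite files.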
# Remove an element from the main XML file.
tixi.removeElement("/plane/aPoint")
# Add an element to a node that is located in an external satellite file.
tixi.addTextElement("/plane/testNode/aVeryTest/testNode/aVeryTest3/point", "y", "123.45")
# export document as string
xmlDocumentString = tixi.exportDocumentAsString()
print("---------------------------------")
print(xmlDocumentString)
print("---------------------------------")
# After all elements have been edited, save the document; changes made in
# the included sub-files are written back to those files as well.
tixi.saveDocument(xmlOutputFilename)
# Now we can close the TiXI document.
error = tixi.close()
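# --- Editor's sketch (hypothetical, not part of the original example): the
# same removal guarded by an existence check. This assumes the wrapper's
# checkElement() raises TixiException when the XPath does not resolve.
from tixi3.tixi3wrapper import TixiException
def remove_if_present(handle, xpath):
    """Remove an element only if it exists in the (merged) document."""
    try:
        handle.checkElement(xpath)  # assumed to raise if xpath is missing
    except TixiException:
        return False
    handle.removeElement(xpath)
    return True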
|
{
"content_hash": "0fa7b4531f81b930fce0cb95d05ad992",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 100,
"avg_line_length": 21.40909090909091,
"alnum_prop": 0.7059447983014862,
"repo_name": "melven/tixi",
"id": "25b27d885e68a470ef093cac3b087988f19d93a0",
"size": "1139",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/python/MergeIncludedFiles.py/MergeIncludedFiles.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "312276"
},
{
"name": "C++",
"bytes": "347700"
},
{
"name": "CMake",
"bytes": "54000"
},
{
"name": "Fortran",
"bytes": "22477"
},
{
"name": "M",
"bytes": "1003"
},
{
"name": "MATLAB",
"bytes": "1019"
},
{
"name": "Makefile",
"bytes": "1517"
},
{
"name": "Python",
"bytes": "133917"
},
{
"name": "Shell",
"bytes": "465"
},
{
"name": "XSLT",
"bytes": "1713"
}
],
"symlink_target": ""
}
|
import itertools
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors._ball_tree import BallTree
from sklearn.neighbors import DistanceMetric
from sklearn.utils import check_random_state
rng = np.random.RandomState(10)
V_mahalanobis = rng.rand(3, 3)
V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=rng.random_sample(DIMENSION)),
'wminkowski': dict(p=3, w=rng.random_sample(DIMENSION)),
'mahalanobis': dict(V=V_mahalanobis)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
@pytest.mark.parametrize('metric',
itertools.chain(BOOLEAN_METRICS, DISCRETE_METRICS))
def test_ball_tree_query_metrics(metric):
rng = check_random_state(0)
if metric in BOOLEAN_METRICS:
X = rng.random_sample((40, 10)).round(0)
Y = rng.random_sample((10, 10)).round(0)
elif metric in DISCRETE_METRICS:
X = (4 * rng.random_sample((40, 10))).round(0)
Y = (4 * rng.random_sample((10, 10))).round(0)
k = 5
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
def test_query_haversine():
rng = check_random_state(0)
X = 2 * np.pi * rng.random_sample((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
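# --- Editor's sketch (not collected by pytest, no test_ prefix): a minimal
# end-to-end use of the brute-force helper above, comparing BallTree against
# the reference implementation for the plain euclidean metric.
def _example_ball_tree_vs_brute_force():
    rng = check_random_state(0)
    X = rng.random_sample((40, DIMENSION))
    Y = rng.random_sample((10, DIMENSION))
    bt = BallTree(X, leaf_size=2, metric='euclidean')
    dist_tree, _ = bt.query(Y, k=3)
    dist_brute, _ = brute_force_neighbors(X, Y, k=3, metric='euclidean')
    assert_array_almost_equal(dist_tree, dist_brute)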
|
{
"content_hash": "0975f6891227472a813b1537d01722dc",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 76,
"avg_line_length": 32.71641791044776,
"alnum_prop": 0.6208941605839416,
"repo_name": "bnaul/scikit-learn",
"id": "8da703dbe207df813c969e4758f6d77e94ee88bc",
"size": "2192",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/neighbors/tests/test_ball_tree.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7229182"
},
{
"name": "Shell",
"bytes": "19938"
}
],
"symlink_target": ""
}
|
"""Test transaction signing using the signrawtransaction* RPCs."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, hex_str_to_bytes
from test_framework.messages import sha256
from test_framework.script import CScript, OP_0
from decimal import Decimal
class SignRawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-deprecatedrpc=signrawtransaction"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def successful_signing_test(self):
"""Create and sign a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA']
inputs = [
# Valid pay-to-pubkey scripts
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
{'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0,
'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'},
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransactionwithkey(rawTx, privKeys, inputs)
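# signrawtransactionwithkey cannot consult the wallet, so the
# scriptPubKey of every input being signed is supplied explicitly
# alongside the private keys.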
# 1) The transaction has a complete set of signatures
assert rawTxSigned['complete']
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def test_with_lock_outputs(self):
"""Test correct error reporting when trying to sign a locked output"""
self.nodes[0].encryptwallet("password")
rawTx = '020000000156b958f78e3f24e0b2f4e4db1255426b0902027cb37e3ddadb52e37c3557dddb0000000000ffffffff01c0a6b929010000001600149a2ee8c77140a053f36018ac8124a6ececc1668a00000000'
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].signrawtransactionwithwallet, rawTx)
def script_verification_error_test(self):
"""Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
# Make sure decoderawtransaction is at least marginally sane
decodedRawTx = self.nodes[0].decoderawtransaction(rawTx)
for i, inp in enumerate(inputs):
assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"])
assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])
# Make sure decoderawtransaction throws if there is extra data
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00")
rawTxSigned = self.nodes[0].signrawtransactionwithkey(rawTx, privKeys, scripts)
# 3) The transaction has no complete set of signatures
assert not rawTxSigned['complete']
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
assert not rawTxSigned['errors'][0]['witness']
# Now test signing failure for transaction with input witnesses
p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"
rawTxSigned = self.nodes[0].signrawtransactionwithwallet(p2wpkh_raw_tx)
# 7) The transaction has no complete set of signatures
assert not rawTxSigned['complete']
# 8) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 9) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# Non-empty witness checked here
assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"])
assert not rawTxSigned['errors'][0]['witness']
def witness_script_test(self):
# Now test signing transaction to P2SH-P2WSH addresses without wallet
# Create a new P2SH-P2WSH 1-of-1 multisig address:
embedded_address = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())
embedded_privkey = self.nodes[1].dumpprivkey(embedded_address["address"])
p2sh_p2wsh_address = self.nodes[1].addmultisigaddress(1, [embedded_address["pubkey"]], "", "p2sh-segwit")
# send transaction to P2SH-P2WSH 1-of-1 multisig address
self.nodes[0].generate(101)
self.nodes[0].sendtoaddress(p2sh_p2wsh_address["address"], 49.999)
self.nodes[0].generate(1)
self.sync_all()
# Find the UTXO for the transaction node[1] should have received, check witnessScript matches
unspent_output = self.nodes[1].listunspent(0, 999999, [p2sh_p2wsh_address["address"]])[0]
assert_equal(unspent_output["witnessScript"], p2sh_p2wsh_address["redeemScript"])
p2sh_redeemScript = CScript([OP_0, sha256(hex_str_to_bytes(p2sh_p2wsh_address["redeemScript"]))])
assert_equal(unspent_output["redeemScript"], p2sh_redeemScript.hex())
# Now create and sign a transaction spending that output on node[0], which doesn't know the scripts or keys
spending_tx = self.nodes[0].createrawtransaction([unspent_output], {self.nodes[1].getnewaddress(): Decimal("49.998")})
spending_tx_signed = self.nodes[0].signrawtransactionwithkey(spending_tx, [embedded_privkey], [unspent_output])
# Check the signing completed successfully
assert 'complete' in spending_tx_signed
assert_equal(spending_tx_signed['complete'], True)
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
self.witness_script_test()
self.test_with_lock_outputs()
if __name__ == '__main__':
SignRawTransactionsTest().main()
|
{
"content_hash": "bbfe5d9b0c849e4a531e690df130db6b",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 712,
"avg_line_length": 53.55681818181818,
"alnum_prop": 0.7088903034160832,
"repo_name": "droark/bitcoin",
"id": "dcafc828573f08efe3bf94056990f23b5998ddf0",
"size": "9640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/rpc_signrawtransaction.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "696871"
},
{
"name": "C++",
"bytes": "6310531"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "198257"
},
{
"name": "Makefile",
"bytes": "119862"
},
{
"name": "Objective-C",
"bytes": "123749"
},
{
"name": "Objective-C++",
"bytes": "5382"
},
{
"name": "Python",
"bytes": "1583778"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "98048"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
from airflow.exceptions import AirflowException
from airflow.providers.databricks.hooks.databricks import RunState
def normalise_json_content(content, json_path: str = 'json') -> str | bool | list | dict:
"""
    Normalize content, or all values of content if it is a dict, to a string. The
    function raises if content contains non-string, non-numeric, non-boolean types.
    This function exists because the ``self.json`` field must be a dict with only
    string values: ``render_template`` fails for numerical values.
    The one exception is boolean values, which cannot be converted to strings
    because Databricks does not understand 'True' or 'False' values.
"""
normalise = normalise_json_content
if isinstance(content, (str, bool)):
return content
elif isinstance(
content,
(
int,
float,
),
):
# Databricks can tolerate either numeric or string types in the API backend.
return str(content)
elif isinstance(content, (list, tuple)):
return [normalise(e, f'{json_path}[{i}]') for i, e in enumerate(content)]
elif isinstance(content, dict):
return {k: normalise(v, f'{json_path}[{k}]') for k, v in list(content.items())}
else:
param_type = type(content)
msg = f'Type {param_type} used for parameter {json_path} is not a number or a string'
raise AirflowException(msg)
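# Usage sketch (editorial illustration, not part of the upstream module):
# strings and booleans pass through unchanged, numbers are stringified, and
# lists/dicts are normalised recursively, e.g.
#   normalise_json_content({'retries': 3, 'notebook': 'etl', 'dry_run': True})
#   == {'retries': '3', 'notebook': 'etl', 'dry_run': True}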
def validate_trigger_event(event: dict):
"""
Validates correctness of the event
received from :class:`~airflow.providers.databricks.triggers.databricks.DatabricksExecutionTrigger`
"""
keys_to_check = ['run_id', 'run_page_url', 'run_state']
for key in keys_to_check:
if key not in event:
raise AirflowException(f'Could not find `{key}` in the event: {event}')
try:
RunState.from_json(event['run_state'])
except Exception:
raise AirflowException(f'Run state returned by the Trigger is incorrect: {event["run_state"]}')
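# Illustrative event shape accepted by validate_trigger_event (all values are
# hypothetical; 'run_state' must be JSON that RunState.from_json can parse):
#   {'run_id': 42,
#    'run_page_url': 'https://example.cloud.databricks.com/#job/1/run/42',
#    'run_state': '{"life_cycle_state": "TERMINATED", "result_state": "SUCCESS", "state_message": ""}'}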
|
{
"content_hash": "26f85011cdf862ad1ba44d1b70cf407d",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 103,
"avg_line_length": 40.0377358490566,
"alnum_prop": 0.6663524976437323,
"repo_name": "cfei18/incubator-airflow",
"id": "fa6e180826aa8966e7f065df989ecb8a6f175806",
"size": "2909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/providers/databricks/utils/databricks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
}
|
from datetime import date, datetime
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from .time import Time
class Filter:
def __init__(self, value):
self.value = value
self.filters = {'%s__exact': value}
def __call__(self, field_name):
		return {template % field_name: value for template, value in self.filters.items()}
def __str__(self):
return str(self.value)
def __repr__(self):
return repr(self.value)
class StringFilter(Filter):
def __init__(self, value):
self.value = value
self.filters = dict()
if not isinstance(value, (tuple, list)):
self.filters['%s__iexact'] = value
elif len(value) == 0 or len(value) > 3:
raise ValueError('String filter must either be an exact value or a triplet (starts with, contains, end with)')
else:
if len(value) >= 1 and value[0] is not None:
self.filters['%s__istartswith'] = value[0]
if len(value) >= 2 and value[1] is not None:
self.filters['%s__icontains'] = value[1]
if len(value) >= 3 and value[2] is not None:
self.filters['%s__iendswith'] = value[2]
class NumericFilter(Filter):
def __init__(self, value):
self.value = value
self.filters = dict()
if not isinstance(value, (tuple, list)):
self.filters['%s__exact'] = value
elif len(value) == 0 or len(value) > 2:
raise ValueError('Numeric filter must either be an exact value or a doublet (min value, max value)')
else:
if len(value) >= 1 and value[0] is not None:
self.filters['%s__gte'] = value[0]
if len(value) >= 2 and value[1] is not None:
self.filters['%s__lt'] = value[1]
class TimeFilter(Filter):
def __init__(self, value):
self.value = value
self.filters = dict()
if isinstance(self.value, str):
self.value = Time(value)
if isinstance(self.value, Time):
start_time = datetime(self.value.year, 1, 1)
			for attr, delta in [('month', relativedelta(years=1)), ('day', relativedelta(months=1)), ('hour', relativedelta(days=1)), ('minute', relativedelta(hours=1)), ('second', relativedelta(minutes=1)), ('stop', relativedelta(seconds=1))]:
part = getattr(self.value, attr)
if part is None:
end_time = start_time + delta
break
else:
start_time = start_time.replace(**{attr: part})
self.value = (start_time, end_time)
		if isinstance(self.value, (tuple, list)) and 0 < len(self.value) <= 2:
			# Pad to a (start, end) pair; use a list so item assignment also works for tuples.
			self.value = list(self.value) + [None] * (2 - len(self.value))
			if not all(v is None or isinstance(v, (str, datetime)) for v in self.value):
				raise ValueError('Time filter must either be a Time value, a Time compatible string, or a doublet (first date, last date)')
			if isinstance(self.value[0], str):
				self.value[0] = parse(self.value[0])
			if self.value[0] is not None:
				self.filters['%s__gte'] = self.value[0].isoformat()
			if isinstance(self.value[1], str):
				self.value[1] = parse(self.value[1])
			if self.value[1] is not None:
				self.filters['%s__lt'] = self.value[1].isoformat()
		else:
			raise ValueError('Time filter must either be a Time value, a Time compatible string, or a doublet (first date, last date)')
class RelatedFilter(Filter):
def __init__(self, value):
self.value = value
self.filters = dict()
self.filters['%s__in'] = value
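# Usage sketch (editorial illustration): each filter renders Django-style
# lookups keyed by the field name supplied at call time, e.g.
#   NumericFilter((10, 20))('wavelength')
#   == {'wavelength__gte': 10, 'wavelength__lt': 20}
#   StringFilter(('AIA', None, None))('instrument')
#   == {'instrument__istartswith': 'AIA'}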
|
{
"content_hash": "147bb019e39b340d3b3fc89f985e1a39",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 236,
"avg_line_length": 32.37894736842105,
"alnum_prop": 0.6511703511053316,
"repo_name": "bmampaey/SOLARNET-python-client",
"id": "03a0dcd86ae522d93da6e0520de7fe19e89e4d9e",
"size": "3076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SOLARNET/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15400"
}
],
"symlink_target": ""
}
|
import imp
import platform
import sys
from ctypes import (cdll,
POINTER, pointer,
c_char_p,
c_size_t, c_double, c_int, c_uint64, c_uint32,
create_string_buffer)
#Linux
if platform.system() == 'Linux':
if platform.architecture()[0] == '64bit':
_scrypt = cdll.LoadLibrary(imp.find_module('_scrypt_lin_64')[1])
else:
_scrypt = cdll.LoadLibrary(imp.find_module('_scrypt_lin_x86')[1])
#Windows
if platform.system() == 'Windows':
if platform.architecture()[0] == '64bit':
_scrypt = cdll.LoadLibrary(imp.find_module('_scrypt_win_64')[1])
else:
_scrypt = cdll.LoadLibrary(imp.find_module('_scrypt_win_x86')[1])
#Mac
if platform.system() == 'Darwin':
if platform.architecture()[0] == '64bit':
_scrypt = cdll.LoadLibrary(imp.find_module('_scrypt_mac_64')[1])
else:
_scrypt = cdll.LoadLibrary(imp.find_module('_scrypt_mac_x86')[1])
_scryptenc_buf = _scrypt.exp_scryptenc_buf
_scryptenc_buf.argtypes = [c_char_p, # const uint8_t *inbuf
c_size_t, # size_t inbuflen
c_char_p, # uint8_t *outbuf
c_char_p, # const uint8_t *passwd
c_size_t, # size_t passwdlen
c_size_t, # size_t maxmem
c_double, # double maxmemfrac
c_double, # double maxtime
]
_scryptenc_buf.restype = c_int
_scryptdec_buf = _scrypt.exp_scryptdec_buf
_scryptdec_buf.argtypes = [c_char_p, # const uint8_t *inbuf
c_size_t, # size_t inbuflen
c_char_p, # uint8_t *outbuf
POINTER(c_size_t), # size_t *outlen
c_char_p, # const uint8_t *passwd
c_size_t, # size_t passwdlen
c_size_t, # size_t maxmem
c_double, # double maxmemfrac
c_double, # double maxtime
]
_scryptdec_buf.restype = c_int
_crypto_scrypt = _scrypt.exp_crypto_scrypt
_crypto_scrypt.argtypes = [c_char_p, # const uint8_t *passwd
c_size_t, # size_t passwdlen
c_char_p, # const uint8_t *salt
c_size_t, # size_t saltlen
c_uint64, # uint64_t N
c_uint32, # uint32_t r
c_uint32, # uint32_t p
c_char_p, # uint8_t *buf
c_size_t, # size_t buflen
]
_crypto_scrypt.restype = c_int
ERROR_MESSAGES = ['success',
'getrlimit or sysctl(hw.usermem) failed',
'clock_getres or clock_gettime failed',
'error computing derived key',
'could not read salt from /dev/urandom',
'error in OpenSSL',
'malloc failed',
'data is not a valid scrypt-encrypted block',
'unrecognized scrypt format',
'decrypting file would take too much memory',
'decrypting file would take too long',
'password is incorrect',
'error writing output file',
'error reading input file']
MAXMEM_DEFAULT = 0
MAXMEMFRAC_DEFAULT = 0.5
MAXTIME_DEFAULT = 300.0
MAXTIME_DEFAULT_ENC = 5.0
IS_PY2 = sys.version_info < (3, 0, 0, 'final', 0)
class error(Exception):
def __init__(self, scrypt_code):
if isinstance(scrypt_code, int):
self._scrypt_code = scrypt_code
super(error, self).__init__(ERROR_MESSAGES[scrypt_code])
else:
self._scrypt_code = -1
super(error, self).__init__(scrypt_code)
def _ensure_bytes(data):
if IS_PY2 and isinstance(data, unicode):
raise TypeError('can not encrypt/decrypt unicode objects')
if not IS_PY2 and isinstance(data, str):
return bytes(data, 'utf-8')
return data
def encrypt(input, password,
maxtime=MAXTIME_DEFAULT_ENC,
maxmem=MAXMEM_DEFAULT,
maxmemfrac=MAXMEMFRAC_DEFAULT):
"""
Encrypt a string using a password. The resulting data will have len =
len(input) + 128.
Notes for Python 2:
- `input` and `password` must be str instances
- The result will be a str instance
Notes for Python 3:
- `input` and `password` can be both str and bytes. If they are str
instances, they will be encoded with utf-8
- The result will be a bytes instance
Exceptions raised:
- TypeError on invalid input
- scrypt.error if encryption failed
For more information on the `maxtime`, `maxmem`, and `maxmemfrac`
parameters, see the scrypt documentation.
"""
input = _ensure_bytes(input)
password = _ensure_bytes(password)
outbuf = create_string_buffer(len(input) + 128)
result = _scryptenc_buf(input, len(input),
outbuf,
password, len(password),
maxmem, maxmemfrac, maxtime)
if result:
raise error(result)
return outbuf.raw
def decrypt(input, password,
maxtime=MAXTIME_DEFAULT,
maxmem=MAXMEM_DEFAULT,
maxmemfrac=MAXMEMFRAC_DEFAULT,
encoding='utf-8'):
"""
Decrypt a string using a password.
Notes for Python 2:
- `input` and `password` must be str instances
- The result will be a str instance
- The encoding parameter is ignored
Notes for Python 3:
- `input` and `password` can be both str and bytes. If they are str
      instances, they will be encoded with utf-8. `input` *should*
really be a bytes instance, since that's what `encrypt` returns.
- The result will be a str instance encoded with `encoding`.
If encoding=None, the result will be a bytes instance.
Exceptions raised:
- TypeError on invalid input
- scrypt.error if decryption failed
For more information on the `maxtime`, `maxmem`, and `maxmemfrac`
parameters, see the scrypt documentation.
"""
outbuf = create_string_buffer(len(input))
outbuflen = pointer(c_size_t(0))
input = _ensure_bytes(input)
password = _ensure_bytes(password)
result = _scryptdec_buf(input, len(input),
outbuf, outbuflen,
password, len(password),
maxmem, maxmemfrac, maxtime)
if result:
raise error(result)
out_bytes = outbuf.raw[:outbuflen.contents.value]
if IS_PY2 or encoding is None:
return out_bytes
return str(out_bytes, encoding)
def hash(password, salt, N=1 << 14, r=8, p=1, buflen=64):
"""
Compute scrypt(password, salt, N, r, p, buflen).
The parameters r, p, and buflen must satisfy r * p < 2^30 and
buflen <= (2^32 - 1) * 32. The parameter N must be a power of 2
greater than 1. N, r and p must all be positive.
Notes for Python 2:
- `password` and `salt` must be str instances
- The result will be a str instance
Notes for Python 3:
- `password` and `salt` can be both str and bytes. If they are str
      instances, they will be encoded with utf-8.
- The result will be a bytes instance
Exceptions raised:
- TypeError on invalid input
- scrypt.error if scrypt failed
"""
outbuf = create_string_buffer(buflen)
password = _ensure_bytes(password)
salt = _ensure_bytes(salt)
if r * p >= (1 << 30) or N <= 1 or (N & (N - 1)) != 0 or p < 1 or r < 1:
raise error('hash parameters are wrong (r*p should be < 2**30, and N should be a power of two > 1)')
result = _crypto_scrypt(password, len(password),
salt, len(salt),
N, r, p,
outbuf, buflen)
if result:
raise error('could not compute hash')
return outbuf.raw
__all__ = ['error', 'encrypt', 'decrypt', 'hash']
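# Usage sketch (editorial illustration): an encrypt/decrypt round trip and a
# raw key derivation with the module defaults (N=2**14, r=8, p=1, 64 bytes).
#   blob = encrypt('secret data', 'password')   # len(blob) == len(input) + 128
#   assert decrypt(blob, 'password') == 'secret data'
#   key = hash('password', b'some-salt')
#   assert len(key) == 64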
|
{
"content_hash": "9fdd093027b75b99e1be44a418c47d85",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 108,
"avg_line_length": 34.87966804979253,
"alnum_prop": 0.5372353081132525,
"repo_name": "inuitwallet/bippy_old",
"id": "b14f8357c193fafda9bfb017550a44f9bb06705d",
"size": "8406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "encrypt/scrypt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96190"
}
],
"symlink_target": ""
}
|
"""
homeassistant.components.sensor.vera
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Vera sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.vera/
"""
import logging
from requests.exceptions import RequestException
import homeassistant.util.dt as dt_util
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
ATTR_BATTERY_LEVEL, ATTR_TRIPPED, ATTR_ARMED, ATTR_LAST_TRIP_TIME,
TEMP_CELCIUS, TEMP_FAHRENHEIT)
REQUIREMENTS = ['https://github.com/pavoni/home-assistant-vera-api/archive/'
'efdba4e63d58a30bc9b36d9e01e69858af9130b8.zip'
'#python-vera==0.1.1']
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
def get_devices(hass, config):
""" Find and return Vera Sensors. """
import pyvera as veraApi
base_url = config.get('vera_controller_url')
if not base_url:
_LOGGER.error(
"The required parameter 'vera_controller_url'"
" was not found in config"
)
return False
device_data = config.get('device_data', {})
vera_controller = veraApi.VeraController(base_url)
categories = ['Temperature Sensor', 'Light Sensor', 'Sensor']
devices = []
try:
devices = vera_controller.get_devices(categories)
except RequestException:
# There was a network related error connecting to the vera controller
_LOGGER.exception("Error communicating with Vera API")
return False
vera_sensors = []
for device in devices:
extra_data = device_data.get(device.deviceId, {})
exclude = extra_data.get('exclude', False)
if exclude is not True:
vera_sensors.append(VeraSensor(device, extra_data))
return vera_sensors
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Performs setup for Vera controller devices. """
add_devices(get_devices(hass, config))
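# Illustrative YAML configuration for this platform (all values hypothetical):
#   sensor:
#     platform: vera
#     vera_controller_url: http://192.168.1.161:3480/
#     device_data:
#       12:
#         name: Hallway motion
#         exclude: false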
class VeraSensor(Entity):
""" Represents a Vera Sensor. """
def __init__(self, vera_device, extra_data=None):
self.vera_device = vera_device
self.extra_data = extra_data
if self.extra_data and self.extra_data.get('name'):
self._name = self.extra_data.get('name')
else:
self._name = self.vera_device.name
self.current_value = ''
self._temperature_units = None
def __str__(self):
return "%s %s %s" % (self.name, self.vera_device.deviceId, self.state)
@property
def state(self):
return self.current_value
@property
def name(self):
""" Get the mame of the sensor. """
return self._name
@property
def unit_of_measurement(self):
""" Unit of measurement of this entity, if any. """
return self._temperature_units
@property
def state_attributes(self):
attr = {}
if self.vera_device.has_battery:
attr[ATTR_BATTERY_LEVEL] = self.vera_device.battery_level + '%'
if self.vera_device.is_armable:
armed = self.vera_device.refresh_value('Armed')
attr[ATTR_ARMED] = 'True' if armed == '1' else 'False'
if self.vera_device.is_trippable:
last_tripped = self.vera_device.refresh_value('LastTrip')
if last_tripped is not None:
utc_time = dt_util.utc_from_timestamp(int(last_tripped))
attr[ATTR_LAST_TRIP_TIME] = dt_util.datetime_to_str(
utc_time)
else:
attr[ATTR_LAST_TRIP_TIME] = None
tripped = self.vera_device.refresh_value('Tripped')
attr[ATTR_TRIPPED] = 'True' if tripped == '1' else 'False'
attr['Vera Device Id'] = self.vera_device.vera_device_id
return attr
def update(self):
if self.vera_device.category == "Temperature Sensor":
self.vera_device.refresh_value('CurrentTemperature')
current_temp = self.vera_device.get_value('CurrentTemperature')
vera_temp_units = self.vera_device.veraController.temperature_units
if vera_temp_units == 'F':
self._temperature_units = TEMP_FAHRENHEIT
else:
self._temperature_units = TEMP_CELCIUS
if self.hass:
temp = self.hass.config.temperature(
current_temp,
self._temperature_units)
current_temp, self._temperature_units = temp
self.current_value = current_temp
elif self.vera_device.category == "Light Sensor":
self.vera_device.refresh_value('CurrentLevel')
self.current_value = self.vera_device.get_value('CurrentLevel')
elif self.vera_device.category == "Sensor":
tripped = self.vera_device.refresh_value('Tripped')
self.current_value = 'Tripped' if tripped == '1' else 'Not Tripped'
else:
self.current_value = 'Unknown'
|
{
"content_hash": "c9680c132267aec6571c8416225096c1",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 34.43150684931507,
"alnum_prop": 0.6136860950865327,
"repo_name": "caiuspb/home-assistant",
"id": "7fb72fd91b72c08037e66206f12f8621fb11ce31",
"size": "5027",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/vera.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1328099"
},
{
"name": "Python",
"bytes": "1268986"
},
{
"name": "Shell",
"bytes": "4079"
}
],
"symlink_target": ""
}
|
"""Support for getting data from websites with scraping."""
from __future__ import annotations
import logging
from typing import Any
from bs4 import BeautifulSoup
import httpx
import voluptuous as vol
from homeassistant.components.rest.data import RestData
from homeassistant.components.sensor import (
CONF_STATE_CLASS,
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
STATE_CLASSES_SCHEMA,
SensorEntity,
)
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_DEVICE_CLASS,
CONF_HEADERS,
CONF_NAME,
CONF_PASSWORD,
CONF_RESOURCE,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.template import Template
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
CONF_ATTR = "attribute"
CONF_SELECT = "select"
CONF_INDEX = "index"
DEFAULT_NAME = "Web scrape"
DEFAULT_VERIFY_SSL = True
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCE): cv.string,
vol.Required(CONF_SELECT): cv.string,
vol.Optional(CONF_ATTR): cv.string,
vol.Optional(CONF_INDEX, default=0): cv.positive_int,
vol.Optional(CONF_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_HEADERS): vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_STATE_CLASS): STATE_CLASSES_SCHEMA,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
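# Illustrative configuration.yaml entry accepted by the schema above (the
# resource and selector values are hypothetical):
#   sensor:
#     - platform: scrape
#       resource: https://example.com/releases
#       select: ".release-date"
#       index: 0
#       name: Next release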
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Web scrape sensor."""
name: str = config[CONF_NAME]
resource: str = config[CONF_RESOURCE]
method: str = "GET"
payload: str | None = None
    headers: dict[str, str] | None = config.get(CONF_HEADERS)
verify_ssl: bool = config[CONF_VERIFY_SSL]
select: str | None = config.get(CONF_SELECT)
attr: str | None = config.get(CONF_ATTR)
index: int = config[CONF_INDEX]
unit: str | None = config.get(CONF_UNIT_OF_MEASUREMENT)
device_class: str | None = config.get(CONF_DEVICE_CLASS)
state_class: str | None = config.get(CONF_STATE_CLASS)
username: str | None = config.get(CONF_USERNAME)
password: str | None = config.get(CONF_PASSWORD)
value_template: Template | None = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
auth: httpx.DigestAuth | tuple[str, str] | None = None
if username and password:
if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
auth = httpx.DigestAuth(username, password)
else:
auth = (username, password)
rest = RestData(hass, method, resource, auth, headers, None, payload, verify_ssl)
await rest.async_update()
if rest.data is None:
raise PlatformNotReady
async_add_entities(
[
ScrapeSensor(
rest,
name,
select,
attr,
index,
value_template,
unit,
device_class,
state_class,
)
],
True,
)
class ScrapeSensor(SensorEntity):
"""Representation of a web scrape sensor."""
def __init__(
self,
rest: RestData,
name: str,
select: str | None,
attr: str | None,
index: int,
value_template: Template | None,
unit: str | None,
device_class: str | None,
state_class: str | None,
) -> None:
"""Initialize a web scrape sensor."""
self.rest = rest
self._attr_native_value = None
self._select = select
self._attr = attr
self._index = index
self._value_template = value_template
self._attr_name = name
self._attr_native_unit_of_measurement = unit
self._attr_device_class = device_class
self._attr_state_class = state_class
def _extract_value(self) -> Any:
"""Parse the html extraction in the executor."""
raw_data = BeautifulSoup(self.rest.data, "html.parser")
_LOGGER.debug(raw_data)
try:
if self._attr is not None:
value = raw_data.select(self._select)[self._index][self._attr]
else:
tag = raw_data.select(self._select)[self._index]
if tag.name in ("style", "script", "template"):
value = tag.string
else:
value = tag.text
except IndexError:
_LOGGER.warning("Index '%s' not found in %s", self._attr, self.entity_id)
value = None
except KeyError:
_LOGGER.warning(
"Attribute '%s' not found in %s", self._attr, self.entity_id
)
value = None
_LOGGER.debug(value)
return value
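    # Sketch (editorial illustration): with rest.data = '<p class="v">42</p>',
    # select=".v" and index=0 this returns "42"; with attribute="class" it
    # would return ["v"], since BeautifulSoup exposes tag attributes dict-style.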
async def async_update(self) -> None:
"""Get the latest data from the source and updates the state."""
await self.rest.async_update()
await self._async_update_from_rest_data()
async def async_added_to_hass(self) -> None:
"""Ensure the data from the initial update is reflected in the state."""
await self._async_update_from_rest_data()
async def _async_update_from_rest_data(self) -> None:
"""Update state from the rest data."""
if self.rest.data is None:
_LOGGER.error("Unable to retrieve data for %s", self.name)
return
value = await self.hass.async_add_executor_job(self._extract_value)
if self._value_template is not None:
self._attr_native_value = (
self._value_template.async_render_with_possible_json_value(value, None)
)
else:
self._attr_native_value = value
|
{
"content_hash": "e095fe12a9840b3f40c845ca3bc7ba10",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 87,
"avg_line_length": 33.07920792079208,
"alnum_prop": 0.6194253217599521,
"repo_name": "GenericStudent/home-assistant",
"id": "8f2a672ef06ab59f945051bddf9e30f3d192eb47",
"size": "6682",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/scrape/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
}
|
import re
import shutil
import time
import fnmatch
import sys
import hotspotter.tpl
import numpy as np
import pylab
import os
from os.path import expanduser, join, relpath, realpath, normpath, exists, dirname
from hotspotter.other.AbstractPrintable import AbstractManager
from hotspotter.other.ConcretePrintable import DynStruct
from hotspotter.helpers import dircheck
from hotspotter.other.logger import logmsg, logwarn, logdbg, logerr, logio
from hotspotter.other.crossplat import platexec
#----------------
def checkdir_decorator(method_fn):
def wrapper(iom, *args):
ret = method_fn(iom, *args)
dircheck(ret)
return ret
return wrapper
class IOManager(AbstractManager):
def __init__(iom, hs):
super( IOManager, iom ).__init__( hs )
logdbg('Creating IOManager')
iom.hs = hs
iom._hsroot = None
iom.settings_dpath = normpath(join(expanduser('~'),'.hotspotter'))
        iom.internal_dname = '.hs_internals'
iom.dummy_delete = False #Dont actually delete things
iom.find_hotspotter_root_dir()
def hsroot(iom):
if iom._hsroot is None:
iom.find_hotspotter_root_dir()
return iom._hsroot
def find_hotspotter_root_dir(iom):
# Find the HotSpotter root dir even in installed packages
hsroot = realpath(dirname(__file__))
while True:
root_landmark = join(hsroot, '__HOTSPOTTER_ROOT__')
logdbg('Testing Existence:'+str(root_landmark))
if not os.path.exists(root_landmark):
logdbg('No landmark here')
else:
logdbg('Found the landmark')
break
_newroot = dirname(hsroot)
if _newroot == hsroot:
logerr('Cannot Find HotSpotter Root')
hsroot = _newroot
iom._hsroot = hsroot
def remove_file(iom, fpath):
if iom.dummy_delete:
logdbg('DummyDelete: %s' % fpath)
return False
logdbg('Deleting: %s' % fpath)
try:
os.remove(fpath)
except OSError as e:
logwarn('OSError: %s,\n Could not delete %s' % (str(e), fpath))
return False
return True
def remove_files_with_pattern(iom, dpath, fname_pattern, recursive_bit=True):
logdbg('Removing files in directory %r %s' % (dpath, ['', ', Recursively'][recursive_bit]))
logdbg('Removing files with pattern: %r' % fname_pattern)
num_removed = 0
num_matched = 0
for root, dname_list, fname_list in os.walk(dpath):
for fname in fnmatch.filter(fname_list, fname_pattern):
num_matched += 1
num_removed += iom.remove_file(join(root, fname))
if not recursive_bit:
break
logmsg('Removed %d/%d files' % (num_removed, num_matched))
return True
    def remove_settings_files_with_pattern(iom, fname_pattern):
        'removes files in settings_dpath'
        iom.remove_files_with_pattern(iom.settings_dpath, fname_pattern, recursive_bit=False)
    def remove_computed_files_with_pattern(iom, fname_pattern):
        'removes files in computed_dpath'
        iom.remove_files_with_pattern(iom.get_computed_dpath(), fname_pattern, recursive_bit=True)
    # DEPRECATED
def get_tpl_lib_dir(iom):
return join(dirname(hotspotter.tpl.__file__), 'lib', sys.platform)
#START: Directory and File Managment
#==========
# --- Private Directories'
@checkdir_decorator
def get_internal_dpath(iom):
return join(iom.hs.db_dpath, iom.internal_dname)
@checkdir_decorator
def get_computed_dpath(iom):
return join(iom.get_internal_dpath(),'computed')
@checkdir_decorator
def get_thumb_dpath(iom, thumb_type):
return join(iom.get_computed_dpath(), 'thumbs', thumb_type)
def get_experiment_dpath(iom):
return join(iom.get_computed_dpath(), 'experiments')
# --- Public Directories
@checkdir_decorator
def get_img_dpath(iom):
return join(iom.hs.db_dpath, 'images')
@checkdir_decorator
def get_chip_dpath(iom):
return join(iom.get_computed_dpath(), 'chips')
@checkdir_decorator
def get_chiprep_dpath(iom):
return join(iom.get_computed_dpath(), 'features')
@checkdir_decorator
def get_model_dpath(iom):
return join(iom.get_computed_dpath(), 'models')
@checkdir_decorator
def get_temp_dpath(iom):
return join(iom.get_computed_dpath(), 'temp')
def get_temp_fpath(iom, tmp_fname):
return normpath(join(iom.get_temp_dpath(), tmp_fname))
def get_user_fpath(iom, fname):
return normpath(join(iom.hs.db_dpath, fname))
def write_to_user_fpath(iom, fname, to_write):
user_fpath = iom.get_user_fpath(fname)
iom.logwrite(user_fpath, to_write)
    # DEPRECATED: TODO: REMOVE
def logwrite(iom, fpath, to_write):
iom.write(fpath, to_write)
def get_timestamp(iom):
'Year-Month-Day_Hour-Minute'
import datetime
return datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')
@checkdir_decorator
def ensure_directory(iom, dpath):
'Makes directory if it does not exist. Returns path to directory'
return dpath
@checkdir_decorator
def ensure_computed_directory(iom, dname):
'Input: Path relative to the computed directory'
'Ensures directory exists in the database\'s computed directory.'
'Output: Returns absolute path'
return join(iom.get_computed_dpath(), dname)
def write(iom, fpath, to_write):
logmsg('Writing to: %s' % fpath)
print 'Writing String:\n%s' % to_write
try:
with open(fpath, 'w') as f:
f.write(to_write)
print 'Wrote to %s' % fpath
except Exception as ex:
print 'Error: '+str(ex)
print 'Failed to write to %s ' % fpath
# --- Main Saved Files
def get_image_table_fpath(iom):
return normpath(join(iom.get_internal_dpath(),'image_table.csv'))
def get_chip_table_fpath(iom):
return normpath(join(iom.get_internal_dpath(),'chip_table.csv'))
def get_name_table_fpath(iom):
return normpath(join(iom.get_internal_dpath(),'name_table.csv'))
def get_flat_table_fpath(iom):
return normpath(join(iom.hs.db_dpath,'flat_table.csv'))
# --- Executable Filenames
def get_hesaff_exec(iom):
ext = ''
if sys.platform == 'win32':
ext = '.exe'
tpl_dir = dirname(hotspotter.tpl.__file__)
tpl_hesaff = normpath(join(tpl_dir, 'hesaff', 'hesaff'+ext))
if os.path.exists(tpl_hesaff):
return tpl_hesaff
# Fix for weird mac packaging things
root_dir = tpl_dir
        while root_dir is not None:
tpl_hesaff = join(root_dir, 'hotspotter', 'tpl', 'hesaff', 'hesaff'+ext)
logdbg(tpl_hesaff)
exists_test = os.path.exists(tpl_hesaff)
logdbg('Exists:'+str(exists_test))
if exists_test:
break
tmp = os.path.dirname(root_dir)
if tmp == root_dir:
root_dir = None
else:
root_dir = tmp
return '"' + tpl_hesaff + '"'
def get_inria_exec(iom):
return platexec(join(iom.get_tpl_lib_dir(), 'inria_features'))
# --- Chip Representations
def get_chip_prefix(iom, cid, depends):
'Naming convention for chips: cid, algo_depends, other'
am = iom.hs.am
algo_suffix = am.get_algo_suffix(depends=depends)
return 'CID.'+str(cid)+'_'+algo_suffix
def get_chiprep_fpath(iom, cid):
        chiprep_fname = iom.get_chip_prefix(cid, ['preproc', 'chiprep']) + '_feats.npz'
return normpath(join(iom.get_chiprep_dpath(), chiprep_fname))
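    # e.g. (illustrative) get_chiprep_fpath(12) resolves to something like
    #   <db_dpath>/.hs_internals/computed/features/CID.12_<algo_suffix>_feats.npz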
# Images thumb and full
def get_img_thumb_fpath(iom, gname):
return normpath(join(iom.get_thumb_dpath('images'), gname))
def get_img_fpath(iom, gname):
return normpath(join(iom.get_img_dpath(), gname))
# Chips thumb and full
def get_chip_thumb_fpath(iom, cid):
chip_fname = iom.get_chip_prefix(cid, ['preproc'])+'_chip.jpg'
return normpath(join(iom.get_thumb_dpath('chip'), chip_fname))
def get_chip_fpath(iom, cid):
chip_fname = iom.get_chip_prefix(cid, ['preproc'])+'_chip.png'
return normpath(join(iom.get_chip_dpath(),chip_fname))
#
def get_model_fpath(iom):
am, vm = iom.hs.get_managers('am','vm')
algo_suffix = am.get_algo_suffix(depends=['preproc','chiprep','model'])
samp_suffix = vm.get_samp_suffix()
model_fname = 'model'+samp_suffix+algo_suffix+'.npz'
return normpath(join(iom.get_model_dpath(),model_fname))
def get_flann_index_fpath(iom):
am, vm = iom.hs.get_managers('am','vm')
algo_suffix = am.get_algo_suffix(['preproc','chiprep','model'])
samp_suffix = vm.get_samp_suffix()
flann_index_fname = 'index%s%s.flann' % (algo_suffix, samp_suffix)
return normpath(join(iom.get_model_dpath(), flann_index_fname))
# --- Indexes
def get_prefs_fpath(iom, prefs_name):
dircheck(iom.settings_dpath)
return normpath(join(iom.settings_dpath, prefs_name+'.txt'))
def get_dataset_fpath(iom, db_name=None):
if sys.platform == 'win32':
work_fpath = 'D:/data/work/'
else:
work_fpath = '/data/work/'
if db_name is None:
db_name = 'Naut_Dan'
print "Valid Work Directories Are: "
for dir in os.listdir(work_fpath):
print dir
return work_fpath+db_name
def load_tables(iom):
logmsg('Loading data tables in '+iom.hs.db_dpath)
if not (exists(iom.get_image_table_fpath()) and\
exists(iom.get_name_table_fpath()) and\
                exists(iom.get_chip_table_fpath())):
if exists(iom.get_oxford_gt_dpath()):
logmsg('You have selected an Oxford style groundtruth')
iom.load_oxford_gt()
logmsg('Succesfully Loaded Oxford style groundtruth')
sys.stdout.flush()
return
logwarn('Trying to load a Legacy Database')
iom.load_image_table()
iom.load_name_table()
iom.load_chip_table()
logmsg('Done loading data tables')
sys.stdout.flush()
#START: CSV IO METHODS
#=======================================
# IO Internals
def _load_table(iom, csv_fpath, table_name, alloc_func, csv_func):
'''
        Reads csv files. Must pass in a table name, a memory allocation function,
        and a csv_func: a function which parses the fields read by _load_table.
'''
logio('Loading '+table_name+' Table: '+csv_fpath)
if not exists(csv_fpath):
logio('\"'+csv_fpath+'\" Does Not Exist')
return False
fid = file(csv_fpath, 'r')
csv_headers = None
line = fid.readline()
num_line_prefix = '# NumData'
# Foreach line in the CSV file
while line != '':
line = line.strip()
            # BLANK LINE: Skip it and read the next one
            if line == '':
                line = fid.readline()
                continue
# COMMENT LINE: Check for metadata
elif line[0] == '#':
# CHECK Preallocation
if line.find(num_line_prefix) > -1:
# Parse out the number of lines to allocate
# and use the given allocation function
num_lines = int(line.replace(num_line_prefix,'').replace(' ',''))
alloc_func(num_lines)
# CHECK Data Headers: StripeSpotter
elif line.find('#imgindex') > -1:
logmsg('Loading a Legacy StripeSpotter File')
csv_headers = line[1:].split(',')
# CHECK Data Headers: Legacy HotSpotter
elif line.find('#01)') > -1:
logmsg('Loading a Legacy HotSpotter File')
csv_headers = []
while line != '':
line = line[:-1]
if len(line) < 4 or line[3] != ')': break
parnstr = '#\\d\\d\\) '
head_field = re.sub(parnstr, '', line)
head_field = re.sub(' - .*','', head_field)
csv_headers += [head_field]
line = fid.readline()
# CHECK Data Headers: Hotspotter
elif any([line.find(field) >=0 for field in ['ChipID', 'NameID', 'ImageID']]):
csv_headers = [field.strip() for field in line[1:].split(',')]
# HACK: Change the fields to the ones it actually expects
import hotspotter.other.AbstractPrintable
_lbl2_header = hotspotter.other.AbstractPrintable._lbl2_header
_header2_lbl = {v:k for k,v in _lbl2_header.iteritems()}
csv_headers = [_header2_lbl[field] if field in _header2_lbl.keys() else field for field in csv_headers]
# DATA LINE: Read it
else:
csv_data = [data_field.strip() for data_field in line.split(',')]
csv_func(csv_data, csv_headers)
# Next Line
line = fid.readline()
# Finsh reading table
fid.close()
logio('Loaded '+table_name+' Table')
return True
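    # Illustrative CSV accepted by _load_table (contents hypothetical):
    #   # NumData 2
    #   # ChipID, NameID, ImageID
    #   1, 3, 7
    #   2, 3, 8
    # The '# NumData' comment triggers alloc_func(2), the header comment
    # populates csv_headers, and each data line is split on commas and passed
    # to csv_func.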
def __image_csv_func(iom, csv_data, csv_headers=None):
""" A function which reads a single line of csv image data """
'''
gid = None
gname = None
aif = None
if csv_headers != None: pass'''
if len(csv_data) == 3:
# Format where extension is part of name
gid = int(csv_data[0])
gname = csv_data[1]
aif = csv_data[2]
logdbg('Adding Image')
elif len(csv_data) == 4:
# Format where extension is its own field
gid = int(csv_data[0])
gnameext = csv_data[2]
gname_noext = csv_data[1]
if gname_noext.find('.') == -1 and gnameext.find('.') == -1:
gname = gname_noext + '.' + gnameext
else:
gname = gname_noext + gnameext
aif = csv_data[3]
logdbg('Adding Image (old way)')
iom.hs.gm.add_img(gid, gname, aif)
# MOVED INTO CHIP MANAGER READ_CSV_LINE. TODO: do with others
#def __chip_csv_func(iom, csv_data, csv_headers=None):
def __name_csv_func(iom, csv_data, csv_headers=None):
nid = int(csv_data[0])
name = (csv_data[1])
logdbg('Adding Name: '+str(name))
iom.hs.nm.add_name(nid, name)
def load_image_table(iom):
logmsg('Loading Image Table')
img_table_fpath = iom.get_image_table_fpath()
if not exists(img_table_fpath):
img_table_fpath = iom._check_altfname(alt_names=['image_table.csv'])
image_csv_func = lambda f,d: iom.__image_csv_func(f,d)
return iom._load_table(img_table_fpath, 'Image', iom.hs.gm.img_alloc, image_csv_func)
def load_name_table(iom):
logmsg('Loading Name Table')
name_table_fpath = iom.get_name_table_fpath()
if not exists(name_table_fpath):
name_table_fpath = iom._check_altfname(alt_names=['name_table.csv'])
name_csv_func = lambda f,d: iom.__name_csv_func(f,d)
return iom._load_table(name_table_fpath, 'Name', iom.hs.nm.name_alloc, name_csv_func)
def load_chip_table(iom):
logmsg('Loading Chip Table')
chip_table_fpath = iom.get_chip_table_fpath()
if not exists(chip_table_fpath):
alt_names=['chip_table.csv','instance_table.csv','animal_info_table.csv','SightingData.csv']
chip_table_fpath = iom._check_altfname(alt_names=alt_names)
return iom._load_table(chip_table_fpath, 'Chip', iom.hs.cm.chip_alloc, iom.hs.cm.load_csv_line)
#csv_fpath=chip_table_fpath
#table_name='Chip'
#alloc_func=iom.hs.cm.chip_alloc
#csv_func=iom.hs.cm.load_csv_line
def _check_altfname(iom, alt_names=None):
'Checks for a legacy data table'
alt_dirs = [iom.get_internal_dpath(),
iom.hs.db_dpath,
join(iom.hs.db_dpath,'data'),
join(iom.hs.db_dpath,'data','..','data','..')]
for adir in iter(alt_dirs):
for aname in iter(alt_names):
alt_fpath = normpath(join(adir,aname))
logdbg('Checking: '+alt_fpath)
if exists(alt_fpath):
logwarn('Using Alternative Datatable '+alt_fpath)
timestamp = str(time.time())
backup_fpath = normpath(alt_fpath+'.'+timestamp+'.bak')
logwarn('Creating Backup: '+backup_fpath)
shutil.copyfile(alt_fpath, backup_fpath)
return alt_fpath
if iom.hs.db_dpath.find(iom.internal_dname) >= 0:
# Disallow Hotspotter directories inside HotSpotter directories
new_db_path = iom.hs.db_dpath[0:iom.hs.db_dpath.find(iom.internal_dname)]
logwarn('Changing this data dir '+iom.hs.db_dpath)
logwarn('To that data dir '+new_db_path)
iom.hs.db_dpath = new_db_path
return 'CSV_Name_not_found'
def save_tables(iom):
hs = iom.hs
gm = hs.gm
cm = hs.cm
nm = hs.nm
logmsg('Saving the Database. Give it a sec.')
chip_table_fpath = iom.get_chip_table_fpath()
name_table_fpath = iom.get_name_table_fpath()
img_table_fpath = iom.get_image_table_fpath()
flat_table_fpath = iom.get_flat_table_fpath()
logmsg('Saving Image Table')
img_file = open(img_table_fpath, 'w')
img_file.write(gm.gx2_info(lbls = ['gid','gname','aif']))
img_file.close()
logmsg('Saving Name Table')
name_file = open(name_table_fpath, 'w')
name_file.write(nm.nx2_info(lbls = ['nid', 'name']))
name_file.close()
logmsg('Saving Chip Table')
chip_file = open(chip_table_fpath, 'w')
chip_file.write(cm.cx2_info(lbls='all'))
chip_file.close()
logmsg('Saving Flat Table')
flat_file = open(flat_table_fpath, 'w')
flat_lbls = ['cid','gname','name', 'roi', 'theta'] + cm.user_props.keys()
flat_file.write(cm.cx2_info(lbls=flat_lbls))
flat_file.close()
logmsg('The Database was Saved')
def get_oxford_gt_dpath(iom):
return join(iom.hs.db_dpath, 'oxford_style_gt')
def load_oxford_gt(iom):
'loads oxford style groundtruth'
gm,cm,nm = iom.hs.get_managers('gm','cm','nm')
# Check for corrupted files (Looking at your Paris Buildings Dataset)
oxford_gt_dpath = iom.get_oxford_gt_dpath()
corrupted_gname_list = []
corrupted_file_fname = 'corrupted_files.txt'
corrupted_file_fpath = join(oxford_gt_dpath,corrupted_file_fname)
if exists(corrupted_file_fpath):
with open(corrupted_file_fpath) as f:
corrupted_gname_list = f.read().splitlines()
logmsg('Loading Oxford Style Images')
#Recursively get relative path of all files in img_dpath
img_dpath = iom.get_img_dpath() #with a sexy list comprehension
gname_list = [join(relpath(root, img_dpath), fname).replace('\\','/').replace('./','')\
for (root,dlist,flist) in os.walk(img_dpath)\
for fname in flist]
#Roughly Prealloc
gm.img_alloc( len(gname_list))
cm.chip_alloc(len(gname_list))
#Add all images in images directory (allow nested directories (...paris))
for gname in gname_list:
if gname in corrupted_gname_list: continue
gm.add_img(-1, gname, True)
logmsg('Loading Oxford Style Names and Chips')
# Add names and chips from ground truth
gt_fname_list = os.listdir(oxford_gt_dpath)
iom.hs.nm.name_alloc(len(gt_fname_list)/4)
for gt_fname in gt_fname_list:
if gt_fname == corrupted_file_fname: continue
#Get gt_name, quality, and num from fname
gt_name = gt_fname.replace('.txt','')
_pos1 = gt_name.rfind('_')
quality = gt_name[_pos1+1:]
gt_name = gt_name[:_pos1]
_pos2 = gt_name.rfind('_')
num = gt_name[_pos2+1:]
gt_name = gt_name[:_pos2]
# Add Name (-2 suppresses warnings)
nid = nm.add_name(-2, gt_name)
nx = nm.nid2_nx[nid]
gt_fpath = join(oxford_gt_dpath, gt_fname)
with open(gt_fpath,'r') as f:
line_list = f.read().splitlines()
for line in line_list:
if line == '': continue
fields = line.split(' ')
gname = fields[0].replace('oxc1_','')+'.jpg'
if gname.find('paris_') >= 0:
# PARIS HACK >:(
#Because they just cant keep their paths consistent
paris_hack = gname[6:gname.rfind('_')]
gname = paris_hack+'/'+gname
if gname in corrupted_gname_list: continue
gid = gm.gname2_gid[gname]
gx = gm.gid2_gx[gid]
if len(fields) > 1: #quality == query
roi = map(lambda x: int(round(float(x))),fields[1:])
else: # quality in ['good','ok','junk']
(w,h) = gm.gx2_img_size(gx)
roi = [0,0,w,h]
cm.add_chip(-1, nx, gx, roi)
# HACKISH Duplicate detection. Eventually this should actually be in the codebase
logmsg('Detecting and Removing Duplicate Ground Truth')
dup_cx_list = []
for nx in nm.get_valid_nxs():
        cx_list = np.array(nm.nx2_cx_list[nx])
gx_list = cm.cx2_gx[cx_list]
(unique_gx, unique_x) = np.unique(gx_list, return_index=True)
name = nm.nx2_name[nx]
for gx in gx_list[unique_x]:
bit = False
gname = gm.gx2_gname[gx]
x_list = pylab.find(gx_list == gx)
cx_list2 = cx_list[x_list]
roi_list2 = cm.cx2_roi[cx_list2]
roi_hash = lambda roi: roi[0]+roi[1]*10000+roi[2]*100000000+roi[3]*1000000000000
(_, unique_x2) = np.unique(map(roi_hash, roi_list2), return_index=True)
non_unique_x2 = np.setdiff1d(np.arange(0,len(cx_list2)), unique_x2)
for nux2 in non_unique_x2:
cx_ = cx_list2[nux2]
dup_cx_list += [cx_]
roi_ = roi_list2[nux2]
logmsg('Duplicate: cx=%4d, gx=%4d, nx=%4d roi=%r' % (cx_, gx, nx, roi_) )
logmsg(' Name:%s, Image:%s' % (name, gname) )
bit = True
if bit:
logmsg('-----------------')
for cx in dup_cx_list:
cm.remove_chip(cx)
|
{
"content_hash": "10f11f5bab31ae0598b13a7e5820ad4b",
"timestamp": "",
"source": "github",
"line_count": 557,
"max_line_length": 123,
"avg_line_length": 42.17414721723519,
"alnum_prop": 0.553488570090673,
"repo_name": "Erotemic/hotspotter",
"id": "2a84b5b415e638d29dd26559ce44d0e8caef88d4",
"size": "23850",
"binary": false,
"copies": "2",
"ref": "refs/heads/jon",
"path": "_graveyard/oldhotspotter/IOManager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "175"
},
{
"name": "Python",
"bytes": "2017583"
},
{
"name": "Shell",
"bytes": "20855"
}
],
"symlink_target": ""
}
|
from webmarks.bookmarks import models
from webmarks.bookmarks import serializers
from webmarks.drf_utils.cache import CustomListKeyConstructor
from webmarks.storage.crawler import Crawler
from webmarks.storage.storage import FileStore
from webmarks.bookmarks.filters import BookmarkFilter
from webmarks.bookmarks.filters import FolderFilter
from webmarks.bookmarks.filters import TagFilter
from webmarks.drf_utils.viewsets import AggregateModelViewSet
# from webmarks.storage.models import Archive
from rest_framework import filters
from rest_framework import viewsets
from rest_framework.decorators import detail_route
from rest_framework.decorators import list_route
from rest_framework.response import Response
from rest_framework_extensions.cache.decorators import cache_response
from rest_framework import permissions
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
import base64
import logging
import uuid
stdlogger = logging.getLogger(__name__)
class FolderViewSet(AggregateModelViewSet):
"""
retrieve:
Return a Folder instance.
list:
    Return all Folder instances, ordered by most recently created.
create:
Create a new Folder.
delete:
Remove an existing Folder.
partial_update:
Update one or more fields on an existing Folder.
update:
Update a Folder.
"""
queryset = models.Folder.objects.all()
serializer_class = serializers.FolderSerializer
filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter)
filter_class = FolderFilter
permission_classes = (permissions.IsAuthenticated,)
def get_queryset(self, *args, **kwargs):
return models.Folder.objects.filter(user_cre_id=self.request.user.id)
@detail_route(methods=['get'])
def bookmarks(self, request, pk):
"""
Return all bookmarks of folder.
"""
self.queryset = models.Bookmark.objects.filter(folders__in=[pk])
page = self.paginate_queryset(self.queryset)
if page is not None:
            serializer = serializers.BookmarkSerializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = serializers.BookmarkSerializer(self.queryset, many=True)
return Response(serializer.data)
class BookmarkViewSet(AggregateModelViewSet):
"""
retrieve:
Return a Bookmark instance.
list:
    Return all Bookmark instances, ordered by most recently created.
create:
Create a new Bookmark.
delete:
Remove an existing Bookmark.
partial_update:
Update one or more fields on an existing Bookmark.
update:
Update a Bookmark.
"""
queryset = models.Bookmark.objects.prefetch_related('tags')
serializer_class = serializers.BookmarkSerializer
filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter)
filter_class = BookmarkFilter
permission_classes = (permissions.IsAuthenticated,)
# .prefetch_related(
# 'tags').prefetch_related('archive').values_list('title','rate')
# queryset = models.Note.objects.prefetch_related('tags').values
# ('archive__note', 'id', 'title', 'url', 'description', 'updated_dt', 'created_dt',
# 'user_cre', 'user_upd', 'archived_dt', 'rate', 'type', 'status', 'public', 'schedule_dt')
@cache_response(key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(BookmarkViewSet, self).list(*args, **kwargs)
def get_queryset(self, *args, **kwargs):
# print('user_id=' + str(self.request.user.id))
return models.Bookmark.objects.prefetch_related('tags').filter(
user_cre_id=self.request.user.id)
def get_serializer_class(self):
# if self.action == 'list':
# return serializers.NoteListSerializer
return serializers.BookmarkSerializer
@detail_route(methods=['get'])
def title(self, request, pk=None):
"""
Return the title of url's Page Bookmark.
"""
crawler = Crawler()
stdlogger.debug(pk)
url = base64.b64decode(pk)
stdlogger.info(url.decode())
crawler.crawl_title(url.decode())
serializer = serializers.CrawlSerializer(crawler)
return Response(serializer.data)
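    # Usage sketch (editorial illustration; the route prefix is hypothetical):
    # the pk is a base64-encoded URL, e.g.
    #   GET /bookmarks/aHR0cHM6Ly9leGFtcGxlLmNvbQ==/title/
    # decodes to https://example.com before the crawler fetches the page title.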
@detail_route(methods=['get'])
def archive(self, request, pk):
"""
Archive the Bookmark Page.
"""
bookmark = self.get_object()
stdlogger.debug(pk)
crawler = Crawler()
# url = base64.b64decode(pk)
# stdlogger.info(url.decode())
crawler.crawl(bookmark.url)
user = request.user
FileStore().store(user.username, str(bookmark.uuid), crawler.html.encode())
archive = models.Archive.create(
bookmark, crawler.content_type, crawler.html.encode())
archive.save()
bookmark.archive_id = archive.id
bookmark.save()
serializer = serializers.ArchiveSerializer(archive)
return Response(serializer.data)
class TagViewSet(AggregateModelViewSet):
"""
retrieve:
Return a Tag instance.
list:
    Return all Tag instances, ordered by most recently created.
create:
Create a new Tag.
delete:
Remove an existing Tag.
partial_update:
Update one or more fields on an existing Tag.
update:
Update a Tag.
"""
queryset = models.Tag.objects.all()
serializer_class = serializers.TagSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_class = TagFilter
permission_classes = (permissions.IsAuthenticated,)
@list_route(methods=['get'])
def count(self, request):
queryset = models.Tag.objects.with_counts(user_cre_id=request.user.id)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = serializers.TagCountSerializer(page, many=True)
return self.get_paginated_response(serializer.data)
        serializer = serializers.TagCountSerializer(queryset, many=True)
return Response(serializer.data)
class CrawlerViewSet(viewsets.ViewSet):
permission_classes = (permissions.IsAuthenticated,)
def retrieve(self, request, pk):
crawler = Crawler()
stdlogger.debug(pk)
url = base64.b64decode(pk)
stdlogger.info(url.decode())
data = crawler.crawl(url.decode())
serializer = serializers.CrawlSerializer(data)
return Response(serializer.data)
@detail_route(methods=['get'])
def title(self, request, pk=None):
crawler = Crawler()
stdlogger.debug(pk)
url = base64.b64decode(pk)
stdlogger.info(url.decode())
crawler.crawl_title(url.decode())
serializer = serializers.CrawlSerializer(crawler)
return Response(serializer.data)
class ArchiveViewSet(AggregateModelViewSet):
"""
retrieve:
    Return an Archive instance.
    list:
    Return all Archives, ordered by most recently created.
    create:
    Create a new Archive.
    delete:
    Remove an existing Archive.
    partial_update:
    Update one or more fields on an existing Archive.
    update:
    Update an Archive.
"""
filter_backends = (filters.DjangoFilterBackend,)
queryset = models.Archive.objects.all()
serializer_class = serializers.ArchiveSerializer
# renderer_classes = (renderers.JSONRenderer, renderers.BrowsableAPIRenderer,
# renderers.StaticHTMLRenderer,)
permission_classes = (permissions.AllowAny,)
def retrieve(self, request, pk, format=None):
archive = get_object_or_404(models.Archive, pk=pk)
if request.accepted_renderer.format == 'html':
return Response(archive.data)
serializer = self.serializer_class(archive)
response = Response(serializer.data)
response['Cache-Control'] = 'no-cache'
return response
@detail_route(methods=['get'])
def download(self, request, pk):
"""
Download Archive File.
"""
archive = get_object_or_404(models.Archive, pk=pk)
# tmp = tempfile.NamedTemporaryFile(suffix=".note")
filename = archive.name.split('/')[-1]
resp = HttpResponse(
archive.data, content_type='application/text;charset=UTF-8')
resp['Content-Disposition'] = "attachment; filename=%s" % filename
return resp
|
{
"content_hash": "3cb428b3d40484e2e74cdc84af515eba",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 95,
"avg_line_length": 31.047272727272727,
"alnum_prop": 0.6632700866713516,
"repo_name": "EricMuller/mynotes-backend",
"id": "ea1bf78bae7ece9c7aee8b85a0d7918ff582b167",
"size": "8613",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/webmarks/bookmarks/viewsets.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "11880"
},
{
"name": "Batchfile",
"bytes": "3516"
},
{
"name": "C",
"bytes": "37168"
},
{
"name": "CSS",
"bytes": "6613"
},
{
"name": "DIGITAL Command Language",
"bytes": "1032"
},
{
"name": "GAP",
"bytes": "36244"
},
{
"name": "HTML",
"bytes": "233863"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Objective-C",
"bytes": "2584"
},
{
"name": "Python",
"bytes": "22991176"
},
{
"name": "Roff",
"bytes": "160293"
},
{
"name": "Shell",
"bytes": "13496"
},
{
"name": "Smarty",
"bytes": "1366"
}
],
"symlink_target": ""
}
|
import time
from random import randint
from itertools import chain
import input
from helpers import *
from modules import *
"""
Conway's Game of Life
A board is represented like this::
{(x, y): state, ...}
...where `state` is an int from 0..2 representing a color
"""
def random_color():
color = []
choices = [0, 0, 48, 127, 198, 248]
while len(color) < 3:
color = [choices[randint(0, len(choices) - 1)] for i in range(3)]
return Color(color[0], color[1], color[2])
class GameOfLive(Module):
def __init__(self, screen):
super(GameOfLive, self).__init__(screen)
input.on_press.append(self.on_key_down)
self.LOAD_FACTOR = 7 # smaller means more crowded
self.NUDGING = self.LOAD_FACTOR * 1 # smaller means bigger nudge
self.width = self.screen.width
self.height = self.screen.height
self.colors = self.new_colors()
self.board = self.random_board(self.LOAD_FACTOR)
self.detector = BoredomDetector()
self.interval = 0.15
self.next_step = time.clock() + self.interval
def random_board(self, factor):
return dict(
((randint(0, self.width), randint(0, self.height)), 0)
for _ in xrange(int(self.width * self.height / factor))
)
@staticmethod
def new_colors():
c1 = random_color()
c2 = darken_color(c1, 1.3)
c3 = darken_color(c2, 2)
return [c1, c2, c3]
def next_board(self, wrap):
"""Given a board, return the board one interation later.
Adapted from Jack Diedrich's implementation from his 2012 PyCon talk "Stop
Writing Classes"
:arg wrap: A callable which takes a point and transforms it, for example
to wrap to the other edge of the screen. Return None to remove a point.
"""
new_board = {}
# consider only points that are alive and their neighbors:
points_to_recalc = set(self.board.iterkeys()) | set(chain(*map(self.neighbors, self.board)))
for point in points_to_recalc:
count = sum((neigh in self.board) for neigh in (wrap(n) for n in self.neighbors(point) if n))
if count == 3:
state = 0 if point in self.board else 1
elif count == 2 and point in self.board:
state = 2
else:
state = None
if state is not None:
wrapped = wrap(point)
if wrapped:
new_board[wrapped] = state
return new_board
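# Worked example of the rule above, using the illustrative blinker
# board = {(3, 5): 0, (4, 5): 0, (5, 5): 0}: the center cell (4, 5) has
# two live neighbors and survives (state 2), each end cell has only one
# and dies, and the empty cells (4, 4) and (4, 6) each see three live
# neighbors and are born -- the blinker flips from horizontal to
# vertical, as expected for Conway's rules.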
def die(self, (x, y)):
"""Pretend any out-of-bounds cell is dead."""
if 0 <= x < self.width and 0 <= y < self.height:
return x, y
@staticmethod
def neighbors((x, y)):
"""Return the (possibly out of bounds) neighbors of a point."""
yield x + 1, y
yield x - 1, y
yield x, y + 1
yield x, y - 1
yield x + 1, y + 1
yield x + 1, y - 1
yield x - 1, y + 1
yield x - 1, y - 1
def draw(self):
self.screen.clear()
for (x, y), state in self.board.iteritems():
self.screen.pixel[x - 1][y - 1] = self.colors[state]
self.screen.update()
def tick(self):
if time.clock() > self.next_step:
self.next_step += self.interval
self.board = self.next_board(self.die)
# If the pattern is stuck in a loop, give it a nudge:
if self.detector.is_bored_of(self.board):
self.colors = self.new_colors()
self.board.update(self.random_board(self.NUDGING))
self.draw()
time.sleep(.001)
def on_key_down(self, key):
if key == input.Key.UP:
self.board.update(self.random_board(self.NUDGING))
if key == input.Key.DOWN:
self.board.update(self.random_board(self.NUDGING))
if key == input.Key.LEFT:
self.board = self.next_board(self.die)
if key == input.Key.RIGHT:
self.board = self.next_board(self.die)
class BoredomDetector(object):
"""Detector of when the simulation gets stuck in a loop"""
# Get bored after (at minimum) this many repetitions of a pattern:
REPITITIONS = 14
# We can detect cyclical patterns of up to this many iterations:
PATTERN_LEN = 4
def __init__(self):
# Make is_bored_of() init the state the first time through:
self.iteration = self.REPITITIONS * self.PATTERN_LEN + 1
self.num = self.times = 0
def is_bored_of(self, board):
""" Return whether the simulation is probably in a loop.
This is a stochastic guess. Basically, it detects whether the
simulation has had the same number of cells a lot lately. May have
false positives (like if you just have a screen full of gliders) or
take a while to catch on sometimes. I've even seen it totally miss the
boat once. But it's simple and fast.
"""
self.iteration += 1
if len(board) == self.num:
self.times += 1
is_bored = self.times > self.REPITITIONS
if self.iteration > self.REPITITIONS * self.PATTERN_LEN or is_bored:
# A little randomness in case things divide evenly into each other:
self.iteration = randint(-2, 0)
self.num = len(board)
self.times = 0
return is_bored
|
{
"content_hash": "e3f0cfbfb67efdaf99eff8a22259e4e5",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 105,
"avg_line_length": 31.905882352941177,
"alnum_prop": 0.5800147492625368,
"repo_name": "derblub/pixelpi",
"id": "bcea63b8ad8822fe8b54203f5361c66e945f75b7",
"size": "5424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/gameoflife.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1075"
},
{
"name": "CSS",
"bytes": "2266"
},
{
"name": "HTML",
"bytes": "2514"
},
{
"name": "JavaScript",
"bytes": "4438"
},
{
"name": "Python",
"bytes": "132434"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Bounce",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("email", models.EmailField(max_length=255)),
("bounces", models.JSONField(default=list)),
(
"last_update",
models.DateTimeField(default=django.utils.timezone.now),
),
(
"user",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "Bounce",
"verbose_name_plural": "Bounces",
},
),
]
|
{
"content_hash": "14a96858fcf562d3986191194fc0df47",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 76,
"avg_line_length": 29,
"alnum_prop": 0.43340094658553074,
"repo_name": "fin/froide",
"id": "372e76b08c1de11660dc963f107a0338112f4c4a",
"size": "1553",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "froide/bounce/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "302838"
},
{
"name": "JavaScript",
"bytes": "47357"
},
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "1706123"
},
{
"name": "SCSS",
"bytes": "39397"
},
{
"name": "TypeScript",
"bytes": "57910"
},
{
"name": "Vue",
"bytes": "218866"
}
],
"symlink_target": ""
}
|
'''
This will eventually get replaced with Require.js
but until then it's a quick hack to save time.
'''
import os
EXIT_SUCCESS = 0
def precompile(compiler='handlebars'):
'''
Quick hack to precompile all templates
'''
success = 0
print("[*] Precompiling templates, please wait ...")
# Materialize the filter so len() and repeated iteration work on Python 3.
files = list(filter(
lambda f: f.endswith('.handlebars'), os.listdir('./handlebars')))
for hb in files:
output = ''.join(hb.split('.')[:-1]) + '.js'
exit_status = os.system('%s ./handlebars/%s -f ./js/%s' % (
compiler, hb, output
))
if exit_status == EXIT_SUCCESS:
print("[$] Successfully compiled %s" % hb)
success += 1
else:
print("[!] Failed to compile %s" % hb)
print("[*] Successfully compiled %d of %d templates" % (
success, len(files)
))
if success == len(files):
minify()
def minify(minifier='minify', output='templates.min.js'):
print("[*] Minifying JavaScript files ...")
files = list(filter(lambda f: f.endswith('.js'), os.listdir('./js')))
if output in files:
files.remove(output)
os.system("%s ./js/%s > %s" % (
minifier, " ./js/".join(files), './js/' + output,
))
os.system("rm ./js/%s" % " ./js/".join(files))
if __name__ == '__main__':
precompile()
|
{
"content_hash": "3c7ed53b30aeb3375b627c24a6678d9d",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 72,
"avg_line_length": 27.9375,
"alnum_prop": 0.546607009694258,
"repo_name": "BishopFox/iSpy",
"id": "e51c86ca783a9b2ea74623d14f7d9da77b17af26",
"size": "1363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "layout/var/www/iSpy/js/ispy/templates/precompile.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "10030"
},
{
"name": "C++",
"bytes": "11326"
},
{
"name": "CSS",
"bytes": "82618"
},
{
"name": "HTML",
"bytes": "15402"
},
{
"name": "JavaScript",
"bytes": "102982"
},
{
"name": "Logos",
"bytes": "239510"
},
{
"name": "Makefile",
"bytes": "1008"
},
{
"name": "Objective-C",
"bytes": "14145"
},
{
"name": "Python",
"bytes": "1363"
}
],
"symlink_target": ""
}
|
from functools import partial
from flowlight.tasks.task import _Task
class Node:
"""Abstract representation of `Machine` and `Group`.
:param name: the unique name of a `Node`.
Usage::
>>> machine = Machine('host1')
>>> isinstance(machine, Node)
True
>>> group = Group([Machine('host1'), Machine('host2')])
>>> isinstance(group, Node)
True
"""
def __init__(self, name):
self.name = name
def run(self, cmd):
raise NotImplementedError
def enable_connection(self):
raise NotImplementedError
def run_task(node, task, *args, **kwargs):
if not isinstance(task, _Task):
raise Exception('Need a task')
return task.__call__(node, *args, **kwargs)
def run_tasks(self, tasks, *args, **kwargs):
if not isinstance(tasks, list):
tasks = [tasks]
# Pass each task as the first argument so extra positional args follow it.
return [self.run_task(task, *args, **kwargs) for task in tasks]
|
{
"content_hash": "a2cd9e28d43151001234a34d7388700c",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 72,
"avg_line_length": 27.62857142857143,
"alnum_prop": 0.5832471561530507,
"repo_name": "tonnie17/flowlight",
"id": "56284f94ca70ea6b81125158fae45f5c732e7408",
"size": "967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flowlight/model/node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36581"
}
],
"symlink_target": ""
}
|
import mock
from lab_assistant.tests import cases
import lab_assistant
class TestExperimentPublish(cases.TestCase):
def test_publish_calls_tasks(self):
task = mock.MagicMock()
experiment = type('Experiment', (lab_assistant.experiments.Experiment,), {
'publish_tasks': [task]
})()
result = object()
experiment.publish(result)
task.assert_called_once_with(result)
|
{
"content_hash": "192097a2c4df9b8e13330efa56c24b7a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 82,
"avg_line_length": 28.6,
"alnum_prop": 0.6573426573426573,
"repo_name": "joealcorn/lab_assistant",
"id": "0c2d9a9e7ff3a4a70e0e80cc2a43f4b9dd499bea",
"size": "429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab_assistant/tests/test_experiments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11799"
}
],
"symlink_target": ""
}
|
from flask import render_template
def html_redirect(url):
""" Generates redirect pages using the meta refresh HTML tag.
Proper redirects use 301 responses - this isn't possible with static sites. To circumvent, send HTML pages
with meta refresh tags.
:param str url: The URL to redirect to.
:return: rendered page
"""
return render_template('redirect.html', url=url)
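# The 'redirect.html' template itself is not part of this file; a minimal
# sketch of what it could look like (the markup below is an assumption):
#
#   <!DOCTYPE html>
#   <html>
#   <head><meta http-equiv="refresh" content="0; url={{ url }}"></head>
#   <body><a href="{{ url }}">Click here if you are not redirected.</a></body>
#   </html>
#
# The "0" in content="0; url=..." makes the refresh immediate; a larger
# value would delay the redirect by that many seconds.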
|
{
"content_hash": "961b7e9fb60d7f070a13c4a613d8fe00",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 110,
"avg_line_length": 30.692307692307693,
"alnum_prop": 0.7142857142857143,
"repo_name": "wcpr740/wcpr.org",
"id": "ee64b8ec9e5e0c11bfad94a86507271c7c7ebf5a",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_site/libraries/html_redirect_gen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "23410"
},
{
"name": "JavaScript",
"bytes": "23527"
},
{
"name": "Less",
"bytes": "40646"
},
{
"name": "Python",
"bytes": "9191"
}
],
"symlink_target": ""
}
|
"""Keras implementation of efficientdet."""
import functools
from absl import logging
import numpy as np
import tensorflow as tf
import dataloader
import hparams_config
import utils
from backbone import backbone_factory
from backbone import efficientnet_builder
from tf2 import fpn_configs
from tf2 import postprocess
from tf2 import util_keras
def add_n(nodes):
"""A customized add_n to add up a list of tensors."""
# tf.add_n is not supported by EdgeTPU, while tf.reduce_sum is not supported
# by GPU and runs slow on EdgeTPU because of the 5-dimension op.
with tf.name_scope('add_n'):
new_node = nodes[0]
for n in nodes[1:]:
new_node = new_node + n
return new_node
class FNode(tf.keras.layers.Layer):
"""A Keras Layer implementing BiFPN Node."""
def __init__(self,
feat_level,
inputs_offsets,
fpn_num_filters,
apply_bn_for_resampling,
is_training_bn,
conv_after_downsample,
conv_bn_act_pattern,
separable_conv,
act_type,
strategy,
weight_method,
data_format,
name='fnode'):
super().__init__(name=name)
self.feat_level = feat_level
self.inputs_offsets = inputs_offsets
self.fpn_num_filters = fpn_num_filters
self.apply_bn_for_resampling = apply_bn_for_resampling
self.separable_conv = separable_conv
self.act_type = act_type
self.is_training_bn = is_training_bn
self.conv_after_downsample = conv_after_downsample
self.strategy = strategy
self.data_format = data_format
self.weight_method = weight_method
self.conv_bn_act_pattern = conv_bn_act_pattern
self.resample_layers = []
self.vars = []
def fuse_features(self, nodes):
"""Fuse features from different resolutions and return a weighted sum.
Args:
nodes: a list of tensorflow features at different levels
Returns:
A tensor denoting the fused feature.
"""
dtype = nodes[0].dtype
if self.weight_method == 'attn':
edge_weights = [tf.cast(var, dtype=dtype) for var in self.vars]
normalized_weights = tf.nn.softmax(tf.stack(edge_weights))
nodes = tf.stack(nodes, axis=-1)
new_node = tf.reduce_sum(nodes * normalized_weights, -1)
elif self.weight_method == 'fastattn':
edge_weights = [
tf.nn.relu(tf.cast(var, dtype=dtype)) for var in self.vars
]
weights_sum = add_n(edge_weights)
nodes = [
nodes[i] * edge_weights[i] / (weights_sum + 0.0001)
for i in range(len(nodes))
]
new_node = add_n(nodes)
elif self.weight_method == 'channel_attn':
edge_weights = [tf.cast(var, dtype=dtype) for var in self.vars]
normalized_weights = tf.nn.softmax(tf.stack(edge_weights, -1), axis=-1)
nodes = tf.stack(nodes, axis=-1)
new_node = tf.reduce_sum(nodes * normalized_weights, -1)
elif self.weight_method == 'channel_fastattn':
edge_weights = [
tf.nn.relu(tf.cast(var, dtype=dtype)) for var in self.vars
]
weights_sum = add_n(edge_weights)
nodes = [
nodes[i] * edge_weights[i] / (weights_sum + 0.0001)
for i in range(len(nodes))
]
new_node = add_n(nodes)
elif self.weight_method == 'sum':
new_node = add_n(nodes)
else:
raise ValueError('unknown weight_method %s' % self.weight_method)
return new_node
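# Sketch of the 'fastattn' weighting implemented above: with learned
# scalars w_i and eps = 1e-4, the fused node is
#
#   out = sum_i(relu(w_i) * x_i) / (sum_j(relu(w_j)) + eps)
#
# i.e. a normalized weighted sum that avoids the softmax used by 'attn';
# 'channel_fastattn' applies the same formula with per-channel weights.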
def _add_wsm(self, initializer, shape=None):
for i, _ in enumerate(self.inputs_offsets):
name = 'WSM' + ('' if i == 0 else '_' + str(i))
self.vars.append(
self.add_weight(initializer=initializer, name=name, shape=shape))
def build(self, feats_shape):
for i, input_offset in enumerate(self.inputs_offsets):
name = 'resample_{}_{}_{}'.format(i, input_offset, len(feats_shape))
self.resample_layers.append(
ResampleFeatureMap(
self.feat_level,
self.fpn_num_filters,
self.apply_bn_for_resampling,
self.is_training_bn,
self.conv_after_downsample,
strategy=self.strategy,
data_format=self.data_format,
name=name))
if self.weight_method == 'attn':
self._add_wsm('ones')
elif self.weight_method == 'fastattn':
self._add_wsm('ones')
elif self.weight_method == 'channel_attn':
num_filters = int(self.fpn_num_filters)
self._add_wsm(tf.ones, num_filters)
elif self.weight_method == 'channel_fastattn':
num_filters = int(self.fpn_num_filters)
self._add_wsm(tf.ones, num_filters)
self.op_after_combine = OpAfterCombine(
self.is_training_bn,
self.conv_bn_act_pattern,
self.separable_conv,
self.fpn_num_filters,
self.act_type,
self.data_format,
self.strategy,
name='op_after_combine{}'.format(len(feats_shape)))
self.built = True
super().build(feats_shape)
def call(self, feats, training):
nodes = []
for i, input_offset in enumerate(self.inputs_offsets):
input_node = feats[input_offset]
input_node = self.resample_layers[i](input_node, training, feats)
nodes.append(input_node)
new_node = self.fuse_features(nodes)
new_node = self.op_after_combine(new_node)
return feats + [new_node]
class OpAfterCombine(tf.keras.layers.Layer):
"""Operation after combining input features during feature fusiong."""
def __init__(self,
is_training_bn,
conv_bn_act_pattern,
separable_conv,
fpn_num_filters,
act_type,
data_format,
strategy,
name='op_after_combine'):
super().__init__(name=name)
self.conv_bn_act_pattern = conv_bn_act_pattern
self.separable_conv = separable_conv
self.fpn_num_filters = fpn_num_filters
self.act_type = act_type
self.data_format = data_format
self.strategy = strategy
self.is_training_bn = is_training_bn
if self.separable_conv:
conv2d_layer = functools.partial(
tf.keras.layers.SeparableConv2D, depth_multiplier=1)
else:
conv2d_layer = tf.keras.layers.Conv2D
self.conv_op = conv2d_layer(
filters=fpn_num_filters,
kernel_size=(3, 3),
padding='same',
use_bias=not self.conv_bn_act_pattern,
data_format=self.data_format,
name='conv')
self.bn = util_keras.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
strategy=self.strategy,
name='bn')
def call(self, new_node, training):
if not self.conv_bn_act_pattern:
new_node = utils.activation_fn(new_node, self.act_type)
new_node = self.conv_op(new_node)
new_node = self.bn(new_node, training=training)
if self.conv_bn_act_pattern:
new_node = utils.activation_fn(new_node, self.act_type)
return new_node
class ResampleFeatureMap(tf.keras.layers.Layer):
"""Resample feature map for downsampling or upsampling."""
def __init__(self,
feat_level,
target_num_channels,
apply_bn=False,
is_training_bn=None,
conv_after_downsample=False,
strategy=None,
data_format=None,
pooling_type=None,
upsampling_type=None,
name='resample_p0'):
super().__init__(name=name)
self.apply_bn = apply_bn
self.is_training_bn = is_training_bn
self.data_format = data_format
self.target_num_channels = target_num_channels
self.feat_level = feat_level
self.strategy = strategy
self.conv_after_downsample = conv_after_downsample
self.pooling_type = pooling_type or 'max'
self.upsampling_type = upsampling_type or 'nearest'
def build(self, input_shape):
idx = 1 if self.data_format == 'channels_first' else 3
num_channels = input_shape[idx]
self._maybe_init_1x1(num_channels)
def _pool2d(self, inputs, height, width, target_height, target_width):
"""Pool the inputs to target height and width."""
height_stride_size = int((height - 1) // target_height + 1)
width_stride_size = int((width - 1) // target_width + 1)
if self.pooling_type == 'max':
return tf.keras.layers.MaxPooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=self.data_format)(inputs)
if self.pooling_type == 'avg':
return tf.keras.layers.AveragePooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=self.data_format)(inputs)
raise ValueError('Unsupported pooling type {}.'.format(self.pooling_type))
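# Worked example of the stride arithmetic above (illustrative numbers):
# pooling a 64x64 feature map down to 32x32 gives
#   height_stride_size = (64 - 1) // 32 + 1 = 2
# so the layer pools with pool_size 3, stride 2 and SAME padding, which
# yields exactly ceil(64 / 2) = 32 output rows.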
def _upsample2d(self, inputs, target_height, target_width):
if self.data_format == 'channels_first':
inputs = tf.transpose(inputs, [0, 2, 3, 1])
resized = tf.cast(
tf.compat.v1.image.resize_nearest_neighbor(
tf.cast(inputs, tf.float32), [target_height, target_width]),
inputs.dtype)
if self.data_format == 'channels_first':
resized = tf.transpose(resized, [0, 3, 1, 2])
return resized
def _maybe_init_1x1(self, num_channels):
"""Init 1x1 conv to change layer width if necessary."""
if num_channels != self.target_num_channels:
self.conv2d = tf.keras.layers.Conv2D(
self.target_num_channels, (1, 1),
padding='same',
data_format=self.data_format,
name='conv2d')
if self.apply_bn:
self.bn = util_keras.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
strategy=self.strategy,
name='bn')
def _maybe_apply_1x1(self, feat, training, num_channels):
"""Apply 1x1 conv to change layer width if necessary."""
if num_channels != self.target_num_channels:
feat = self.conv2d(feat)
if self.apply_bn:
feat = self.bn(feat, training=training)
return feat
def call(self, feat, training, all_feats):
hwc_idx = (2, 3, 1) if self.data_format == 'channels_first' else (1, 2, 3)
height, width, num_channels = [feat.shape.as_list()[i] for i in hwc_idx]
if all_feats:
target_feat_shape = all_feats[self.feat_level].shape.as_list()
target_height, target_width, _ = [target_feat_shape[i] for i in hwc_idx]
else:
# Default to downsampling if all_feats is empty.
target_height, target_width = (height + 1) // 2, (width + 1) // 2
# If conv_after_downsample is True, when downsampling, apply 1x1 after
# downsampling for efficiency.
if height > target_height and width > target_width:
if not self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
feat = self._pool2d(feat, height, width, target_height, target_width)
if self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
elif height <= target_height and width <= target_width:
feat = self._maybe_apply_1x1(feat, training, num_channels)
if height < target_height or width < target_width:
feat = self._upsample2d(feat, target_height, target_width)
else:
raise ValueError(
'Incompatible Resampling : feat shape {}x{} target_shape: {}x{}'
.format(height, width, target_height, target_width))
return feat
class ClassNet(tf.keras.layers.Layer):
"""Object class prediction network."""
def __init__(self,
num_classes=90,
num_anchors=9,
num_filters=32,
min_level=3,
max_level=7,
is_training_bn=False,
act_type='swish',
repeats=4,
separable_conv=True,
survival_prob=None,
strategy=None,
data_format='channels_last',
grad_checkpoint=False,
name='class_net',
feature_only=False,
**kwargs):
"""Initialize the ClassNet.
Args:
num_classes: number of classes.
num_anchors: number of anchors.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
is_training_bn: True if we train the BatchNorm.
act_type: String of the activation used.
repeats: number of intermediate layers.
separable_conv: True to use separable_conv instead of conv2D.
survival_prob: if a value is set then drop connect will be used.
strategy: string to specify training strategy for TPU/GPU/CPU.
data_format: string of 'channels_first' or 'channels_last'.
grad_checkpoint: bool, If true, apply grad checkpoint for saving memory.
name: the name of this layer.
feature_only: build the base feature network only (excluding final class
head).
**kwargs: other parameters.
"""
super().__init__(name=name, **kwargs)
self.num_classes = num_classes
self.num_anchors = num_anchors
self.num_filters = num_filters
self.min_level = min_level
self.max_level = max_level
self.repeats = repeats
self.separable_conv = separable_conv
self.is_training_bn = is_training_bn
self.survival_prob = survival_prob
self.act_type = act_type
self.strategy = strategy
self.data_format = data_format
self.conv_ops = []
self.bns = []
self.grad_checkpoint = grad_checkpoint
self.feature_only = feature_only
conv2d_layer = self.conv2d_layer(separable_conv, data_format)
for i in range(self.repeats):
# If using SeparableConv2D
self.conv_ops.append(
conv2d_layer(
self.num_filters,
kernel_size=3,
bias_initializer=tf.zeros_initializer(),
activation=None,
padding='same',
name='class-%d' % i))
bn_per_level = []
for level in range(self.min_level, self.max_level + 1):
bn_per_level.append(
util_keras.build_batch_norm(
is_training_bn=self.is_training_bn,
strategy=self.strategy,
data_format=self.data_format,
name='class-%d-bn-%d' % (i, level),
))
self.bns.append(bn_per_level)
self.classes = self.classes_layer(
conv2d_layer, num_classes, num_anchors, name='class-predict')
@tf.autograph.experimental.do_not_convert
def _conv_bn_act(self, image, i, level_id, training):
conv_op = self.conv_ops[i]
bn = self.bns[i][level_id]
@utils.recompute_grad(self.grad_checkpoint)
def _call(image):
original_image = image
image = conv_op(image)
image = bn(image, training=training)
if self.act_type:
image = utils.activation_fn(image, self.act_type)
if i > 0 and self.survival_prob:
image = utils.drop_connect(image, training, self.survival_prob)
image = image + original_image
return image
return _call(image)
def call(self, inputs, training, **kwargs):
"""Call ClassNet."""
class_outputs = []
for level_id in range(0, self.max_level - self.min_level + 1):
image = inputs[level_id]
for i in range(self.repeats):
image = self._conv_bn_act(image, i, level_id, training)
if self.feature_only:
class_outputs.append(image)
else:
class_outputs.append(self.classes(image))
return class_outputs
@classmethod
def conv2d_layer(cls, separable_conv, data_format):
"""Gets the conv2d layer in ClassNet class."""
if separable_conv:
conv2d_layer = functools.partial(
tf.keras.layers.SeparableConv2D,
depth_multiplier=1,
data_format=data_format,
pointwise_initializer='variance_scaling',
depthwise_initializer='variance_scaling')
else:
conv2d_layer = functools.partial(
tf.keras.layers.Conv2D,
data_format=data_format,
kernel_initializer=tf.random_normal_initializer(stddev=0.01))
return conv2d_layer
@classmethod
def classes_layer(cls, conv2d_layer, num_classes, num_anchors, name):
"""Gets the classes layer in ClassNet class."""
return conv2d_layer(
num_classes * num_anchors,
kernel_size=3,
bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
padding='same',
name=name)
class BoxNet(tf.keras.layers.Layer):
"""Box regression network."""
def __init__(self,
num_anchors=9,
num_filters=32,
min_level=3,
max_level=7,
is_training_bn=False,
act_type='swish',
repeats=4,
separable_conv=True,
survival_prob=None,
strategy=None,
data_format='channels_last',
grad_checkpoint=False,
name='box_net',
feature_only=False,
**kwargs):
"""Initialize BoxNet.
Args:
num_anchors: number of anchors used.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
is_training_bn: True if we train the BatchNorm.
act_type: String of the activation used.
repeats: number of "intermediate" layers.
separable_conv: True to use separable_conv instead of conv2D.
survival_prob: if a value is set then drop connect will be used.
strategy: string to specify training strategy for TPU/GPU/CPU.
data_format: string of 'channels_first' or 'channels_last'.
grad_checkpoint: bool, If true, apply grad checkpoint for saving memory.
name: Name of the layer.
feature_only: build the base feature network only (excluding box class
head).
**kwargs: other parameters.
"""
super().__init__(name=name, **kwargs)
self.num_anchors = num_anchors
self.num_filters = num_filters
self.min_level = min_level
self.max_level = max_level
self.repeats = repeats
self.separable_conv = separable_conv
self.is_training_bn = is_training_bn
self.survival_prob = survival_prob
self.act_type = act_type
self.strategy = strategy
self.data_format = data_format
self.grad_checkpoint = grad_checkpoint
self.feature_only = feature_only
self.conv_ops = []
self.bns = []
for i in range(self.repeats):
# If using SeparableConv2D
if self.separable_conv:
self.conv_ops.append(
tf.keras.layers.SeparableConv2D(
filters=self.num_filters,
depth_multiplier=1,
pointwise_initializer='variance_scaling',
depthwise_initializer='variance_scaling',
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-%d' % i))
# If using Conv2d
else:
self.conv_ops.append(
tf.keras.layers.Conv2D(
filters=self.num_filters,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-%d' % i))
bn_per_level = []
for level in range(self.min_level, self.max_level + 1):
bn_per_level.append(
util_keras.build_batch_norm(
is_training_bn=self.is_training_bn,
strategy=self.strategy,
data_format=self.data_format,
name='box-%d-bn-%d' % (i, level)))
self.bns.append(bn_per_level)
self.boxes = self.boxes_layer(
separable_conv, num_anchors, data_format, name='box-predict')
@tf.autograph.experimental.do_not_convert
def _conv_bn_act(self, image, i, level_id, training):
conv_op = self.conv_ops[i]
bn = self.bns[i][level_id]
@utils.recompute_grad(self.grad_checkpoint)
def _call(image):
original_image = image
image = conv_op(image)
image = bn(image, training=training)
if self.act_type:
image = utils.activation_fn(image, self.act_type)
if i > 0 and self.survival_prob:
image = utils.drop_connect(image, training, self.survival_prob)
image = image + original_image
return image
return _call(image)
def call(self, inputs, training):
"""Call boxnet."""
box_outputs = []
for level_id in range(0, self.max_level - self.min_level + 1):
image = inputs[level_id]
for i in range(self.repeats):
image = self._conv_bn_act(image, i, level_id, training)
if self.feature_only:
box_outputs.append(image)
else:
box_outputs.append(self.boxes(image))
return box_outputs
@classmethod
def boxes_layer(cls, separable_conv, num_anchors, data_format, name):
"""Gets the conv2d layer in BoxNet class."""
if separable_conv:
return tf.keras.layers.SeparableConv2D(
filters=4 * num_anchors,
depth_multiplier=1,
pointwise_initializer='variance_scaling',
depthwise_initializer='variance_scaling',
data_format=data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name=name)
else:
return tf.keras.layers.Conv2D(
filters=4 * num_anchors,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
data_format=data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name=name)
class SegmentationHead(tf.keras.layers.Layer):
"""Keras layer for semantic segmentation head."""
def __init__(self,
num_classes,
num_filters,
min_level,
max_level,
data_format,
is_training_bn,
act_type,
strategy,
name='segmentation_head',
**kwargs):
"""Initialize SegmentationHead.
Args:
num_classes: number of classes.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
data_format: string of 'channels_first' or 'channels_last'.
is_training_bn: True if we train the BatchNorm.
act_type: String of the activation used.
strategy: string to specify training strategy for TPU/GPU/CPU.
name: string of name.
**kwargs: other parameters.
"""
super().__init__(name=name, **kwargs)
self.act_type = act_type
self.con2d_ts = []
self.con2d_t_bns = []
for level in range(max_level - min_level):
self.con2d_ts.append(
tf.keras.layers.Conv2DTranspose(
num_filters,
3,
strides=2,
padding='same',
data_format=data_format,
use_bias=False))
self.con2d_t_bns.append(
util_keras.build_batch_norm(
is_training_bn=is_training_bn,
data_format=data_format,
strategy=strategy,
name='bn_' + str(level)))
self.head_transpose = tf.keras.layers.Conv2DTranspose(
num_classes, 3, strides=2, padding='same')
def call(self, feats, training):
x = feats[-1]
skips = list(reversed(feats[:-1]))
for con2d_t, con2d_t_bn, skip in zip(self.con2d_ts, self.con2d_t_bns,
skips):
x = con2d_t(x)
x = con2d_t_bn(x, training)
x = utils.activation_fn(x, self.act_type)
x = tf.concat([x, skip], axis=-1)
# This is the last layer of the model
return self.head_transpose(x) # 64x64 -> 128x128
class FPNCells(tf.keras.layers.Layer):
"""FPN cells."""
def __init__(self, config, name='fpn_cells'):
super().__init__(name=name)
self.config = config
if config.fpn_config:
self.fpn_config = config.fpn_config
else:
self.fpn_config = fpn_configs.get_fpn_config(config.fpn_name,
config.min_level,
config.max_level,
config.fpn_weight_method)
self.cells = [
FPNCell(self.config, name='cell_%d' % rep)
for rep in range(self.config.fpn_cell_repeats)
]
def call(self, feats, training):
for cell in self.cells:
cell_feats = cell(feats, training)
min_level = self.config.min_level
max_level = self.config.max_level
feats = []
for level in range(min_level, max_level + 1):
for i, fnode in enumerate(reversed(self.fpn_config.nodes)):
if fnode['feat_level'] == level:
feats.append(cell_feats[-1 - i])
break
return feats
class FPNCell(tf.keras.layers.Layer):
"""A single FPN cell."""
def __init__(self, config, name='fpn_cell'):
super().__init__(name=name)
logging.info('building FPNCell %s', name)
self.config = config
if config.fpn_config:
self.fpn_config = config.fpn_config
else:
self.fpn_config = fpn_configs.get_fpn_config(config.fpn_name,
config.min_level,
config.max_level,
config.fpn_weight_method)
self.fnodes = []
for i, fnode_cfg in enumerate(self.fpn_config.nodes):
logging.info('fnode %d : %s', i, fnode_cfg)
fnode = FNode(
fnode_cfg['feat_level'] - self.config.min_level,
fnode_cfg['inputs_offsets'],
config.fpn_num_filters,
config.apply_bn_for_resampling,
config.is_training_bn,
config.conv_after_downsample,
config.conv_bn_act_pattern,
config.separable_conv,
config.act_type,
strategy=config.strategy,
weight_method=self.fpn_config.weight_method,
data_format=config.data_format,
name='fnode%d' % i)
self.fnodes.append(fnode)
def call(self, feats, training):
@utils.recompute_grad(self.config.grad_checkpoint)
def _call(feats):
for fnode in self.fnodes:
feats = fnode(feats, training)
return feats
return _call(feats)
class EfficientDetNet(tf.keras.Model):
"""EfficientDet keras network without pre/post-processing."""
def __init__(self,
model_name=None,
config=None,
name='',
feature_only=False):
"""Initialize model."""
super().__init__(name=name)
config = config or hparams_config.get_efficientdet_config(model_name)
self.config = config
# Backbone.
backbone_name = config.backbone_name
is_training_bn = config.is_training_bn
if 'efficientnet' in backbone_name:
override_params = {
'batch_norm':
utils.batch_norm_class(is_training_bn, config.strategy),
'relu_fn':
functools.partial(utils.activation_fn, act_type=config.act_type),
'grad_checkpoint': self.config.grad_checkpoint
}
if 'b0' in backbone_name:
override_params['survival_prob'] = 0.0
if config.backbone_config is not None:
override_params['blocks_args'] = (
efficientnet_builder.BlockDecoder().encode(
config.backbone_config.blocks))
override_params['data_format'] = config.data_format
self.backbone = backbone_factory.get_model(
backbone_name, override_params=override_params)
# Feature network.
self.resample_layers = [] # additional resampling layers.
for level in range(6, config.max_level + 1):
# Adds a coarser level by downsampling the last feature map.
self.resample_layers.append(
ResampleFeatureMap(
feat_level=(level - config.min_level),
target_num_channels=config.fpn_num_filters,
apply_bn=config.apply_bn_for_resampling,
is_training_bn=config.is_training_bn,
conv_after_downsample=config.conv_after_downsample,
strategy=config.strategy,
data_format=config.data_format,
name='resample_p%d' % level,
))
self.fpn_cells = FPNCells(config)
# class/box output prediction network.
num_anchors = len(config.aspect_ratios) * config.num_scales
num_filters = config.fpn_num_filters
for head in config.heads:
if head == 'object_detection':
self.class_net = ClassNet(
num_classes=config.num_classes,
num_anchors=num_anchors,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training_bn=config.is_training_bn,
act_type=config.act_type,
repeats=config.box_class_repeats,
separable_conv=config.separable_conv,
survival_prob=config.survival_prob,
strategy=config.strategy,
grad_checkpoint=config.grad_checkpoint,
data_format=config.data_format,
feature_only=feature_only)
self.box_net = BoxNet(
num_anchors=num_anchors,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training_bn=config.is_training_bn,
act_type=config.act_type,
repeats=config.box_class_repeats,
separable_conv=config.separable_conv,
survival_prob=config.survival_prob,
strategy=config.strategy,
grad_checkpoint=config.grad_checkpoint,
data_format=config.data_format,
feature_only=feature_only)
if head == 'segmentation':
self.seg_head = SegmentationHead(
num_classes=config.seg_num_classes,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training_bn=config.is_training_bn,
act_type=config.act_type,
strategy=config.strategy,
data_format=config.data_format)
def _init_set_name(self, name, zero_based=True):
"""A hack to allow empty model name for legacy checkpoint compitability."""
if name == '': # pylint: disable=g-explicit-bool-comparison
self._name = name
else:
# Delegate to the Keras implementation, which sets self._name itself.
super()._init_set_name(name, zero_based)
def call(self, inputs, training):
config = self.config
# call backbone network.
all_feats = self.backbone(inputs, training=training, features_only=True)
feats = all_feats[config.min_level:config.max_level + 1]
# Build additional input features that are not from backbone.
for resample_layer in self.resample_layers:
feats.append(resample_layer(feats[-1], training, None))
# call feature network.
fpn_feats = self.fpn_cells(feats, training)
# call class/box/seg output network.
outputs = []
if 'object_detection' in config.heads:
class_outputs = self.class_net(fpn_feats, training)
box_outputs = self.box_net(fpn_feats, training)
outputs.extend([class_outputs, box_outputs])
if 'segmentation' in config.heads:
seg_outputs = self.seg_head(fpn_feats, training)
outputs.append(seg_outputs)
return tuple(outputs)
class EfficientDetModel(EfficientDetNet):
"""EfficientDet full keras model with pre and post processing."""
def _preprocessing(self,
raw_images,
image_size,
mean_rgb,
stddev_rgb,
mode=None):
"""Preprocess images before feeding to the network."""
if not mode:
return raw_images, None
image_size = utils.parse_image_size(image_size)
if mode != 'infer':
# We only support inference for now.
raise ValueError('preprocessing must be infer or empty')
def map_fn(image):
input_processor = dataloader.DetectionInputProcessor(
image, image_size)
input_processor.normalize_image(mean_rgb, stddev_rgb)
input_processor.set_scale_factors_to_output_size()
image = input_processor.resize_and_crop_image()
image_scale = input_processor.image_scale_to_original
return image, image_scale
if raw_images.shape.as_list()[0]: # fixed batch size.
batch_size = raw_images.shape.as_list()[0]
outputs = [map_fn(raw_images[i]) for i in range(batch_size)]
return [tf.stop_gradient(tf.stack(y)) for y in zip(*outputs)]
# otherwise treat it as dynamic batch size.
return tf.vectorized_map(map_fn, raw_images, warn=False)
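# Shape sketch for the two branches above (illustrative numbers): with a
# fixed batch of 2 raw images and image_size 512x512, the stacked result
# is images of shape [2, 512, 512, 3] plus per-image scales of shape [2],
# where each scale maps boxes back to the original resolution; a None
# batch dimension takes the tf.vectorized_map path instead.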
def _postprocess(self, cls_outputs, box_outputs, scales, mode='global'):
"""Postprocess class and box predictions."""
if not mode:
return cls_outputs, box_outputs
if mode == 'global':
return postprocess.postprocess_global(self.config.as_dict(), cls_outputs,
box_outputs, scales)
if mode == 'per_class':
return postprocess.postprocess_per_class(self.config.as_dict(),
cls_outputs, box_outputs, scales)
if mode == 'combined':
return postprocess.postprocess_combined(self.config.as_dict(),
cls_outputs, box_outputs, scales)
if mode == 'tflite':
if scales is not None:
# pre_mode should be None for TFLite.
raise ValueError('scales not supported for TFLite post-processing')
return postprocess.postprocess_tflite(self.config.as_dict(), cls_outputs,
box_outputs)
raise ValueError('Unsupported postprocess mode {}'.format(mode))
def call(self, inputs, training=False, pre_mode='infer', post_mode='global'):
"""Call this model.
Args:
inputs: a tensor with common shape [batch, height, width, channels].
training: If true, it is training mode. Otherwise, eval mode.
pre_mode: preprocessing mode, must be {None, 'infer'}.
post_mode: postprocessing mode, must be {None, 'global', 'per_class',
'combined', 'tflite'}.
Returns:
the output tensor list.
"""
config = self.config
# preprocess.
inputs, scales = self._preprocessing(inputs, config.image_size,
config.mean_rgb, config.stddev_rgb,
pre_mode)
# network.
if config.data_format == 'channels_first':
inputs = tf.transpose(inputs, [0, 3, 1, 2])
outputs = super().call(inputs, training)
if 'object_detection' in config.heads and post_mode:
# postprocess for detection
det_outputs = self._postprocess(outputs[0], outputs[1], scales, post_mode)
outputs = det_outputs + outputs[2:]
return outputs
|
{
"content_hash": "931a86f5033c5b48a42f0ed866c648cd",
"timestamp": "",
"source": "github",
"line_count": 989,
"max_line_length": 80,
"avg_line_length": 35.900910010111225,
"alnum_prop": 0.6014476426519462,
"repo_name": "google/automl",
"id": "e21402e54b5bea217845ad929b0958dc63abb7b1",
"size": "36188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "efficientdet/tf2/efficientdet_keras.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1782347"
},
{
"name": "Python",
"bytes": "1051435"
},
{
"name": "Shell",
"bytes": "1708"
}
],
"symlink_target": ""
}
|
from google.cloud import dialogflowcx_v3beta1
def sample_detect_intent():
# Create a client
client = dialogflowcx_v3beta1.SessionsClient()
# Initialize request argument(s)
query_input = dialogflowcx_v3beta1.QueryInput()
query_input.text.text = "text_value"
query_input.language_code = "language_code_value"
request = dialogflowcx_v3beta1.DetectIntentRequest(
session="session_value",
query_input=query_input,
)
# Make the request
response = client.detect_intent(request=request)
# Handle the response
print(response)
# [END dialogflow_v3beta1_generated_Sessions_DetectIntent_sync]
|
{
"content_hash": "bd62fbe401753e1735c94cc43462e9b2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 63,
"avg_line_length": 27.166666666666668,
"alnum_prop": 0.7131901840490797,
"repo_name": "googleapis/python-dialogflow-cx",
"id": "f1839d477212e8c05e46031a167cd51adb15870d",
"size": "2043",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_v3beta1_generated_sessions_detect_intent_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "10904903"
},
{
"name": "Shell",
"bytes": "30681"
}
],
"symlink_target": ""
}
|
class AnnotationMeta(type):
def __call__(cls, *args, **kwargs):
if cls._can_be_static and cls._is_static_call(*args, **kwargs):
self = super(AnnotationMeta, cls).__call__()
self(args[0])
return args[0]
else:
return super(AnnotationMeta, cls).__call__(*args, **kwargs)
class _Annotation(object):
_can_be_static = False
def modify_request_definition(self, request_definition_builder):
pass
@classmethod
def _is_static_call(cls, *args, **kwargs):
try:
is_builder = isinstance(args[0], RequestDefinitionBuilder)
except IndexError:
return False
else:
return is_builder and not (kwargs or args[1:])
Annotation = AnnotationMeta("Annotation", (_Annotation,), {})
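# Sketch of the two call forms the metaclass distinguishes, for an
# annotation subclass with _can_be_static = True (the class name below
# is hypothetical, for illustration only):
#
#   MyAnnotation(builder)    # "static" form: args[0] is already a
#                            # RequestDefinitionBuilder, so a default
#                            # instance is created, applied to the
#                            # builder, and the builder is returned
#   MyAnnotation(1, x=2)     # normal form: arguments are forwarded to
#                            # __init__ as usual
#
# _is_static_call returns True only when the builder is the sole argument.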
class AnnotationHandlerBuilder(object):
__listener = None
@property
def listener(self):
return self.__listener
@listener.setter
def listener(self, listener):
self.__listener = listener
def add_annotation(self, annotation, *args, **kwargs):
if self.__listener is not None:
self.__listener(annotation)
def is_done(self):
return True
def build(self):
raise NotImplementedError
class AnnotationHandler(object):
@property
def annotations(self):
raise NotImplementedError
class UriDefinitionBuilder(object):
@property
def is_static(self):
raise NotImplementedError
@property
def is_dynamic(self):
raise NotImplementedError
@is_dynamic.setter
def is_dynamic(self, is_dynamic):
raise NotImplementedError
def add_variable(self, name):
raise NotImplementedError
@property
def remaining_variables(self):
raise NotImplementedError
def build(self):
raise NotImplementedError
class RequestDefinitionBuilder(object):
@property
def method(self):
raise NotImplementedError
@property
def uri(self):
raise NotImplementedError
@property
def argument_handler_builder(self):
raise NotImplementedError
@property
def method_handler_builder(self):
raise NotImplementedError
def update_wrapper(self, wrapper):
raise NotImplementedError
def build(self):
raise NotImplementedError
def copy(self):
raise NotImplementedError
class RequestDefinition(object):
def make_converter_registry(self, converters):
raise NotImplementedError
def define_request(self, request_builder, func_args, func_kwargs):
raise NotImplementedError
class CallBuilder(object):
@property
def client(self):
raise NotImplementedError
@property
def base_url(self):
raise NotImplementedError
@property
def converters(self):
raise NotImplementedError
@property
def hooks(self):
raise NotImplementedError
def add_hook(self, hook, *more_hooks):
raise NotImplementedError
@property
def auth(self):
raise NotImplementedError
def build(self, definition):
raise NotImplementedError
class Auth(object):
def __call__(self, request_builder):
raise NotImplementedError
class Consumer(object):
@property
def session(self):
raise NotImplementedError
|
{
"content_hash": "0b885157e7c61dc716789a53c5fa3a5d",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 71,
"avg_line_length": 22.03921568627451,
"alnum_prop": 0.6459074733096085,
"repo_name": "prkumar/uplink",
"id": "b02bd862901d6683af3383fb335dbe26ef2311d9",
"size": "3372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uplink/interfaces.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "348057"
}
],
"symlink_target": ""
}
|
"""
Traverse an HDFS tree and output disk space usage by block size.
"""
# DOCS_INCLUDE_START
import pydoop.hdfs as hdfs
from common import MB, TEST_ROOT
def usage_by_bs(fs, root):
stats = {}
for info in fs.walk(root):
if info['kind'] == 'directory':
continue
bs = int(info['block_size'])
size = int(info['size'])
stats[bs] = stats.get(bs, 0) + size
return stats
if __name__ == "__main__":
with hdfs.hdfs() as fs:
root = "%s/%s" % (fs.working_directory(), TEST_ROOT)
print("BS(MB)\tBYTES")
for k, v in usage_by_bs(fs, root).items():
print("%.1f\t%d" % (k / float(MB), v))
|
{
"content_hash": "959c7e8c50ef6197f6e035de6aabb26c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 64,
"avg_line_length": 26.88,
"alnum_prop": 0.5491071428571429,
"repo_name": "elzaggo/pydoop",
"id": "e5e4e64b34e78cd30a42100601b2275b7722dd11",
"size": "1283",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/hdfs/treewalk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "202110"
},
{
"name": "C++",
"bytes": "157645"
},
{
"name": "Emacs Lisp",
"bytes": "153"
},
{
"name": "Java",
"bytes": "180329"
},
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "514013"
},
{
"name": "Shell",
"bytes": "18476"
}
],
"symlink_target": ""
}
|
import os
import dj_database_url
from project_runpy import env
BASE_DIR = os.path.dirname(__file__)
SECRET_KEY = env.get('SECRET_KEY', 'Rotom')
DEBUG = env.get('DEBUG')
ALLOWED_HOSTS = ['*']
DATABASES = {'default': dj_database_url.config(default='sqlite:///' +
os.path.join(BASE_DIR, 'example_project.db'))}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
# MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# SECRET_KEY is configured from the environment near the top of this file.
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example_project.wsgi.application'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# app
'aws_admin',
# support
'django_extensions',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
],
'debug': DEBUG,
},
},
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': os.environ.get('LOGGING_LEVEL', 'WARNING'),
'handlers': ['console'],
},
'formatters': {
'verbose': {
'format': ' '.join([
'%(levelname)s',
'%(asctime)s',
'%(name)s',
'%(module)s',
'%(process)d',
'%(thread)d',
'%(message)s']),
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'readable_sql': {
'()': 'project_runpy.ReadableSqlFilter',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'project_runpy.ColorizingStreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'py.warnings': {
# how do i get colored warnings without duplicates?
'propagate': False,
},
'django.db.backends': {
'level': 'DEBUG' if env.get('SQL') else 'INFO',
'filters': ['require_debug_true', 'readable_sql'],
},
'factory': {
'level': 'ERROR',
'propagate': False,
},
},
}
# App settings
AWS_ACCESS_KEY_ID = env.require('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env.require('AWS_SECRET_ACCESS_KEY')
|
{
"content_hash": "91dcf4295afd5739c0667710f3c1cf77",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 79,
"avg_line_length": 29.936507936507937,
"alnum_prop": 0.6344998232591021,
"repo_name": "crccheck/django-aws-admin",
"id": "ef726d9c44952dd21211688cb8866c08ff093941",
"size": "5705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_project/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1289"
},
{
"name": "Python",
"bytes": "25648"
}
],
"symlink_target": ""
}
|
from math import ceil
from time import sleep
import os
import re
from database import User, Item
from . import data_manipulation
from . import gtk_element_editor
from . import window_creator
from .decorators import use_threading, use_spinner
class WindowHandler:
spinner = None
task_count = 0
user_list = None
food_list = None
database = None
selected_food = None
selected_user = None
user_to_edit = None
food_to_edit = None
selected_amount = 0
selected_amount_entry = None
image_size = 75
dynamic_scaling_list = list()
dynamic_font_list = list()
default_font_factor = 0.05
window_size = None # last known window size
window_history = list()
actual_window = None # actual window if known
user_image_list = list() # list of all images which should contain profile image of selected user
user_name_label_list = list()
user_balance_label_list = list()
food_image_list = list() # list of all images which should contain profile image of selected food
food_name_label_list = list()
food_price_label_list = list()
current_numpad_value = 0
numpad_value_label_list = list()
regex_str = ""
regex_obj = None
filter_clear_button = None
edit_nick_entry = None
edit_name_entry = None
edit_food_name_entry = None
edit_food_price_entry = None
creating_new_user = True
creating_new_food = True
def register_user_image(self, image):
"""
Function used to register where to put image of selected user.
:param image: image object
"""
self.user_image_list.append(image)
self.update_user_image()
@use_spinner
def register_spinner(self, spinner):
"""
Function used to register default spinner to indicate running process.
:param spinner: spinner object
"""
self.spinner = spinner
if self.task_count > 0:
spinner.start()
@use_threading
@use_spinner
def spinner_test(self, *_):
"""
Function used to test spinner as if data were being retrieved from database.
"""
print("Doing hard work.")
sleep(5)
print("Done")
def register_user_list(self, user_list):
"""
Function used to register graphical list of users.
:param user_list: list to register
"""
self.user_list = user_list
self.clear_user_list()
self.update_user_list()
def register_food_list(self, food_list):
"""
Function used to register graphical list of food.
:param food_list: list to register
"""
self.food_list = food_list
# todo: add food filter
self.clear_food_list()
self.update_food_list()
def register_edit_food_price(self, edit):
"""
Function used to register `Gtk.Entry` containing new price of food.
:param edit: Gtk.Entry
"""
self.edit_food_price_entry = edit
gtk_element_editor.change_label_entry_text(edit, data_manipulation.get_item_price_printable(self.food_to_edit,
currency=""))
def register_edit_food_name(self, edit):
"""
Function used to register `Gtk.Entry` containing new name for food.
:param edit: Gtk.Entry
"""
self.edit_food_name_entry = edit
gtk_element_editor.change_label_entry_text(edit, data_manipulation.get_item_printable_name(self.food_to_edit))
def event_user_selected(self, *args):
"""
This handler should be called when user is selected.
:param args: args[2] = object containing info about selected user
"""
self.selected_user = args[2]
self.update_selected_user_all()
def event_food_selected(self, *args):
"""
This handler should be called when food is selected.
:param args: args[2] = object containing info about selected food
"""
self.selected_food = args[2]
self.update_selected_food_all()
def event_save_profile(self, *_):
"""
Modifies user according to `edit_nick_entry` and `edit_name_entry`.
"""
if self.creating_new_user:
self.user_to_edit = User()
self.user_to_edit.name = gtk_element_editor.get_text_from_entry(self.edit_name_entry)
self.user_to_edit.nick = gtk_element_editor.get_text_from_entry(self.edit_nick_entry)
if self.creating_new_user:
self.database.add_user(self.user_to_edit)
else:
self.database.edit_user(self.user_to_edit)
self.clear_user_list()
self.update_user_list_non_threading()
self.update_selected_user_all()
self.event_jmp_back()
def event_select_image(self, *_):
"""
Launches the external command found in the first line of the config, finds the most recently
created image in the directory specified in the second line, and sets it as the user's image.
"""
config = open(os.path.join(os.path.dirname(__file__), '../config.txt'), "r").read().split("\n")
config_command = data_manipulation.expand_username(config[0])
config_imagepath = data_manipulation.expand_username(config[1])
self.actual_window.hide()
os.system(config_command)
self.actual_window.show()
newest = max(os.listdir(config_imagepath), key=lambda x: os.path.getctime(os.path.join(config_imagepath, x)))
newest = os.path.join(config_imagepath, newest)
self.selected_user.photo = newest
self.update_user_image()
def event_transfer(self, *_):
"""
        Should be called when the user clicks the button to buy items.
"""
if self.selected_user is not None and self.selected_food is not None:
self.database.buy_items(self.selected_user.id, self.selected_food.id, self.selected_amount)
self.clear_user_list()
self.update_user_list_non_threading()
self.update_user_balance_labels()
def event_save_food(self, *_):
"""
Modifies item according to `edit_food_name_entry` and `edit_food_price_entry`.
"""
if self.creating_new_food:
            self.food_to_edit = Item()
self.food_to_edit.name = gtk_element_editor.get_text_from_entry(self.edit_food_name_entry)
pricestring = gtk_element_editor.get_text_from_entry(self.edit_food_price_entry)
self.food_to_edit.price = data_manipulation.price_string_to_int(pricestring)
if self.creating_new_food:
self.database.add_item(self.food_to_edit)
else:
self.database.edit_item(self.food_to_edit)
self.clear_food_list()
self.update_food_list_non_threading()
self.update_selected_food_all()
self.event_jmp_back()
def clear_user_list(self, *_):
"""
Clears user list. (Don't use if another thread may be accessing user list.)
"""
        for child in self.user_list.get_children():
            self.user_list.remove(child)
self.user_list.add(gtk_element_editor.create_event_button(self.event_jmp_new_user, "+"))
def clear_food_list(self, *_):
"""
Clears food list. (Don't use if another thread may be accessing food list.)
"""
        for child in self.food_list.get_children():
            self.food_list.remove(child)
self.food_list.add(gtk_element_editor.create_event_button(self.event_jmp_new_food, "+"))
@use_threading
def update_user_list(self, *_):
"""
Updates user_list with new data from database in new thread.
"""
self.update_user_list_non_threading()
@use_spinner
def update_user_list_non_threading(self, *_):
"""
Updates user_list with new data from database.
"""
user_list = self.database.get_user()
for user in user_list:
row = gtk_element_editor.create_user_row(user, self.event_user_selected, self.register_dynamic_font)
self.user_list.add(row)
self.user_list.show_all()
if self.selected_user is not None:
for user in user_list:
if user.id == self.selected_user.id:
self.selected_user = user
return
self.selected_user = None
@use_threading
def update_food_list(self, *_):
"""
Updates food_list with new data from database in new thread.
"""
self.update_food_list_non_threading()
@use_spinner
def update_food_list_non_threading(self, *_):
"""
Updates food_list with new data from database.
"""
food_list = self.database.get_item(None)
for food in food_list:
row = gtk_element_editor.create_food_row(food, self.event_food_selected, self.register_dynamic_font)
self.food_list.add(row)
self.food_list.show_all()
def update_user_image(self, *_, standard_window_width=640, standard_window_height=320):
"""
Updates images of selected user.
"""
if self.window_size is None:
scaling_factor = 1
else:
scaling_factor = data_manipulation.compute_scaling_factor(self.window_size[0], self.window_size[1],
standard_window_width, standard_window_height)
for user_image in self.user_image_list:
gtk_element_editor.image_set_missing(user_image)
if self.selected_user is not None:
if self.selected_user.photo is not None:
gtk_element_editor.load_image_from_file(user_image,
self.selected_user.photo,
self.image_size * scaling_factor,
self.image_size * scaling_factor)
def update_user_name_label(self, *_):
"""
Updates name labels of selected user.
"""
for user_label in self.user_name_label_list:
gtk_element_editor.change_label_entry_text(user_label,
data_manipulation.get_user_printable_name(self.selected_user))
def update_user_balance_labels(self, *_):
"""
Updates labels containing balance of selected user.
"""
for user_label in self.user_balance_label_list:
gtk_element_editor.change_label_entry_text(user_label,
data_manipulation.get_user_balance_printable(self.selected_user))
def update_selected_user_all(self, *_):
"""
Updates everything needed when user is selected.
"""
self.update_user_image()
self.update_user_name_label()
self.update_user_balance_labels()
def update_numpad_value_label(self, *_):
"""
Updates numpad value label.
        For example, it can be used when a numpad button is clicked.
"""
for numpad_label in self.numpad_value_label_list:
gtk_element_editor.change_label_entry_text(numpad_label,
data_manipulation.format_money(self.current_numpad_value))
def update_amount_entry(self, *_):
gtk_element_editor.change_label_entry_text(self.selected_amount_entry, str(self.selected_amount))
def update_food_image(self, *_, standard_window_width=640, standard_window_height=320):
"""
Updates images of selected food.
"""
if self.window_size is None:
scaling_factor = 1
else:
scaling_factor = data_manipulation.compute_scaling_factor(self.window_size[0], self.window_size[1],
standard_window_width, standard_window_height)
for food_image in self.food_image_list:
gtk_element_editor.image_set_missing(food_image)
if self.selected_food is not None:
if self.selected_food.photo is not None:
gtk_element_editor.load_image_from_file(food_image,
self.selected_food.photo,
self.image_size * scaling_factor,
self.image_size * scaling_factor)
def update_food_price_labels(self, *_):
"""
Updates labels containing price of selected food.
"""
for food_price_label in self.food_price_label_list:
gtk_element_editor.change_label_entry_text(food_price_label,
data_manipulation.get_item_price_printable(self.selected_food))
def update_food_name_labels(self, *_):
"""
Updates name labels of selected food.
"""
for food_label in self.food_name_label_list:
gtk_element_editor.change_label_entry_text(food_label,
data_manipulation.get_item_printable_name(self.selected_food))
def update_selected_food_all(self, *_):
"""
Updates all widgets containing information about selected food.
"""
self.update_food_image()
self.update_food_name_labels()
self.update_food_price_labels()
def update(self, *_):
"""
Updates all data presented in main window.
"""
self.update_food_list()
self.update_user_list()
self.update_selected_user_all()
self.update_numpad_value_label()
def set_database(self, database):
"""
Sets which database should be used for retrieving data to display.
:param database: database to use
"""
self.database = database
def user_filter(self, row, *_):
"""
Function used to filter users in listbox.
:param row: row.user should contain valid user dictionary
:return: True if user should be displayed, False otherwise.
"""
if self.regex_obj is None:
return True
try:
names = data_manipulation.get_all_names(row.user)
except AttributeError:
return True
for name in names:
name = data_manipulation.normalize_string(name)
if self.regex_obj.search(name) is not None:
return True
return False
def event_buy_food(self, *_):
"""
        This handler should be called when the user wants to buy food (i.e. on button click).
"""
        if self.selected_user is not None and self.selected_food is not None:
            self.database.buy_items(self.selected_user.id, self.selected_food.id, self.selected_amount)
# todo: error message
def event_amount_up(self, *_):
self.selected_amount += 1
self.update_amount_entry()
def event_amount_down(self, *_):
self.selected_amount -= 1
self.update_amount_entry()
def window_configure(self, *args):
"""
Function which should be called on every change of window size.
        :param args: first argument should be a Gtk.Window or otherwise provide a get_size method.
"""
self.window_size = args[0].get_size()
self.apply_dynamic_scaling_all(self.window_size[0], self.window_size[1]) # todo: add std. win. width and height
self.apply_dynamic_font_all(self.default_font_factor, self.window_size[1])
def register_dynamic_scaling(self, *args):
"""
        Function to be called for registering a widget as dynamically scalable.
        :param args: argument 0 should be the widget to be resized on window change.
"""
self.dynamic_scaling_list.append((args[0], args[0].props.width_request, args[0].props.height_request))
@staticmethod
def apply_dynamic_scaling(awidth, aheight, widget_t, standard_window_width=640, standard_window_height=320):
"""
Scales specific widget.
:param awidth: actual window width
:param aheight: actual window height
:param widget_t: (widget to scale, original widget width, original widget height)
:param standard_window_width: standard window width used as reference
:param standard_window_height: standard window height used as reference
"""
scaling_factor = data_manipulation.compute_scaling_factor(awidth, aheight,
standard_window_width, standard_window_height)
if widget_t[1] > 0:
widget_t[0].props.width_request = ceil(widget_t[1] * scaling_factor)
if widget_t[2] > 0:
widget_t[0].props.height_request = ceil(widget_t[2] * scaling_factor)
def apply_dynamic_scaling_all(self, awidth, aheight, standard_window_width=640, standard_window_height=320):
"""
Scales all widgets registered for dynamic scaling.
:param awidth: actual window width
:param aheight: actual window height
:param standard_window_width: standard window width used as reference
:param standard_window_height: standard window height used as reference
"""
for w in self.dynamic_scaling_list:
self.apply_dynamic_scaling(awidth, aheight, w, standard_window_width, standard_window_height)
self.update_user_image()
def register_dynamic_font(self, widget, scale=None, *_):
"""
        Function to be called to register a text-containing widget for resizing and to set its default font.
:param widget: widget to be resized on window height change
:param scale: desired scale (or None)
"""
if scale is None:
scale = 1
try:
label = widget.props.label
except AttributeError:
label = ""
if "#s:" in label:
try:
scale = float(label[label.find("#s:") + 3:])
except ValueError:
scale = 1
gtk_element_editor.change_button_label_text(widget, label[:label.find("#s:")])
self.dynamic_font_list.append((widget, scale))
if self.window_size is not None:
self.apply_dynamic_font(self.default_font_factor * scale, self.window_size[1], widget)
@staticmethod
def apply_dynamic_font(factor, aheight, widget):
"""
Sets font size on specific widget according to actual window height.
:param factor: font size divided by window height
:param aheight: actual window height
:param widget: widget to setup
"""
widget.modify_font(gtk_element_editor.create_font_from_description(str(ceil(factor * aheight))))
def apply_dynamic_font_all(self, factor, aheight):
"""
Sets font size on registered widgets according to actual window height.
:param factor: factor * widget scaling factor * aheight = font size
:param aheight: actual height of window
"""
for w in self.dynamic_font_list:
self.apply_dynamic_font(factor * w[1], aheight, w[0])
def set_actual_window(self, window):
"""
        After creation, the event handler should be given a reference to the window it is handling. This is not
        strictly required, but without it returning to the previous window will not work.
:param window: window which is operating
"""
self.actual_window = window
def event_jmp_profile(self, *_):
"""
Switches current window to profile window.
"""
if self.selected_user is not None:
self.window_history.append(self.actual_window)
self.actual_window.hide()
self.actual_window = window_creator.create_window_profile(self)
def event_jmp_back(self, *_):
"""
Switches window to previous window on window_history.
"""
self.actual_window.hide()
self.actual_window = self.window_history.pop()
self.actual_window.show()
def register_user_name(self, label, *_):
"""
Function to be called for registering GtkLabel for displaying name of selected user.
"""
self.user_name_label_list.append(label)
self.update_user_name_label()
def register_user_balance(self, label, *_):
"""
        Function to be called for registering GtkLabel for displaying balance of selected user.
"""
self.user_balance_label_list.append(label)
self.update_user_balance_labels()
def register_numpad_value(self, label, *_):
"""
Function to be called for registering GtkLabel for displaying value on numpad.
"""
self.numpad_value_label_list.append(label)
self.update_numpad_value_label()
def register_resulting_balance(self, label, *_):
pass # todo
def register_filter_clear_button(self, button, *_):
"""
Function to register clear button.
:param button: Gtk.Button
"""
self.filter_clear_button = button
self.filter_clear_button.hide()
def register_selected_amount_entry(self, entry, *_):
"""
Function to register selected item amount entry.
:param entry: Gtk.Entry
"""
self.selected_amount_entry = entry
def register_edit_nick(self, entry, *_):
"""
Function to register Gtk.Entry for new nick of user.
:param entry: Gtk.Entry
"""
self.edit_nick_entry = entry
gtk_element_editor.change_label_entry_text(entry,
self.user_to_edit.nick if (self.user_to_edit.nick is not None)
else "")
def register_edit_real_name(self, entry, *_):
"""
Function to register Gtk.Entry for new name of user.
:param entry: Gtk.Entry
"""
self.edit_name_entry = entry
gtk_element_editor.change_label_entry_text(entry,
self.user_to_edit.name if (self.user_to_edit.name is not None)
else "")
def register_food_image(self, image, *_):
self.food_image_list.append(image)
self.update_food_image()
def register_food_price(self, label, *_):
self.food_price_label_list.append(label)
self.update_food_price_labels()
def register_food_name(self, label, *_):
self.food_name_label_list.append(label)
self.update_food_name_labels()
def event_jmp_transaction(self, *_):
"""
Switches current window to transaction window and resets value on numpad.
"""
self.current_numpad_value = 0
self.window_history.append(self.actual_window)
self.actual_window.hide()
self.actual_window = window_creator.create_window_transaction(self, self.database)
def event_jmp_edit_food(self, *_, new=False):
"""
Switches current window to food editing window.
"""
self.creating_new_food = new
if not new:
self.food_to_edit = self.selected_food
else:
self.food_to_edit = Item()
self.window_history.append(self.actual_window)
self.actual_window.hide()
self.actual_window = window_creator.create_window_edit_food(self)
def event_numpad(self, num):
"""
This function should be called by respective event_numpad_x function.
:param num: number clicked on numpad
"""
self.current_numpad_value *= 10
self.current_numpad_value += num
self.update_numpad_value_label()
def event_make_transaction(self, *_):
pass # todo
def event_numpad_1(self, *_):
self.event_numpad(1)
def event_numpad_2(self, *_):
self.event_numpad(2)
def event_numpad_3(self, *_):
self.event_numpad(3)
def event_numpad_4(self, *_):
self.event_numpad(4)
def event_numpad_5(self, *_):
self.event_numpad(5)
def event_numpad_6(self, *_):
self.event_numpad(6)
def event_numpad_7(self, *_):
self.event_numpad(7)
def event_numpad_8(self, *_):
self.event_numpad(8)
def event_numpad_9(self, *_):
self.event_numpad(9)
def event_numpad_0(self, *_):
self.event_numpad(0)
def event_numpad_clear(self, *_):
self.current_numpad_value = 0
self.update_numpad_value_label()
def event_numpad_backspace(self, *_):
self.current_numpad_value //= 10
self.update_numpad_value_label()
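    # Note on the numpad model used by the handlers above: the value is a
    # single integer in the smallest currency unit. Pressing 1, 2, 3 takes it
    # 0 -> 1 -> 12 -> 123 via value = value * 10 + digit; backspace is floor
    # division by 10 (123 -> 12) and clear resets it to 0.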
def event_filter(self, button, *_):
"""
        Updates the regex used to filter users (food filtering is still a todo).
        Should be called on a filter button click.
        :param button: the label of this Gtk.Button is appended to the regex as a character class.
"""
self.regex_str += "[" + gtk_element_editor.get_text_from_button(button).lower() + "]"
self.regex_obj = re.compile(self.regex_str)
gtk_element_editor.set_listbox_filter(self.user_list, self.user_filter)
self.filter_clear_button.show()
def event_filter_clear(self, *_):
"""
        Resets the filter.
        Should be called on the filter clear button click.
"""
self.regex_str = ""
self.regex_obj = None
gtk_element_editor.set_listbox_filter(self.user_list, self.user_filter)
self.filter_clear_button.hide()
def event_jmp_edit_user(self, *_, new=False):
"""
Switches current window to profile editing window.
"""
self.creating_new_user = new
if not new:
self.user_to_edit = self.selected_user
else:
self.user_to_edit = User()
self.window_history.append(self.actual_window)
self.actual_window.hide()
self.actual_window = window_creator.create_window_edit_profile(self)
def event_jmp_new_user(self, *_):
"""
Opens window for creating new user.
"""
self.event_jmp_edit_user(new=True)
def event_jmp_new_food(self, *_):
"""
Opens window for creating new food.
"""
self.event_jmp_edit_food(new=True)
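
# A minimal standalone sketch of the "#s:" label convention consumed by
# register_dynamic_font above: a trailing "#s:<factor>" in a widget label
# selects a per-widget font scale. parse_scale_suffix is a hypothetical helper
# written only for illustration; the real parsing lives inline in
# register_dynamic_font.
def parse_scale_suffix(label, default=1.0):
    """Return (visible_label, scale) for labels like 'OK#s:1.5'."""
    marker = label.find("#s:")
    if marker < 0:
        return label, default
    try:
        scale = float(label[marker + 3:])
    except ValueError:
        # Malformed factor: keep the default scale but still strip the suffix,
        # mirroring the behaviour of register_dynamic_font.
        scale = default
    return label[:marker], scale

# parse_scale_suffix("OK#s:1.5") -> ("OK", 1.5)
# parse_scale_suffix("Cancel")   -> ("Cancel", 1.0)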
# --- end of sortimentGUI/window_handler.py (repo: peto2006/sortiment-frontent, license: MIT) ---
import time
import testtools
from tempest.api.identity import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
CONF = config.CONF
class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
@classmethod
def skip_checks(cls):
super(UsersV3TestJSON, cls).skip_checks()
if CONF.identity_feature_enabled.immutable_user_source:
raise cls.skipException('Skipped because environment has an '
'immutable user source and solely '
'provides read-only access to users.')
@decorators.idempotent_id('b537d090-afb9-4519-b95d-270b0708e87e')
def test_user_update(self):
# Test case to check if updating of user attributes is successful.
# Creating first user
u_name = data_utils.rand_name('user')
u_desc = u_name + 'description'
u_email = u_name + '@testmail.tm'
u_password = data_utils.rand_password()
user = self.users_client.create_user(
name=u_name, description=u_desc, password=u_password,
email=u_email, enabled=False)['user']
# Delete the User at the end of this method
self.addCleanup(self.users_client.delete_user, user['id'])
        # Creating a second project for the update
project = self.setup_test_project()
# Updating user details with new values
update_kwargs = {'name': data_utils.rand_name('user2'),
'description': data_utils.rand_name('desc2'),
'project_id': project['id'],
'email': 'user2@testmail.tm',
'enabled': False}
updated_user = self.users_client.update_user(
user['id'], **update_kwargs)['user']
for field in update_kwargs:
self.assertEqual(update_kwargs[field], updated_user[field])
# GET by id after updating
new_user_get = self.users_client.show_user(user['id'])['user']
        # Assert response body of GET after the update
for field in update_kwargs:
self.assertEqual(update_kwargs[field], new_user_get[field])
@decorators.idempotent_id('2d223a0e-e457-4a70-9fb1-febe027a0ff9')
def test_update_user_password(self):
        # Creating a user to check the password update
u_name = data_utils.rand_name('user')
original_password = data_utils.rand_password()
user = self.users_client.create_user(
name=u_name, password=original_password)['user']
        # Delete the user at the end of all test methods
self.addCleanup(self.users_client.delete_user, user['id'])
# Update user with new password
new_password = data_utils.rand_password()
self.users_client.update_user_password(
user['id'], password=new_password,
original_password=original_password)
# NOTE(morganfainberg): Fernet tokens are not subsecond aware and
# Keystone should only be precise to the second. Sleep to ensure
# we are passing the second boundary.
time.sleep(1)
resp = self.token.auth(user_id=user['id'],
password=new_password).response
subject_token = resp['x-subject-token']
# Perform GET Token to verify and confirm password is updated
token_details = self.client.show_token(subject_token)['token']
self.assertEqual(token_details['user']['id'], user['id'])
self.assertEqual(token_details['user']['name'], u_name)
@decorators.idempotent_id('a831e70c-e35b-430b-92ed-81ebbc5437b8')
def test_list_user_projects(self):
        # List the projects that a user has access to
assigned_project_ids = list()
fetched_project_ids = list()
u_project = self.setup_test_project()
# Create a user.
u_name = data_utils.rand_name('user')
u_desc = u_name + 'description'
u_email = u_name + '@testmail.tm'
u_password = data_utils.rand_password()
user_body = self.users_client.create_user(
name=u_name, description=u_desc, password=u_password,
email=u_email, enabled=False, project_id=u_project['id'])['user']
# Delete the User at the end of this method
self.addCleanup(self.users_client.delete_user, user_body['id'])
# Creating Role
role_body = self.setup_test_role()
user = self.users_client.show_user(user_body['id'])['user']
role = self.roles_client.show_role(role_body['id'])['role']
for _ in range(2):
# Creating project so as to assign role
project_body = self.setup_test_project()
project = self.projects_client.show_project(
project_body['id'])['project']
# Assigning roles to user on project
self.roles_client.create_user_role_on_project(project['id'],
user['id'],
role['id'])
assigned_project_ids.append(project['id'])
body = self.users_client.list_user_projects(user['id'])['projects']
for i in body:
fetched_project_ids.append(i['id'])
# verifying the project ids in list
missing_projects =\
[p for p in assigned_project_ids
if p not in fetched_project_ids]
self.assertEmpty(missing_projects,
"Failed to find project %s in fetched list" %
', '.join(m_project for m_project
in missing_projects))
@decorators.idempotent_id('c10dcd90-461d-4b16-8e23-4eb836c00644')
def test_get_user(self):
# Get a user detail
user = self.setup_test_user()
fetched_user = self.users_client.show_user(user['id'])['user']
self.assertEqual(user['id'], fetched_user['id'])
@testtools.skipUnless(CONF.identity_feature_enabled.security_compliance,
'Security compliance not available.')
@decorators.idempotent_id('568cd46c-ee6c-4ab4-a33a-d3791931979e')
def test_password_history_not_enforced_in_admin_reset(self):
old_password = self.os_primary.credentials.password
user_id = self.os_primary.credentials.user_id
new_password = data_utils.rand_password()
self.users_client.update_user(user_id, password=new_password)
# To be safe, we add this cleanup to restore the original password in
# case something goes wrong before it is restored later.
self.addCleanup(
self.users_client.update_user, user_id, password=old_password)
# Check authorization with new password
self.token.auth(user_id=user_id, password=new_password)
if CONF.identity.user_unique_last_password_count > 1:
# The password history is not enforced via the admin reset route.
# We can set the same password.
self.users_client.update_user(user_id, password=new_password)
# Restore original password
self.users_client.update_user(user_id, password=old_password)
# Check authorization with old password
self.token.auth(user_id=user_id, password=old_password)
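
# Illustration (not part of the upstream suite) of the second-boundary concern
# noted in test_update_user_password: timestamps compared at whole-second
# precision cannot distinguish two events within the same second, so sleeping
# one full second guarantees the follow-up auth lands in a later second:
#
#   t0 = int(time.time())
#   time.sleep(1)
#   assert int(time.time()) > t0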
# --- end of tempest/api/identity/admin/v3/test_users.py (repo: masayukig/tempest, license: Apache-2.0) ---
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('daiquiri_jobs', '0006_owner_blank_true'),
]
operations = [
migrations.AddField(
model_name='job',
name='creation_time',
field=models.DateTimeField(blank=True, null=True),
),
]
# --- end of daiquiri/jobs/migrations/0007_creation_time.py (repo: aipescience/django-daiquiri, license: Apache-2.0) ---
"""A dummy backend for use in tests.
This backend implements the backend API in the simplest way possible. It is
used in tests of the frontends.
"""
from __future__ import unicode_literals
import pykka
from mopidy import backend
from mopidy.models import Playlist, Ref, SearchResult
def create_dummy_backend_proxy(config=None, audio=None):
return DummyBackend.start(config=config, audio=audio).proxy()
class DummyBackend(pykka.ThreadingActor, backend.Backend):
def __init__(self, config, audio):
super(DummyBackend, self).__init__()
self.library = DummyLibraryProvider(backend=self)
self.playback = DummyPlaybackProvider(audio=audio, backend=self)
self.playlists = DummyPlaylistsProvider(backend=self)
self.uri_schemes = ['dummy']
class DummyLibraryProvider(backend.LibraryProvider):
root_directory = Ref.directory(uri='dummy:/', name='dummy')
def __init__(self, *args, **kwargs):
super(DummyLibraryProvider, self).__init__(*args, **kwargs)
self.dummy_library = []
self.dummy_browse_result = {}
self.dummy_find_exact_result = SearchResult()
self.dummy_search_result = SearchResult()
def browse(self, path):
return self.dummy_browse_result.get(path, [])
def find_exact(self, **query):
return self.dummy_find_exact_result
def lookup(self, uri):
return filter(lambda t: uri == t.uri, self.dummy_library)
def refresh(self, uri=None):
pass
def search(self, **query):
return self.dummy_search_result
class DummyPlaybackProvider(backend.PlaybackProvider):
def __init__(self, *args, **kwargs):
super(DummyPlaybackProvider, self).__init__(*args, **kwargs)
self._time_position = 0
def pause(self):
return True
def play(self, track):
"""Pass a track with URI 'dummy:error' to force failure"""
self._time_position = 0
return track.uri != 'dummy:error'
def resume(self):
return True
def seek(self, time_position):
self._time_position = time_position
return True
def stop(self):
return True
def get_time_position(self):
return self._time_position
class DummyPlaylistsProvider(backend.PlaylistsProvider):
def create(self, name):
playlist = Playlist(name=name, uri='dummy:%s' % name)
self._playlists.append(playlist)
return playlist
def delete(self, uri):
playlist = self.lookup(uri)
if playlist:
self._playlists.remove(playlist)
def lookup(self, uri):
for playlist in self._playlists:
if playlist.uri == uri:
return playlist
def refresh(self):
pass
def save(self, playlist):
old_playlist = self.lookup(playlist.uri)
if old_playlist is not None:
index = self._playlists.index(old_playlist)
self._playlists[index] = playlist
else:
self._playlists.append(playlist)
return playlist
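
# A minimal usage sketch (assumed, not part of the module): driving the dummy
# backend through its pykka proxy. Attribute access on a pykka proxy returns a
# future, hence the .get() call; stopping the actor_ref shuts it down again.
if __name__ == '__main__':
    proxy = create_dummy_backend_proxy()
    print(proxy.uri_schemes.get())  # -> ['dummy']
    proxy.actor_ref.stop()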
# --- end of mopidy/backend/dummy.py (repo: abarisain/mopidy, license: Apache-2.0) ---
from __future__ import absolute_import, unicode_literals
import hashlib
import os
import random
import re
import string
import tempfile
import time
import warnings
import pickle
from django.conf import settings
from django.core import management
from django.core.cache import get_cache
from django.core.cache.backends.base import (CacheKeyWarning,
InvalidCacheBackendError)
from django.db import router, transaction
from django.core.cache.utils import make_template_fragment_key
from django.http import (HttpResponse, HttpRequest, StreamingHttpResponse,
QueryDict)
from django.middleware.cache import (FetchFromCacheMiddleware,
UpdateCacheMiddleware, CacheMiddleware)
from django.template import Template
from django.template.response import TemplateResponse
from django.test import TestCase, TransactionTestCase, RequestFactory
from django.test.utils import override_settings, IgnorePendingDeprecationWarningsMixin
from django.utils import six, timezone, translation, unittest
from django.utils.cache import (patch_vary_headers, get_cache_key,
learn_cache_key, patch_cache_control, patch_response_headers)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class DummyCacheTests(unittest.TestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has different test requirements.
backend_name = 'django.core.cache.backends.dummy.DummyCache'
def setUp(self):
self.cache = get_cache(self.backend_name)
def test_simple(self):
"Dummy cache backend ignores cache set calls"
self.cache.set("key", "value")
self.assertEqual(self.cache.get("key"), None)
def test_add(self):
"Add doesn't do anything in dummy cache backend"
self.cache.add("addkey1", "value")
result = self.cache.add("addkey1", "newvalue")
self.assertEqual(result, True)
self.assertEqual(self.cache.get("addkey1"), None)
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertEqual(self.cache.get("does_not_exist"), None)
self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
self.cache.set('a', 'a')
self.cache.set('b', 'b')
self.cache.set('c', 'c')
self.cache.set('d', 'd')
self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
self.cache.set("key1", "spam")
self.cache.set("key2", "eggs")
self.assertEqual(self.cache.get("key1"), None)
self.cache.delete("key1")
self.assertEqual(self.cache.get("key1"), None)
self.assertEqual(self.cache.get("key2"), None)
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
self.cache.set("hello1", "goodbye1")
self.assertEqual(self.cache.has_key("hello1"), False)
self.assertEqual(self.cache.has_key("goodbye1"), False)
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
self.cache.set("hello2", "goodbye2")
self.assertEqual("hello2" in self.cache, False)
self.assertEqual("goodbye2" in self.cache, False)
def test_incr(self):
"Dummy cache values can't be incremented"
self.cache.set('answer', 42)
self.assertRaises(ValueError, self.cache.incr, 'answer')
self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
self.cache.set('answer', 42)
self.assertRaises(ValueError, self.cache.decr, 'answer')
self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string' : 'this is a string',
'int' : 42,
'list' : [1, 2, 3, 4],
'tuple' : (1, 2, 3, 4),
'dict' : {'A': 1, 'B' : 2},
'function' : f,
'class' : C,
}
self.cache.set("stuff", stuff)
self.assertEqual(self.cache.get("stuff"), None)
def test_expiration(self):
"Expiration has no effect on the dummy cache"
self.cache.set('expire1', 'very quickly', 1)
self.cache.set('expire2', 'very quickly', 1)
self.cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertEqual(self.cache.get("expire1"), None)
self.cache.add("expire2", "newvalue")
self.assertEqual(self.cache.get("expire2"), None)
self.assertEqual(self.cache.has_key("expire3"), False)
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x' : 1 }
}
for (key, value) in stuff.items():
self.cache.set(key, value)
self.assertEqual(self.cache.get(key), None)
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
self.cache.set_many({'a': 1, 'b': 2})
self.cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
self.cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
self.cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
self.cache.set('answer', 42)
self.assertRaises(ValueError, self.cache.incr_version, 'answer')
self.assertRaises(ValueError, self.cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
self.cache.set('answer', 42)
self.assertRaises(ValueError, self.cache.decr_version, 'answer')
self.assertRaises(ValueError, self.cache.decr_version, 'does_not_exist')
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def _get_request_cache(self, path):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
request.path = request.path_info = path
request._cache_update_cache = True
request.method = 'GET'
return request
def test_simple(self):
# Simple cache set/get works
self.cache.set("key", "value")
self.assertEqual(self.cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
self.cache.add("addkey1", "value")
result = self.cache.add("addkey1", "newvalue")
self.assertEqual(result, False)
self.assertEqual(self.cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
self.cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(self.prefix_cache.has_key('somekey'))
self.prefix_cache.set('somekey', 'value2')
self.assertEqual(self.cache.get('somekey'), 'value')
self.assertEqual(self.prefix_cache.get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertEqual(self.cache.get("does_not_exist"), None)
self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
self.cache.set('a', 'a')
self.cache.set('b', 'b')
self.cache.set('c', 'c')
self.cache.set('d', 'd')
self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {'a' : 'a', 'c' : 'c', 'd' : 'd'})
self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {'a' : 'a', 'b' : 'b'})
def test_delete(self):
# Cache keys can be deleted
self.cache.set("key1", "spam")
self.cache.set("key2", "eggs")
self.assertEqual(self.cache.get("key1"), "spam")
self.cache.delete("key1")
self.assertEqual(self.cache.get("key1"), None)
self.assertEqual(self.cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
self.cache.set("hello1", "goodbye1")
self.assertEqual(self.cache.has_key("hello1"), True)
self.assertEqual(self.cache.has_key("goodbye1"), False)
def test_in(self):
# The in operator can be used to inspect cache contents
self.cache.set("hello2", "goodbye2")
self.assertEqual("hello2" in self.cache, True)
self.assertEqual("goodbye2" in self.cache, False)
def test_incr(self):
# Cache values can be incremented
self.cache.set('answer', 41)
self.assertEqual(self.cache.incr('answer'), 42)
self.assertEqual(self.cache.get('answer'), 42)
self.assertEqual(self.cache.incr('answer', 10), 52)
self.assertEqual(self.cache.get('answer'), 52)
self.assertEqual(self.cache.incr('answer', -10), 42)
self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
self.cache.set('answer', 43)
self.assertEqual(self.cache.decr('answer'), 42)
self.assertEqual(self.cache.get('answer'), 42)
self.assertEqual(self.cache.decr('answer', 10), 32)
self.assertEqual(self.cache.get('answer'), 32)
self.assertEqual(self.cache.decr('answer', -10), 42)
self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(self.cache, 'close'))
self.cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string' : 'this is a string',
'int' : 42,
'list' : [1, 2, 3, 4],
'tuple' : (1, 2, 3, 4),
'dict' : {'A': 1, 'B' : 2},
'function' : f,
'class' : C,
}
self.cache.set("stuff", stuff)
self.assertEqual(self.cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
self.cache.set('question', my_poll)
cached_poll = self.cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
self.cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cached_polls = self.cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
self.cache.set('expire1', 'very quickly', 1)
self.cache.set('expire2', 'very quickly', 1)
self.cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertEqual(self.cache.get("expire1"), None)
self.cache.add("expire2", "newvalue")
self.assertEqual(self.cache.get("expire2"), "newvalue")
self.assertEqual(self.cache.has_key("expire3"), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x' : 1 }
}
# Test `set`
for (key, value) in stuff.items():
self.cache.set(key, value)
self.assertEqual(self.cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
self.cache.delete(key)
self.cache.add(key, value)
self.assertEqual(self.cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
self.cache.delete(key)
self.cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(self.cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
self.cache.set('binary1', compressed_value)
compressed_result = self.cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
self.cache.add('binary1-add', compressed_value)
compressed_result = self.cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
self.cache.set_many({'binary1-set_many': compressed_value})
compressed_result = self.cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
self.cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(self.cache.get("key1"), "spam")
self.assertEqual(self.cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
self.cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertEqual(self.cache.get("key1"), None)
self.assertEqual(self.cache.get("key2"), None)
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
self.cache.set("key1", "spam")
self.cache.set("key2", "eggs")
self.cache.set("key3", "ham")
self.cache.delete_many(["key1", "key2"])
self.assertEqual(self.cache.get("key1"), None)
self.assertEqual(self.cache.get("key2"), None)
self.assertEqual(self.cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
self.cache.set("key1", "spam")
self.cache.set("key2", "eggs")
self.cache.clear()
self.assertEqual(self.cache.get("key1"), None)
self.assertEqual(self.cache.get("key2"), None)
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
self.cache.set('key1', 'eggs', 60*60*24*30 + 1) #30 days + 1 second
self.assertEqual(self.cache.get('key1'), 'eggs')
self.cache.add('key2', 'ham', 60*60*24*30 + 1)
self.assertEqual(self.cache.get('key2'), 'ham')
self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60*60*24*30 + 1)
self.assertEqual(self.cache.get('key3'), 'sausage')
self.assertEqual(self.cache.get('key4'), 'lobster bisque')
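    # Background for the test above: memcached treats timeouts up to
    # 60 * 60 * 24 * 30 == 2592000 seconds as relative offsets, while anything
    # larger is read as an absolute Unix timestamp. A raw 2592001 would thus
    # point at January 1970 (already expired) unless the backend converts it,
    # which is what test_long_timeout verifies.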
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
self.cache.set('key1', 'eggs', None)
self.assertEqual(self.cache.get('key1'), 'eggs')
self.cache.add('key2', 'ham', None)
self.assertEqual(self.cache.get('key2'), 'ham')
self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(self.cache.get('key3'), 'sausage')
self.assertEqual(self.cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
        '''
        Passing in zero into timeout results in a value that is not cached
        '''
self.cache.set('key1', 'eggs', 0)
self.assertEqual(self.cache.get('key1'), None)
self.cache.add('key2', 'ham', 0)
self.assertEqual(self.cache.get('key2'), None)
self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertEqual(self.cache.get('key3'), None)
self.assertEqual(self.cache.get('key4'), None)
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
self.cache.set("key1", "spam", 100.2)
self.assertEqual(self.cache.get("key1"), "spam")
def perform_cull_test(self, initial_count, final_count):
"""This is implemented as a utility method, because only some of the backends
implement culling. The culling algorithm also varies slightly, so the final
number of entries will vary between backends"""
# Create initial cache key entries. This will overflow the cache, causing a cull
for i in range(1, initial_count):
self.cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if self.cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = self.cache.key_func
self.cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
self.cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
self.cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
self.cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
self.cache.set('answer1', 42)
self.assertEqual(self.cache.get('answer1'), 42)
self.assertEqual(self.cache.get('answer1', version=1), 42)
self.assertEqual(self.cache.get('answer1', version=2), None)
self.assertEqual(self.v2_cache.get('answer1'), None)
self.assertEqual(self.v2_cache.get('answer1', version=1), 42)
self.assertEqual(self.v2_cache.get('answer1', version=2), None)
# set, default version = 1, but manually override version = 2
self.cache.set('answer2', 42, version=2)
self.assertEqual(self.cache.get('answer2'), None)
self.assertEqual(self.cache.get('answer2', version=1), None)
self.assertEqual(self.cache.get('answer2', version=2), 42)
self.assertEqual(self.v2_cache.get('answer2'), 42)
self.assertEqual(self.v2_cache.get('answer2', version=1), None)
self.assertEqual(self.v2_cache.get('answer2', version=2), 42)
# v2 set, using default version = 2
self.v2_cache.set('answer3', 42)
self.assertEqual(self.cache.get('answer3'), None)
self.assertEqual(self.cache.get('answer3', version=1), None)
self.assertEqual(self.cache.get('answer3', version=2), 42)
self.assertEqual(self.v2_cache.get('answer3'), 42)
self.assertEqual(self.v2_cache.get('answer3', version=1), None)
self.assertEqual(self.v2_cache.get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
self.v2_cache.set('answer4', 42, version=1)
self.assertEqual(self.cache.get('answer4'), 42)
self.assertEqual(self.cache.get('answer4', version=1), 42)
self.assertEqual(self.cache.get('answer4', version=2), None)
self.assertEqual(self.v2_cache.get('answer4'), None)
self.assertEqual(self.v2_cache.get('answer4', version=1), 42)
self.assertEqual(self.v2_cache.get('answer4', version=2), None)
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
self.cache.add('answer1', 42, version=2)
self.assertEqual(self.cache.get('answer1', version=1), None)
self.assertEqual(self.cache.get('answer1', version=2), 42)
self.cache.add('answer1', 37, version=2)
self.assertEqual(self.cache.get('answer1', version=1), None)
self.assertEqual(self.cache.get('answer1', version=2), 42)
self.cache.add('answer1', 37, version=1)
self.assertEqual(self.cache.get('answer1', version=1), 37)
self.assertEqual(self.cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
self.v2_cache.add('answer2', 42)
self.assertEqual(self.cache.get('answer2', version=1), None)
self.assertEqual(self.cache.get('answer2', version=2), 42)
self.v2_cache.add('answer2', 37)
self.assertEqual(self.cache.get('answer2', version=1), None)
self.assertEqual(self.cache.get('answer2', version=2), 42)
self.v2_cache.add('answer2', 37, version=1)
self.assertEqual(self.cache.get('answer2', version=1), 37)
self.assertEqual(self.cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
self.v2_cache.add('answer3', 42, version=1)
self.assertEqual(self.cache.get('answer3', version=1), 42)
self.assertEqual(self.cache.get('answer3', version=2), None)
self.v2_cache.add('answer3', 37, version=1)
self.assertEqual(self.cache.get('answer3', version=1), 42)
self.assertEqual(self.cache.get('answer3', version=2), None)
self.v2_cache.add('answer3', 37)
self.assertEqual(self.cache.get('answer3', version=1), 42)
self.assertEqual(self.cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
self.cache.set('answer1', 42)
# has_key
self.assertTrue(self.cache.has_key('answer1'))
self.assertTrue(self.cache.has_key('answer1', version=1))
self.assertFalse(self.cache.has_key('answer1', version=2))
self.assertFalse(self.v2_cache.has_key('answer1'))
self.assertTrue(self.v2_cache.has_key('answer1', version=1))
self.assertFalse(self.v2_cache.has_key('answer1', version=2))
def test_cache_versioning_delete(self):
self.cache.set('answer1', 37, version=1)
self.cache.set('answer1', 42, version=2)
self.cache.delete('answer1')
self.assertEqual(self.cache.get('answer1', version=1), None)
self.assertEqual(self.cache.get('answer1', version=2), 42)
self.cache.set('answer2', 37, version=1)
self.cache.set('answer2', 42, version=2)
self.cache.delete('answer2', version=2)
self.assertEqual(self.cache.get('answer2', version=1), 37)
self.assertEqual(self.cache.get('answer2', version=2), None)
self.cache.set('answer3', 37, version=1)
self.cache.set('answer3', 42, version=2)
self.v2_cache.delete('answer3')
self.assertEqual(self.cache.get('answer3', version=1), 37)
self.assertEqual(self.cache.get('answer3', version=2), None)
self.cache.set('answer4', 37, version=1)
self.cache.set('answer4', 42, version=2)
self.v2_cache.delete('answer4', version=1)
self.assertEqual(self.cache.get('answer4', version=1), None)
self.assertEqual(self.cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
self.cache.set('answer1', 37, version=1)
self.cache.set('answer1', 42, version=2)
self.cache.incr('answer1')
self.assertEqual(self.cache.get('answer1', version=1), 38)
self.assertEqual(self.cache.get('answer1', version=2), 42)
self.cache.decr('answer1')
self.assertEqual(self.cache.get('answer1', version=1), 37)
self.assertEqual(self.cache.get('answer1', version=2), 42)
self.cache.set('answer2', 37, version=1)
self.cache.set('answer2', 42, version=2)
self.cache.incr('answer2', version=2)
self.assertEqual(self.cache.get('answer2', version=1), 37)
self.assertEqual(self.cache.get('answer2', version=2), 43)
self.cache.decr('answer2', version=2)
self.assertEqual(self.cache.get('answer2', version=1), 37)
self.assertEqual(self.cache.get('answer2', version=2), 42)
self.cache.set('answer3', 37, version=1)
self.cache.set('answer3', 42, version=2)
self.v2_cache.incr('answer3')
self.assertEqual(self.cache.get('answer3', version=1), 37)
self.assertEqual(self.cache.get('answer3', version=2), 43)
self.v2_cache.decr('answer3')
self.assertEqual(self.cache.get('answer3', version=1), 37)
self.assertEqual(self.cache.get('answer3', version=2), 42)
self.cache.set('answer4', 37, version=1)
self.cache.set('answer4', 42, version=2)
self.v2_cache.incr('answer4', version=1)
self.assertEqual(self.cache.get('answer4', version=1), 38)
self.assertEqual(self.cache.get('answer4', version=2), 42)
self.v2_cache.decr('answer4', version=1)
self.assertEqual(self.cache.get('answer4', version=1), 37)
self.assertEqual(self.cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
self.cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertEqual(self.cache.get_many(['ford1','arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertEqual(self.cache.get_many(['ford1','arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertEqual(self.cache.get_many(['ford1','arthur1'], version=2), {})
self.assertEqual(self.v2_cache.get_many(['ford1','arthur1']), {})
self.assertEqual(self.v2_cache.get_many(['ford1','arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertEqual(self.v2_cache.get_many(['ford1','arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
self.cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertEqual(self.cache.get_many(['ford2','arthur2']), {})
self.assertEqual(self.cache.get_many(['ford2','arthur2'], version=1), {})
self.assertEqual(self.cache.get_many(['ford2','arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertEqual(self.v2_cache.get_many(['ford2','arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertEqual(self.v2_cache.get_many(['ford2','arthur2'], version=1), {})
self.assertEqual(self.v2_cache.get_many(['ford2','arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
self.v2_cache.set_many({'ford3': 37, 'arthur3': 42})
self.assertEqual(self.cache.get_many(['ford3','arthur3']), {})
self.assertEqual(self.cache.get_many(['ford3','arthur3'], version=1), {})
self.assertEqual(self.cache.get_many(['ford3','arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertEqual(self.v2_cache.get_many(['ford3','arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertEqual(self.v2_cache.get_many(['ford3','arthur3'], version=1), {})
self.assertEqual(self.v2_cache.get_many(['ford3','arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
self.v2_cache.set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertEqual(self.cache.get_many(['ford4','arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertEqual(self.cache.get_many(['ford4','arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertEqual(self.cache.get_many(['ford4','arthur4'], version=2), {})
self.assertEqual(self.v2_cache.get_many(['ford4','arthur4']), {})
self.assertEqual(self.v2_cache.get_many(['ford4','arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertEqual(self.v2_cache.get_many(['ford4','arthur4'], version=2), {})
def test_incr_version(self):
self.cache.set('answer', 42, version=2)
self.assertEqual(self.cache.get('answer'), None)
self.assertEqual(self.cache.get('answer', version=1), None)
self.assertEqual(self.cache.get('answer', version=2), 42)
self.assertEqual(self.cache.get('answer', version=3), None)
self.assertEqual(self.cache.incr_version('answer', version=2), 3)
self.assertEqual(self.cache.get('answer'), None)
self.assertEqual(self.cache.get('answer', version=1), None)
self.assertEqual(self.cache.get('answer', version=2), None)
self.assertEqual(self.cache.get('answer', version=3), 42)
self.v2_cache.set('answer2', 42)
self.assertEqual(self.v2_cache.get('answer2'), 42)
self.assertEqual(self.v2_cache.get('answer2', version=1), None)
self.assertEqual(self.v2_cache.get('answer2', version=2), 42)
self.assertEqual(self.v2_cache.get('answer2', version=3), None)
self.assertEqual(self.v2_cache.incr_version('answer2'), 3)
self.assertEqual(self.v2_cache.get('answer2'), None)
self.assertEqual(self.v2_cache.get('answer2', version=1), None)
self.assertEqual(self.v2_cache.get('answer2', version=2), None)
self.assertEqual(self.v2_cache.get('answer2', version=3), 42)
self.assertRaises(ValueError, self.cache.incr_version, 'does_not_exist')
def test_decr_version(self):
self.cache.set('answer', 42, version=2)
self.assertEqual(self.cache.get('answer'), None)
self.assertEqual(self.cache.get('answer', version=1), None)
self.assertEqual(self.cache.get('answer', version=2), 42)
self.assertEqual(self.cache.decr_version('answer', version=2), 1)
self.assertEqual(self.cache.get('answer'), 42)
self.assertEqual(self.cache.get('answer', version=1), 42)
self.assertEqual(self.cache.get('answer', version=2), None)
self.v2_cache.set('answer2', 42)
self.assertEqual(self.v2_cache.get('answer2'), 42)
self.assertEqual(self.v2_cache.get('answer2', version=1), None)
self.assertEqual(self.v2_cache.get('answer2', version=2), 42)
self.assertEqual(self.v2_cache.decr_version('answer2'), 1)
self.assertEqual(self.v2_cache.get('answer2'), None)
self.assertEqual(self.v2_cache.get('answer2', version=1), 42)
self.assertEqual(self.v2_cache.get('answer2', version=2), None)
self.assertRaises(ValueError, self.cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
self.cache.set('answer1', 42)
self.assertEqual(self.cache.get('answer1'), 42)
self.assertEqual(self.custom_key_cache.get('answer1'), None)
self.assertEqual(self.custom_key_cache2.get('answer1'), None)
self.custom_key_cache.set('answer2', 42)
self.assertEqual(self.cache.get('answer2'), None)
self.assertEqual(self.custom_key_cache.get('answer2'), 42)
self.assertEqual(self.custom_key_cache2.get('answer2'), 42)
def test_cache_write_unpickable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = self.cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = self.cache
request = self._get_request_cache('/cache/test')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data, None)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
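# Illustrative result (editorial note), given the function above:
#   custom_key_func('answer', 'prefix', 1) -> 'CUSTOM-prefix-1-answer'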
class DBCacheTests(BaseCacheTests, TransactionTestCase):
backend_name = 'django.core.cache.backends.db.DatabaseCache'
def setUp(self):
# Spaces are used in the table name to ensure quoting/escaping is working
self._table_name = 'test cache table'
management.call_command('createcachetable', self._table_name, verbosity=0, interactive=False)
self.cache = get_cache(self.backend_name, LOCATION=self._table_name, OPTIONS={'MAX_ENTRIES': 30})
self.prefix_cache = get_cache(self.backend_name, LOCATION=self._table_name, KEY_PREFIX='cacheprefix')
self.v2_cache = get_cache(self.backend_name, LOCATION=self._table_name, VERSION=2)
self.custom_key_cache = get_cache(self.backend_name, LOCATION=self._table_name, KEY_FUNCTION=custom_key_func)
self.custom_key_cache2 = get_cache(self.backend_name, LOCATION=self._table_name, KEY_FUNCTION='cache.tests.custom_key_func')
def tearDown(self):
from django.db import connection
cursor = connection.cursor()
cursor.execute('DROP TABLE %s' % connection.ops.quote_name(self._table_name))
connection.commit()
def test_cull(self):
self.perform_cull_test(50, 29)
def test_zero_cull(self):
self.cache = get_cache(self.backend_name, LOCATION=self._table_name, OPTIONS={'MAX_ENTRIES': 30, 'CULL_FREQUENCY': 0})
self.perform_cull_test(50, 18)
def test_old_initialization(self):
self.cache = get_cache('db://%s?max_entries=30&cull_frequency=0' % self._table_name)
self.perform_cull_test(50, 18)
def test_second_call_doesnt_crash(self):
with six.assertRaisesRegex(self, management.CommandError,
"Cache table 'test cache table' could not be created"):
management.call_command(
'createcachetable',
self._table_name,
verbosity=0,
interactive=False
)
def test_clear_commits_transaction(self):
# Ensure the database transaction is committed (#19896)
self.cache.set("key1", "spam")
self.cache.clear()
transaction.rollback()
self.assertEqual(self.cache.get("key1"), None)
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def allow_syncdb(self, db, model):
if model._meta.app_label == 'django_cache':
return db == 'other'
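# For reference (editorial note): outside of tests a router like this would
# normally be activated via settings, e.g.
#   DATABASE_ROUTERS = ['path.to.DBCacheRouter']
# The test below installs it directly on django.db.router instead.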
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
def test_createcachetable_observes_database_router(self):
old_routers = router.routers
try:
router.routers = [DBCacheRouter()]
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable', 'cache_table',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# one query is used to create the table and another one the index
with self.assertNumQueries(2, using='other'):
management.call_command('createcachetable', 'cache_table',
database='other',
verbosity=0, interactive=False)
finally:
router.routers = old_routers
class LocMemCacheTests(unittest.TestCase, BaseCacheTests):
backend_name = 'django.core.cache.backends.locmem.LocMemCache'
def setUp(self):
self.cache = get_cache(self.backend_name, OPTIONS={'MAX_ENTRIES': 30})
self.prefix_cache = get_cache(self.backend_name, KEY_PREFIX='cacheprefix')
self.v2_cache = get_cache(self.backend_name, VERSION=2)
self.custom_key_cache = get_cache(self.backend_name, OPTIONS={'MAX_ENTRIES': 30}, KEY_FUNCTION=custom_key_func)
self.custom_key_cache2 = get_cache(self.backend_name, OPTIONS={'MAX_ENTRIES': 30}, KEY_FUNCTION='cache.tests.custom_key_func')
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
self.prefix_cache._cache = self.cache._cache
self.prefix_cache._expire_info = self.cache._expire_info
self.v2_cache._cache = self.cache._cache
self.v2_cache._expire_info = self.cache._expire_info
self.custom_key_cache._cache = self.cache._cache
self.custom_key_cache._expire_info = self.cache._expire_info
self.custom_key_cache2._cache = self.cache._cache
self.custom_key_cache2._expire_info = self.cache._expire_info
def tearDown(self):
self.cache.clear()
def test_cull(self):
self.perform_cull_test(50, 29)
def test_zero_cull(self):
self.cache = get_cache(self.backend_name, OPTIONS={'MAX_ENTRIES': 30, 'CULL_FREQUENCY': 0})
self.perform_cull_test(50, 19)
def test_old_initialization(self):
self.cache = get_cache('locmem://?max_entries=30&cull_frequency=0')
self.perform_cull_test(50, 19)
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
mirror_cache = get_cache(self.backend_name)
other_cache = get_cache(self.backend_name, LOCATION='other')
self.cache.set('value1', 42)
self.assertEqual(mirror_cache.get('value1'), 42)
self.assertEqual(other_cache.get('value1'), None)
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = self.cache.make_key(key)
self.cache.set(key, 1, timeout=self.cache.default_timeout*10)
expire = self.cache._expire_info[_key]
self.cache.incr(key)
self.assertEqual(expire, self.cache._expire_info[_key])
self.cache.decr(key)
self.assertEqual(expire, self.cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
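# For reference, a minimal settings entry of that shape might look like the
# following sketch (hypothetical host and port, not taken from this suite):
#   CACHES = {
#       'memcached': {
#           'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#           'LOCATION': '127.0.0.1:11211',
#       },
#   }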
@unittest.skipUnless(
any(cache['BACKEND'].startswith('django.core.cache.backends.memcached.')
for cache in settings.CACHES.values()),
"memcached not available")
class MemcachedCacheTests(unittest.TestCase, BaseCacheTests):
def setUp(self):
for cache_key, cache in settings.CACHES.items():
if cache['BACKEND'].startswith('django.core.cache.backends.memcached.'):
break
random_prefix = ''.join(random.choice(string.ascii_letters) for x in range(10))
self.cache = get_cache(cache_key)
self.prefix_cache = get_cache(cache_key, KEY_PREFIX=random_prefix)
self.v2_cache = get_cache(cache_key, VERSION=2)
self.custom_key_cache = get_cache(cache_key, KEY_FUNCTION=custom_key_func)
self.custom_key_cache2 = get_cache(cache_key, KEY_FUNCTION='cache.tests.custom_key_func')
def tearDown(self):
self.cache.clear()
def test_invalid_keys(self):
"""
        On memcached, we don't introduce a duplicate key validation
        step (for speed reasons); we just let the memcached API
        library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, self.cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, self.cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
any(cache['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache'
for cache in settings.CACHES.values()),
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache in settings.CACHES.items():
if cache['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(get_cache(cache_key)._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
class FileBasedCacheTests(unittest.TestCase, BaseCacheTests):
"""
Specific test cases for the file-based cache.
"""
backend_name = 'django.core.cache.backends.filebased.FileBasedCache'
def setUp(self):
self.dirname = tempfile.mkdtemp()
self.cache = get_cache(self.backend_name, LOCATION=self.dirname, OPTIONS={'MAX_ENTRIES': 30})
self.prefix_cache = get_cache(self.backend_name, LOCATION=self.dirname, KEY_PREFIX='cacheprefix')
self.v2_cache = get_cache(self.backend_name, LOCATION=self.dirname, VERSION=2)
self.custom_key_cache = get_cache(self.backend_name, LOCATION=self.dirname, KEY_FUNCTION=custom_key_func)
self.custom_key_cache2 = get_cache(self.backend_name, LOCATION=self.dirname, KEY_FUNCTION='cache.tests.custom_key_func')
def tearDown(self):
self.cache.clear()
def test_hashing(self):
"""Test that keys are hashed into subdirectories correctly"""
self.cache.set("foo", "bar")
key = self.cache.make_key("foo")
keyhash = hashlib.md5(key.encode()).hexdigest()
keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:])
self.assertTrue(os.path.exists(keypath))
def test_subdirectory_removal(self):
"""
Make sure that the created subdirectories are correctly removed when empty.
"""
self.cache.set("foo", "bar")
key = self.cache.make_key("foo")
keyhash = hashlib.md5(key.encode()).hexdigest()
keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:])
self.assertTrue(os.path.exists(keypath))
self.cache.delete("foo")
self.assertTrue(not os.path.exists(keypath))
self.assertTrue(not os.path.exists(os.path.dirname(keypath)))
self.assertTrue(not os.path.exists(os.path.dirname(os.path.dirname(keypath))))
def test_cull(self):
self.perform_cull_test(50, 29)
def test_old_initialization(self):
self.cache = get_cache('file://%s?max_entries=30' % self.dirname)
self.perform_cull_test(50, 29)
class CustomCacheKeyValidationTests(unittest.TestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
cache = get_cache('cache.liberal_backend://')
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
class GetCacheTests(unittest.TestCase):
def test_simple(self):
cache = get_cache('locmem://')
from django.core.cache.backends.locmem import LocMemCache
self.assertIsInstance(cache, LocMemCache)
from django.core.cache import cache
self.assertIsInstance(cache, get_cache('default').__class__)
cache = get_cache(
'django.core.cache.backends.dummy.DummyCache', **{'TIMEOUT': 120})
self.assertEqual(cache.default_timeout, 120)
self.assertRaises(InvalidCacheBackendError, get_cache, 'does_not_exist')
def test_close(self):
from django.core import signals
cache = get_cache('cache.closeable_cache.CacheClass')
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.path = '/cache/test/'
self.cache = get_cache('default')
def tearDown(self):
self.cache.clear()
def _get_request(self, path, method='GET'):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
request.method = method
request.path = request.path_info = "/cache/%s" % path
return request
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self._get_request(self.path)
response = HttpResponse()
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
def test_get_cache_key_with_query(self):
request = self._get_request(self.path + '?test=1')
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.bd889c5a59603af44333ed21504db3cd.d41d8cd98f00b204e9800998ecf8427e')
def test_learn_cache_key(self):
request = self._get_request(self.path, 'HEAD')
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private' : True}, set(['private'])),
# Test whether private/public attributes are mutually exclusive
('private', {'private' : True}, set(['private'])),
('private', {'public' : True}, set(['public'])),
('public', {'public' : True}, set(['public'])),
('public', {'private' : True}, set(['private'])),
('must-revalidate,max-age=60,private', {'public' : True}, set(['must-revalidate', 'max-age=60', 'public'])),
('must-revalidate,max-age=60,public', {'private' : True}, set(['must-revalidate', 'max-age=60', 'private'])),
('must-revalidate,max-age=60', {'public' : True}, set(['must-revalidate', 'max-age=60', 'public'])),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.cache = get_cache('default')
def tearDown(self):
self.cache.clear()
def _get_request(self, method):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
request.method = method
request.path = request.path_info = self.path
return request
def _get_request_cache(self, method):
request = self._get_request(method)
request._cache_update_cache = True
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self._get_request_cache('HEAD')
self._set_cache(request, test_content)
request = self._get_request('HEAD')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self._get_request_cache('GET')
self._set_cache(request, test_content)
request = self._get_request('HEAD')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=(
('en', 'English'),
('es', 'Spanish'),
),
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.cache = get_cache('default')
def tearDown(self):
self.cache.clear()
def _get_request(self, method='GET'):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
request.method = method
request.path = request.path_info = self.path
return request
def _get_request_cache(self, query_string=None):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
if query_string:
request.META['QUERY_STRING'] = query_string
request.GET = QueryDict(query_string)
request.path = request.path_info = self.path
request._cache_update_cache = True
request.method = 'GET'
request.session = {}
return request
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self._get_request()
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self._get_request()
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self._get_request()
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self._get_request()
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self._get_request()
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
    def test_cache_key_no_i18n(self):
request = self._get_request()
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self._get_request()
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self._get_request_cache(query_string='foo=bar&other=true')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertEqual(get_cache_data, None)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self._get_request_cache(query_string='foo=bar&somethingelse=true')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data, None)
# i18n tests
en_message ="Hello world!"
es_message ="Hola mundo!"
request = self._get_request_cache()
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self._get_request_cache()
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self._get_request()
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHE_MIDDLEWARE_ANONYMOUS_ONLY=False,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(IgnorePendingDeprecationWarningsMixin, TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = get_cache('default')
self.other_cache = get_cache('other')
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
self.assertEqual(middleware.cache_anonymous_only, False)
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 300) # Timeout value for 'default' cache, i.e. 300
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_anonymous_only, False)
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_anonymous_only=True, cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
self.assertEqual(as_view_decorator_with_custom.cache_anonymous_only, True)
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertEqual(result, None)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertNotEqual(result, None)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertEqual(result, None)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertNotEqual(result, None)
self.assertEqual(result.content, b'Hello World 1')
@override_settings(CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True)
def test_cache_middleware_anonymous_only_wont_cause_session_access(self):
""" The cache middleware shouldn't cause a session access due to
CACHE_MIDDLEWARE_ANONYMOUS_ONLY if nothing else has accessed the
        session. Refs #13283. """
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth.middleware import AuthenticationMiddleware
middleware = CacheMiddleware()
session_middleware = SessionMiddleware()
auth_middleware = AuthenticationMiddleware()
request = self.factory.get('/view_anon/')
# Put the request through the request middleware
session_middleware.process_request(request)
auth_middleware.process_request(request)
result = middleware.process_request(request)
self.assertEqual(result, None)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
session_middleware.process_response(request, response)
response = middleware.process_response(request, response)
self.assertEqual(request.session.accessed, False)
@override_settings(CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True)
def test_cache_middleware_anonymous_only_with_cache_page(self):
"""CACHE_MIDDLEWARE_ANONYMOUS_ONLY should still be effective when used
with the cache_page decorator: the response to a request from an
authenticated user should not be cached."""
request = self.factory.get('/view_anon/')
class MockAuthenticatedUser(object):
def is_authenticated(self):
return True
class MockAccessedSession(object):
accessed = True
request.user = MockAuthenticatedUser()
request.session = MockAccessedSession()
response = cache_page(60)(hello_world_view)(request, '1')
self.assertFalse("Cache-Control" in response)
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
cache = get_cache('default')
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
"""
    Tests various headers with TemplateResponse.
    Most are probably redundant since they manipulate the same object
    anyway, but the ETag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.cache = get_cache('default')
def tearDown(self):
self.cache.clear()
def _get_request(self, path, method='GET'):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
request.method = method
request.path = request.path_info = "/cache/%s" % path
return request
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = TemplateResponse(HttpResponse(), Template("This is a test"))
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self._get_request(self.path)
response = TemplateResponse(HttpResponse(), Template("This is a test"))
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
def test_get_cache_key_with_query(self):
request = self._get_request(self.path + '?test=1')
response = TemplateResponse(HttpResponse(), Template("This is a test"))
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.bd889c5a59603af44333ed21504db3cd.d41d8cd98f00b204e9800998ecf8427e')
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestEtagWithAdmin(TestCase):
# See https://code.djangoproject.com/ticket/16003
urls = "admin_views.urls"
def test_admin(self):
with self.settings(USE_ETAGS=False):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 200)
self.assertFalse(response.has_header('ETag'))
with self.settings(USE_ETAGS=True):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
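        # (Editorial note) The expected values in these tests follow the key
        # recipe 'template.cache.<fragment_name>.<md5 of joined vary_on>';
        # the md5 of the empty string is d41d8cd98f00b204e9800998ecf8427e.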
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
|
{
"content_hash": "c7faaf9d0d4b702ac780cf244a89d4da",
"timestamp": "",
"source": "github",
"line_count": 1927,
"max_line_length": 190,
"avg_line_length": 42.587960560456665,
"alnum_prop": 0.6306554400672622,
"repo_name": "eltonsantos/django",
"id": "da80c4805805f3727047d0f4edfa858730f7ec7c",
"size": "82233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/cache/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
urlpatterns = patterns('',
# Core
url(r'^$',
'mcfinance.core.views.dashboard',
name='dashboard'),
url(r'^switch-account/(?P<account_id>[0-9a-f]{24})/$',
'mcfinance.core.views.switch_account',
name='switch-account'),
)
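# Illustrative reverse() usage (editorial note; assumes this urlconf is
# mounted at the site root, on an old patterns-based Django):
#   from django.core.urlresolvers import reverse
#   reverse('switch-account', kwargs={'account_id': '0' * 24})
#   # -> '/switch-account/000000000000000000000000/'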
|
{
"content_hash": "e4999904457712bc02d336eb4db08748",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 58,
"avg_line_length": 22.071428571428573,
"alnum_prop": 0.5922330097087378,
"repo_name": "lardissone/mcfinance",
"id": "9c7cae5a535f94ebe29a21b686db7b41c72a5d91",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mcfinance/core/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1390"
},
{
"name": "JavaScript",
"bytes": "378"
},
{
"name": "Python",
"bytes": "23242"
}
],
"symlink_target": ""
}
|
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
class _PerDeviceGenerator(dataset_ops.DatasetV2):
"""A `dummy` generator dataset."""
def __init__(self, shard_num, multi_device_iterator_resource, incarnation_id,
source_device, element_structure):
self._structure = element_structure
multi_device_iterator_string_handle = (
gen_dataset_ops.multi_device_iterator_to_string_handle(
multi_device_iterator_resource))
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@function.defun(autograph=False) # Pure graph code.
def _init_func():
return multi_device_iterator_string_handle
init_func_concrete = _init_func._get_concrete_function_internal() # pylint: disable=protected-access
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@function.defun(autograph=False) # Pure graph code.
def _remote_init_func():
return functional_ops.remote_call(
target=source_device,
args=init_func_concrete.captured_inputs,
Tout=[dtypes.string],
f=init_func_concrete)
self._init_func = _remote_init_func._get_concrete_function_internal() # pylint: disable=protected-access
self._init_captured_args = self._init_func.captured_inputs
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@function.defun(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
autograph=False) # Pure graph code.
def _next_func(string_handle):
# pylint: disable=protected-access
multi_device_iterator = (
gen_dataset_ops.multi_device_iterator_from_string_handle(
string_handle=string_handle,
output_types=self._structure._flat_types,
output_shapes=self._structure._flat_shapes))
return gen_dataset_ops.multi_device_iterator_get_next_from_shard(
multi_device_iterator=multi_device_iterator,
shard_num=shard_num,
incarnation_id=incarnation_id,
output_types=self._structure._flat_types,
output_shapes=self._structure._flat_shapes)
next_func_concrete = _next_func._get_concrete_function_internal() # pylint: disable=protected-access
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@function.defun_with_attributes(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
attributes={"experimental_ints_on_device": True},
autograph=False) # Pure graph code.
def _remote_next_func(string_handle):
return functional_ops.remote_call(
target=source_device,
args=[string_handle] + next_func_concrete.captured_inputs,
Tout=self._structure._flat_types, # pylint: disable=protected-access
f=next_func_concrete)
self._next_func = _remote_next_func._get_concrete_function_internal() # pylint: disable=protected-access
self._next_captured_args = self._next_func.captured_inputs
self._incarnation_id_index = -1
for i, arg in enumerate(self._next_captured_args):
if arg == incarnation_id:
self._incarnation_id_index = i
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@function.defun(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
autograph=False) # Pure graph code.
def _finalize_func(unused_string_handle):
return array_ops.constant(0, dtypes.int64)
finalize_func_concrete = _finalize_func._get_concrete_function_internal() # pylint: disable=protected-access
# TODO(b/124254153): Enable autograph once the overhead is low enough.
@function.defun(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
autograph=False) # Pure graph code.
def _remote_finalize_func(string_handle):
return functional_ops.remote_call(
target=source_device,
args=[string_handle] + finalize_func_concrete.captured_inputs,
Tout=[dtypes.int64],
f=finalize_func_concrete)
self._finalize_func = _remote_finalize_func._get_concrete_function_internal( # pylint: disable=protected-access
)
self._finalize_captured_args = self._finalize_func.captured_inputs
variant_tensor = gen_dataset_ops.generator_dataset(
self._init_captured_args,
self._next_captured_args,
self._finalize_captured_args,
init_func=self._init_func,
next_func=self._next_func,
finalize_func=self._finalize_func,
**dataset_ops.flat_structure(self))
super(_PerDeviceGenerator, self).__init__(variant_tensor)
def _inputs(self):
# TODO(b/116506223): Determine which datasets should be used as inputs here.
return []
@property
def _element_structure(self):
return self._structure
class _ReincarnatedPerDeviceGenerator(dataset_ops.DatasetV2):
"""Creates a _PerDeviceGenerator-like dataset with a new incarnation_id.
Re-uses the functions from the provided per_device_dataset and just switches
out the function argument corresponding to the incarnation_id.
"""
def __init__(self, per_device_dataset, incarnation_id):
# pylint: disable=protected-access
self._structure = per_device_dataset._structure
self._init_func = per_device_dataset._init_func
self._init_captured_args = self._init_func.captured_inputs
self._next_func = per_device_dataset._next_func
self._next_captured_args = per_device_dataset._next_captured_args
# The captured arguments to the next_func are string_handle, incarnation_id.
# We update the incarnation id to the new one.
self._next_captured_args[
per_device_dataset._incarnation_id_index] = incarnation_id
self._finalize_func = per_device_dataset._finalize_func
self._finalize_captured_args = per_device_dataset._finalize_captured_args
variant_tensor = gen_dataset_ops.generator_dataset(
self._init_captured_args,
self._next_captured_args,
self._finalize_captured_args,
init_func=self._init_func,
next_func=self._next_func,
finalize_func=self._finalize_func,
**dataset_ops.flat_structure(self))
super(_ReincarnatedPerDeviceGenerator, self).__init__(variant_tensor)
def _inputs(self):
# TODO(b/116506223): Determine which datasets should be used as inputs here.
return []
@property
def _element_structure(self):
return self._structure
class MultiDeviceIterator(object):
"""An iterator over multiple devices."""
def __init__(self,
dataset,
devices,
max_buffer_size=1,
prefetch_buffer_size=1,
source_device="/cpu:0"):
"""Constructs a MultiDeviceIterator.
Args:
dataset: The input dataset to be iterated over.
devices: The list of devices to fetch data to.
max_buffer_size: Maximum size of the host side per device buffer to keep.
prefetch_buffer_size: if > 1, then we setup a buffer on each device
to prefetch into.
source_device: The host device to place the `dataset` on.
In order to prevent deadlocks, if the prefetch_buffer_size is greater
than the max_buffer_size, we set the max_buffer_size to
prefetch_buffer_size.
Raises:
RuntimeError: If run in Eager mode.
"""
self._dataset = dataset._apply_options() # pylint: disable=protected-access
self._devices = devices
self._source_device = source_device
self._source_device_tensor = ops.convert_to_tensor(source_device)
self._max_buffer_size = max_buffer_size
self._prefetch_buffer_size = prefetch_buffer_size
if self._prefetch_buffer_size > self._max_buffer_size:
self._max_buffer_size = self._prefetch_buffer_size
# Create the MultiDeviceIterator.
with ops.device(self._source_device):
# TODO(b/121378567): Get rid of this shared_name hack.
shared_name = ""
if context.executing_eagerly():
shared_name = context.shared_name()
self._multi_device_iterator_resource = (
gen_dataset_ops.multi_device_iterator(
devices=self._devices,
shared_name=shared_name,
container="",
**dataset_ops.flat_structure(self._dataset)))
if context.executing_eagerly():
# Delete the resource when this object is deleted
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._multi_device_iterator_resource,
handle_device=self._source_device)
# The incarnation ID is used to ensure consistency between the per-device
# iterators and the multi-device iterator.
self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
self._dataset._variant_tensor, # pylint: disable=protected-access
self._multi_device_iterator_resource,
max_buffer_size=self._max_buffer_size)
self._prototype_device_datasets = []
for i, device in enumerate(self._devices):
with ops.device(device):
ds = _PerDeviceGenerator(
i, self._multi_device_iterator_resource, self._incarnation_id,
self._source_device_tensor, self._dataset._element_structure) # pylint: disable=protected-access
self._prototype_device_datasets.append(ds)
# TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
# initialize the device side of the pipeline. This would allow the
# MultiDeviceIterator to choose, for example, to move some transformations
# into the device side from its input. It might be useful in rewriting.
# Create the per device iterators.
self._device_iterators = []
for i, device in enumerate(self._devices):
with ops.device(device):
ds = self._create_device_dataset(i)
if context.executing_eagerly():
self._device_iterators.append(dataset_ops.make_one_shot_iterator(ds))
else:
self._device_iterators.append(
dataset_ops.make_initializable_iterator(ds))
if not context.executing_eagerly():
device_iterator_initializers = [
iterator.initializer for iterator in self._device_iterators
]
self._initializer = control_flow_ops.group(*device_iterator_initializers)
def _create_device_dataset(self, i):
"""Uses _prototype_device_datasets[i] to build a dataset for the device."""
ds = self._prototype_device_datasets[i]
ds = _ReincarnatedPerDeviceGenerator(ds, self._incarnation_id)
if self._prefetch_buffer_size > 0:
ds = ds.prefetch(self._prefetch_buffer_size)
# TODO(jsimsa): Enable auto-tuning and optimizations when supported for
# non-CPU devices.
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = False
ds = ds.with_options(options)
return ds
def get_next(self, device=None):
"""Returns the next element given a `device`, else returns all in a list."""
if device is not None:
index = self._devices.index(device)
return self._device_iterators[index].get_next()
result = []
for i, device in enumerate(self._devices):
with ops.device(device):
result.append(self._device_iterators[i].get_next())
return result
def get_next_as_optional(self):
result = []
for i, device in enumerate(self._devices):
with ops.device(device):
result.append(iterator_ops.get_next_as_optional(
self._device_iterators[i]))
return result
@property
def initializer(self):
if context.executing_eagerly():
return control_flow_ops.no_op()
return self._initializer
def _eager_reset(self):
"""Resets the MultiDeviceIterator in eager mode."""
if not context.executing_eagerly():
raise ValueError("Eager reset is only supported in eager mode.")
# pylint: disable=protected-access
self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
self._dataset._variant_tensor,
self._multi_device_iterator_resource,
max_buffer_size=self._max_buffer_size)
for i, device in enumerate(self._devices):
with ops.device(device):
ds = self._create_device_dataset(i)
# Reset the device iterator resources with the new dataset.
ds_variant = ds._variant_tensor
gen_dataset_ops.make_iterator(
ds_variant, self._device_iterators[i]._iterator_resource)
@property
def _element_structure(self):
return dataset_ops.get_structure(self._dataset)
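# Minimal usage sketch (editorial addition, not part of the original module;
# device names and the range dataset are illustrative assumptions):
#   dataset = dataset_ops.Dataset.range(10)
#   mdi = MultiDeviceIterator(dataset, ["/gpu:0", "/gpu:1"],
#                             prefetch_buffer_size=2)
#   per_device_elements = mdi.get_next()  # one tensor per device, in order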
|
{
"content_hash": "a1c4d65478ffbd9adb3c190c5c1c4f1f",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 116,
"avg_line_length": 40.91692307692308,
"alnum_prop": 0.6837118363663709,
"repo_name": "kevin-coder/tensorflow-fork",
"id": "efa8a11b75ba4c4c7094fc415c3b445076ebb337",
"size": "13987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/ops/multi_device_iterator_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9117"
},
{
"name": "C",
"bytes": "340300"
},
{
"name": "C++",
"bytes": "39383425"
},
{
"name": "CMake",
"bytes": "194940"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33617202"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425910"
}
],
"symlink_target": ""
}
|
r"""Provides tools for interpolating irregularly spaced data onto a regular grid.
Deprecated in 0.9 in favor of the more general `interpolate` subpackage.
"""
import warnings
import metpy.deprecation
from .interpolate import (interpolate, inverse_distance, natural_neighbor,
remove_nan_observations, remove_observations_below_value,
remove_repeat_coordinates) # noqa: F401, F403
__all__ = ['interpolate', 'inverse_distance', 'natural_neighbor', 'remove_nan_observations',
'remove_observations_below_value', 'remove_repeat_coordinates']
# Any use of this module should raise a deprecation warning
warnings.warn('The use of the "gridding" subpackage has been deprecated, and will be removed '
'in 0.12. Use the "interpolate" subpackage instead.',
metpy.deprecation.metpyDeprecation)
|
{
"content_hash": "80256f022c6506130271f263460e8951",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 94,
"avg_line_length": 46.21052631578947,
"alnum_prop": 0.7015945330296127,
"repo_name": "jrleeman/MetPy",
"id": "5385480462190be0b12668258af8bde8775772c7",
"size": "1016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metpy/gridding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1241442"
},
{
"name": "Shell",
"bytes": "1600"
}
],
"symlink_target": ""
}
|
__source__ = 'https://leetcode.com/problems/valid-parenthesis-string/description/'
# Time:  O(n)
# Space: O(1)
#
# Description: Leetcode # 678. Valid Parenthesis String
#
# Given a string containing only three types of characters: '(', ')' and '*',
# write a function to check whether this string is valid.
# We define the validity of a string by these rules:
#
# Any left parenthesis '(' must have a corresponding right parenthesis ')'.
# Any right parenthesis ')' must have a corresponding left parenthesis '('.
# Left parenthesis '(' must go before the corresponding right parenthesis ')'.
# '*' could be treated as a single right parenthesis ')' or a single left parenthesis '(' or an empty string.
# An empty string is also valid.
# Example 1:
# Input: "()"
# Output: True
# Example 2:
# Input: "(*)"
# Output: True
# Example 3:
# Input: "(*))"
# Output: True
# Note:
# The string size will be in the range [1, 100].
#
# Companies
# Alibaba
# Related Topics
# String
#
import unittest
# Thought:
# The number of open parentheses is tracked as a range [cmin, cmax]:
# cmax counts the maximum possible number of open parentheses.
# cmin counts the minimum possible number of open parentheses.
# The string is valid when both conditions hold:
#
# cmax never becomes negative.
# cmin is 0 at the end.
#
# 20ms 100%
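# Worked example (illustrative) for s = "(*))", tracing the loop below:
#   '(' -> cmin=1, cmax=1
#   '*' -> cmin=0, cmax=2
#   ')' -> cmin=0, cmax=1   (cmin is clamped at 0)
#   ')' -> cmin=0, cmax=0
# cmax never went negative and cmin ends at 0, so the string is valid.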
class Solution(object):
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
cmin = cmax = 0
for i in s:
if i == '(':
cmax += 1
cmin += 1
if i == ')':
cmax -= 1
cmin = max(cmin - 1, 0)
if i == '*':
cmax += 1
cmin = max(cmin - 1, 0)
if cmax < 0:
return False
return cmin == 0
class TestMethods(unittest.TestCase):
    def test_Local(self):
        sol = Solution()
        self.assertTrue(sol.checkValidString("(*))"))
        self.assertFalse(sol.checkValidString(")("))
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/valid-parenthesis-string/solution/
# 2ms 100%
public class Solution {
public boolean checkValidString(String s) {
int low = 0;
int high = 0;
for (int i = 0; i < s.length(); i++) {
if (s.charAt(i) == '(') {
low++;
high++;
} else if (s.charAt(i) == ')') {
if (low > 0) {
low--;
}
high--;
} else {
if (low > 0) {
low--;
}
high++;
}
if (high < 0) {
return false;
}
}
return low == 0;
}
}
# Greedy
# 2ms 100%
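# lo/hi track the fewest and the most unmatched '(' possible so far:
# '*' counts as ')' for lo and as '(' for hi; the string stays valid
# while 0 remains reachable inside [lo, hi].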
class Solution {
public boolean checkValidString(String s) {
int lo = 0, hi = 0;
for (char c: s.toCharArray()) {
lo += c == '(' ? 1 : -1;
hi += c != ')' ? 1 : -1;
if (hi < 0) break;
lo = Math.max(lo, 0);
}
return lo == 0;
}
}
'''
|
{
"content_hash": "4ea1a9f56c3ddf7ab997df4abe415717",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 109,
"avg_line_length": 25.050420168067227,
"alnum_prop": 0.5038577658503858,
"repo_name": "JulyKikuAkita/PythonPrac",
"id": "f2efa1f5c3f88ce03531734b78090d5e083669ba",
"size": "2981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cs15211/ValidParenthesisString.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "647778"
},
{
"name": "Python",
"bytes": "5429558"
}
],
"symlink_target": ""
}
|