"""
pygaarst.hyperionutils
Utility functions for processing Hyperion datasets
Created by Chris Waigl on 2014-04-25.
"""
from __future__ import division, print_function
import os
import numpy as np
def gethyperionbands():
"""
Load Hyperion spectral band values into Numpy structured array.
Source: http://eo1.usgs.gov/sensors/hyperioncoverage
"""
def converter(bandname):
return bandname.decode('utf-8').replace('B', 'band')
this_dir, _ = os.path.split(__file__)
tabfile = os.path.join(this_dir, 'data', 'Hyperion_Spectral_coverage.tab')
return np.recfromtxt(
tabfile,
delimiter='\t',
skip_header=1,
names=True,
dtype=('U7', 'f8', 'f8', 'i8', 'U1'),
converters={0: converter}
)
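# Note: the recarray above exposes the .tab file's header row as field names;
# downstream code reads gethyperionbands().Hyperion_Band and
# gethyperionbands().Average_Wavelength_nm (see find_nearest_hyp below).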
def gethyperionirradiance():
"""Load Hyperion spectral irradiance into Numpy array"""
def converter(bandname):
return bandname.decode('utf-8').replace('b', 'band')
this_dir, _ = os.path.split(__file__)
tabfile = os.path.join(
this_dir, 'data', 'Hyperion_Spectral_Irradiance.txt')
return np.recfromtxt(
tabfile,
delimiter='\t',
skip_header=1,
names=True,
dtype=('U7', 'f8', 'f8'),
converters={0: converter}
)
def getesun(band):
    """Return the spectral irradiance (ESUN) value for a named Hyperion band"""
irradiances = gethyperionirradiance()
return irradiances[
irradiances['Hyperion_band'] == band]['Spectral_irradiance_Wm2mu'][0]
def find_nearest_hyp(wavelength):
"""
Returns index and wavelength of Hyperion band closest to input wavelength
Arguments:
wavelength (float): wavelength in nm
Returns:
idx (int): band index of closest band, starting at 0
band (str): band name of closest band, starting at 'band1'
bandwavelength (float): closest band wavelength in nm
"""
    hyperionbands = gethyperionbands()
    bands = hyperionbands.Hyperion_Band
    wavs = hyperionbands.Average_Wavelength_nm
idx = (np.abs(wavs - wavelength)).argmin()
return idx, bands[idx], wavs[idx]
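if __name__ == '__main__':
    # Minimal usage sketch (assumes the bundled .tab data files are present
    # and that the band names in both files line up as the converters intend):
    # find the Hyperion band closest to 850 nm, then look up its irradiance.
    idx, band, wav = find_nearest_hyp(850.0)
    print('%s (index %d) at %.2f nm, ESUN = %.2f W m^-2 micron^-1'
          % (band, idx, wav, getesun(band)))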
|
{
"content_hash": "6fc933e27684aa74c619322b6aeaa943",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 78,
"avg_line_length": 28.253521126760564,
"alnum_prop": 0.6390827517447657,
"repo_name": "chryss/pygaarst",
"id": "111599d9273627c031b0f62eedc39507aeab71cf",
"size": "2022",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pygaarst/hyperionutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "114088"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtGui, uic
import sys
qtCreatorFile = "assetdetailW.ui"
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)
class AssetDetail(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
        png = QtGui.QPixmap('/home/linuxll/EODAnalyzer/test.jpg')  # load the equity-curve image
self.assetmap.setPixmap(png)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
window = AssetDetail()
window.show()
sys.exit(app.exec_())
|
{
"content_hash": "0c5956d239be73546a412f0b73c9956d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 77,
"avg_line_length": 28.5,
"alnum_prop": 0.6578947368421053,
"repo_name": "nealchenzhang/EODAnalyzer",
"id": "4c649f2e10ed9a70d84ef97c61e1faf53ea08d68",
"size": "607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uiAssetDetail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58127"
}
],
"symlink_target": ""
}
|
"""Futures for long-running operations returned from Google Cloud APIs.
These futures can be used to synchronously wait for the result of a
long-running operation using :meth:`Operation.result`:
.. code-block:: python
operation = my_api_client.long_running_method()
result = operation.result()
Or asynchronously using callbacks and :meth:`Operation.add_done_callback`:
.. code-block:: python
operation = my_api_client.long_running_method()
def my_callback(future):
result = future.result()
operation.add_done_callback(my_callback)
"""
import functools
import threading
from google.api_core import exceptions
from google.api_core import protobuf_helpers
from google.api_core.future import polling
from google.longrunning import operations_pb2
from google.protobuf import json_format
from google.rpc import code_pb2
class Operation(polling.PollingFuture):
"""A Future for interacting with a Google API Long-Running Operation.
Args:
operation (google.longrunning.operations_pb2.Operation): The
initial operation.
refresh (Callable[[], ~.api_core.operation.Operation]): A callable that
returns the latest state of the operation.
cancel (Callable[[], None]): A callable that tries to cancel
the operation.
        result_type (:func:`type`): The protobuf type for the operation's
            result.
        metadata_type (:func:`type`): The protobuf type for the operation's
            metadata.
retry (google.api_core.retry.Retry): The retry configuration used
when polling. This can be used to control how often :meth:`done`
is polled. Regardless of the retry's ``deadline``, it will be
overridden by the ``timeout`` argument to :meth:`result`.
"""
def __init__(
self, operation, refresh, cancel,
result_type, metadata_type=None, retry=polling.DEFAULT_RETRY):
super(Operation, self).__init__(retry=retry)
self._operation = operation
self._refresh = refresh
self._cancel = cancel
self._result_type = result_type
self._metadata_type = metadata_type
self._completion_lock = threading.Lock()
# Invoke this in case the operation came back already complete.
self._set_result_from_operation()
@property
def operation(self):
"""google.longrunning.Operation: The current long-running operation."""
return self._operation
@property
def metadata(self):
"""google.protobuf.Message: the current operation metadata."""
if not self._operation.HasField('metadata'):
return None
return protobuf_helpers.from_any_pb(
self._metadata_type, self._operation.metadata)
def _set_result_from_operation(self):
"""Set the result or exception from the operation if it is complete."""
# This must be done in a lock to prevent the polling thread
# and main thread from both executing the completion logic
# at the same time.
with self._completion_lock:
# If the operation isn't complete or if the result has already been
# set, do not call set_result/set_exception again.
# Note: self._result_set is set to True in set_result and
# set_exception, in case those methods are invoked directly.
if not self._operation.done or self._result_set:
return
if self._operation.HasField('response'):
response = protobuf_helpers.from_any_pb(
self._result_type, self._operation.response)
self.set_result(response)
elif self._operation.HasField('error'):
exception = exceptions.GoogleAPICallError(
self._operation.error.message,
errors=(self._operation.error,),
response=self._operation)
self.set_exception(exception)
else:
exception = exceptions.GoogleAPICallError(
'Unexpected state: Long-running operation had neither '
'response nor error set.')
self.set_exception(exception)
def _refresh_and_update(self):
"""Refresh the operation and update the result if needed."""
# If the currently cached operation is done, no need to make another
# RPC as it will not change once done.
if not self._operation.done:
self._operation = self._refresh()
self._set_result_from_operation()
def done(self):
"""Checks to see if the operation is complete.
Returns:
bool: True if the operation is complete, False otherwise.
"""
self._refresh_and_update()
return self._operation.done
def cancel(self):
"""Attempt to cancel the operation.
Returns:
bool: True if the cancel RPC was made, False if the operation is
already complete.
"""
if self.done():
return False
self._cancel()
return True
def cancelled(self):
"""True if the operation was cancelled."""
self._refresh_and_update()
return (self._operation.HasField('error') and
self._operation.error.code == code_pb2.CANCELLED)
def _refresh_http(api_request, operation_name):
"""Refresh an operation using a JSON/HTTP client.
Args:
api_request (Callable): A callable used to make an API request. This
should generally be
:meth:`google.cloud._http.Connection.api_request`.
operation_name (str): The name of the operation.
Returns:
google.longrunning.operations_pb2.Operation: The operation.
"""
path = 'operations/{}'.format(operation_name)
api_response = api_request(method='GET', path=path)
return json_format.ParseDict(
api_response, operations_pb2.Operation())
def _cancel_http(api_request, operation_name):
"""Cancel an operation using a JSON/HTTP client.
Args:
api_request (Callable): A callable used to make an API request. This
should generally be
:meth:`google.cloud._http.Connection.api_request`.
operation_name (str): The name of the operation.
"""
path = 'operations/{}:cancel'.format(operation_name)
api_request(method='POST', path=path)
def from_http_json(operation, api_request, result_type, **kwargs):
"""Create an operation future using a HTTP/JSON client.
This interacts with the long-running operations `service`_ (specific
to a given API) via `HTTP/JSON`_.
.. _HTTP/JSON: https://cloud.google.com/speech/reference/rest/\
v1beta1/operations#Operation
Args:
operation (dict): Operation as a dictionary.
api_request (Callable): A callable used to make an API request. This
should generally be
:meth:`google.cloud._http.Connection.api_request`.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation.
"""
operation_proto = json_format.ParseDict(
operation, operations_pb2.Operation())
refresh = functools.partial(
_refresh_http, api_request, operation_proto.name)
cancel = functools.partial(
_cancel_http, api_request, operation_proto.name)
return Operation(operation_proto, refresh, cancel, result_type, **kwargs)
def _refresh_grpc(operations_stub, operation_name):
"""Refresh an operation using a gRPC client.
Args:
operations_stub (google.longrunning.operations_pb2.OperationsStub):
The gRPC operations stub.
operation_name (str): The name of the operation.
Returns:
google.longrunning.operations_pb2.Operation: The operation.
"""
request_pb = operations_pb2.GetOperationRequest(name=operation_name)
return operations_stub.GetOperation(request_pb)
def _cancel_grpc(operations_stub, operation_name):
"""Cancel an operation using a gRPC client.
Args:
operations_stub (google.longrunning.operations_pb2.OperationsStub):
The gRPC operations stub.
operation_name (str): The name of the operation.
"""
request_pb = operations_pb2.CancelOperationRequest(name=operation_name)
operations_stub.CancelOperation(request_pb)
def from_grpc(operation, operations_stub, result_type, **kwargs):
"""Create an operation future using a gRPC client.
This interacts with the long-running operations `service`_ (specific
to a given API) via gRPC.
.. _service: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L38
Args:
operation (google.longrunning.operations_pb2.Operation): The operation.
operations_stub (google.longrunning.operations_pb2.OperationsStub):
The operations stub.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation.
"""
refresh = functools.partial(
_refresh_grpc, operations_stub, operation.name)
cancel = functools.partial(
_cancel_grpc, operations_stub, operation.name)
return Operation(operation, refresh, cancel, result_type, **kwargs)
def from_gapic(operation, operations_client, result_type, **kwargs):
"""Create an operation future from a gapic client.
This interacts with the long-running operations `service`_ (specific
to a given API) via a gapic client.
.. _service: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L38
Args:
operation (google.longrunning.operations_pb2.Operation): The operation.
operations_client (google.api_core.operations_v1.OperationsClient):
The operations client.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation.
"""
refresh = functools.partial(
operations_client.get_operation, operation.name)
cancel = functools.partial(
operations_client.cancel_operation, operation.name)
return Operation(operation, refresh, cancel, result_type, **kwargs)
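if __name__ == '__main__':
    # Minimal sketch (not part of the library): drive an Operation future
    # using an already-complete operation and stubbed refresh/cancel callables
    # instead of a live transport.
    from google.protobuf import empty_pb2

    done_op = operations_pb2.Operation(name='operations/example', done=True)
    done_op.response.Pack(empty_pb2.Empty())

    future = Operation(
        done_op, refresh=lambda: done_op, cancel=lambda: None,
        result_type=empty_pb2.Empty)
    print(future.result())  # resolves immediately; the operation is done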
|
{
"content_hash": "68d5c0b21be00a4bcf514e404d7d8d90",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 79,
"avg_line_length": 37.38275862068966,
"alnum_prop": 0.6520616179319251,
"repo_name": "jonparrott/gcloud-python",
"id": "a97a137398a38764f323a82aa01861d6464a7fc7",
"size": "11416",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api_core/google/api_core/operation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "3459300"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import absolute_import
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: khmer-project@idyll.org
#
import khmer
import screed
from . import khmer_tst_utils as utils
from nose.plugins.attrib import attr
def teardown():
utils.cleanup()
class Test_ExactGraphFu(object):
def setup(self):
self.ht = khmer.Hashbits(12, 1e4, 2)
def test_counts(self):
ht = self.ht
ht.consume_fasta(utils.get_test_data('test-graph.fa'))
kmer = "TTAGGACTGCAC"
x = ht.calc_connected_graph_size(kmer)
assert x == 69, x
kmer = "TGCGTTTCAATC"
x = ht.calc_connected_graph_size(kmer)
assert x == 68, x
kmer = "ATACTGTAAATA"
x = ht.calc_connected_graph_size(kmer)
assert x == 36, x
def test_graph_links_next_a(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "A")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_next_c(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "C")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_next_g(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "G")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_next_t(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "T")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_a(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("A" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_c(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("C" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_g(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("G" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_t(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("T" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
class Test_InexactGraphFu(object):
def setup(self):
self.ht = khmer.Hashbits(12, 4 ** 3 + 1, 2)
def test_graph_links_next_a(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "A")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_next_c(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "C")
x = ht.calc_connected_graph_size(word)
assert x == 2, x
def test_graph_links_next_g(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "G")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_next_t(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume(word[1:] + "T")
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_a(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("A" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_c(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("C" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_g(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("G" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
def test_graph_links_prev_t(self):
ht = self.ht
word = "TGCGTTTCAATC"
ht.consume(word)
ht.consume("T" + word[:-1])
x = ht.calc_connected_graph_size(word)
assert x == 2
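# The pattern the tests above exercise, in one place: consuming a 12-mer plus
# a one-base extension yields a single connected component of size 2 in the
# k-mer graph (a sketch mirroring Test_ExactGraphFu's parameters).
def _demo_two_node_graph():
    ht = khmer.Hashbits(12, 1e4, 2)
    word = "TGCGTTTCAATC"
    ht.consume(word)
    ht.consume(word[1:] + "A")  # shift left one base, append 'A'
    return ht.calc_connected_graph_size(word)  # == 2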
#
class Test_Partitioning(object):
def test_output_unassigned(self):
import screed
filename = utils.get_test_data('random-20-a.fa')
ht = khmer._Hashbits(21, [5, 7, 11, 13])
ht.consume_fasta_and_tag(filename)
output_file = utils.get_temp_filename('part0test')
ht.output_partitions(filename, output_file, True)
len1 = len(list(screed.open(filename)))
len2 = len(list(screed.open(output_file)))
assert len1 > 0
assert len1 == len2, (len1, len2)
def test_not_output_unassigned(self):
import screed
filename = utils.get_test_data('random-20-a.fa')
ht = khmer._Hashbits(21, [5, 7, 11, 13])
ht.consume_fasta_and_tag(filename)
output_file = utils.get_temp_filename('parttest')
ht.output_partitions(filename, output_file, False)
len1 = len(list(screed.open(filename)))
len2 = len(list(screed.open(output_file)))
assert len1 > 0
assert len2 == 0, len2
def test_output_fq(self):
filename = utils.get_test_data('random-20-a.fq')
ht = khmer.Hashbits(20, 1e4, 4)
ht.consume_fasta_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
ht.merge_subset(subset)
output_file = utils.get_temp_filename('parttest')
ht.output_partitions(filename, output_file, False)
print(open(output_file).read())
x = set([r.quality for r in screed.open(output_file)])
assert x, x
def test_disconnected_20_a(self):
filename = utils.get_test_data('random-20-a.fa')
ht = khmer.Hashbits(21, 1e5, 4)
ht.consume_fasta_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
x = ht.subset_count_partitions(subset)
assert x == (99, 0), x # disconnected @ 21
def test_connected_20_a(self):
filename = utils.get_test_data('random-20-a.fa')
ht = khmer.Hashbits(20, 1e4, 4)
ht.consume_fasta_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
x = ht.subset_count_partitions(subset)
assert x == (1, 0) # connected @ 20
def test_disconnected_20_b(self):
filename = utils.get_test_data('random-20-b.fa')
ht = khmer.Hashbits(21, 1e4, 4)
ht.consume_fasta_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
x = ht.subset_count_partitions(subset)
assert x == (99, 0), x # disconnected @ 21
def test_connected_20_b(self):
filename = utils.get_test_data('random-20-b.fa')
ht = khmer.Hashbits(20, 1e4, 4)
ht.consume_fasta_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
x = ht.subset_count_partitions(subset)
assert x == (1, 0) # connected @ 20
def test_disconnected_31_c(self):
filename = utils.get_test_data('random-31-c.fa')
ht = khmer.Hashbits(32, 1e6, 4)
ht.consume_fasta_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
x = ht.subset_count_partitions(subset)
assert x == (999, 0), x # disconnected @ K = 32
def test_connected_31_c(self):
filename = utils.get_test_data('random-31-c.fa')
ht = khmer.Hashbits(31, 1e5, 4)
ht.consume_fasta_and_tag(filename)
subset = ht.do_subset_partition(0, 0)
x = ht.subset_count_partitions(subset)
assert x == (1, 0) # connected @ K = 31
#
class Test_PythonAPI(object):
def test_find_all_tags_kmersize(self):
ht = khmer.Hashbits(20, 4 ** 4 + 1, 2)
a = "ATTGGGACTCTGGGAGCACTTATCATGGAGAT"
b = "GAGCACTTTAACCCTGCAGAGTGGCCAAGGCT"
c = "GGAGCACTTATCATGGAGATATATCCCGTGCTTAAACATCGCACTTTAACCCTGCAGAGT"
print(ht.consume(a))
try:
ppi = ht.find_all_tags(c[:19])
assert False, "should raise a ValueError for wrong k-mer size"
except ValueError:
pass
try:
ppi = ht.find_all_tags(c[:21])
assert False, "should raise a ValueError for wrong k-mer size"
except ValueError:
pass
def test_ordered_connect(self):
ht = khmer.Hashbits(20, 4 ** 4 + 1, 2)
a = "ATTGGGACTCTGGGAGCACTTATCATGGAGAT"
b = "GAGCACTTTAACCCTGCAGAGTGGCCAAGGCT"
c = "GGAGCACTTATCATGGAGATATATCCCGTGCTTAAACATCGCACTTTAACCCTGCAGAGT"
print(ht.consume(a))
ppi = ht.find_all_tags(a[:20])
pid = ht.assign_partition_id(ppi)
assert pid == 0, pid
print(ht.consume(b))
ppi = ht.find_all_tags(b[:20])
pid = ht.assign_partition_id(ppi)
assert pid == 0, pid
print(ht.consume(c))
ppi = ht.find_all_tags(c[:20])
pid = ht.assign_partition_id(ppi)
assert pid == 2, pid
#
|
{
"content_hash": "727990b60d3e6f1b21ff082557bb2827",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 74,
"avg_line_length": 26.85593220338983,
"alnum_prop": 0.564952140528032,
"repo_name": "jas14/khmer",
"id": "5afcb92671976e65885f23985db8a57b2974f44c",
"size": "9507",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_graph.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "491634"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Groff",
"bytes": "6187"
},
{
"name": "Makefile",
"bytes": "15123"
},
{
"name": "Python",
"bytes": "672782"
},
{
"name": "Shell",
"bytes": "4469"
}
],
"symlink_target": ""
}
|
import warnings
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.stats import circvar
import astropy.units as u
from skimage.measure import find_contours
import scipy.ndimage as nd
from utils import ceil_int, eight_conn, robust_skewed_std
from masking_utils import smooth_edges
# from contour_orientation import shell_orientation
def find_bubble_edges(array, blob, max_extent=1.0,
edge_mask=None,
nsig_thresh=1, value_thresh=None,
radius=None, return_mask=False, min_pixels=16,
filter_size=4, verbose=False,
min_radius_frac=0.0, try_local_bkg=True,
**kwargs):
'''
Expand/contract to match the contours in the data.
Parameters
----------
array : 2D numpy.ndarray or spectral_cube.LowerDimensionalObject
Data used to define the region boundaries.
max_extent : float, optional
Multiplied by the major radius to set how far should be searched
when searching for the boundary.
nsig_thresh : float, optional
Number of times sigma above the mean to set the boundary intensity
requirement. This is used whenever the local background is higher
than the given `value_thresh`.
value_thresh : float, optional
When given, sets the minimum intensity for defining a bubble edge.
The natural choice is a few times the noise level in the cube.
radius : float, optional
Give an optional radius to use instead of the major radius defined
for the bubble.
kwargs : passed to profile.profile_line.
Returns
-------
extent_coords : np.ndarray
Array with the positions of edges.
'''
if try_local_bkg:
mean, std = intensity_props(array, blob)
background_thresh = mean + nsig_thresh * std
# Define a suitable background based on the intensity within the
# elliptical region
if value_thresh is None:
value_thresh = background_thresh
else:
# If value_thresh is higher use it. Otherwise use the bkg.
if value_thresh < background_thresh:
value_thresh = background_thresh
# Set the number of theta to be ~ the perimeter.
y, x, major, minor, pa = blob[:5]
y = int(np.round(y, decimals=0))
x = int(np.round(x, decimals=0))
# If the center is on the edge of the array, subtract one to
# index correctly
if y == array.shape[0]:
y -= 1
if x == array.shape[1]:
x -= 1
# Use the ellipse model to define a bounding box for the mask.
bbox = Ellipse2D(True, 0.0, 0.0, major * max_extent,
minor * max_extent, pa).bounding_box
y_range = ceil_int(bbox[0][1] - bbox[0][0] + 1 + filter_size)
x_range = ceil_int(bbox[1][1] - bbox[1][0] + 1 + filter_size)
shell_thetas = []
yy, xx = np.mgrid[-int(y_range / 2): int(y_range / 2) + 1,
-int(x_range / 2): int(x_range / 2) + 1]
if edge_mask is not None:
arr = edge_mask[max(0, y - int(y_range / 2)):
y + int(y_range / 2) + 1,
max(0, x - int(x_range / 2)):
x + int(x_range / 2) + 1]
else:
arr = array[max(0, y - int(y_range / 2)):y + int(y_range / 2) + 1,
max(0, x - int(x_range / 2)):x + int(x_range / 2) + 1]
# Adjust meshes if they exceed the array shape
x_min = -min(0, x - int(x_range / 2))
x_max = xx.shape[1] - max(0, x + int(x_range / 2) - array.shape[1] + 1)
y_min = -min(0, y - int(y_range / 2))
y_max = yy.shape[0] - max(0, y + int(y_range / 2) - array.shape[0] + 1)
offset = (max(0, int(y - (y_range / 2))),
max(0, int(x - (x_range / 2))))
yy = yy[y_min:y_max, x_min:x_max]
xx = xx[y_min:y_max, x_min:x_max]
dist_arr = np.sqrt(yy**2 + xx**2)
if edge_mask is not None:
smooth_mask = arr
else:
smooth_mask = \
smooth_edges(arr <= value_thresh, filter_size, min_pixels)
region_mask = \
Ellipse2D(True, 0.0, 0.0, major * max_extent, minor * max_extent,
pa)(xx, yy).astype(bool)
region_mask = nd.binary_dilation(region_mask, eight_conn, iterations=2)
# The bubble center must fall within a valid region
mid_pt = np.where(dist_arr == 0.0)
if len(mid_pt[0]) == 0:
middle_fail = True
else:
        local_center = list(zip(*np.where(dist_arr == 0.0)))[0]
middle_fail = False
# _make_bubble_mask(smooth_mask, local_center)
# If the center is not contained within a bubble region, return
# empties.
bad_case = not smooth_mask.any() or smooth_mask.all() or \
(smooth_mask * region_mask).all() or middle_fail
if bad_case:
if return_mask:
return np.array([]), 0.0, 0.0, value_thresh, smooth_mask
return np.array([]), 0.0, 0.0, value_thresh
orig_perim = find_contours(region_mask, 0, fully_connected='high')[0]
# new_perim = find_contours(smooth_mask, 0, fully_connected='high')
coords = []
extent_mask = np.zeros_like(region_mask)
# for perim in new_perim:
# perim = perim.astype(np.int)
# good_pts = \
# np.array([pos for pos, pt in enumerate(perim)
# if region_mask[pt[0], pt[1]]])
# if not good_pts.any():
# continue
# # Now split into sections
# from utils import consec_split
# split_pts = consec_split(good_pts)
# # Remove the duplicated end point if it was initially connected
# if len(split_pts) > 1:
# # Join these if the end pts initially matched
# if split_pts[0][0] == split_pts[-1][-1]:
# split_pts[0] = np.append(split_pts[0],
# split_pts[-1][::-1])
# split_pts.pop(-1)
# for split in split_pts:
# coords.append(perim[split])
# extent_mask[perim[good_pts][:, 0], perim[good_pts][:, 1]] = True
# Based on the curvature of the shell, only fit points whose
# orientation matches the assumed centre.
# incoord, outcoord = shell_orientation(coords, local_center,
# verbose=False)
# Now only keep the points that are not blocked from the centre pixel
for pt in orig_perim:
theta = np.arctan2(pt[0] - local_center[0],
pt[1] - local_center[1])
num_pts = int(np.round(np.hypot(pt[0] - local_center[0],
pt[1] - local_center[1]),
decimals=0))
        ys = np.round(np.linspace(local_center[0], pt[0], num_pts),
                      decimals=0).astype(int)
        xs = np.round(np.linspace(local_center[1], pt[1], num_pts),
                      decimals=0).astype(int)
not_on_edge = np.logical_and(ys < smooth_mask.shape[0],
xs < smooth_mask.shape[1])
ys = ys[not_on_edge]
xs = xs[not_on_edge]
dist = np.sqrt((ys - local_center[0])**2 +
(xs - local_center[1])**2)
prof = smooth_mask[ys, xs]
prof = prof[dist >= min_radius_frac * minor]
ys = ys[dist >= min_radius_frac * minor]
xs = xs[dist >= min_radius_frac * minor]
# Look for the first 0 and ignore all others past it
zeros = np.where(prof == 0)[0]
# If none, move on
        if zeros.size == 0:
continue
edge = zeros[0]
extent_mask[ys[edge], xs[edge]] = True
coords.append((ys[edge], xs[edge]))
shell_thetas.append(theta)
# Calculate the fraction of the region associated with a shell
shell_frac = len(shell_thetas) / float(len(orig_perim))
shell_thetas = np.array(shell_thetas)
coords = np.array(coords)
# Use the theta values to find the standard deviation i.e. how
# dispersed the shell locations are. Assumes a circle, but we only
# consider moderately elongated ellipses, so the statistics approx.
# hold.
theta_var = np.sqrt(circvar(shell_thetas * u.rad)).value
extent_coords = \
np.vstack([pt + off for pt, off in
zip(np.where(extent_mask), offset)]).T
if verbose:
print("Shell fraction : " + str(shell_frac))
print("Angular Std. : " + str(theta_var))
import matplotlib.pyplot as p
true_region_mask = \
Ellipse2D(True, 0.0, 0.0, major, minor,
pa)(xx, yy).astype(bool)
ax = p.subplot(121)
ax.imshow(arr, origin='lower',
interpolation='nearest')
ax.contour(smooth_mask, colors='b')
ax.contour(region_mask, colors='r')
ax.contour(true_region_mask, colors='g')
if len(coords) > 0:
p.plot(coords[:, 1], coords[:, 0], 'bD')
p.plot(local_center[1], local_center[0], 'gD')
ax2 = p.subplot(122)
ax2.imshow(extent_mask, origin='lower',
interpolation='nearest')
p.draw()
        input("?")
p.clf()
if return_mask:
return extent_coords, shell_frac, theta_var, value_thresh, \
extent_mask
return extent_coords, shell_frac, theta_var, value_thresh
def intensity_props(data, blob, min_rad=4):
'''
Return the mean and std for the elliptical region in the given data.
Parameters
----------
data : LowerDimensionalObject or SpectralCube
Data to estimate the background from.
blob : numpy.array
Contains the properties of the region.
'''
y, x, major, minor, pa = blob[:5]
inner_ellipse = \
Ellipse2D(True, x, y, max(min_rad, 0.75 * major),
max(min_rad, 0.75 * minor), pa)
yy, xx = np.mgrid[:data.shape[-2], :data.shape[-1]]
ellip_mask = inner_ellipse(xx, yy).astype(bool)
vals = data[ellip_mask]
mean, sig = robust_skewed_std(vals)
return mean, sig
def _make_bubble_mask(edge_mask, center):
'''
When a region is too large, unconnected and unrelated edges may be picked
up. This removes those and only keeps the region that contains the center
point.
'''
    labels, num = nd.label(edge_mask)
    if num == 1:
        return edge_mask
    contains_center = 0
    for n in range(1, num + 1):
        pts = list(zip(*np.where(labels == n)))
        if center in pts:
            contains_center = n
            break
    if contains_center == 0:
        warnings.warn("The center is not within any hole region.")
    for n in range(1, num + 1):
        if n == contains_center:
            continue
        edge_mask[labels == n] = False
    return edge_mask
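# Minimal usage sketch (synthetic data; `blob` follows the
# (y, x, major, minor, pa) convention used throughout this module):
#
#   data = np.random.randn(64, 64)
#   blob = np.array([32.0, 32.0, 10.0, 8.0, 0.0])
#   mean, sig = intensity_props(data, blob)
#   edges = find_bubble_edges(data, blob, value_thresh=mean + 3 * sig)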
|
{
"content_hash": "ea6c6fc047a79f1ae54455fe3392671b",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 79,
"avg_line_length": 36.09657320872274,
"alnum_prop": 0.5241218607059636,
"repo_name": "e-koch/BaSiCs",
"id": "86cedf2aa723a1a8a043bf0f35559e6c8f339016",
"size": "11588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basics/bubble_edge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "190036953"
},
{
"name": "Python",
"bytes": "88642"
}
],
"symlink_target": ""
}
|
import webapp2
import cgi
form="""
<form method="post">
What is your birthday?
<br />
<label>
Month
<input type="text" name="month" value="%(month)s">
</label>
<label>
Day
<input type="text" name="day" value="%(day)s">
</label>
<label>
Year
<input type="text" name="year" value="%(year)s">
</label>
<br />
<br />
<div style="color: red">%(error)s</div>
<input type="submit">
</form>
"""
def valid_month(user_month):
    if user_month in ['January',
                      'February',
                      'March',
                      'April',
                      'May',
                      'June',
                      'July',
                      'August',
                      'September',
                      'October',
                      'November',
                      'December']:
        return True
    return None
def valid_day(user_day):
return True
def valid_year(user_year):
return True
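# The two validators above are intentionally permissive stubs; fuller checks
# might look like this (a sketch, not part of the original exercise):
#
#   def valid_day(user_day):
#       return user_day.isdigit() and 1 <= int(user_day) <= 31
#
#   def valid_year(user_year):
#       return user_year.isdigit() and 1900 <= int(user_year) <= 2020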
def escape_html(s):
return cgi.escape(s, quote = True)
class MainPage(webapp2.RequestHandler):
def write_form(self, error="", month="", day="", year=""):
self.response.out.write(form % {"error": error,
"month": escape_html(month),
"day": escape_html(day),
"year": escape_html(year)})
def get(self):
self.write_form()
#self.response.headers['Content-Type'] = 'text/plain'
def post(self):
user_month = self.request.get('month')
user_day = self.request.get('day')
user_year = self.request.get('year')
month = valid_month(user_month)
day = valid_day(user_day)
year = valid_year(user_year)
        if not (month and day and year):
self.write_form("That doesn't look valid to me, friend.", user_month, user_day, user_year)
else:
self.redirect("/thanks")
class ThanksHandler(webapp2.RequestHandler):
def get(self):
self.response.out.write("Thanks! That's a totally valid day!")
app = webapp2.WSGIApplication([('/', MainPage), ('/thanks', ThanksHandler)], debug=True)
|
{
"content_hash": "cc7373e85758ea93e6becf590fce5f61",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 93,
"avg_line_length": 22.294117647058822,
"alnum_prop": 0.6131926121372032,
"repo_name": "JaimeLynSchatz/project-stash",
"id": "7b843c468b564f473a460eba36bfe8240ca517fc",
"size": "2496",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "udacity/unit2/web_app_unit2/engineapp/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5124"
},
{
"name": "CSS",
"bytes": "173262"
},
{
"name": "JavaScript",
"bytes": "46076"
},
{
"name": "Python",
"bytes": "13799"
},
{
"name": "Shell",
"bytes": "50"
}
],
"symlink_target": ""
}
|
"""
==========================================
Author: Tyler Brockett
Username: /u/tylerbrockett
Description: Alert Bot
==========================================
"""
from utils.env import env, BOT_USERNAME, DEV_USERNAME, SUBREDDIT
GITHUB_HOME = 'https://github.com/tylerbrockett/Alert-Bot-Reddit'
GITHUB_README = 'https://github.com/tylerbrockett/Alert-Bot-Reddit/blob/master/README.md'
def format_subject(s):
    # Strip any number of leading 're:' prefixes, along with surrounding spaces
    s = s.lstrip()
    while len(s) >= 3 and s[:3].lower() == 're:':
        s = s[3:].lstrip()
    return s
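# A quick illustration of the cleanup above:
#   format_subject('RE: re: unsubscribe 3') -> 'unsubscribe 3'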
def format_subscription_list(subs, title):
result = '##' + title + '\n'
i = 0
if len(subs) == 0:
result += 'No Subscriptions'
for sub in subs:
i += 1
result += sub.to_table('Subscription #' + str(i)) + '\n \n'
return result
def compose_greeting(username):
return 'Hi /u/' + username + ',\n\n'
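# e.g. compose_greeting('example_user') -> 'Hi /u/example_user,\n\n'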
def compose_salutation():
result = '\n\t \n\t \n-/u/' + env(BOT_USERNAME) + '\n\t \n\t \n' + \
env(SUBREDDIT) + ' | ' + \
'/u/' + env(DEV_USERNAME) + ' | ' + \
'[Bot Code](' + GITHUB_HOME + ')\n'
return result
DEFAULT_SUB_MESSAGE = '\t \n**Note:** No subreddit was specified, so /r/buildapcsales will be used by default\t \n'
def compose_subscribe_message(username, new_sub, subs, subreddit_not_specified):
result = compose_greeting(username) + \
'Thanks for your subscription. ' + \
'You will continue to receive updates for posts that match your new subscription. ' + \
'To unsubscribe, send me a message with the body "unsubscribe #" (without quotes) where "#" is the ' + \
'actual subscription number.\t \nAlternatively, you can reply to this message or any replies from ' + \
'the bot in regards to this subscription and reply with "unsubscribe" as the body.\t \n' + \
(DEFAULT_SUB_MESSAGE if subreddit_not_specified else '') + \
new_sub.to_table('New Subscription') + '\t \n\t \n' + \
format_subscription_list(subs, 'Your Subscriptions') + \
compose_salutation()
return result
def compose_all_subscriptions_message(username, all_subscriptions):
result = compose_greeting(username) + \
format_subscription_list(all_subscriptions, 'Your Subscriptions') + \
compose_salutation()
return result
def compose_duplicate_subscription_message(username, existing_sub, new_sub):
result = compose_greeting(username) + \
'We think you already have an existing subscription matching the criteria specified. Below ' + \
'both subscriptions are listed. If you believe there has been a mistake, please visit ' + \
env(SUBREDDIT) + ' or message /u/' + env(DEV_USERNAME) + '.\n\n' + \
existing_sub.to_table('Existing Subscription') + '\n\n' + \
new_sub.to_table('New Subscription') + '\n' + \
compose_salutation()
return result
def compose_help_message(username, subs):
result = compose_greeting(username) + \
'Please visit the bot\'s [Github Readme](' + GITHUB_README + ') for ' + \
'detailed information on how the bot works. If you still have questions, please visit ' + \
env(SUBREDDIT) + ' or message /u/' + env(DEV_USERNAME) + '. Thanks!\t \n\t \n' + \
format_subscription_list(subs, 'Your Subscriptions') + \
compose_salutation()
return result
def compose_unsubscribe_invalid_sub_message(username, subs):
result = compose_greeting(username) + \
'I\'m sorry, but it looks like the subscription you\'re trying to unsubscribe from is invalid. Please ' + \
'make sure you are replying to a message that was in regards to a valid and active subscription. If you ' + \
'think you are receiving this message in error, please visit ' + env(SUBREDDIT) + ' or message ' + \
'/u/' + env(DEV_USERNAME) + ' to get this sorted out.\n\n' + \
format_subscription_list(subs, 'Your Subscriptions') + \
compose_salutation()
return result
def compose_unsubscribe_message(username, removed_subs, subs):
result = compose_greeting(username) + \
'You have unsubscribed from the following item. Thanks for using the bot!\n\n' + \
removed_subs[0].to_table('Unsubscribed From') + \
'\n' + \
format_subscription_list(subs, 'Your Subscriptions') + \
compose_salutation()
return result
def compose_unsubscribe_all_message(username):
result = compose_greeting(username) + \
'You have successfully unsubscribed from all subscriptions.' + \
compose_salutation()
return result
def compose_unsubscribe_from_num_message(username, removed_sub, subs):
result = compose_greeting(username) + \
'You have successfully unsubscribed from the following item.\t \n\t \n' + \
removed_sub.to_table('Unsubscribed From') + '\t \n\t \n' + \
format_subscription_list(subs, 'Your Subscriptions') + \
compose_salutation()
return result
def compose_edit_message(username):
result = compose_greeting(username) + \
'Unfortunately, the bot has only partially implemented this feature, so it is not available quite ' + \
'yet. Please try again at a later date. Sorry for the inconvenience! ' + \
compose_salutation()
return result
def compose_feedback_message(username):
result = compose_greeting(username) + \
'Thank you very much for your feedback! \t \n' + \
'I am open to whatever requests the community makes. If your message is urgent, please feel free to ' + \
'PM me at /u/' + env(DEV_USERNAME) + '. Thanks again!' + \
compose_salutation()
return result
def compose_reject_message(username, subject, body, error):
result = compose_greeting(username) + \
'**There was an error processing your request.** Please review your message and ' + \
'make sure it follows [the guidelines](' + GITHUB_README + ') that have been set. ' + \
'You can also visit ' + env(SUBREDDIT) + ' or message /u/' + env(DEV_USERNAME) + \
'. Thank you for your patience! \n\t \n\t \n' + \
'**Error:** \t \n' + \
error + '\t \n\t \n' + \
'**Your request:** \t \n' + \
'Subject:\t' + subject + '\t \n' + \
'Body: \t' + body + \
compose_salutation()
return result
def format_subreddit_list(subreddits, title):
i = 0
result = '###' + title + '\n' + \
'\#|Subreddit' + '\n' + \
':--|:--' + '\n'
for subreddit in subreddits:
i += 1
result += str(i) + '|' + str(subreddit) + '\n'
return result
def compose_invalid_subreddit_message(username, invalid_subreddits, message):
result = compose_greeting(username) + \
'Unfortunately, it appears that the following subreddit(s) you tried to subscribe to were invalid. If you ' + \
'believe this is a mistake please visit ' + env(SUBREDDIT) + ' or message ' + \
'/u/' + env(DEV_USERNAME) + '. Sorry for the inconvenience!\t \n\t \n' + \
'**Subject:**\t' + message.subject + '\t \n' + \
'**Body:**\t\t' + message.body + '\t \n' + \
format_subreddit_list(invalid_subreddits, 'Invalid Subreddits') + \
compose_salutation()
return result
def format_submission_body_summary(submission):
if submission.is_self:
return '**Body Text:**\t \n' + submission.selftext[:500] + (submission.selftext[500:] and '...')
else:
return '**Post Content Link:**\t \n[Content Link](' + submission.url + ')'
def compose_match_message(sub, submission, subs):
result = compose_greeting(sub.username) + \
'**Post Title:**\t \n' + \
'[' + submission.title + '](' + submission.permalink + ')\t \n\t \n' + \
format_submission_body_summary(submission) + '\t \n\t \n' + \
sub.to_table('Matched Subscription') + '\t \n\t \n' + \
'Reply to the bot with "subs" or "subscriptions" to view your subscriptions. Reply with "unsub", ' + \
'"unsubscribe", or "stop" to remove this subscription.' + \
compose_salutation()
return result
def compose_too_generic_message(username):
result = compose_greeting(username) + \
'Unfortunately, your subscription request is too generic. Allowing such a subscription would probably hog ' + \
'the bot\'s resources. Try constraining the subscription a bit. Sorry, and thanks for your understanding.' + \
compose_salutation()
return result
def format_subreddits(subreddits):
result = '###Subreddits\n'
if len(subreddits) == 0:
result += 'No Results'
return result
result += \
'\#|Subreddit|# of Subscriptions\n' + \
':--|:--:|:--\n'
i = 0
for sub in subreddits:
i += 1
result += \
str(i) + '|' + '/r/' + sub[0] + '|' + str(sub[1]) + '\n'
return result
def compose_statistics(username, current_users, all_users, unique_subs, all_subs, unique_subreddits, all_matches, subreddits):
result = compose_greeting(username) + \
'###Statistics\n' + \
'Statistic|Value\n' + \
':--|:--:' + '\n' + \
'Current Users Subscribed|' + str(current_users) + '\n' + \
'Total Users|' + str(all_users) + '\n' + \
'Unique Subscriptions|' + str(unique_subs) + '\n' + \
'Active Subscriptions|' + str(all_subs) + '\n' + \
'Unique Subreddits|' + str(unique_subreddits) + '\n' + \
'Total Matches to Date|' + str(all_matches) + '\n\n\n' + \
format_subreddits(subreddits) + '\n\n\n' + \
'Thank ***YOU*** for being a part of that!\n' + \
compose_salutation()
return result
def compose_feedback_forward(developer_username, username, body):
result = compose_greeting(developer_username) + \
'You have received feedback from /u/' + username + '. The feedback is quoted below:\n\n"' + \
body + '"' + compose_salutation()
return result
def compose_username_mention_forward(developer_username, username, body):
result = compose_greeting(developer_username) + \
'The bot has been mentioned in a post! the body of the message is quoted below:\n\n' + \
'USERNAME: ' + username + '\t \nBODY:\n' + body
return result
def compose_username_mention_reply(username):
result = 'Hi /u/' + username + ', thanks for the mention!\t \n ' + \
'For those of you that don\'t know about this bot, it\'s purpose is to peruse Reddit for you, and ' + \
'alert you when it finds a match based on what you tell it to look for. You can filter by subreddit, ' + \
'words/phrases in the title or selftext/link of the post, the Redditor that created the post, etc. ' + \
'It is great for finding things you want in subreddits with sales or giveaways! ' + \
'For more information, please visit [the Github README](' + GITHUB_README + ').' + \
compose_salutation()
return result
def compose_post_reply_forward(developer_username, username, body):
result = compose_greeting(developer_username) + \
'Someone has responded to a post by the bot! the comment is quoted below:\n\n' + \
'USERNAME: ' + username + '\nBODY:\n' + body
return result
|
{
"content_hash": "2f350d1c7bc5a878f62243d6ba5956ee",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 126,
"avg_line_length": 42.67279411764706,
"alnum_prop": 0.5928319117773757,
"repo_name": "tylerbrockett/reddit-bot-buildapcsales",
"id": "cc8239bd4477accc544897652c0c3cdc930468e4",
"size": "11607",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/utils/inbox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42868"
},
{
"name": "Shell",
"bytes": "225"
}
],
"symlink_target": ""
}
|
import constants
class Topology:
"""
Represents the topology provided by the Cloud Controller
"""
def __init__(self):
self.service_map = {}
""" :type : dict[str, Service] """
self.initialized = False
""" :type : bool """
self.json_str = None
""" :type : str """
def get_services(self):
"""
Provides the list of services on the topology
:return: The list of Service objects
:rtype: list[Service]
"""
return self.service_map.values()
def get_service(self, service_name):
"""
Provides the service information for the given service name
:param str service_name: service name to be retrieved
:return: Service object of the service, None if the provided service name is invalid
:rtype: Service
"""
if service_name in self.service_map:
return self.service_map[service_name]
return None
def add_service(self, service):
"""
Adds a service to the list of services on the topology
:param Service service:
:return: void
"""
self.service_map[service.service_name] = service
def add_services(self, services):
"""
:param list[Service] services:
:return: void
"""
for service in services:
self.add_service(service)
def remove_service(self, service_name):
"""
Removes the service of the provided service name
:param str service_name:
:return: void
"""
if service_name in self.service_map:
self.service_map.pop(service_name)
def service_exists(self, service_name):
"""
Checks if the service of the provided service name exists
:param str service_name:
:return: True if the service exists, False if otherwise
:rtype: bool
"""
return service_name in self.service_map
def clear(self):
"""
Clears the service information list
:return: void
"""
self.service_map = {}
def __str__(self):
"""
to string override
:return:
"""
return "Topology [serviceMap= %r , initialized= %r ]" % (self.service_map, self.initialized)
class Service:
"""
Represents a service on the topology
"""
def __init__(self, service_name, service_type):
self.service_name = service_name
""" :type : str """
self.service_type = service_type
""" :type : str """
self.cluster_id_cluster_map = {}
""" :type : dict[str, Cluster] """
self.port_map = {}
""" :type : dict[str, Port] """
self.properties = {}
""" :type : dict[str, str] """
def get_clusters(self):
"""
Provides the list of clusters in the particular service
:return: The list of Cluster objects
:rtype: list[Cluster]
"""
return self.cluster_id_cluster_map.values()
def add_cluster(self, cluster):
"""
Adds a cluster to the service
:param Cluster cluster: the cluster to be added
:return: void
"""
self.cluster_id_cluster_map[cluster.cluster_id] = cluster
def remove_cluster(self, cluster_id):
if cluster_id in self.cluster_id_cluster_map:
self.cluster_id_cluster_map.pop(cluster_id)
def cluster_exists(self, cluster_id):
"""
Checks if the cluster with the given cluster id exists for ther service
:param str cluster_id:
:return: True if the cluster for the given cluster id exists, False if otherwise
:rtype: bool
"""
return cluster_id in self.cluster_id_cluster_map
def get_cluster(self, cluster_id):
"""
Provides the Cluster information for the provided cluster id
:param str cluster_id: the cluster id to search for
:return: Cluster object for the given cluster id, None if the cluster id is invalid
:rtype: Cluster
"""
if cluster_id in self.cluster_id_cluster_map:
return self.cluster_id_cluster_map[cluster_id]
return None
def get_ports(self):
"""
Returns the list of ports in the particular service
:return: The list of Port object
:rtype: list[Port]
"""
return self.port_map.values()
def get_port(self, proxy_port):
"""
Provides the port information for the provided proxy port
:param str proxy_port:
:return: Port object for the provided port, None if port is invalid
:rtype: Port
"""
if proxy_port in self.port_map:
return self.port_map[proxy_port]
return None
def add_port(self, port):
self.port_map[port.proxy] = port
def add_ports(self, ports):
for port in ports:
self.add_port(port)
class Cluster:
"""
Represents a cluster for a service
"""
def __init__(self, service_name="", cluster_id="", deployment_policy_name="", autoscale_policy_name=""):
self.service_name = service_name
""" :type : str """
self.cluster_id = cluster_id
""" :type : str """
self.deployment_policy_name = deployment_policy_name
""" :type : str """
self.autoscale_policy_name = autoscale_policy_name
""" :type : str """
self.hostnames = []
""" :type : list[str] """
self.member_map = {}
""" :type : dict[str, Member] """
self.tenant_range = None
""" :type : str """
self.is_lb_cluster = False
""" :type : bool """
self.is_kubernetes_cluster = False
""" :type : bool """
# self.status = None
# """ :type : str """
self.load_balancer_algorithm_name = None
""" :type : str """
self.properties = {}
""" :type : dict[str, str] """
self.member_list_json = None
""" :type : str """
self.app_id = ""
""" :type : str """
# Not relevant to cartridge agent
# self.instance_id_instance_context_map = {}
# """ :type : dict[str, ClusterInstance] """
def add_hostname(self, hostname):
self.hostnames.append(hostname)
def set_tenant_range(self, tenant_range):
Cluster.validate_tenant_range(tenant_range)
self.tenant_range = tenant_range
def get_members(self):
"""
Provides the list of member information in the cluster
:return: The list of Member object
:rtype: list[Member]
"""
return self.member_map.values()
def add_member(self, member):
self.member_map[member.member_id] = member
def remove_member(self, member_id):
if self.member_exists(member_id):
self.member_map.pop(member_id)
def get_member(self, member_id):
"""
Provides the member information for the provided member id
:param str member_id:
:return: Member object for the provided member id, None if member id is invalid
:rtype: Member
"""
if self.member_exists(member_id):
return self.member_map[member_id]
return None
def member_exists(self, member_id):
"""
Checks if the member for the provided member id exists in this cluster
:param str member_id: member id to be searched
:return: True if the member exists, False if otherwise
:rtype: bool
"""
return member_id in self.member_map
def __str__(self):
return "Cluster [serviceName=" + self.service_name + ", clusterId=" + self.cluster_id \
+ ", autoscalePolicyName=" + self.autoscale_policy_name + ", deploymentPolicyName=" \
+ self.deployment_policy_name + ", hostNames=" + self.hostnames + ", tenantRange=" + self.tenant_range \
+ ", isLbCluster=" + self.is_lb_cluster + ", properties=" + self.properties + "]"
def tenant_id_in_range(self, tenant_id):
"""
Check whether a given tenant id is in tenant range of the cluster.
:param str tenant_id: tenant id to be checked
:return: True if the tenant id is in tenant id range, False if otherwise
:rtype: bool
"""
if self.tenant_range is None:
return False
if self.tenant_range == "*":
return True
else:
            arr = self.tenant_range.split(constants.TENANT_RANGE_DELIMITER)
            tenant_id = int(tenant_id)
            tenant_start = int(arr[0])
            if tenant_start <= tenant_id:
tenant_end = arr[1]
if tenant_end == "*":
return True
else:
if tenant_id <= int(tenant_end):
return True
return False
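    # e.g. with tenant_range '1-100': tenant_id_in_range(50) -> True and
    # tenant_id_in_range(101) -> False; a range of '*' matches every tenant
    # (assuming constants.TENANT_RANGE_DELIMITER is '-').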
@staticmethod
def validate_tenant_range(tenant_range):
"""
Validates the tenant range to be either '*' or a delimeted range of numbers
:param str tenant_range: The tenant range string to be validated
:return: void if the provided tenant range is valid, RuntimeError if otherwise
:exception: RuntimeError if the tenant range is invalid
"""
valid = False
if tenant_range == "*":
valid = True
else:
arr = tenant_range.split(constants.TENANT_RANGE_DELIMITER)
if len(arr) == 2:
if arr[0].isdigit() and arr[1].isdigit():
valid = True
elif arr[0].isdigit() and arr[1] == "*":
valid = True
if not valid:
raise RuntimeError("Tenant range %r is not valid" % tenant_range)
class Member:
"""
Represents a member on a particular cluster
"""
def __init__(self, service_name="", cluster_id="", network_partition_id="", partition_id="", member_id="",
cluster_instance_id=""):
self.service_name = service_name
""" :type : str """
self.cluster_id = cluster_id
""" :type : str """
self.network_partition_id = network_partition_id
""" :type : str """
self.cluster_instance_id = cluster_instance_id
""" :type : str """
self.partition_id = partition_id
""" :type : str """
self.member_id = member_id
""" :type : str """
self.port_map = {}
""" :type : dict[str, Port] """
self.init_time = None
""" :type : int """
self.member_public_ips = None
""" :type : str """
self.member_default_public_ip = None
""" :type : str """
self.status = None
""" :type : str """
self.member_private_ips = None
""" :type : str """
self.member_default_private_ip = None
""" :type : str """
self.properties = {}
""" :type : dict[str, str] """
self.lb_cluster_id = None
""" :type : str """
self.json_str = None
""" :type : str """
def is_active(self):
"""
Checks if the member is in active state
:return: True if active, False if otherwise
:rtype: bool
"""
return self.status == MemberStatus.Active
def get_ports(self):
"""
Provides the list of the ports in the member
:return: List of Port objects
:rtype: list[Port]
"""
return self.port_map.values()
def get_port(self, proxy):
"""
Provides the port information for the given port id
:param str proxy: The port id
:return: Port object of the provided port id, None if otherwise
:rtype: Port
"""
if proxy in self.port_map:
return self.port_map[proxy]
return None
def add_port(self, port):
self.port_map[port.proxy] = port
def add_ports(self, ports):
for port in ports:
self.add_port(port)
class Port:
"""
Represents a port on a particular member
"""
def __init__(self, protocol, value, proxy):
self.protocol = protocol
""" :type : str """
self.value = value
""" :type : str """
self.proxy = proxy
""" :type : str """
def __str__(self):
return "Port [protocol=%r, value=%r proxy=%r]" % (self.protocol, self.value, self.proxy)
class ServiceType:
"""
ServiceType enum
"""
SingleTenant = 1
MultiTenant = 2
class ClusterStatus:
"""
ClusterStatus enum
"""
Created = 1
In_Maintenance = 2
Removed = 3
class MemberStatus:
"""
MemberStatus enum
"""
Created = "Created"
Initialized = "Initialized"
Starting = "Starting"
Active = "Active"
In_Maintenance = "In_Maintenance"
ReadyToShutDown = "ReadyToShutDown"
Suspended = "Suspended"
Terminated = "Terminated"
class TopologyContext:
"""
Handles and maintains a model of the topology provided by the Cloud Controller
"""
topology = Topology()
@staticmethod
def get_topology():
if TopologyContext.topology is None:
TopologyContext.topology = Topology()
return TopologyContext.topology
@staticmethod
def update(topology):
TopologyContext.topology = topology
TopologyContext.topology.initialized = True
class Tenant:
"""
Object type representing the tenant details of a single tenant
"""
def __init__(self, tenant_id, tenant_domain):
self.tenant_id = tenant_id
""" :type : int """
self.tenant_domain = tenant_domain
""" :type : str """
self.service_name_subscription_map = {}
""" :type : dict[str, Subscription] """
def get_subscription(self, service_name):
"""
Returns the Subscription object related to the provided service name
:param str service_name: service name to be retrieved
:return: Subscription of the service or None if the service name doesn't exist
:rtype: Subscription
"""
if service_name in self.service_name_subscription_map:
return self.service_name_subscription_map[service_name]
return None
def is_subscribed(self, service_name):
"""
Checks if the given service name has a subscription from this tenant
:param str service_name: name of the service to check
:return: True if the tenant is subscribed to the given service name, False if not
:rtype: bool
"""
return service_name in self.service_name_subscription_map
def add_subscription(self, subscription):
"""
Adds a subscription information entry on the subscription list for this tenant
:param Subscription subscription: Subscription information to be added
:return: void
:rtype: void
"""
self.service_name_subscription_map[subscription.service_name] = subscription
def remove_subscription(self, service_name):
"""
Removes the specified subscription details from the subscription list
:param str service_name: The service name of the subscription to be removed
:return: void
:rtype: void
"""
if service_name in self.service_name_subscription_map:
self.service_name_subscription_map.pop(service_name)
class Subscription:
"""
Subscription information of a particular subscription to a service
"""
def __init__(self, service_name, cluster_ids):
self.service_name = service_name
""" :type : str """
self.cluster_ids = cluster_ids
""" :type : list[str] """
self.subscription_domain_map = {}
""" :type : dict[str, SubscriptionDomain] """
def add_subscription_domain(self, domain_name, application_context):
"""
Adds a subscription domain
:param str domain_name:
:param str application_context:
:return: void
:rtype: void
"""
self.subscription_domain_map[domain_name] = SubscriptionDomain(domain_name, application_context)
def remove_subscription_domain(self, domain_name):
"""
Removes the subscription domain of the specified domain name
:param str domain_name:
:return: void
:rtype: void
"""
if domain_name in self.subscription_domain_map:
self.subscription_domain_map.pop(domain_name)
def subscription_domain_exists(self, domain_name):
"""
Returns the SubscriptionDomain information of the specified domain name
:param str domain_name:
:return: SubscriptionDomain
:rtype: SubscriptionDomain
"""
return domain_name in self.subscription_domain_map
def get_subscription_domains(self):
"""
Returns the list of subscription domains of this subscription
:return: List of SubscriptionDomain objects
:rtype: list[SubscriptionDomain]
"""
return self.subscription_domain_map.values()
class SubscriptionDomain:
"""
Represents a Subscription Domain
"""
def __init__(self, domain_name, application_context):
self.domain_name = domain_name
""" :type : str """
self.application_context = application_context
""" :type : str """
class TenantContext:
"""
Handles and maintains a model of all the information related to tenants within this instance
"""
tenants = {}
initialized = False
tenant_domains = {"carbon.super": Tenant(-1234, "carbon.super")}
@staticmethod
def add_tenant(tenant):
TenantContext.tenants[tenant.tenant_id] = tenant
TenantContext.tenant_domains[tenant.tenant_domain] = tenant
@staticmethod
def remove_tenant(tenant_id):
if tenant_id in TenantContext.tenants:
tenant = TenantContext.get_tenant(tenant_id)
TenantContext.tenants.pop(tenant.tenant_id)
TenantContext.tenant_domains.pop(tenant.tenant_domain)
@staticmethod
def update(tenants):
for tenant in tenants:
TenantContext.add_tenant(tenant)
@staticmethod
def get_tenant(tenant_id):
"""
Gets the Tenant object of the provided tenant ID
:param int tenant_id:
:return: Tenant object of the provided tenant ID
:rtype: Tenant
"""
if tenant_id in TenantContext.tenants:
return TenantContext.tenants[tenant_id]
return None
@staticmethod
def get_tenant_by_domain(tenant_domain):
"""
Gets the Tenant object of the provided tenant domain
:param str tenant_domain:
:return: Tenant object of the provided tenant domain
        :rtype: Tenant
"""
if tenant_domain in TenantContext.tenant_domains:
return TenantContext.tenant_domains[tenant_domain]
return None
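# Minimal usage sketch of the model above. The tenant ID, domain and
# service names below are illustrative values, not part of the agent's
# runtime data.
if __name__ == "__main__":
    tenant = Tenant(1, "example.com")
    subscription = Subscription("php", ["php.cluster.1"])
    subscription.add_subscription_domain("shop.example.com", "/shop")
    tenant.add_subscription(subscription)
    TenantContext.add_tenant(tenant)
    looked_up = TenantContext.get_tenant_by_domain("example.com")
    assert looked_up is tenant
    assert looked_up.is_subscribed("php")
    assert looked_up.get_subscription("php").subscription_domain_exists("shop.example.com")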
|
{
"content_hash": "4e232bc105146cd4100d6d14ab9b164c",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 119,
"avg_line_length": 30.778675282714055,
"alnum_prop": 0.5777346210371614,
"repo_name": "asankasanjaya/stratos",
"id": "fd40511756b9e6a0f9f989fcde6d9424b87fb9cb",
"size": "19838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/entity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "17184"
},
{
"name": "C",
"bytes": "27195"
},
{
"name": "CSS",
"bytes": "81438"
},
{
"name": "HTML",
"bytes": "168864"
},
{
"name": "Java",
"bytes": "5906045"
},
{
"name": "JavaScript",
"bytes": "760876"
},
{
"name": "Python",
"bytes": "514140"
},
{
"name": "Ruby",
"bytes": "3546"
},
{
"name": "Shell",
"bytes": "138441"
}
],
"symlink_target": ""
}
|
import six
from dns import zone as dnszone
from webtest import TestApp
from oslo_config import cfg
from designate.api import admin as admin_api
from designate.api import middleware
from designate.tests.test_api.test_v2 import ApiV2TestCase
cfg.CONF.import_opt('enabled_extensions_admin', 'designate.api.admin',
group='service:api')
class APIV2ZoneImportExportTest(ApiV2TestCase):
def setUp(self):
super(APIV2ZoneImportExportTest, self).setUp()
self.config(enable_api_admin=True, group='service:api')
self.config(enabled_extensions_admin=['zones'], group='service:api')
# Create the application
adminapp = admin_api.factory({})
# Inject the NormalizeURIMiddleware middleware
adminapp = middleware.NormalizeURIMiddleware(adminapp)
# Inject the FaultWrapper middleware
adminapp = middleware.FaultWrapperMiddleware(adminapp)
# Inject the TestContext middleware
adminapp = middleware.TestContextMiddleware(
adminapp, self.admin_context.tenant,
self.admin_context.tenant)
# Obtain a test client
self.adminclient = TestApp(adminapp)
    # Zone import/export
def test_missing_origin(self):
fixture = self.get_zonefile_fixture(variant='noorigin')
response = self.client.post_json('/zones/tasks/imports', fixture,
headers={'Content-type': 'text/dns'})
import_id = response.json_body['id']
self.wait_for_import(import_id, errorok=True)
url = '/zones/tasks/imports/%s' % import_id
response = self.client.get(url)
self.assertEqual(response.json['status'], 'ERROR')
origin_msg = ("The $ORIGIN statement is required and must be the"
" first statement in the zonefile.")
self.assertEqual(response.json['message'], origin_msg)
def test_missing_soa(self):
fixture = self.get_zonefile_fixture(variant='nosoa')
response = self.client.post_json('/zones/tasks/imports', fixture,
headers={'Content-type': 'text/dns'})
import_id = response.json_body['id']
self.wait_for_import(import_id, errorok=True)
url = '/zones/tasks/imports/%s' % import_id
response = self.client.get(url)
self.assertEqual(response.json['status'], 'ERROR')
origin_msg = ("Malformed zonefile.")
self.assertEqual(response.json['message'], origin_msg)
def test_malformed_zonefile(self):
fixture = self.get_zonefile_fixture(variant='malformed')
response = self.client.post_json('/zones/tasks/imports', fixture,
headers={'Content-type': 'text/dns'})
import_id = response.json_body['id']
self.wait_for_import(import_id, errorok=True)
url = '/zones/tasks/imports/%s' % import_id
response = self.client.get(url)
self.assertEqual(response.json['status'], 'ERROR')
origin_msg = ("Malformed zonefile.")
self.assertEqual(response.json['message'], origin_msg)
def test_import_export(self):
# Since v2 doesn't support getting records, import and export the
# fixture, making sure they're the same according to dnspython
post_response = self.client.post('/zones/tasks/imports',
self.get_zonefile_fixture(),
headers={'Content-type': 'text/dns'})
import_id = post_response.json_body['id']
self.wait_for_import(import_id)
url = '/zones/tasks/imports/%s' % import_id
response = self.client.get(url)
self.policy({'zone_export': '@'})
get_response = self.adminclient.get('/zones/export/%s' %
response.json['zone_id'],
headers={'Accept': 'text/dns'})
if six.PY2:
exported_zonefile = get_response.body
else:
exported_zonefile = get_response.body.decode('utf-8')
imported = dnszone.from_text(self.get_zonefile_fixture())
exported = dnszone.from_text(exported_zonefile)
# Compare SOA emails, since zone comparison takes care of origin
imported_soa = imported.get_rdataset(imported.origin, 'SOA')
imported_email = imported_soa[0].rname.to_text()
exported_soa = exported.get_rdataset(exported.origin, 'SOA')
exported_email = exported_soa[0].rname.to_text()
self.assertEqual(imported_email, exported_email)
# Delete SOAs since they have, at the very least, different serials,
# and dnspython considers that to be not equal.
imported.delete_rdataset(imported.origin, 'SOA')
exported.delete_rdataset(exported.origin, 'SOA')
# Delete NS records, since they won't be the same
imported.delete_rdataset(imported.origin, 'NS')
exported.delete_rdataset(exported.origin, 'NS')
imported.delete_rdataset('delegation', 'NS')
self.assertEqual(imported, exported)
|
{
"content_hash": "112268e32e20360c9452d35ca49ec784",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 78,
"avg_line_length": 41.66393442622951,
"alnum_prop": 0.6283690733818611,
"repo_name": "cneill/designate-testing",
"id": "97493279e16248478326e7c49f678d124bd17018",
"size": "5689",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "designate/tests/test_api/test_v2/test_import_export.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2257800"
},
{
"name": "Ruby",
"bytes": "4170"
},
{
"name": "Shell",
"bytes": "12933"
}
],
"symlink_target": ""
}
|
"""
Tests for the archive subsys. With each configured driver.
"""
import os
import pytest
from anchore_engine.subsys import logger, object_store
from anchore_engine.subsys.object_store import get_manager
from anchore_engine.subsys.object_store.config import (
ALT_OBJECT_STORE_CONFIG_KEY,
DEFAULT_OBJECT_STORE_MANAGER_ID,
)
from anchore_engine.subsys.object_store.exc import (
BadCredentialsError,
DriverConfigurationError,
)
from .conftest import (
test_s3_bucket,
test_s3_key,
test_s3_region,
test_s3_secret_key,
test_s3_url,
test_swift_auth_url,
test_swift_container,
test_swift_key,
test_swift_user,
)
logger.enable_test_logging()
document_1 = b'{"document": {"user_id": "admin", "final_action_reason": "policy_evaluation", "matched_whitelisted_images_rule": "matched_blacklisted_images_rule": false}}'
document_json = {
"user_id": "admin",
"final_action_reason": "policy_evaluation",
"matched_whitelisted_images_rule": False,
"created_at": 1522454550,
"evaluation_problems": [],
"last_modified": 1522454550,
"final_action": "stop",
"matched_mapping_rule": {
"name": "default",
"repository": "*",
"image": {"type": "tag", "value": "*"},
"whitelist_ids": ["37fd763e-1765-11e8-add4-3b16c029ac5c"],
"registry": "*",
"id": "c4f9bf74-dc38-4ddf-b5cf-00e9c0074611",
"policy_id": "48e6f7d6-1765-11e8-b5f9-8b6f228548b6",
},
"matched_blacklisted_images_rule": False,
}
test_user_id = "testuser1"
test_bucket_id = "testbucket1"
disable_tests = False
def run_test():
"""
Common test path for all configs to test against
:return:
"""
mgr = get_manager()
logger.info("Basic string operations using get/put/delete")
resp = mgr.put(
userId=test_user_id,
bucket=test_bucket_id,
archiveid="document_1",
data=document_1,
)
logger.info("Document 1 PUT: {}".format(resp))
resp = mgr.get(userId=test_user_id, bucket=test_bucket_id, archiveid="document_1")
assert document_1 == resp
assert mgr.exists(test_user_id, test_bucket_id, "document_1")
assert not mgr.exists(test_user_id, test_bucket_id, "document_10")
logger.info("Document operations")
resp = mgr.put_document(
userId=test_user_id,
bucket=test_bucket_id,
archiveId="document_json",
data=document_json,
)
logger.info("Document JSON PUT Doc: {}".format(resp))
resp = mgr.get_document(
userId=test_user_id, bucket=test_bucket_id, archiveId="document_json"
)
logger.info("Document JSON GET Dock: {}".format(resp))
assert document_json == resp
logger.info("Document operations")
resp = mgr.put_document(
userId=test_user_id,
bucket=test_bucket_id,
archiveId="document_json",
data=document_1.decode("utf-8"),
)
logger.info("Document string PUT Doc: {}".format(resp))
resp = mgr.get_document(
userId=test_user_id, bucket=test_bucket_id, archiveId="document_json"
)
logger.info("Document string GET Dock: {}".format(resp))
assert document_1.decode("utf-8") == resp
def test_noop(anchore_db):
pass
@pytest.mark.skipif(disable_tests, reason="skipped by config")
def test_fs(anchore_db):
config = {
"archive": {
"compression": {"enabled": True},
"storage_driver": {
"name": "localfs",
"config": {"archive_data_dir": "/tmp/archive_test/fs_driver"},
},
}
}
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
run_test()
@pytest.mark.skipif(disable_tests, reason="skipped by config")
def test_swift(swift_container, anchore_db):
config = {
"archive": {
"compression": {"enabled": True},
"storage_driver": {
"name": "swift",
"config": {
"user": test_swift_user,
"key": test_swift_key,
"auth": test_swift_auth_url,
"container": test_swift_container,
},
},
}
}
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
run_test()
@pytest.mark.skipif(disable_tests, reason="skipped by config")
def test_swift_create_container(swift_container, anchore_db):
config = {
"archive": {
"compression": {"enabled": True},
"storage_driver": {
"name": "swift",
"config": {
"user": test_swift_user,
"key": test_swift_key,
"auth": test_swift_auth_url,
"container": "testarchive2",
"create_container": True,
},
},
}
}
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
run_test()
@pytest.mark.skipif(disable_tests, reason="skipped by config")
def test_swift_bad_creds(swift_container, anchore_db):
config = {
"archive": {
"compression": {"enabled": True},
"storage_driver": {
"name": "swift",
"config": {
"user": test_swift_user,
"key": "badkey",
"auth": test_swift_auth_url,
"container": test_swift_container,
},
},
}
}
with pytest.raises(BadCredentialsError) as err:
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
pytest.fail("Should have raised bad creds exception on init")
logger.info("Got expected error: {}".format(err.type))
@pytest.mark.skipif(disable_tests, reason="skipped by config")
def test_swift_bad_container(swift_container, anchore_db):
config = {
"archive": {
"compression": {"enabled": True},
"storage_driver": {
"name": "swift",
"config": {
"user": test_swift_user,
"key": test_swift_key,
"auth": test_swift_auth_url,
"container": "testarchive_does_not_exist",
},
},
}
}
with pytest.raises(DriverConfigurationError) as err:
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
logger.info("Got expected error: {}".format(err.type))
@pytest.mark.skipif(disable_tests, reason="skipped by config")
def test_db(anchore_db):
config = {
"archive": {
"compression": {"enabled": True},
"storage_driver": {"name": "db2", "config": {}},
}
}
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
run_test()
@pytest.mark.skipif(disable_tests, reason="skipped by config")
def test_legacy_db(anchore_db):
# NOTE: legacy db driver does not support compression since it uses string type instead of binary for content storage
config = {
"archive": {
"compression": {"enabled": False},
"storage_driver": {"name": "db", "config": {}},
}
}
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
run_test()
@pytest.mark.skipif(disable_tests, reason="skipped by config")
def test_s3(s3_bucket, anchore_db):
logger.info("Creds: {} / {}".format(test_s3_key, test_s3_secret_key))
config = {
"archive": {
"compression": {"enabled": False},
"storage_driver": {
"name": "s3",
"config": {
"access_key": test_s3_key,
"secret_key": test_s3_secret_key,
"url": test_s3_url,
"region": test_s3_region,
"bucket": test_s3_bucket,
},
},
}
}
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
run_test()
@pytest.mark.skipif(disable_tests, reason="skipped by config")
def test_s3_create_bucket(s3_bucket, anchore_db):
config = {
"archive": {
"compression": {"enabled": False},
"storage_driver": {
"name": "s3",
"config": {
"create_bucket": True,
"access_key": test_s3_key,
"secret_key": test_s3_secret_key,
"url": test_s3_url,
"region": test_s3_region,
"bucket": "testarchivebucket2",
},
},
}
}
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
run_test()
@pytest.mark.skipif(disable_tests, reason="skipped by config")
def test_s3_bad_creds(s3_bucket, anchore_db):
config = {
"archive": {
"compression": {"enabled": False},
"storage_driver": {
"name": "s3",
"config": {
"access_key": test_s3_key,
"secret_key": "notrealkey",
"url": test_s3_url,
"region": test_s3_region,
"bucket": test_s3_bucket,
},
},
}
}
with pytest.raises(BadCredentialsError) as err:
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
pytest.fail("Should have gotten a bad creds error")
logger.info("Got expected error: {}".format(err.type))
config = {
"archive": {
"compression": {"enabled": False},
"storage_driver": {
"name": "s3",
"config": {
"access_key": test_s3_key,
"secret_key": "notrealkey",
"url": test_s3_url,
"region": test_s3_region,
"bucket": test_s3_bucket,
},
},
}
}
with pytest.raises(BadCredentialsError) as err:
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
pytest.fail("Should have gotten a bad creds error")
logger.info("Got expected error: {}".format(err.type))
@pytest.mark.skipif(disable_tests, reason="skipped by config")
def test_s3_bad_bucket(s3_bucket, anchore_db):
config = {
"archive": {
"compression": {"enabled": False},
"storage_driver": {
"name": "s3",
"config": {
"access_key": test_s3_key,
"secret_key": test_s3_secret_key,
"url": test_s3_url,
"region": None,
"bucket": "testarchivebucket_does_not_exist",
},
},
}
}
with pytest.raises(DriverConfigurationError) as err:
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
logger.info("Got expected error: {}".format(err.type))
@pytest.mark.skip # if(disable_tests, reason='skipped by config')
def test_s3_auto(s3_bucket, anchore_db):
os.environ["AWS_ACCESS_KEY"] = test_s3_key
os.environ["AWS_SECRET_ACCESS_KEY"] = test_s3_secret_key
config = {
"archive": {
"compression": {"enabled": False},
"storage_driver": {
"name": "s3",
"config": {
"iamauto": True,
"bucket": "testarchivebucket_does_not_exist",
},
},
}
}
with pytest.raises(DriverConfigurationError) as err:
object_store.initialize(
config,
check_db=False,
manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY],
allow_legacy_fallback=False,
force=True,
)
logger.info("Got expected error: {}".format(err.typee))
|
{
"content_hash": "9a194e424a622247978e2760ea4b986e",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 171,
"avg_line_length": 30.863247863247864,
"alnum_prop": 0.5414705067848241,
"repo_name": "anchore/anchore-engine",
"id": "3542cf1ef620bdf4afcb148863b333551b9d2ddd",
"size": "14444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/subsys/object_store/test_object_store.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3889"
},
{
"name": "Dockerfile",
"bytes": "10954"
},
{
"name": "Makefile",
"bytes": "12274"
},
{
"name": "Python",
"bytes": "4529553"
},
{
"name": "Shell",
"bytes": "16598"
}
],
"symlink_target": ""
}
|
import collections
import struct
from supriya.tools.systemtools.SupriyaObject import SupriyaObject
class SynthDefCompiler(SupriyaObject):
### CLASS VARIABLES ###
__documentation_section__ = 'SynthDef Internals'
### PUBLIC METHODS ###
@staticmethod
def compile_synthdef(synthdef, name):
result = SynthDefCompiler.encode_string(name)
result += synthdef._compiled_ugen_graph
return result
@staticmethod
def compile_parameters(synthdef):
result = []
result.append(SynthDefCompiler.encode_unsigned_int_32bit(
sum(len(_[1]) for _ in synthdef.indexed_parameters)))
for control_ugen in synthdef.control_ugens:
for parameter in control_ugen.parameters:
value = parameter.value
if not isinstance(value, tuple):
value = (value,)
for x in value:
result.append(SynthDefCompiler.encode_float(x))
result.append(SynthDefCompiler.encode_unsigned_int_32bit(
len(synthdef.indexed_parameters)))
for index, parameter in synthdef.indexed_parameters:
name = parameter.name
result.append(SynthDefCompiler.encode_string(name))
result.append(SynthDefCompiler.encode_unsigned_int_32bit(index))
return bytes().join(result)
@staticmethod
def compile_synthdefs(synthdefs):
def flatten(value):
if isinstance(value, collections.Sequence) and \
not isinstance(value, (bytes, bytearray)):
return bytes().join(flatten(x) for x in value)
return value
result = []
encoded_file_type_id = b'SCgf'
result.append(encoded_file_type_id)
encoded_file_version = SynthDefCompiler.encode_unsigned_int_32bit(2)
result.append(encoded_file_version)
encoded_synthdef_count = SynthDefCompiler.encode_unsigned_int_16bit(
len(synthdefs))
result.append(encoded_synthdef_count)
for synthdef in synthdefs:
name = synthdef.name
if not name:
name = synthdef.anonymous_name
result.append(SynthDefCompiler.compile_synthdef(
synthdef, name))
result = flatten(result)
result = bytes(result)
return result
@staticmethod
def compile_ugen(ugen, synthdef):
outputs = ugen._get_outputs()
result = []
result.append(SynthDefCompiler.encode_string(type(ugen).__name__))
result.append(SynthDefCompiler.encode_unsigned_int_8bit(ugen.calculation_rate))
result.append(SynthDefCompiler.encode_unsigned_int_32bit(len(ugen.inputs)))
result.append(SynthDefCompiler.encode_unsigned_int_32bit(len(outputs)))
result.append(SynthDefCompiler.encode_unsigned_int_16bit(int(ugen.special_index)))
for input_ in ugen.inputs:
result.append(SynthDefCompiler.compile_ugen_input_spec(input_, synthdef))
for output in outputs:
result.append(SynthDefCompiler.encode_unsigned_int_8bit(output))
result = bytes().join(result)
return result
@staticmethod
def compile_ugen_graph(synthdef):
result = []
result.append(SynthDefCompiler.encode_unsigned_int_32bit(len(synthdef.constants)))
for constant in synthdef.constants:
result.append(SynthDefCompiler.encode_float(constant))
result.append(SynthDefCompiler.compile_parameters(synthdef))
result.append(SynthDefCompiler.encode_unsigned_int_32bit(len(synthdef.ugens)))
for ugen_index, ugen in enumerate(synthdef.ugens):
result.append(SynthDefCompiler.compile_ugen(ugen, synthdef))
result.append(SynthDefCompiler.encode_unsigned_int_16bit(0))
result = bytes().join(result)
return result
@staticmethod
def compile_ugen_input_spec(input_, synthdef):
from supriya.tools import synthdeftools
result = []
if isinstance(input_, float):
result.append(SynthDefCompiler.encode_unsigned_int_32bit(0xffffffff))
constant_index = synthdef._constants.index(input_)
result.append(SynthDefCompiler.encode_unsigned_int_32bit(
constant_index))
elif isinstance(input_, synthdeftools.OutputProxy):
ugen = input_.source
output_index = input_.output_index
ugen_index = synthdef._ugens.index(ugen)
result.append(SynthDefCompiler.encode_unsigned_int_32bit(ugen_index))
result.append(SynthDefCompiler.encode_unsigned_int_32bit(output_index))
else:
raise Exception('Unhandled input spec: {}'.format(input_))
return bytes().join(result)
@staticmethod
def encode_string(value):
result = bytes(struct.pack('>B', len(value)))
result += bytes(bytearray(value, encoding='ascii'))
return result
@staticmethod
def encode_float(value):
return bytes(struct.pack('>f', value))
@staticmethod
def encode_unsigned_int_8bit(value):
return bytes(struct.pack('>B', value))
@staticmethod
def encode_unsigned_int_16bit(value):
return bytes(struct.pack('>H', value))
@staticmethod
def encode_unsigned_int_32bit(value):
return bytes(struct.pack('>I', value))
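# Minimal encoding sketch (illustrative; exercises only the static helpers
# above, which depend on nothing but ``struct``). Strings are Pascal-style:
# a length byte followed by ASCII bytes; all integers are big-endian.
if __name__ == '__main__':
    assert SynthDefCompiler.encode_string('default') == b'\x07default'
    assert SynthDefCompiler.encode_unsigned_int_16bit(1) == b'\x00\x01'
    assert SynthDefCompiler.encode_unsigned_int_32bit(2) == b'\x00\x00\x00\x02'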
|
{
"content_hash": "e5a3ab5af76fd2c06b9e04cad1d4267d",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 90,
"avg_line_length": 40.1044776119403,
"alnum_prop": 0.6462597692593971,
"repo_name": "andrewyoung1991/supriya",
"id": "b6ec76317165d3fb85c333cbeeb64816847750ae",
"size": "5400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/tools/synthdeftools/SynthDefCompiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2693776"
}
],
"symlink_target": ""
}
|
u"""
This module contains unsorted simple helper functions used in currency
converter.
"""
__author__ = u"Pavol Vargovčík"
__copyright__ = u"Copyright (c) 2017 Pavol Vargovčík"
__credits__ = [u"Pavol Vargovčík"]
__license__ = u"MIT"
__version__ = u"0.1.0"
__maintainer__ = u"Pavol Vargovčík"
__email__ = u"pavol.vargovcik@gmail.com"
__status__ = u"Development"
__docformat__ = u'reStructuredText'
import requests
def get(addr):
u"""
    make a GET request, close the response and raise an exception if the request fails
:param addr: request URL
:type addr: :class:`str`
:returns: the closed response object with status code 200 OK
:rtype: :class:`requests.Response`
:raises: :class:`requests.RequestException`
"""
r = requests.get(addr)
r.close()
r.raise_for_status()
return r
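# A minimal usage sketch; the URL below is illustrative, not an endpoint
# used by the converter itself:
#
# >>> rates_page = get(u'https://api.example.com/rates')
# >>> rates_page.status_code
# 200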
|
{
"content_hash": "6c8ad76d4f4b548037bd9ccead2e304d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 70,
"avg_line_length": 25.303030303030305,
"alnum_prop": 0.6455089820359281,
"repo_name": "p4l1ly/currency_converter",
"id": "4f5cc64dd4595c6c98ec10fe39a3a47db1aca828",
"size": "868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "currency/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "77007"
},
{
"name": "Makefile",
"bytes": "561"
},
{
"name": "Python",
"bytes": "32372"
}
],
"symlink_target": ""
}
|
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.contrib import admin
from .models.Attachment import Attachment
from .models.Audit import Audit
from .models.Contact import Contact
from .models.CreditTrade import CreditTrade
from .models.CreditTradeLogEntry import CreditTradeLogEntry
from .models.FuelSupplier import FuelSupplier
from .models.Group import Group
from .models.GroupMembership import GroupMembership
from .models.History import History
from .models.LookupList import LookupList
from .models.Note import Note
from .models.Notification import Notification
from .models.NotificationEvent import NotificationEvent
from .models.Offer import Offer
from .models.Permission import Permission
from .models.Role import Role
from .models.RolePermission import RolePermission
from .models.User import User
from .models.UserFavourite import UserFavourite
from .models.UserRole import UserRole
admin.site.register(Attachment)
admin.site.register(Audit)
admin.site.register(Contact)
admin.site.register(CreditTrade)
admin.site.register(CreditTradeLogEntry)
admin.site.register(FuelSupplier)
admin.site.register(Group)
admin.site.register(GroupMembership)
admin.site.register(History)
admin.site.register(LookupList)
admin.site.register(Note)
admin.site.register(Notification)
admin.site.register(NotificationEvent)
admin.site.register(Offer)
admin.site.register(Permission)
admin.site.register(Role)
admin.site.register(RolePermission)
admin.site.register(User)
admin.site.register(UserFavourite)
admin.site.register(UserRole)
|
{
"content_hash": "713973ae791e16052cfb9107e029cfaa",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 208,
"avg_line_length": 30.692307692307693,
"alnum_prop": 0.8074352548036758,
"repo_name": "Kiesum/tfrs-1",
"id": "1996b7b7dd7e4dde85e917c95b8ae503b6e31b5d",
"size": "2394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6813"
},
{
"name": "CSS",
"bytes": "482690"
},
{
"name": "Groovy",
"bytes": "5034"
},
{
"name": "HTML",
"bytes": "235627"
},
{
"name": "JavaScript",
"bytes": "272050"
},
{
"name": "Python",
"bytes": "597343"
},
{
"name": "Shell",
"bytes": "10011"
}
],
"symlink_target": ""
}
|
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from time import sleep
from PySiQ import Queue
# ************************************************************************
# Initialize queue
# ************************************************************************
N_WORKERS = 2
queue_instance = Queue()
queue_instance.start_worker(N_WORKERS)
#NOTE: Uncomment this line to enable verbose queuing
#queue_instance.enableStdoutLogging()
# ************************************************************************
# Queue tasks
# ************************************************************************
def foo(N, message):
print message + " started..."
sleep(N)
print message + " finished"
queue_instance.enqueue(
    fn=foo,
    args=(10, "Task 1"),
    task_id="Task 1"
)
queue_instance.enqueue(
    fn=foo,
    args=(4, "Task 2"),
    task_id="Task 2"
)
queue_instance.enqueue(
    fn=foo,
    args=(10, "Task 3"),
    task_id="Task 3"
)
queue_instance.enqueue(
    fn=foo,
    args=(5, "Task 4"),
    task_id="Task 4"
)
queue_instance.enqueue(
    fn=foo,
    args=(4, "Task 5"),
    task_id="Task 5",
    depend=["Task 3", "Task 4"]
)
# Uncomment this code to show the queue status
# while 1:
# print("Task 1 is " + str(queue_instance.check_status("Task 1")))
# print("Task 2 is " + str(queue_instance.check_status("Task 2")))
# print("Task 3 is " + str(queue_instance.check_status("Task 3")))
# print("Task 4 is " + str(queue_instance.check_status("Task 4")))
# print("Task 5 is " + str(queue_instance.check_status("Task 5")))
# sleep(3)
|
{
"content_hash": "4f8174f3ede7882ac99bd206baf51a94",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 85,
"avg_line_length": 24.35820895522388,
"alnum_prop": 0.5214460784313726,
"repo_name": "fikipollo/PySiQ",
"id": "2d5b0874f7b2b8b60e9ddde2f3275549dc50c00e",
"size": "1632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6811"
},
{
"name": "Python",
"bytes": "24102"
}
],
"symlink_target": ""
}
|
from nfsn import NearlyFreeSpeechService
from webfaction import WebFactionService
services = [WebFactionService, NearlyFreeSpeechService]
services_by_name = dict((s.__name__, s) for s in services)
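# Lookup sketch (the key is simply the class name):
# >>> services_by_name['WebFactionService'] is WebFactionService
# True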
|
{
"content_hash": "bf54d34bbe9e84656a468b7923dbd16f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 58,
"avg_line_length": 39.4,
"alnum_prop": 0.8121827411167513,
"repo_name": "bkonkle/update-ip",
"id": "24f0c6c494f81e8ebac822ae3156cf6a71fdea70",
"size": "197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "update_ip/services/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "22006"
}
],
"symlink_target": ""
}
|
import decimal
import os
from contextlib import contextmanager
from django.test import TestCase
from django.core.exceptions import ImproperlyConfigured
from unittest.mock import patch
from configurations.values import (Value, BooleanValue, IntegerValue,
FloatValue, DecimalValue, ListValue,
TupleValue, SingleNestedTupleValue,
SingleNestedListValue, SetValue,
DictValue, URLValue, EmailValue, IPValue,
RegexValue, PathValue, SecretValue,
DatabaseURLValue, EmailURLValue,
CacheURLValue, BackendsValue,
CastingMixin, SearchURLValue,
setup_value, PositiveIntegerValue)
@contextmanager
def env(**kwargs):
with patch.dict(os.environ, clear=True, **kwargs):
yield
class FailingCasterValue(CastingMixin, Value):
caster = 'non.existing.caster'
class ValueTests(TestCase):
def test_value_with_default(self):
value = Value('default', environ=False)
self.assertEqual(type(value), type('default'))
self.assertEqual(value, 'default')
self.assertEqual(str(value), 'default')
def test_value_with_default_and_late_binding(self):
value = Value('default', environ=False, late_binding=True)
self.assertEqual(type(value), Value)
with env(DJANGO_TEST='override'):
self.assertEqual(value.setup('TEST'), 'default')
value = Value(environ_name='TEST')
self.assertEqual(type(value), type('override'))
self.assertEqual(value, 'override')
self.assertEqual(str(value), 'override')
self.assertEqual('{0}'.format(value), 'override')
self.assertEqual('%s' % value, 'override')
value = Value(environ_name='TEST', late_binding=True)
self.assertEqual(type(value), Value)
self.assertEqual(value.value, 'override')
self.assertEqual(str(value), 'override')
self.assertEqual('{0}'.format(value), 'override')
self.assertEqual('%s' % value, 'override')
self.assertEqual(repr(value), repr('override'))
def test_value_truthy(self):
value = Value('default')
self.assertTrue(bool(value))
def test_value_falsey(self):
value = Value()
self.assertFalse(bool(value))
@patch.dict(os.environ, clear=True, DJANGO_TEST='override')
def test_env_var(self):
value = Value('default')
self.assertEqual(value.setup('TEST'), 'override')
self.assertEqual(str(value), 'override')
self.assertNotEqual(value.setup('TEST'), value.default)
self.assertEqual(value.to_python(os.environ['DJANGO_TEST']),
value.setup('TEST'))
def test_value_reuse(self):
value1 = Value('default')
value2 = Value(value1)
self.assertEqual(value1.setup('TEST1'), 'default')
self.assertEqual(value2.setup('TEST2'), 'default')
with env(DJANGO_TEST1='override1', DJANGO_TEST2='override2'):
self.assertEqual(value1.setup('TEST1'), 'override1')
self.assertEqual(value2.setup('TEST2'), 'override2')
def test_value_var_equal(self):
value1 = Value('default')
value2 = Value('default')
self.assertEqual(value1, value2)
self.assertTrue(value1 in ['default'])
def test_env_var_prefix(self):
with patch.dict(os.environ, clear=True, ACME_TEST='override'):
value = Value('default', environ_prefix='ACME')
self.assertEqual(value.setup('TEST'), 'override')
with patch.dict(os.environ, clear=True, TEST='override'):
value = Value('default', environ_prefix='')
self.assertEqual(value.setup('TEST'), 'override')
with patch.dict(os.environ, clear=True, ACME_TEST='override'):
value = Value('default', environ_prefix='ACME_')
self.assertEqual(value.setup('TEST'), 'override')
def test_boolean_values_true(self):
value = BooleanValue(False)
for truthy in value.true_values:
with env(DJANGO_TEST=truthy):
self.assertTrue(bool(value.setup('TEST')))
def test_boolean_values_faulty(self):
self.assertRaises(ValueError, BooleanValue, 'false')
def test_boolean_values_false(self):
value = BooleanValue(True)
for falsy in value.false_values:
with env(DJANGO_TEST=falsy):
self.assertFalse(bool(value.setup('TEST')))
def test_boolean_values_nonboolean(self):
value = BooleanValue(True)
with env(DJANGO_TEST='nonboolean'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_boolean_values_assign_false_to_another_booleanvalue(self):
value1 = BooleanValue(False)
value2 = BooleanValue(value1)
self.assertFalse(value1.setup('TEST1'))
self.assertFalse(value2.setup('TEST2'))
def test_integer_values(self):
value = IntegerValue(1)
with env(DJANGO_TEST='2'):
self.assertEqual(value.setup('TEST'), 2)
with env(DJANGO_TEST='noninteger'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_positive_integer_values(self):
value = PositiveIntegerValue(1)
with env(DJANGO_TEST='2'):
self.assertEqual(value.setup('TEST'), 2)
with env(DJANGO_TEST='noninteger'):
self.assertRaises(ValueError, value.setup, 'TEST')
with env(DJANGO_TEST='-1'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_float_values(self):
value = FloatValue(1.0)
with env(DJANGO_TEST='2.0'):
self.assertEqual(value.setup('TEST'), 2.0)
with env(DJANGO_TEST='noninteger'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_decimal_values(self):
value = DecimalValue(decimal.Decimal(1))
with env(DJANGO_TEST='2'):
self.assertEqual(value.setup('TEST'), decimal.Decimal(2))
with env(DJANGO_TEST='nondecimal'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_failing_caster(self):
self.assertRaises(ImproperlyConfigured, FailingCasterValue)
def test_list_values_default(self):
value = ListValue()
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), ['2', '2'])
with env(DJANGO_TEST='2, 2 ,'):
self.assertEqual(value.setup('TEST'), ['2', '2'])
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), [])
def test_list_values_separator(self):
value = ListValue(separator=':')
with env(DJANGO_TEST='/usr/bin:/usr/sbin:/usr/local/bin'):
self.assertEqual(value.setup('TEST'),
['/usr/bin', '/usr/sbin', '/usr/local/bin'])
    def test_list_values_converter(self):
value = ListValue(converter=int)
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), [2, 2])
value = ListValue(converter=float)
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), [2.0, 2.0])
def test_list_values_custom_converter(self):
value = ListValue(converter=lambda x: x * 2)
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), ['22', '22'])
def test_list_values_converter_exception(self):
value = ListValue(converter=int)
with env(DJANGO_TEST='2,b'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_tuple_values_default(self):
value = TupleValue()
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), ('2', '2'))
with env(DJANGO_TEST='2, 2 ,'):
self.assertEqual(value.setup('TEST'), ('2', '2'))
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), ())
def test_single_nested_list_values_default(self):
value = SingleNestedListValue()
with env(DJANGO_TEST='2,3;4,5'):
expected = [['2', '3'], ['4', '5']]
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST='2;3;4;5'):
expected = [['2'], ['3'], ['4'], ['5']]
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST='2,3,4,5'):
expected = [['2', '3', '4', '5']]
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST='2, 3 , ; 4 , 5 ; '):
expected = [['2', '3'], ['4', '5']]
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), [])
def test_single_nested_list_values_separator(self):
value = SingleNestedListValue(seq_separator=':')
with env(DJANGO_TEST='2,3:4,5'):
self.assertEqual(value.setup('TEST'), [['2', '3'], ['4', '5']])
def test_single_nested_list_values_converter(self):
value = SingleNestedListValue(converter=int)
with env(DJANGO_TEST='2,3;4,5'):
self.assertEqual(value.setup('TEST'), [[2, 3], [4, 5]])
def test_single_nested_list_values_converter_default(self):
value = SingleNestedListValue([['2', '3'], ['4', '5']], converter=int)
self.assertEqual(value.value, [[2, 3], [4, 5]])
def test_single_nested_tuple_values_default(self):
value = SingleNestedTupleValue()
with env(DJANGO_TEST='2,3;4,5'):
expected = (('2', '3'), ('4', '5'))
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST='2;3;4;5'):
expected = (('2',), ('3',), ('4',), ('5',))
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST='2,3,4,5'):
expected = (('2', '3', '4', '5'),)
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST='2, 3 , ; 4 , 5 ; '):
expected = (('2', '3'), ('4', '5'))
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), ())
def test_single_nested_tuple_values_separator(self):
value = SingleNestedTupleValue(seq_separator=':')
with env(DJANGO_TEST='2,3:4,5'):
self.assertEqual(value.setup('TEST'), (('2', '3'), ('4', '5')))
def test_single_nested_tuple_values_converter(self):
value = SingleNestedTupleValue(converter=int)
with env(DJANGO_TEST='2,3;4,5'):
self.assertEqual(value.setup('TEST'), ((2, 3), (4, 5)))
def test_single_nested_tuple_values_converter_default(self):
value = SingleNestedTupleValue((('2', '3'), ('4', '5')), converter=int)
self.assertEqual(value.value, ((2, 3), (4, 5)))
def test_set_values_default(self):
value = SetValue()
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), {'2', '2'})
with env(DJANGO_TEST='2, 2 ,'):
self.assertEqual(value.setup('TEST'), {'2', '2'})
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), set())
def test_dict_values_default(self):
value = DictValue()
with env(DJANGO_TEST='{2: 2}'):
self.assertEqual(value.setup('TEST'), {2: 2})
expected = {2: 2, '3': '3', '4': [1, 2, 3]}
with env(DJANGO_TEST="{2: 2, '3': '3', '4': [1, 2, 3]}"):
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST="""{
2: 2,
'3': '3',
'4': [1, 2, 3],
}"""):
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), {})
with env(DJANGO_TEST='spam'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_email_values(self):
value = EmailValue('spam@eg.gs')
with env(DJANGO_TEST='spam@sp.am'):
self.assertEqual(value.setup('TEST'), 'spam@sp.am')
with env(DJANGO_TEST='spam'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_url_values(self):
value = URLValue('http://eggs.spam')
with env(DJANGO_TEST='http://spam.eggs'):
self.assertEqual(value.setup('TEST'), 'http://spam.eggs')
with env(DJANGO_TEST='httb://spam.eggs'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_url_values_with_no_default(self):
value = URLValue() # no default
with env(DJANGO_TEST='http://spam.eggs'):
self.assertEqual(value.setup('TEST'), 'http://spam.eggs')
def test_url_values_with_wrong_default(self):
self.assertRaises(ValueError, URLValue, 'httb://spam.eggs')
def test_ip_values(self):
value = IPValue('0.0.0.0')
with env(DJANGO_TEST='127.0.0.1'):
self.assertEqual(value.setup('TEST'), '127.0.0.1')
with env(DJANGO_TEST='::1'):
self.assertEqual(value.setup('TEST'), '::1')
with env(DJANGO_TEST='spam.eggs'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_regex_values(self):
value = RegexValue('000--000', regex=r'\d+--\d+')
with env(DJANGO_TEST='123--456'):
self.assertEqual(value.setup('TEST'), '123--456')
with env(DJANGO_TEST='123456'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_path_values_with_check(self):
value = PathValue()
with env(DJANGO_TEST='/'):
self.assertEqual(value.setup('TEST'), '/')
with env(DJANGO_TEST='~/'):
self.assertEqual(value.setup('TEST'), os.path.expanduser('~'))
with env(DJANGO_TEST='/does/not/exist'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_path_values_no_check(self):
value = PathValue(check_exists=False)
with env(DJANGO_TEST='/'):
self.assertEqual(value.setup('TEST'), '/')
with env(DJANGO_TEST='~/spam/eggs'):
self.assertEqual(value.setup('TEST'),
os.path.join(os.path.expanduser('~'),
'spam', 'eggs'))
with env(DJANGO_TEST='/does/not/exist'):
self.assertEqual(value.setup('TEST'), '/does/not/exist')
def test_secret_value(self):
        # no default allowed, only environment values are accepted
self.assertRaises(ValueError, SecretValue, 'default')
value = SecretValue()
self.assertRaises(ValueError, value.setup, 'TEST')
with env(DJANGO_SECRET_KEY='123'):
self.assertEqual(value.setup('SECRET_KEY'), '123')
value = SecretValue(environ_name='FACEBOOK_API_SECRET',
environ_prefix=None,
late_binding=True)
self.assertRaises(ValueError, value.setup, 'TEST')
with env(FACEBOOK_API_SECRET='123'):
self.assertEqual(value.setup('TEST'), '123')
def test_database_url_value(self):
value = DatabaseURLValue()
self.assertEqual(value.default, {})
with env(DATABASE_URL='sqlite://'):
self.assertEqual(value.setup('DATABASE_URL'), {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'django.db.backends.sqlite3',
'HOST': '',
'NAME': ':memory:',
'PASSWORD': '',
'PORT': '',
'USER': '',
}})
def test_database_url_additional_args(self):
def mock_database_url_caster(self, url, engine=None):
return {'URL': url, 'ENGINE': engine}
with patch('configurations.values.DatabaseURLValue.caster',
mock_database_url_caster):
value = DatabaseURLValue(
engine='django_mysqlpool.backends.mysqlpool')
with env(DATABASE_URL='sqlite://'):
self.assertEqual(value.setup('DATABASE_URL'), {
'default': {
'URL': 'sqlite://',
'ENGINE': 'django_mysqlpool.backends.mysqlpool'
}
})
def test_email_url_value(self):
value = EmailURLValue()
self.assertEqual(value.default, {})
with env(EMAIL_URL='smtps://user@domain.com:password@smtp.example.com:587'): # noqa: E501
self.assertEqual(value.setup('EMAIL_URL'), {
'EMAIL_BACKEND': 'django.core.mail.backends.smtp.EmailBackend',
'EMAIL_FILE_PATH': '',
'EMAIL_HOST': 'smtp.example.com',
'EMAIL_HOST_PASSWORD': 'password',
'EMAIL_HOST_USER': 'user@domain.com',
'EMAIL_PORT': 587,
'EMAIL_USE_SSL': False,
'EMAIL_USE_TLS': True})
with env(EMAIL_URL='console://'):
self.assertEqual(value.setup('EMAIL_URL'), {
'EMAIL_BACKEND': 'django.core.mail.backends.console.EmailBackend', # noqa: E501
'EMAIL_FILE_PATH': '',
'EMAIL_HOST': None,
'EMAIL_HOST_PASSWORD': None,
'EMAIL_HOST_USER': None,
'EMAIL_PORT': None,
'EMAIL_USE_SSL': False,
'EMAIL_USE_TLS': False})
with env(EMAIL_URL='smtps://user@domain.com:password@smtp.example.com:wrong'): # noqa: E501
self.assertRaises(ValueError, value.setup, 'TEST')
def test_cache_url_value(self):
cache_setting = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://host:6379/1',
}
}
cache_url = 'redis://user@host:6379/1'
value = CacheURLValue(cache_url)
self.assertEqual(value.default, cache_setting)
value = CacheURLValue()
self.assertEqual(value.default, {})
with env(CACHE_URL='redis://user@host:6379/1'):
self.assertEqual(value.setup('CACHE_URL'), cache_setting)
with env(CACHE_URL='wrong://user@host:port/1'):
with self.assertRaises(Exception) as cm:
value.setup('TEST')
self.assertEqual(cm.exception.args[0], 'Unknown backend: "wrong"')
with env(CACHE_URL='redis://user@host:port/1'):
with self.assertRaises(ValueError) as cm:
value.setup('TEST')
self.assertEqual(
cm.exception.args[0],
"Cannot interpret cache URL value 'redis://user@host:port/1'")
def test_search_url_value(self):
value = SearchURLValue()
self.assertEqual(value.default, {})
with env(SEARCH_URL='elasticsearch://127.0.0.1:9200/index'):
self.assertEqual(value.setup('SEARCH_URL'), {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine', # noqa: E501
'URL': 'http://127.0.0.1:9200',
'INDEX_NAME': 'index',
}})
def test_backend_list_value(self):
backends = ['django.middleware.common.CommonMiddleware']
value = BackendsValue(backends)
self.assertEqual(value.setup('TEST'), backends)
backends = ['non.existing.Backend']
self.assertRaises(ValueError, BackendsValue, backends)
def test_tuple_value(self):
value = TupleValue(None)
self.assertEqual(value.default, ())
self.assertEqual(value.value, ())
value = TupleValue((1, 2))
self.assertEqual(value.default, (1, 2))
self.assertEqual(value.value, (1, 2))
def test_set_value(self):
value = SetValue()
self.assertEqual(value.default, set())
self.assertEqual(value.value, set())
value = SetValue([1, 2])
self.assertEqual(value.default, {1, 2})
self.assertEqual(value.value, {1, 2})
def test_setup_value(self):
class Target:
pass
value = EmailURLValue()
with env(EMAIL_URL='smtps://user@domain.com:password@smtp.example.com:587'): # noqa: E501
setup_value(Target, 'EMAIL', value)
self.assertEqual(Target.EMAIL, {
'EMAIL_BACKEND': 'django.core.mail.backends.smtp.EmailBackend',
'EMAIL_FILE_PATH': '',
'EMAIL_HOST': 'smtp.example.com',
'EMAIL_HOST_PASSWORD': 'password',
'EMAIL_HOST_USER': 'user@domain.com',
'EMAIL_PORT': 587,
'EMAIL_USE_SSL': False,
'EMAIL_USE_TLS': True
})
self.assertEqual(
Target.EMAIL_BACKEND,
'django.core.mail.backends.smtp.EmailBackend')
self.assertEqual(Target.EMAIL_FILE_PATH, '')
self.assertEqual(Target.EMAIL_HOST, 'smtp.example.com')
self.assertEqual(Target.EMAIL_HOST_PASSWORD, 'password')
self.assertEqual(Target.EMAIL_HOST_USER, 'user@domain.com')
self.assertEqual(Target.EMAIL_PORT, 587)
self.assertEqual(Target.EMAIL_USE_TLS, True)
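# For orientation, a minimal sketch of how these Value classes are used in a
# settings module (the class and setting names below are illustrative):
#
#     from configurations import Configuration, values
#
#     class Dev(Configuration):
#         DEBUG = values.BooleanValue(True)
#         CACHE_TIMEOUT = values.IntegerValue(60)  # reads DJANGO_CACHE_TIMEOUT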
|
{
"content_hash": "81e7574236ba51a63580f94119bdc2bf",
"timestamp": "",
"source": "github",
"line_count": 517,
"max_line_length": 112,
"avg_line_length": 41.59574468085106,
"alnum_prop": 0.5622878400372007,
"repo_name": "jezdez/django-configurations",
"id": "2547e509e2e4ecf859e2b522e867d39fcf1cbaef",
"size": "21505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_values.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "75424"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
import sys
import os
import struct
from collections import namedtuple
import io
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('rootdir', type=str, help='the path to rootfs')
parser.add_argument('output', type=argparse.FileType('wb'), nargs='?', help='output file name')
parser.add_argument('--dump', action='store_true', help='dump the fs hierarchy')
parser.add_argument('--binary', action='store_true', help='output binary file')
parser.add_argument('--addr', default='0', help='set the base address of the binary file, default to 0.')
class File(object):
def __init__(self, name):
self._name = name
self._data = open(name, 'rb').read()
@property
def name(self):
return self._name
@property
def c_name(self):
return '_' + self._name.replace('.', '_')
@property
def bin_name(self):
# Pad to 4 bytes boundary with \0
pad_len = 4
bn = self._name + '\0' * (pad_len - len(self._name) % pad_len)
return bn
def c_data(self, prefix=''):
'''Get the C code represent of the file content.'''
head = 'static const rt_uint8_t %s[] = {\n' % \
(prefix + self.c_name)
tail = '\n};'
if self.entry_size == 0:
return ''
if len(self._data) > 0 and type(self._data[0]) == int:
return head + ','.join(('0x%02x' % i for i in self._data)) + tail
else:
return head + ','.join(('0x%02x' % ord(i) for i in self._data)) + tail
@property
def entry_size(self):
return len(self._data)
def bin_data(self, base_addr=0x0):
return bytes(self._data)
def dump(self, indent=0):
print('%s%s' % (' ' * indent, self._name))
class Folder(object):
bin_fmt = struct.Struct('IIII')
bin_item = namedtuple('dirent', 'type, name, data, size')
def __init__(self, name):
self._name = name
self._children = []
@property
def name(self):
return self._name
@property
def c_name(self):
# add _ to avoid conflict with C key words.
return '_' + self._name
@property
def bin_name(self):
# Pad to 4 bytes boundary with \0
pad_len = 4
bn = self._name + '\0' * (pad_len - len(self._name) % pad_len)
return bn
def walk(self):
# os.listdir will return unicode list if the argument is unicode.
# TODO: take care of the unicode names
for ent in os.listdir(u'.'):
if os.path.isdir(ent):
cwd = os.getcwd()
d = Folder(ent)
# depth-first
os.chdir(os.path.join(cwd, ent))
d.walk()
# restore the cwd
os.chdir(cwd)
self._children.append(d)
else:
self._children.append(File(ent))
def sort(self):
def _sort(x, y):
if x.name == y.name:
return 0
elif x.name > y.name:
return 1
else:
return -1
from functools import cmp_to_key
self._children.sort(key=cmp_to_key(_sort))
# sort recursively
for c in self._children:
if isinstance(c, Folder):
c.sort()
def dump(self, indent=0):
print('%s%s' % (' ' * indent, self._name))
for c in self._children:
c.dump(indent + 1)
def c_data(self, prefix=''):
'''get the C code represent of the folder.
It is recursive.'''
# make the current dirent
# static is good. Only root dirent is global visible.
if self.entry_size == 0:
return ''
dhead = 'static const struct romfs_dirent %s[] = {\n' % (prefix + self.c_name)
dtail = '\n};'
body_fmt = ' {{{type}, "{name}", (rt_uint8_t *){data}, sizeof({data})/sizeof({data}[0])}}'
        body_fmt0 = ' {{{type}, "{name}", RT_NULL, 0}}'
        # prefix of children
        cpf = prefix + self.c_name
body_li = []
payload_li = []
for c in self._children:
entry_size = c.entry_size
if isinstance(c, File):
tp = 'ROMFS_DIRENT_FILE'
elif isinstance(c, Folder):
tp = 'ROMFS_DIRENT_DIR'
else:
assert False, 'Unkown instance:%s' % str(c)
if entry_size == 0:
body_li.append(body_fmt0.format(type=tp, name = c.name))
else:
body_li.append(body_fmt.format(type=tp,
name=c.name,
data=cpf+c.c_name))
payload_li.append(c.c_data(prefix=cpf))
# All the data we need is defined in payload so we should append the
# dirent to it. It also meet the depth-first policy in this code.
payload_li.append(dhead + ',\n'.join(body_li) + dtail)
return '\n\n'.join(payload_li)
@property
def entry_size(self):
return len(self._children)
def bin_data(self, base_addr=0x0):
'''Return StringIO object'''
# The binary layout is different from the C code layout. We put the
# dirent before the payload in this mode. But the idea is still simple:
# Depth-First.
#{
# rt_uint32_t type;
# const char *name;
# const rt_uint8_t *data;
# rt_size_t size;
#}
d_li = []
# payload base
p_base = base_addr + self.bin_fmt.size * self.entry_size
# the length to record how many data is in
v_len = p_base
# payload
p_li = []
for c in self._children:
if isinstance(c, File):
# ROMFS_DIRENT_FILE
tp = 0
elif isinstance(c, Folder):
# ROMFS_DIRENT_DIR
tp = 1
else:
assert False, 'Unkown instance:%s' % str(c)
            name = c.bin_name.encode()
name_addr = v_len
v_len += len(name)
data = c.bin_data(base_addr=v_len)
data_addr = v_len
# pad the data to 4 bytes boundary
pad_len = 4
if len(data) % pad_len != 0:
                data += b'\0' * (pad_len - len(data) % pad_len)
v_len += len(data)
d_li.append(self.bin_fmt.pack(*self.bin_item(
type=tp,
name=name_addr,
data=data_addr,
size=c.entry_size)))
p_li.extend((name, data))
return bytes().join(d_li) + bytes().join(p_li)
def get_c_data(tree):
# Handle the root dirent specially.
root_dirent_fmt = '''/* Generated by mkromfs. Edit with caution. */
#include <rtthread.h>
#include <dfs_romfs.h>
{data}
const struct romfs_dirent {name} = {{
ROMFS_DIRENT_DIR, "/", (rt_uint8_t *){rootdirent}, sizeof({rootdirent})/sizeof({rootdirent}[0])
}};
'''
return root_dirent_fmt.format(name='romfs_root',
rootdirent=tree.c_name,
data=tree.c_data())
def get_bin_data(tree, base_addr):
v_len = base_addr + Folder.bin_fmt.size
    name = b'/\0\0\0'
name_addr = v_len
v_len += len(name)
data_addr = v_len
# root entry
data = Folder.bin_fmt.pack(*Folder.bin_item(type=1,
name=name_addr,
data=data_addr,
size=tree.entry_size))
return data + name + tree.bin_data(v_len)
if __name__ == '__main__':
args = parser.parse_args()
os.chdir(args.rootdir)
tree = Folder('romfs_root')
tree.walk()
tree.sort()
if args.dump:
tree.dump()
if args.binary:
data = get_bin_data(tree, int(args.addr, 16))
else:
data = get_c_data(tree).encode()
output = args.output
if not output:
        output = sys.stdout.buffer
output.write(data)
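# Usage sketch (paths are illustrative): emit a C source embedding ./rootfs,
# or a raw binary image laid out for a given base address:
#
#     python mkromfs.py ./rootfs romfs.c
#     python mkromfs.py --binary --addr 0x60000000 ./rootfs romfs.bin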
|
{
"content_hash": "556476365f574091df9775d65bc5e3a6",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 105,
"avg_line_length": 30.913857677902623,
"alnum_prop": 0.4986673128180276,
"repo_name": "armink/rt-thread",
"id": "9f4f24d0d67953ec783bccf4ff2a8d861682ace2",
"size": "8254",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tools/mkromfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "15182128"
},
{
"name": "Batchfile",
"bytes": "183497"
},
{
"name": "C",
"bytes": "698243428"
},
{
"name": "C++",
"bytes": "684162"
},
{
"name": "CMake",
"bytes": "139410"
},
{
"name": "CSS",
"bytes": "22422"
},
{
"name": "GDB",
"bytes": "11796"
},
{
"name": "HTML",
"bytes": "1509964"
},
{
"name": "JavaScript",
"bytes": "637"
},
{
"name": "LLVM",
"bytes": "10344"
},
{
"name": "Lex",
"bytes": "7026"
},
{
"name": "Logos",
"bytes": "7238"
},
{
"name": "M4",
"bytes": "17515"
},
{
"name": "Makefile",
"bytes": "444599"
},
{
"name": "Pawn",
"bytes": "1427"
},
{
"name": "Perl",
"bytes": "16728"
},
{
"name": "Python",
"bytes": "2028276"
},
{
"name": "RPC",
"bytes": "14162"
},
{
"name": "Rich Text Format",
"bytes": "177701"
},
{
"name": "Shell",
"bytes": "415474"
},
{
"name": "Tcl",
"bytes": "179"
},
{
"name": "Yacc",
"bytes": "30555"
}
],
"symlink_target": ""
}
|
import unittest
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Callable, List, Optional, Union
from huggingface_hub.utils import filter_repo_objects
@dataclass
class DummyObject:
path: Path
DUMMY_FILES = ["not_hidden.pdf", "profile.jpg", ".hidden.pdf", ".hidden_picture.png"]
DUMMY_PATHS = [Path(path) for path in DUMMY_FILES]
DUMMY_OBJECTS = [DummyObject(path=path) for path in DUMMY_FILES]
class TestPathsUtils(unittest.TestCase):
def test_get_all_pdfs(self) -> None:
"""Get all PDFs even hidden ones."""
self._check(
items=DUMMY_FILES,
expected_items=["not_hidden.pdf", ".hidden.pdf"],
allow_patterns=["*.pdf"],
)
def test_get_all_pdfs_except_hidden(self) -> None:
"""Get all PDFs except hidden ones."""
self._check(
items=DUMMY_FILES,
expected_items=["not_hidden.pdf"],
allow_patterns=["*.pdf"],
ignore_patterns=[".*"],
)
def test_get_all_pdfs_except_hidden_using_single_pattern(self) -> None:
"""Get all PDFs except hidden ones, using single pattern."""
self._check(
items=DUMMY_FILES,
expected_items=["not_hidden.pdf"],
allow_patterns="*.pdf", # not a list
ignore_patterns=".*", # not a list
)
def test_get_all_images(self) -> None:
"""Get all images."""
self._check(
items=DUMMY_FILES,
expected_items=["profile.jpg", ".hidden_picture.png"],
allow_patterns=["*.png", "*.jpg"],
)
def test_get_all_images_except_hidden_from_paths(self) -> None:
"""Get all images except hidden ones, from Path list."""
self._check(
items=DUMMY_PATHS,
expected_items=[Path("profile.jpg")],
allow_patterns=["*.png", "*.jpg"],
ignore_patterns=".*",
)
def test_get_all_images_except_hidden_from_objects(self) -> None:
"""Get all images except hidden ones, from object list."""
self._check(
items=DUMMY_OBJECTS,
expected_items=[DummyObject(path="profile.jpg")],
allow_patterns=["*.png", "*.jpg"],
ignore_patterns=".*",
key=lambda x: x.path,
)
def test_filter_objects_key_not_provided(self) -> None:
"""Test ValueError is raised if filtering non-string objects."""
with self.assertRaisesRegex(ValueError, "Please provide `key` argument"):
list(
filter_repo_objects(
items=DUMMY_OBJECTS,
allow_patterns=["*.png", "*.jpg"],
ignore_patterns=".*",
)
)
def _check(
self,
items: List[Any],
expected_items: List[Any],
allow_patterns: Optional[Union[List[str], str]] = None,
ignore_patterns: Optional[Union[List[str], str]] = None,
key: Optional[Callable[[Any], str]] = None,
) -> None:
"""Run `filter_repo_objects` and check output against expected result."""
self.assertListEqual(
list(
filter_repo_objects(
items=items,
allow_patterns=allow_patterns,
ignore_patterns=ignore_patterns,
key=key,
)
),
expected_items,
)
|
{
"content_hash": "f3deb19c1cbed12bc360fa650cc41700",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 85,
"avg_line_length": 33.55339805825243,
"alnum_prop": 0.5439814814814815,
"repo_name": "huggingface/huggingface_hub",
"id": "8c968ad0f3d1755b2463898e9f8390727820bc64",
"size": "3456",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_utils_paths.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "338"
},
{
"name": "Python",
"bytes": "1086946"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('occurrence', '0003_auto_20181022_1215'),
]
operations = [
migrations.AlterModelOptions(
name='areaencounter',
options={'ordering': ['-northern_extent', 'name'], 'verbose_name': 'Area Encounter', 'verbose_name_plural': 'Area Encounters'},
),
migrations.AlterField(
model_name='areaencounter',
name='code',
field=models.CharField(blank=True, help_text='A URL-safe, short code. Multiple records of the same Area will be recognised by the same area type and code.', max_length=1000, null=True, verbose_name='Code'),
),
]
|
{
"content_hash": "b0357ec1fb12e6db12621885d1c660c0",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 218,
"avg_line_length": 36.8,
"alnum_prop": 0.623641304347826,
"repo_name": "parksandwildlife/wastd",
"id": "aeb9e128b13c883d71d4de59445e768ae7bd5028",
"size": "785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "occurrence/migrations/0004_auto_20181022_1250.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9150"
},
{
"name": "HTML",
"bytes": "60851"
},
{
"name": "JavaScript",
"bytes": "18966"
},
{
"name": "Python",
"bytes": "853568"
},
{
"name": "Shell",
"bytes": "4200"
},
{
"name": "TeX",
"bytes": "16951"
}
],
"symlink_target": ""
}
|
'''
Set manipulation routines for the LEMON parser generator.
'''
size = 0
def SetSize(n):
'''Set the global set size to hold elements 0 through n.'''
global size
size = n + 1
return
def SetNew():
'''Allocate a new set.'''
return [False] * size
def SetAdd(s, e):
'''Add a new element to the set. Return True if the element was
added and False if it was already there.
'''
rv = s[e]
s[e] = True
return not rv
def SetUnion(s1, s2):
'''Add every element of s2 to s1. Return True if s1 changes.'''
progress = False
for i in range(size):
if not s2[i]:
continue
if not s1[i]:
progress = True
s1[i] = True
return progress
def SetFind(X, Y):
'''True if Y is in set X.'''
return X[Y]
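# Usage sketch (hypothetical values): sets are fixed-size boolean lists, so
# SetSize must be called before SetNew.
#
#     SetSize(3)              # sets may hold elements 0..3
#     s = SetNew()
#     SetAdd(s, 2)            # -> True (newly added)
#     SetAdd(s, 2)            # -> False (already present)
#     SetFind(s, 2)           # -> True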
|
{
"content_hash": "381b022c613988966836ca0c42c574bd",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 68,
"avg_line_length": 17.65909090909091,
"alnum_prop": 0.5521235521235521,
"repo_name": "probcomp/bayeslite",
"id": "38e9feb54e848e324827de4a913b3cb7b40dc3dc",
"size": "777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "external/lemonade/dist/lemonade/set.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2007"
},
{
"name": "Python",
"bytes": "1079798"
},
{
"name": "Shell",
"bytes": "2560"
},
{
"name": "Yacc",
"bytes": "42578"
}
],
"symlink_target": ""
}
|
"""
Created on November 8, 2016
Description: this script brings together all the libraries responsible for characterizing the CCD gain, namely:
plotGraph, logfile, makeList_imagesInput and Gain_processesImages. The script creates two image lists, flat and bias,
and computes the signal intensity as a function of the variance. These data are used to plot a linear graph,
and the gain is obtained from the slope of a fitted straight line; a second graph is plotted showing
the residuals of the data after subtracting the values of the fitted line.
@author: Denis Varise Bernardes & Eder Martioli
Laboratorio Nacional de Astrofisica, Brazil.
example: ./ganhoCompleto.py -f'Flat','nImages' -b'Bias'
The list provided to the program must contain the bias images and the associated flat images grouped together in the form
biasA,biasB,flatA,flatB.
"""
__version__ = "1.0"
__copyright__ = """
Copyright (c) ... All rights reserved.
"""
import os, sys
import matplotlib.pyplot as plt
import datetime
from optparse import OptionParser
from plotGraph import Graph_sinal_variance, Graph_residuos
from logfile import logfile
from makeList_imagesInput import criaArq_listaImgInput, LeArquivoReturnLista
from Gain_processesImages import calcXY_YerrorBar_XerrorBar, parametrosCaixaPixels
from criaArq_resultadoCaract import arquivoCaract
from astropy.io import fits
numeroImagens = 5
Flat_name = 'Flat'
Bias_name = 'Bias'
images_path = r'C:\Users\observer\Desktop\Imagens_ECC\Gain'
criaArq_listaImgInput(numeroImagens, Flat_name, images_path)
criaArq_listaImgInput(1, Bias_name, images_path)
listaBias = LeArquivoReturnLista(Bias_name+'list.txt', images_path)
listaFlat = LeArquivoReturnLista(Flat_name+'list.txt', images_path)
#----------------------------------------------------------------------------------------------------------------------
caixa_pixels = '512,512,100'
parametersBox = parametrosCaixaPixels(caixa_pixels, listaFlat[0])
X,Y,SigmaTotal, XsigmaBar, sigmaBias = calcXY_YerrorBar_XerrorBar(listaFlat, listaBias, numeroImagens, parametersBox, images_path)
plt.figure(figsize=(17,8))
ganho = Graph_sinal_variance(X,Y,SigmaTotal, XsigmaBar, sigmaBias)
Graph_residuos(X,Y, SigmaTotal, images_path)
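# Illustrative sketch (not part of this pipeline): in a photon-transfer
# analysis the gain follows from the slope of a straight-line fit of the
# variance against the mean signal, e.g. with numpy -- assuming X holds the
# mean signal (ADU) and Y the variance (ADU^2):
#
#     import numpy as np
#     slope, intercept = np.polyfit(X, Y, 1)
#     gain = 1.0 / slope      # e-/ADU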
|
{
"content_hash": "342b4dc5a0e3f2ea0546af7c81727565",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 130,
"avg_line_length": 38.31147540983606,
"alnum_prop": 0.730423620025674,
"repo_name": "DBernardes/ProjetoECC",
"id": "e9cb6deca9817965a4ae07ac1657dc59c6ef5c27",
"size": "2380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Ganho/Codigo/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "152340"
}
],
"symlink_target": ""
}
|
"""URL constructor class to generate valid API request URLs."""
from __future__ import absolute_import, division, print_function
class URLBuilder(object):
"""Handles generating valid URLs to requests resources from DevRant."""
# pylint: disable=too-many-instance-attributes,too-many-function-args
def __init__(self):
"""Initialize class instance."""
self.app_version = 3
self.base_url = "https://www.devrant.com/api/devrant"
self.rants_url = "%s/rants?sort=%s&limit=%d&skip=%d&app=%d"
self.weekly_rants_url = "%s/weekly-rants?sort=%s&skip=%d&app=%d"
self.rant_url = "%s/rants/%d?app=%d"
self.search_url = "%s/search?term=%s&app=%d"
self.collabs_url = "%s/collabs?app=%d&skip=%d&limit=%d"
self.sort_top = "top"
self.sort_algo = "algo"
self.sort_recent = "recent"
self.valid_sorts = [self.sort_algo, self.sort_recent, self.sort_top]
def get_rants_url(self, sort, limit, skip):
"""Generate a request URL to get available rants."""
sort = self.validate_sort_input(sort)
limit = self.validate_int_input(limit)
skip = self.validate_int_input(skip)
return self.rants_url % (self.base_url, sort, limit, skip, self.app_version)
def get_rant_by_id_url(self, rant_id):
"""Generate a request URL to get a rant by its id."""
return self.rant_url % (self.base_url, rant_id, self.app_version)
def get_weekly_rant_url(self, sort, skip):
"""Generate a request URL to get the weekly rants."""
sort = self.validate_sort_input(sort)
skip = self.validate_int_input(skip)
return self.weekly_rants_url % (self.base_url, sort, skip, self.app_version)
def search_rants_by_keywords(self, keyword):
"""Generate a request URL to search rants by keywords."""
return self.search_url % (self.base_url, keyword, self.app_version)
def get_collabs_url(self, skip, limit):
"""Generate a request URL to get available collabs."""
limit = self.validate_int_input(limit)
skip = self.validate_int_input(skip)
return self.collabs_url % (self.base_url, self.app_version, skip, limit)
def validate_sort_input(self, sort_type):
"""Validate that input for sort has proper type."""
if sort_type == "":
sort_type = "top"
elif sort_type not in self.valid_sorts:
raise ValueError("Invalid Sort type")
return sort_type
@staticmethod
def validate_int_input(some_int):
"""Validate that integer is not negative."""
if some_int >= 0:
return some_int
raise ValueError("Positive integer required")
|
{
"content_hash": "dc6be959bae2bf8ade7111eddcf401b1",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 84,
"avg_line_length": 41.25757575757576,
"alnum_prop": 0.625413147264047,
"repo_name": "aayush26/pirant",
"id": "b4337942f6d17b6e9ed8583f22b34d68d71f9bf7",
"size": "2723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pirant/urlbuilder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47448"
}
],
"symlink_target": ""
}
|
"""
==============================
Generate simulated evoked data
==============================
"""
# Author: Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.time_frequency import fit_iir_model_raw
from mne.viz import plot_sparse_source_estimates
from mne.simulation import simulate_sparse_stc, simulate_evoked
print(__doc__)
###############################################################################
# Load real data as templates
data_path = sample.data_path()
raw = mne.io.read_raw_fif(data_path + '/MEG/sample/sample_audvis_raw.fif')
proj = mne.read_proj(data_path + '/MEG/sample/sample_audvis_ecg-proj.fif')
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
fwd = mne.read_forward_solution(fwd_fname, force_fixed=True, surf_ori=True)
fwd = mne.pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
cov = mne.read_cov(cov_fname)
info = mne.io.read_info(ave_fname)
label_names = ['Aud-lh', 'Aud-rh']
labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
for ln in label_names]
###############################################################################
# Generate source time courses from 2 dipoles and the corresponding evoked data
times = np.arange(300, dtype=float) / raw.info['sfreq'] - 0.1
rng = np.random.RandomState(42)
def data_fun(times):
"""Function to generate random source time courses"""
return (1e-9 * np.sin(30. * times) *
np.exp(- (times - 0.15 + 0.05 * rng.randn(1)) ** 2 / 0.01))
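# (data_fun above yields a 1 nAm sinusoid at 30 rad/s (~4.8 Hz) under a
# Gaussian envelope with sigma ~= 71 ms, centred near 150 ms with ~50 ms of
# random latency jitter per call.)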
stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times,
random_state=42, labels=labels, data_fun=data_fun)
###############################################################################
# Generate noisy evoked data
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
iir_filter = fit_iir_model_raw(raw, order=5, picks=picks, tmin=60, tmax=180)[1]
snr = 6. # dB
evoked = simulate_evoked(fwd, stc, info, cov, snr, iir_filter=iir_filter)
###############################################################################
# Plot
plot_sparse_source_estimates(fwd['src'], stc, bgcolor=(1, 1, 1),
opacity=0.5, high_resolution=True)
plt.figure()
plt.psd(evoked.data[0])
evoked.plot()
|
{
"content_hash": "8ce521fbafe4282d40e004460b26848f",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 35.85333333333333,
"alnum_prop": 0.5868352547415396,
"repo_name": "wronk/mne-python",
"id": "0fab94f95b6d2672d2923d2392f183c87121744b",
"size": "2689",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "examples/simulation/plot_simulate_evoked_data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3769"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "5079143"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
import multiprocessing as mp, numpy as np
import signal
def doit(func, queuein, queueout, kw):
# Worker loop: ignore SIGINT so Ctrl-C is handled by the parent process;
# a None sentinel on the input queue tells the worker to exit.
signal.signal(signal.SIGINT, signal.SIG_IGN)
while True:
inp = queuein.get()
if inp is None:
break
i, p = inp
res = func(p, **kw)
queueout.put((i, res))
class pool:
def __init__(self, func, kw={}, nthreads=1):
self.nthreads = nthreads
self.queuein = mp.Queue()
self.queueout = mp.Queue()
self.procs = [mp.Process(target=doit, name='xpool_%d'%i,
args=(func, self.queuein, self.queueout, kw))
for i in range(nthreads) ]
for proc in self.procs:
proc.start()
self.stage = {}
def apply_async(self, i, p):
self.queuein.put((i, p))
def get(self, i):
# Block until the result for task i arrives; results for other tasks that
# arrive first are parked in self.stage for later retrieval.
if i in self.stage:
ret = self.stage[i]
del self.stage[i]
return ret
else:
while True:
iret, ret = self.queueout.get()
if iret != i:
self.stage[iret] = ret
else:
return ret
def get_any(self):
if len(self.stage) != 0:
i, ret = self.stage.popitem()
return i, ret
else:
iret, ret = self.queueout.get()
return iret, ret
def map(self, params):
for i, p in enumerate(params):
self.queuein.put((i, p))
ret = [None] * len(params)
for i in range(len(params)):
resi, res = self.queueout.get()
ret[resi] = res
return ret
def __del__(self):
self.join()
def join(self):
# Guard against a second call (e.g. from __del__ after an explicit join).
if not hasattr(self, 'queuein'):
return
for p in self.procs:
self.queuein.put(None)
for p in self.procs:
p.join()
del self.queuein
del self.queueout
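# Usage sketch (hypothetical worker function): map() returns results in
# input order even though workers may finish out of order.
#
#     def square(x):
#         return x * x
#
#     p = pool(square, nthreads=4)
#     p.map([1, 2, 3])        # -> [1, 4, 9]
#     p.join()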
|
{
"content_hash": "7cd463260afb5cc8cd83a2f29376b4e7",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 58,
"avg_line_length": 21.507462686567163,
"alnum_prop": 0.6176266481609993,
"repo_name": "plasidu/phoenix4iraf",
"id": "7e107a8029c1067e5947c3b7288048c28d26cf7a",
"size": "1441",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astrolibpy/utils/workerpool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "155"
},
{
"name": "Python",
"bytes": "258947"
},
{
"name": "Shell",
"bytes": "139"
}
],
"symlink_target": ""
}
|
import contextlib
import io
import os
import sys
import importlib.util
import time
import struct
import zipfile
import unittest
from tempfile import TemporaryFile
from random import randint, random, getrandbits
from test.support import (TESTFN, findfile, unlink, rmtree,
requires_zlib, requires_bz2, requires_lzma,
captured_stdout, check_warnings)
TESTFN2 = TESTFN + "2"
TESTFNDIR = TESTFN + "d"
FIXEDTEST_SIZE = 1000
DATAFILES_DIR = 'zipfile_datafiles'
SMALL_TEST_DATA = [('_ziptest1', '1q2w3e4r5t'),
('ziptest2dir/_ziptest2', 'qawsedrftg'),
('ziptest2dir/ziptest3dir/_ziptest3', 'azsxdcfvgb'),
('ziptest2dir/ziptest3dir/ziptest4dir/_ziptest3', '6y7u8i9o0p')]
def getrandbytes(size):
return getrandbits(8 * size).to_bytes(size, 'little')
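# (getrandbits(8 * size) draws a random size-byte integer; to_bytes then
# serialises it little-endian, yielding exactly `size` random bytes.)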
def get_files(test):
yield TESTFN2
with TemporaryFile() as f:
yield f
test.assertFalse(f.closed)
with io.BytesIO() as f:
yield f
test.assertFalse(f.closed)
def openU(zipfp, fn):
with check_warnings(('', DeprecationWarning)):
return zipfp.open(fn, 'rU')
class AbstractTestsWithSourceFile:
@classmethod
def setUpClass(cls):
cls.line_gen = [bytes("Zipfile test line %d. random float: %f\n" %
(i, random()), "ascii")
for i in range(FIXEDTEST_SIZE)]
cls.data = b''.join(cls.line_gen)
def setUp(self):
# Make a source file with some lines
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def make_test_archive(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression) as zipfp:
zipfp.write(TESTFN, "another.name")
zipfp.write(TESTFN, TESTFN)
zipfp.writestr("strfile", self.data)
def zip_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read("another.name"), self.data)
self.assertEqual(zipfp.read("strfile"), self.data)
# Print the ZIP directory
fp = io.StringIO()
zipfp.printdir(file=fp)
directory = fp.getvalue()
lines = directory.splitlines()
self.assertEqual(len(lines), 4) # Number of files + header
self.assertIn('File Name', lines[0])
self.assertIn('Modified', lines[0])
self.assertIn('Size', lines[0])
fn, date, time_, size = lines[1].split()
self.assertEqual(fn, 'another.name')
self.assertTrue(time.strptime(date, '%Y-%m-%d'))
self.assertTrue(time.strptime(time_, '%H:%M:%S'))
self.assertEqual(size, str(len(self.data)))
# Check the namelist
names = zipfp.namelist()
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
# Check infolist
infos = zipfp.infolist()
names = [i.filename for i in infos]
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
for i in infos:
self.assertEqual(i.file_size, len(self.data))
# check getinfo
for nm in (TESTFN, "another.name", "strfile"):
info = zipfp.getinfo(nm)
self.assertEqual(info.filename, nm)
self.assertEqual(info.file_size, len(self.data))
# Check that testzip doesn't raise an exception
zipfp.testzip()
def test_basic(self):
for f in get_files(self):
self.zip_test(f, self.compression)
def zip_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(256)
if not read_data:
break
zipdata1.append(read_data)
zipdata2 = []
with zipfp.open("another.name") as zipopen2:
while True:
read_data = zipopen2.read(256)
if not read_data:
break
zipdata2.append(read_data)
self.assertEqual(b''.join(zipdata1), self.data)
self.assertEqual(b''.join(zipdata2), self.data)
def test_open(self):
for f in get_files(self):
self.zip_open_test(f, self.compression)
def zip_random_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(randint(1, 1024))
if not read_data:
break
zipdata1.append(read_data)
self.assertEqual(b''.join(zipdata1), self.data)
def test_random_open(self):
for f in get_files(self):
self.zip_random_open_test(f, self.compression)
def zip_read1_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp, \
zipfp.open(TESTFN) as zipopen:
zipdata = []
while True:
read_data = zipopen.read1(-1)
if not read_data:
break
zipdata.append(read_data)
self.assertEqual(b''.join(zipdata), self.data)
def test_read1(self):
for f in get_files(self):
self.zip_read1_test(f, self.compression)
def zip_read1_10_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp, \
zipfp.open(TESTFN) as zipopen:
zipdata = []
while True:
read_data = zipopen.read1(10)
self.assertLessEqual(len(read_data), 10)
if not read_data:
break
zipdata.append(read_data)
self.assertEqual(b''.join(zipdata), self.data)
def test_read1_10(self):
for f in get_files(self):
self.zip_read1_10_test(f, self.compression)
def zip_readline_read_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp, \
zipfp.open(TESTFN) as zipopen:
data = b''
while True:
read = zipopen.readline()
if not read:
break
data += read
read = zipopen.read(100)
if not read:
break
data += read
self.assertEqual(data, self.data)
def test_readline_read(self):
# Issue #7610: calls to readline() interleaved with calls to read().
for f in get_files(self):
self.zip_readline_read_test(f, self.compression)
def zip_readline_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
for line in self.line_gen:
linedata = zipopen.readline()
self.assertEqual(linedata, line)
def test_readline(self):
for f in get_files(self):
self.zip_readline_test(f, self.compression)
def zip_readlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
ziplines = zipopen.readlines()
for line, zipline in zip(self.line_gen, ziplines):
self.assertEqual(zipline, line)
def test_readlines(self):
for f in get_files(self):
self.zip_readlines_test(f, self.compression)
def zip_iterlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
for line, zipline in zip(self.line_gen, zipopen):
self.assertEqual(zipline, line)
def test_iterlines(self):
for f in get_files(self):
self.zip_iterlines_test(f, self.compression)
def test_low_compression(self):
"""Check for cases where compressed data is larger than original."""
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", self.compression) as zipfp:
zipfp.writestr("strfile", '12')
# Get an open object for strfile
with zipfile.ZipFile(TESTFN2, "r", self.compression) as zipfp:
with zipfp.open("strfile") as openobj:
self.assertEqual(openobj.read(1), b'1')
self.assertEqual(openobj.read(1), b'2')
def test_writestr_compression(self):
zipfp = zipfile.ZipFile(TESTFN2, "w")
zipfp.writestr("b.txt", "hello world", compress_type=self.compression)
info = zipfp.getinfo('b.txt')
self.assertEqual(info.compress_type, self.compression)
def test_read_return_size(self):
# Issue #9837: ZipExtFile.read() shouldn't return more bytes
# than requested.
for test_size in (1, 4095, 4096, 4097, 16384):
file_size = test_size + 1
junk = getrandbytes(file_size)
with zipfile.ZipFile(io.BytesIO(), "w", self.compression) as zipf:
zipf.writestr('foo', junk)
with zipf.open('foo', 'r') as fp:
buf = fp.read(test_size)
self.assertEqual(len(buf), test_size)
def test_truncated_zipfile(self):
fp = io.BytesIO()
with zipfile.ZipFile(fp, mode='w') as zipf:
zipf.writestr('strfile', self.data, compress_type=self.compression)
end_offset = fp.tell()
zipfiledata = fp.getvalue()
fp = io.BytesIO(zipfiledata)
with zipfile.ZipFile(fp) as zipf:
with zipf.open('strfile') as zipopen:
fp.truncate(end_offset - 20)
with self.assertRaises(EOFError):
zipopen.read()
fp = io.BytesIO(zipfiledata)
with zipfile.ZipFile(fp) as zipf:
with zipf.open('strfile') as zipopen:
fp.truncate(end_offset - 20)
with self.assertRaises(EOFError):
while zipopen.read(100):
pass
fp = io.BytesIO(zipfiledata)
with zipfile.ZipFile(fp) as zipf:
with zipf.open('strfile') as zipopen:
fp.truncate(end_offset - 20)
with self.assertRaises(EOFError):
while zipopen.read1(100):
pass
def test_repr(self):
fname = 'file.name'
for f in get_files(self):
with zipfile.ZipFile(f, 'w', self.compression) as zipfp:
zipfp.write(TESTFN, fname)
r = repr(zipfp)
self.assertIn("mode='w'", r)
with zipfile.ZipFile(f, 'r') as zipfp:
r = repr(zipfp)
if isinstance(f, str):
self.assertIn('filename=%r' % f, r)
else:
self.assertIn('file=%r' % f, r)
self.assertIn("mode='r'", r)
r = repr(zipfp.getinfo(fname))
self.assertIn('filename=%r' % fname, r)
self.assertIn('filemode=', r)
self.assertIn('file_size=', r)
if self.compression != zipfile.ZIP_STORED:
self.assertIn('compress_type=', r)
self.assertIn('compress_size=', r)
with zipfp.open(fname) as zipopen:
r = repr(zipopen)
self.assertIn('name=%r' % fname, r)
self.assertIn("mode='r'", r)
if self.compression != zipfile.ZIP_STORED:
self.assertIn('compress_type=', r)
self.assertIn('[closed]', repr(zipopen))
self.assertIn('[closed]', repr(zipfp))
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
class StoredTestsWithSourceFile(AbstractTestsWithSourceFile,
unittest.TestCase):
compression = zipfile.ZIP_STORED
test_low_compression = None
def zip_test_writestr_permissions(self, f, compression):
# Make sure that writestr creates files with mode 0600,
# when it is passed a name rather than a ZipInfo instance.
self.make_test_archive(f, compression)
with zipfile.ZipFile(f, "r") as zipfp:
zinfo = zipfp.getinfo('strfile')
self.assertEqual(zinfo.external_attr, 0o600 << 16)
def test_writestr_permissions(self):
for f in get_files(self):
self.zip_test_writestr_permissions(f, zipfile.ZIP_STORED)
def test_absolute_arcnames(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, "/absolute")
with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp:
self.assertEqual(zipfp.namelist(), ["absolute"])
def test_append_to_zip_file(self):
"""Test appending to an existing zipfile."""
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED) as zipfp:
zipfp.writestr("strfile", self.data)
self.assertEqual(zipfp.namelist(), [TESTFN, "strfile"])
def test_append_to_non_zip_file(self):
"""Test appending to an existing file that is not a zipfile."""
# NOTE: this test fails if len(data) < 22 because of the first
# line "fpin.seek(-22, 2)" in _EndRecData
data = b'I am not a ZipFile!'*10
with open(TESTFN2, 'wb') as f:
f.write(data)
with zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'rb') as f:
f.seek(len(data))
with zipfile.ZipFile(f, "r") as zipfp:
self.assertEqual(zipfp.namelist(), [TESTFN])
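# (22 bytes is the fixed size of the ZIP end-of-central-directory record;
# test_struct_sizes below checks zipfile.sizeEndCentDir == 22.)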
def test_ignores_newline_at_end(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'a') as f:
f.write("\r\n\00\00\00")
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertIsInstance(zipfp, zipfile.ZipFile)
def test_ignores_stuff_appended_past_comments(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.comment = b"this is a comment"
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'a') as f:
f.write("abcdef\r\n")
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertIsInstance(zipfp, zipfile.ZipFile)
self.assertEqual(zipfp.comment, b"this is a comment")
def test_write_default_name(self):
"""Check that calling ZipFile.write without arcname specified
produces the expected result."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
zipfp.write(TESTFN)
with open(TESTFN, "rb") as f:
self.assertEqual(zipfp.read(TESTFN), f.read())
def test_write_to_readonly(self):
"""Check that trying to call write() on a readonly ZipFile object
raises a RuntimeError."""
with zipfile.ZipFile(TESTFN2, mode="w") as zipfp:
zipfp.writestr("somefile.txt", "bogus")
with zipfile.ZipFile(TESTFN2, mode="r") as zipfp:
self.assertRaises(RuntimeError, zipfp.write, TESTFN)
def test_add_file_before_1980(self):
# Set atime and mtime to 1970-01-01
os.utime(TESTFN, (0, 0))
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
self.assertRaises(ValueError, zipfp.write, TESTFN)
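# (ZIP stores timestamps in MS-DOS date format, whose epoch is 1980-01-01,
# hence the ValueError above for a 1970 mtime.)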
@requires_zlib
class DeflateTestsWithSourceFile(AbstractTestsWithSourceFile,
unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
def test_per_file_compression(self):
"""Check that files within a Zip archive can have different
compression options."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
zipfp.write(TESTFN, 'storeme', zipfile.ZIP_STORED)
zipfp.write(TESTFN, 'deflateme', zipfile.ZIP_DEFLATED)
sinfo = zipfp.getinfo('storeme')
dinfo = zipfp.getinfo('deflateme')
self.assertEqual(sinfo.compress_type, zipfile.ZIP_STORED)
self.assertEqual(dinfo.compress_type, zipfile.ZIP_DEFLATED)
@requires_bz2
class Bzip2TestsWithSourceFile(AbstractTestsWithSourceFile,
unittest.TestCase):
compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaTestsWithSourceFile(AbstractTestsWithSourceFile,
unittest.TestCase):
compression = zipfile.ZIP_LZMA
class AbstractTestZip64InSmallFiles:
# These tests test the ZIP64 functionality without using large files,
# see test_zipfile64 for proper tests.
@classmethod
def setUpClass(cls):
line_gen = (bytes("Test of zipfile line %d." % i, "ascii")
for i in range(0, FIXEDTEST_SIZE))
cls.data = b'\n'.join(line_gen)
def setUp(self):
self._limit = zipfile.ZIP64_LIMIT
self._filecount_limit = zipfile.ZIP_FILECOUNT_LIMIT
zipfile.ZIP64_LIMIT = 1000
zipfile.ZIP_FILECOUNT_LIMIT = 9
# Make a source file with some lines
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def zip_test(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression, allowZip64=True) as zipfp:
zipfp.write(TESTFN, "another.name")
zipfp.write(TESTFN, TESTFN)
zipfp.writestr("strfile", self.data)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read("another.name"), self.data)
self.assertEqual(zipfp.read("strfile"), self.data)
# Print the ZIP directory
fp = io.StringIO()
zipfp.printdir(fp)
directory = fp.getvalue()
lines = directory.splitlines()
self.assertEqual(len(lines), 4) # Number of files + header
self.assertIn('File Name', lines[0])
self.assertIn('Modified', lines[0])
self.assertIn('Size', lines[0])
fn, date, time_, size = lines[1].split()
self.assertEqual(fn, 'another.name')
self.assertTrue(time.strptime(date, '%Y-%m-%d'))
self.assertTrue(time.strptime(time_, '%H:%M:%S'))
self.assertEqual(size, str(len(self.data)))
# Check the namelist
names = zipfp.namelist()
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
# Check infolist
infos = zipfp.infolist()
names = [i.filename for i in infos]
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
for i in infos:
self.assertEqual(i.file_size, len(self.data))
# check getinfo
for nm in (TESTFN, "another.name", "strfile"):
info = zipfp.getinfo(nm)
self.assertEqual(info.filename, nm)
self.assertEqual(info.file_size, len(self.data))
# Check that testzip doesn't raise an exception
zipfp.testzip()
def test_basic(self):
for f in get_files(self):
self.zip_test(f, self.compression)
def test_too_many_files(self):
# This test checks that more files than ZIP_FILECOUNT_LIMIT can be added
# (the limit is patched down to 9 in setUp to simulate the real 64k limit),
# and that the resulting archive can be read properly by ZipFile
zipf = zipfile.ZipFile(TESTFN, "w", self.compression,
allowZip64=True)
zipf.debug = 100
numfiles = 15
for i in range(numfiles):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles)
zipf.close()
zipf2 = zipfile.ZipFile(TESTFN, "r", self.compression)
self.assertEqual(len(zipf2.namelist()), numfiles)
for i in range(numfiles):
content = zipf2.read("foo%08d" % i).decode('ascii')
self.assertEqual(content, "%d" % (i**3 % 57))
zipf2.close()
def test_too_many_files_append(self):
zipf = zipfile.ZipFile(TESTFN, "w", self.compression,
allowZip64=False)
zipf.debug = 100
numfiles = 9
for i in range(numfiles):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles)
with self.assertRaises(zipfile.LargeZipFile):
zipf.writestr("foo%08d" % numfiles, b'')
self.assertEqual(len(zipf.namelist()), numfiles)
zipf.close()
zipf = zipfile.ZipFile(TESTFN, "a", self.compression,
allowZip64=False)
zipf.debug = 100
self.assertEqual(len(zipf.namelist()), numfiles)
with self.assertRaises(zipfile.LargeZipFile):
zipf.writestr("foo%08d" % numfiles, b'')
self.assertEqual(len(zipf.namelist()), numfiles)
zipf.close()
zipf = zipfile.ZipFile(TESTFN, "a", self.compression,
allowZip64=True)
zipf.debug = 100
self.assertEqual(len(zipf.namelist()), numfiles)
numfiles2 = 15
for i in range(numfiles, numfiles2):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles2)
zipf.close()
zipf2 = zipfile.ZipFile(TESTFN, "r", self.compression)
self.assertEqual(len(zipf2.namelist()), numfiles2)
for i in range(numfiles2):
content = zipf2.read("foo%08d" % i).decode('ascii')
self.assertEqual(content, "%d" % (i**3 % 57))
zipf2.close()
def tearDown(self):
zipfile.ZIP64_LIMIT = self._limit
zipfile.ZIP_FILECOUNT_LIMIT = self._filecount_limit
unlink(TESTFN)
unlink(TESTFN2)
class StoredTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
unittest.TestCase):
compression = zipfile.ZIP_STORED
def large_file_exception_test(self, f, compression):
with zipfile.ZipFile(f, "w", compression, allowZip64=False) as zipfp:
self.assertRaises(zipfile.LargeZipFile,
zipfp.write, TESTFN, "another.name")
def large_file_exception_test2(self, f, compression):
with zipfile.ZipFile(f, "w", compression, allowZip64=False) as zipfp:
self.assertRaises(zipfile.LargeZipFile,
zipfp.writestr, "another.name", self.data)
def test_large_file_exception(self):
for f in get_files(self):
self.large_file_exception_test(f, zipfile.ZIP_STORED)
self.large_file_exception_test2(f, zipfile.ZIP_STORED)
def test_absolute_arcnames(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED,
allowZip64=True) as zipfp:
zipfp.write(TESTFN, "/absolute")
with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp:
self.assertEqual(zipfp.namelist(), ["absolute"])
@requires_zlib
class DeflateTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
@requires_bz2
class Bzip2TestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
unittest.TestCase):
compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
unittest.TestCase):
compression = zipfile.ZIP_LZMA
class PyZipFileTests(unittest.TestCase):
def assertCompiledIn(self, name, namelist):
if name + 'o' not in namelist:
self.assertIn(name + 'c', namelist)
def requiresWriteAccess(self, path):
# effective_ids unavailable on windows
if not os.access(path, os.W_OK,
effective_ids=os.access in os.supports_effective_ids):
self.skipTest('requires write access to the installed location')
def test_write_pyfile(self):
self.requiresWriteAccess(os.path.dirname(__file__))
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
fn = __file__
if fn.endswith('.pyc'):
path_split = fn.split(os.sep)
if os.altsep is not None:
path_split.extend(fn.split(os.altsep))
if '__pycache__' in path_split:
fn = importlib.util.source_from_cache(fn)
else:
fn = fn[:-1]
zipfp.writepy(fn)
bn = os.path.basename(fn)
self.assertNotIn(bn, zipfp.namelist())
self.assertCompiledIn(bn, zipfp.namelist())
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
fn = __file__
if fn.endswith('.pyc'):
fn = fn[:-1]
zipfp.writepy(fn, "testpackage")
bn = "%s/%s" % ("testpackage", os.path.basename(fn))
self.assertNotIn(bn, zipfp.namelist())
self.assertCompiledIn(bn, zipfp.namelist())
def test_write_python_package(self):
import email
packagedir = os.path.dirname(email.__file__)
self.requiresWriteAccess(packagedir)
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
zipfp.writepy(packagedir)
# Check for a couple of modules at different levels of the
# hierarchy
names = zipfp.namelist()
self.assertCompiledIn('email/__init__.py', names)
self.assertCompiledIn('email/mime/text.py', names)
def test_write_filtered_python_package(self):
import test
packagedir = os.path.dirname(test.__file__)
self.requiresWriteAccess(packagedir)
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
# first make sure that the test folder gives error messages
# (on the badsyntax_... files)
with captured_stdout() as reportSIO:
zipfp.writepy(packagedir)
reportStr = reportSIO.getvalue()
self.assertTrue('SyntaxError' in reportStr)
# then check that the filter works on the whole package
with captured_stdout() as reportSIO:
zipfp.writepy(packagedir, filterfunc=lambda whatever: False)
reportStr = reportSIO.getvalue()
self.assertTrue('SyntaxError' not in reportStr)
# then check that the filter works on individual files
def filter(path):
return not os.path.basename(path).startswith("bad")
with captured_stdout() as reportSIO, self.assertWarns(UserWarning):
zipfp.writepy(packagedir, filterfunc=filter)
reportStr = reportSIO.getvalue()
if reportStr:
print(reportStr)
self.assertTrue('SyntaxError' not in reportStr)
def test_write_with_optimization(self):
import email
packagedir = os.path.dirname(email.__file__)
self.requiresWriteAccess(packagedir)
optlevel = 1 if __debug__ else 0
ext = '.pyc'
with TemporaryFile() as t, \
zipfile.PyZipFile(t, "w", optimize=optlevel) as zipfp:
zipfp.writepy(packagedir)
names = zipfp.namelist()
self.assertIn('email/__init__' + ext, names)
self.assertIn('email/mime/text' + ext, names)
def test_write_python_directory(self):
os.mkdir(TESTFN2)
try:
with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
fp.write("print(42)\n")
with open(os.path.join(TESTFN2, "mod2.py"), "w") as fp:
fp.write("print(42 * 42)\n")
with open(os.path.join(TESTFN2, "mod2.txt"), "w") as fp:
fp.write("bla bla bla\n")
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
zipfp.writepy(TESTFN2)
names = zipfp.namelist()
self.assertCompiledIn('mod1.py', names)
self.assertCompiledIn('mod2.py', names)
self.assertNotIn('mod2.txt', names)
finally:
rmtree(TESTFN2)
def test_write_python_directory_filtered(self):
os.mkdir(TESTFN2)
try:
with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
fp.write("print(42)\n")
with open(os.path.join(TESTFN2, "mod2.py"), "w") as fp:
fp.write("print(42 * 42)\n")
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
zipfp.writepy(TESTFN2, filterfunc=lambda fn:
not fn.endswith('mod2.py'))
names = zipfp.namelist()
self.assertCompiledIn('mod1.py', names)
self.assertNotIn('mod2.py', names)
finally:
rmtree(TESTFN2)
def test_write_non_pyfile(self):
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
with open(TESTFN, 'w') as f:
f.write('most definitely not a python file')
self.assertRaises(RuntimeError, zipfp.writepy, TESTFN)
unlink(TESTFN)
def test_write_pyfile_bad_syntax(self):
os.mkdir(TESTFN2)
try:
with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
fp.write("Bad syntax in python file\n")
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
# syntax errors are printed to stdout
with captured_stdout() as s:
zipfp.writepy(os.path.join(TESTFN2, "mod1.py"))
self.assertIn("SyntaxError", s.getvalue())
# as it will not have compiled the python file, it will
# include the .py file not .pyc
names = zipfp.namelist()
self.assertIn('mod1.py', names)
self.assertNotIn('mod1.pyc', names)
finally:
rmtree(TESTFN2)
class ExtractTests(unittest.TestCase):
def test_extract(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
writtenfile = zipfp.extract(fpath)
# make sure it was written to the right place
correctfile = os.path.join(os.getcwd(), fpath)
correctfile = os.path.normpath(correctfile)
self.assertEqual(writtenfile, correctfile)
# make sure correct data is in correct file
with open(writtenfile, "rb") as f:
self.assertEqual(fdata.encode(), f.read())
unlink(writtenfile)
# remove the test file subdirectories
rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
def test_extract_all(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
zipfp.extractall()
for fpath, fdata in SMALL_TEST_DATA:
outfile = os.path.join(os.getcwd(), fpath)
with open(outfile, "rb") as f:
self.assertEqual(fdata.encode(), f.read())
unlink(outfile)
# remove the test file subdirectories
rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
def check_file(self, filename, content):
self.assertTrue(os.path.isfile(filename))
with open(filename, 'rb') as f:
self.assertEqual(f.read(), content)
def test_sanitize_windows_name(self):
san = zipfile.ZipFile._sanitize_windows_name
# Passing pathsep in allows this test to work regardless of platform.
self.assertEqual(san(r',,?,C:,foo,bar/z', ','), r'_,C_,foo,bar/z')
self.assertEqual(san(r'a\b,c<d>e|f"g?h*i', ','), r'a\b,c_d_e_f_g_h_i')
self.assertEqual(san('../../foo../../ba..r', '/'), r'foo/ba..r')
def test_extract_hackers_arcnames_common_cases(self):
common_hacknames = [
('../foo/bar', 'foo/bar'),
('foo/../bar', 'foo/bar'),
('foo/../../bar', 'foo/bar'),
('foo/bar/..', 'foo/bar'),
('./../foo/bar', 'foo/bar'),
('/foo/bar', 'foo/bar'),
('/foo/../bar', 'foo/bar'),
('/foo/../../bar', 'foo/bar'),
]
self._test_extract_hackers_arcnames(common_hacknames)
@unittest.skipIf(os.path.sep != '\\', 'Requires \\ as path separator.')
def test_extract_hackers_arcnames_windows_only(self):
"""Test combination of path fixing and windows name sanitization."""
windows_hacknames = [
(r'..\foo\bar', 'foo/bar'),
(r'..\/foo\/bar', 'foo/bar'),
(r'foo/\..\/bar', 'foo/bar'),
(r'foo\/../\bar', 'foo/bar'),
(r'C:foo/bar', 'foo/bar'),
(r'C:/foo/bar', 'foo/bar'),
(r'C://foo/bar', 'foo/bar'),
(r'C:\foo\bar', 'foo/bar'),
(r'//conky/mountpoint/foo/bar', 'foo/bar'),
(r'\\conky\mountpoint\foo\bar', 'foo/bar'),
(r'///conky/mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
(r'\\\conky\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
(r'//conky//mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
(r'\\conky\\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
(r'//?/C:/foo/bar', 'foo/bar'),
(r'\\?\C:\foo\bar', 'foo/bar'),
(r'C:/../C:/foo/bar', 'C_/foo/bar'),
(r'a:b\c<d>e|f"g?h*i', 'b/c_d_e_f_g_h_i'),
('../../foo../../ba..r', 'foo/ba..r'),
]
self._test_extract_hackers_arcnames(windows_hacknames)
@unittest.skipIf(os.path.sep != '/', r'Requires / as path separator.')
def test_extract_hackers_arcnames_posix_only(self):
posix_hacknames = [
('//foo/bar', 'foo/bar'),
('../../foo../../ba..r', 'foo../ba..r'),
(r'foo/..\bar', r'foo/..\bar'),
]
self._test_extract_hackers_arcnames(posix_hacknames)
def _test_extract_hackers_arcnames(self, hacknames):
for arcname, fixedname in hacknames:
content = b'foobar' + arcname.encode()
with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_STORED) as zipfp:
zinfo = zipfile.ZipInfo()
# preserve backslashes
zinfo.filename = arcname
zinfo.external_attr = 0o600 << 16
zipfp.writestr(zinfo, content)
arcname = arcname.replace(os.sep, "/")
targetpath = os.path.join('target', 'subdir', 'subsub')
correctfile = os.path.join(targetpath, *fixedname.split('/'))
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
writtenfile = zipfp.extract(arcname, targetpath)
self.assertEqual(writtenfile, correctfile,
msg='extract %r: %r != %r' %
(arcname, writtenfile, correctfile))
self.check_file(correctfile, content)
rmtree('target')
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
zipfp.extractall(targetpath)
self.check_file(correctfile, content)
rmtree('target')
correctfile = os.path.join(os.getcwd(), *fixedname.split('/'))
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
writtenfile = zipfp.extract(arcname)
self.assertEqual(writtenfile, correctfile,
msg="extract %r" % arcname)
self.check_file(correctfile, content)
rmtree(fixedname.split('/')[0])
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
zipfp.extractall()
self.check_file(correctfile, content)
rmtree(fixedname.split('/')[0])
unlink(TESTFN2)
class OtherTests(unittest.TestCase):
def test_open_via_zip_info(self):
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.writestr("name", "foo")
with self.assertWarns(UserWarning):
zipfp.writestr("name", "bar")
self.assertEqual(zipfp.namelist(), ["name"] * 2)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
infos = zipfp.infolist()
data = b""
for info in infos:
with zipfp.open(info) as zipopen:
data += zipopen.read()
self.assertIn(data, {b"foobar", b"barfoo"})
data = b""
for info in infos:
data += zipfp.read(info)
self.assertIn(data, {b"foobar", b"barfoo"})
def test_universal_deprecation(self):
f = io.BytesIO()
with zipfile.ZipFile(f, "w") as zipfp:
zipfp.writestr('spam.txt', b'ababagalamaga')
with zipfile.ZipFile(f, "r") as zipfp:
for mode in 'U', 'rU':
with self.assertWarns(DeprecationWarning):
zipopen = zipfp.open('spam.txt', mode)
zipopen.close()
def test_universal_readaheads(self):
f = io.BytesIO()
data = b'a\r\n' * 16 * 1024
with zipfile.ZipFile(f, 'w', zipfile.ZIP_STORED) as zipfp:
zipfp.writestr(TESTFN, data)
data2 = b''
with zipfile.ZipFile(f, 'r') as zipfp, \
openU(zipfp, TESTFN) as zipopen:
for line in zipopen:
data2 += line
self.assertEqual(data, data2.replace(b'\n', b'\r\n'))
def test_writestr_extended_local_header_issue1202(self):
with zipfile.ZipFile(TESTFN2, 'w') as orig_zip:
for data in 'abcdefghijklmnop':
zinfo = zipfile.ZipInfo(data)
zinfo.flag_bits |= 0x08 # Include an extended local header.
orig_zip.writestr(zinfo, data)
def test_close(self):
"""Check that the zipfile is closed after the 'with' block."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
self.assertIsNotNone(zipfp.fp, 'zipfp is not open')
self.assertIsNone(zipfp.fp, 'zipfp is not closed')
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertIsNotNone(zipfp.fp, 'zipfp is not open')
self.assertIsNone(zipfp.fp, 'zipfp is not closed')
def test_close_on_exception(self):
"""Check that the zipfile is closed if an exception is raised in the
'with' block."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
try:
with zipfile.ZipFile(TESTFN2, "r") as zipfp2:
raise zipfile.BadZipFile()
except zipfile.BadZipFile:
self.assertIsNone(zipfp2.fp, 'zipfp is not closed')
def test_unsupported_version(self):
# File has an extract_version of 120
data = (b'PK\x03\x04x\x00\x00\x00\x00\x00!p\xa1@\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00xPK\x01\x02x\x03x\x00\x00\x00\x00'
b'\x00!p\xa1@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00xPK\x05\x06'
b'\x00\x00\x00\x00\x01\x00\x01\x00/\x00\x00\x00\x1f\x00\x00\x00\x00\x00')
self.assertRaises(NotImplementedError, zipfile.ZipFile,
io.BytesIO(data), 'r')
@requires_zlib
def test_read_unicode_filenames(self):
# bug #10801
fname = findfile('zip_cp437_header.zip')
with zipfile.ZipFile(fname) as zipfp:
for name in zipfp.namelist():
zipfp.open(name).close()
def test_write_unicode_filenames(self):
with zipfile.ZipFile(TESTFN, "w") as zf:
zf.writestr("foo.txt", "Test for unicode filename")
zf.writestr("\xf6.txt", "Test for unicode filename")
self.assertIsInstance(zf.infolist()[0].filename, str)
with zipfile.ZipFile(TESTFN, "r") as zf:
self.assertEqual(zf.filelist[0].filename, "foo.txt")
self.assertEqual(zf.filelist[1].filename, "\xf6.txt")
def test_exclusive_create_zip_file(self):
"""Test exclusive creating a new zipfile."""
unlink(TESTFN2)
filename = 'testfile.txt'
content = b'hello, world. this is some content.'
with zipfile.ZipFile(TESTFN2, "x", zipfile.ZIP_STORED) as zipfp:
zipfp.writestr(filename, content)
with self.assertRaises(FileExistsError):
zipfile.ZipFile(TESTFN2, "x", zipfile.ZIP_STORED)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertEqual(zipfp.namelist(), [filename])
self.assertEqual(zipfp.read(filename), content)
def test_create_non_existent_file_for_append(self):
if os.path.exists(TESTFN):
os.unlink(TESTFN)
filename = 'testfile.txt'
content = b'hello, world. this is some content.'
try:
with zipfile.ZipFile(TESTFN, 'a') as zf:
zf.writestr(filename, content)
except OSError:
self.fail('Could not append data to a non-existent zip file.')
self.assertTrue(os.path.exists(TESTFN))
with zipfile.ZipFile(TESTFN, 'r') as zf:
self.assertEqual(zf.read(filename), content)
def test_close_erroneous_file(self):
# This test checks that the ZipFile constructor closes the file object
# it opens if there's an error in the file. If it doesn't, the
# traceback holds a reference to the ZipFile object and, indirectly,
# the file object.
# On Windows, this causes the os.unlink() call to fail because the
# underlying file is still open. This is SF bug #412214.
#
with open(TESTFN, "w") as fp:
fp.write("this is not a legal zip file\n")
try:
zf = zipfile.ZipFile(TESTFN)
except zipfile.BadZipFile:
pass
def test_is_zip_erroneous_file(self):
"""Check that is_zipfile() correctly identifies non-zip files."""
# - passing a filename
with open(TESTFN, "w") as fp:
fp.write("this is not a legal zip file\n")
self.assertFalse(zipfile.is_zipfile(TESTFN))
# - passing a file object
with open(TESTFN, "rb") as fp:
self.assertFalse(zipfile.is_zipfile(fp))
# - passing a file-like object
fp = io.BytesIO()
fp.write(b"this is not a legal zip file\n")
self.assertFalse(zipfile.is_zipfile(fp))
fp.seek(0, 0)
self.assertFalse(zipfile.is_zipfile(fp))
def test_damaged_zipfile(self):
"""Check that zipfiles with missing bytes at the end raise BadZipFile."""
# - Create a valid zip file
fp = io.BytesIO()
with zipfile.ZipFile(fp, mode="w") as zipf:
zipf.writestr("foo.txt", b"O, for a Muse of Fire!")
zipfiledata = fp.getvalue()
# - Now create copies of it missing the last N bytes and make sure
# a BadZipFile exception is raised when we try to open it
for N in range(len(zipfiledata)):
fp = io.BytesIO(zipfiledata[:N])
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, fp)
def test_is_zip_valid_file(self):
"""Check that is_zipfile() correctly identifies zip files."""
# - passing a filename
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", b"O, for a Muse of Fire!")
self.assertTrue(zipfile.is_zipfile(TESTFN))
# - passing a file object
with open(TESTFN, "rb") as fp:
self.assertTrue(zipfile.is_zipfile(fp))
fp.seek(0, 0)
zip_contents = fp.read()
# - passing a file-like object
fp = io.BytesIO()
fp.write(zip_contents)
self.assertTrue(zipfile.is_zipfile(fp))
fp.seek(0, 0)
self.assertTrue(zipfile.is_zipfile(fp))
def test_non_existent_file_raises_OSError(self):
# make sure we don't raise an AttributeError when a partially-constructed
# ZipFile instance is finalized; this tests for regression on SF tracker
# bug #403871.
# The bug we're testing for caused an AttributeError to be raised
# when a ZipFile instance was created for a file that did not
# exist; the .fp member was not initialized but was needed by the
# __del__() method. Since the AttributeError is in the __del__(),
# it is ignored, but the user should be sufficiently annoyed by
# the message on the output that regression will be noticed
# quickly.
self.assertRaises(OSError, zipfile.ZipFile, TESTFN)
def test_empty_file_raises_BadZipFile(self):
f = open(TESTFN, 'w')
f.close()
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN)
with open(TESTFN, 'w') as fp:
fp.write("short file")
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN)
def test_closed_zip_raises_RuntimeError(self):
"""Verify that testzip() doesn't swallow inappropriate exceptions."""
data = io.BytesIO()
with zipfile.ZipFile(data, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
# This is correct; calling .read on a closed ZipFile should raise
# a RuntimeError, and so should calling .testzip. An earlier
# version of .testzip would swallow this exception (and any other)
# and report that the first file in the archive was corrupt.
self.assertRaises(RuntimeError, zipf.read, "foo.txt")
self.assertRaises(RuntimeError, zipf.open, "foo.txt")
self.assertRaises(RuntimeError, zipf.testzip)
self.assertRaises(RuntimeError, zipf.writestr, "bogus.txt", "bogus")
with open(TESTFN, 'w') as f:
f.write('zipfile test data')
self.assertRaises(RuntimeError, zipf.write, TESTFN)
def test_bad_constructor_mode(self):
"""Check that bad modes passed to ZipFile constructor are caught."""
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "q")
def test_bad_open_mode(self):
"""Check that bad modes passed to ZipFile.open are caught."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipf:
# read the data to make sure the file is there
zipf.read("foo.txt")
self.assertRaises(RuntimeError, zipf.open, "foo.txt", "q")
def test_read0(self):
"""Check that calling read(0) on a ZipExtFile object returns an empty
string and doesn't advance file pointer."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
# read the data to make sure the file is there
with zipf.open("foo.txt") as f:
for i in range(FIXEDTEST_SIZE):
self.assertEqual(f.read(0), b'')
self.assertEqual(f.read(), b"O, for a Muse of Fire!")
def test_open_non_existent_item(self):
"""Check that attempting to call open() for an item that doesn't
exist in the archive raises a RuntimeError."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
self.assertRaises(KeyError, zipf.open, "foo.txt", "r")
def test_bad_compression_mode(self):
"""Check that bad compression methods passed to ZipFile.open are
caught."""
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "w", -1)
def test_unsupported_compression(self):
# data is declared as shrunk, but actually deflated
data = (b'PK\x03\x04.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00'
b'\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00x\x03\x00PK\x01'
b'\x02.\x03.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00\x00\x02\x00\x00'
b'\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x80\x01\x00\x00\x00\x00xPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x00'
b'/\x00\x00\x00!\x00\x00\x00\x00\x00')
with zipfile.ZipFile(io.BytesIO(data), 'r') as zipf:
self.assertRaises(NotImplementedError, zipf.open, 'x')
def test_null_byte_in_filename(self):
"""Check that a filename containing a null byte is properly
terminated."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt\x00qqq", b"O, for a Muse of Fire!")
self.assertEqual(zipf.namelist(), ['foo.txt'])
def test_struct_sizes(self):
"""Check that ZIP internal structure sizes are calculated correctly."""
self.assertEqual(zipfile.sizeEndCentDir, 22)
self.assertEqual(zipfile.sizeCentralDir, 46)
self.assertEqual(zipfile.sizeEndCentDir64, 56)
self.assertEqual(zipfile.sizeEndCentDir64Locator, 20)
def test_comments(self):
"""Check that comments on the archive are handled properly."""
# check default comment is empty
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
self.assertEqual(zipf.comment, b'')
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, b'')
# check a simple short comment
comment = b'Bravely taking to his feet, he beat a very brave retreat.'
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.comment = comment
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, comment)
# check a comment of max length
comment2 = ''.join(['%d' % (i**3 % 10) for i in range((1 << 16)-1)])
comment2 = comment2.encode("ascii")
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.comment = comment2
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, comment2)
# check a comment that is too long is truncated
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
with self.assertWarns(UserWarning):
zipf.comment = comment2 + b'oops'
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, comment2)
# check that comments are correctly modified in append mode
with zipfile.ZipFile(TESTFN,mode="w") as zipf:
zipf.comment = b"original comment"
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN,mode="a") as zipf:
zipf.comment = b"an updated comment"
with zipfile.ZipFile(TESTFN,mode="r") as zipf:
self.assertEqual(zipf.comment, b"an updated comment")
# check that comments are correctly shortened in append mode
with zipfile.ZipFile(TESTFN,mode="w") as zipf:
zipf.comment = b"original comment that's longer"
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN,mode="a") as zipf:
zipf.comment = b"shorter comment"
with zipfile.ZipFile(TESTFN,mode="r") as zipf:
self.assertEqual(zipf.comment, b"shorter comment")
def test_unicode_comment(self):
with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with self.assertRaises(TypeError):
zipf.comment = "this is an error"
def test_change_comment_in_empty_archive(self):
with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
self.assertFalse(zipf.filelist)
zipf.comment = b"this is a comment"
with zipfile.ZipFile(TESTFN, "r") as zipf:
self.assertEqual(zipf.comment, b"this is a comment")
def test_change_comment_in_nonempty_archive(self):
with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
self.assertTrue(zipf.filelist)
zipf.comment = b"this is a comment"
with zipfile.ZipFile(TESTFN, "r") as zipf:
self.assertEqual(zipf.comment, b"this is a comment")
def test_empty_zipfile(self):
# Check that creating a file in 'w' or 'a' mode and closing without
# adding any files to the archive creates a valid empty ZIP file
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.close()
try:
zipf = zipfile.ZipFile(TESTFN, mode="r")
except zipfile.BadZipFile:
self.fail("Unable to create empty ZIP file in 'w' mode")
zipf = zipfile.ZipFile(TESTFN, mode="a")
zipf.close()
try:
zipf = zipfile.ZipFile(TESTFN, mode="r")
except zipfile.BadZipFile:
self.fail("Unable to create empty ZIP file in 'a' mode")
def test_open_empty_file(self):
# Issue 1710703: Check that opening a file with less than 22 bytes
# raises a BadZipFile exception (rather than the previously unhelpful
# OSError)
f = open(TESTFN, 'w')
f.close()
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN, 'r')
def test_create_zipinfo_before_1980(self):
self.assertRaises(ValueError,
zipfile.ZipInfo, 'seventies', (1979, 1, 1, 0, 0, 0))
def test_zipfile_with_short_extra_field(self):
"""If an extra field in the header is less than 4 bytes, skip it."""
zipdata = (
b'PK\x03\x04\x14\x00\x00\x00\x00\x00\x93\x9b\xad@\x8b\x9e'
b'\xd9\xd3\x01\x00\x00\x00\x01\x00\x00\x00\x03\x00\x03\x00ab'
b'c\x00\x00\x00APK\x01\x02\x14\x03\x14\x00\x00\x00\x00'
b'\x00\x93\x9b\xad@\x8b\x9e\xd9\xd3\x01\x00\x00\x00\x01\x00\x00'
b'\x00\x03\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00'
b'\x00\x00\x00abc\x00\x00PK\x05\x06\x00\x00\x00\x00'
b'\x01\x00\x01\x003\x00\x00\x00%\x00\x00\x00\x00\x00'
)
with zipfile.ZipFile(io.BytesIO(zipdata), 'r') as zipf:
# testzip returns the name of the first corrupt file, or None
self.assertIsNone(zipf.testzip())
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
class AbstractBadCrcTests:
def test_testzip_with_bad_crc(self):
"""Tests that files with bad CRCs return their name from testzip."""
zipdata = self.zip_with_bad_crc
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
# testzip returns the name of the first corrupt file, or None
self.assertEqual('afile', zipf.testzip())
def test_read_with_bad_crc(self):
"""Tests that files with bad CRCs raise a BadZipFile exception when read."""
zipdata = self.zip_with_bad_crc
# Using ZipFile.read()
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
self.assertRaises(zipfile.BadZipFile, zipf.read, 'afile')
# Using ZipExtFile.read()
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
with zipf.open('afile', 'r') as corrupt_file:
self.assertRaises(zipfile.BadZipFile, corrupt_file.read)
# Same with small reads (in order to exercise the buffering logic)
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
with zipf.open('afile', 'r') as corrupt_file:
corrupt_file.MIN_READ_SIZE = 2
with self.assertRaises(zipfile.BadZipFile):
while corrupt_file.read(2):
pass
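# A minimal, hedged sketch (not part of the suite above) of using
# ZipFile.testzip() for the integrity check these tests exercise; `path`
# is a hypothetical archive on disk.
def _first_corrupt_member(path):
    import zipfile
    with zipfile.ZipFile(path) as zf:
        # testzip() CRC-checks every member and returns the name of the
        # first corrupt file, or None when the archive is intact.
        return zf.testzip()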
class StoredBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
compression = zipfile.ZIP_STORED
zip_with_bad_crc = (
b'PK\003\004\024\0\0\0\0\0 \213\212;:r'
b'\253\377\f\0\0\0\f\0\0\0\005\0\0\000af'
b'ilehello,AworldP'
b'K\001\002\024\003\024\0\0\0\0\0 \213\212;:'
b'r\253\377\f\0\0\0\f\0\0\0\005\0\0\0\0'
b'\0\0\0\0\0\0\0\200\001\0\0\0\000afi'
b'lePK\005\006\0\0\0\0\001\0\001\0003\000'
b'\0\0/\0\0\0\0\0')
@requires_zlib
class DeflateBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
zip_with_bad_crc = (
b'PK\x03\x04\x14\x00\x00\x00\x08\x00n}\x0c=FA'
b'KE\x10\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
b'ile\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\xc9\xa0'
b'=\x13\x00PK\x01\x02\x14\x03\x14\x00\x00\x00\x08\x00n'
b'}\x0c=FAKE\x10\x00\x00\x00n\x00\x00\x00\x05'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00'
b'\x00afilePK\x05\x06\x00\x00\x00\x00\x01\x00'
b'\x01\x003\x00\x00\x003\x00\x00\x00\x00\x00')
@requires_bz2
class Bzip2BadCrcTests(AbstractBadCrcTests, unittest.TestCase):
compression = zipfile.ZIP_BZIP2
zip_with_bad_crc = (
b'PK\x03\x04\x14\x03\x00\x00\x0c\x00nu\x0c=FA'
b'KE8\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
b'ileBZh91AY&SY\xd4\xa8\xca'
b'\x7f\x00\x00\x0f\x11\x80@\x00\x06D\x90\x80 \x00 \xa5'
b'P\xd9!\x03\x03\x13\x13\x13\x89\xa9\xa9\xc2u5:\x9f'
b'\x8b\xb9"\x9c(HjTe?\x80PK\x01\x02\x14'
b'\x03\x14\x03\x00\x00\x0c\x00nu\x0c=FAKE8'
b'\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00 \x80\x80\x81\x00\x00\x00\x00afilePK'
b'\x05\x06\x00\x00\x00\x00\x01\x00\x01\x003\x00\x00\x00[\x00'
b'\x00\x00\x00\x00')
@requires_lzma
class LzmaBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
compression = zipfile.ZIP_LZMA
zip_with_bad_crc = (
b'PK\x03\x04\x14\x03\x00\x00\x0e\x00nu\x0c=FA'
b'KE\x1b\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
b'ile\t\x04\x05\x00]\x00\x00\x00\x04\x004\x19I'
b'\xee\x8d\xe9\x17\x89:3`\tq!.8\x00PK'
b'\x01\x02\x14\x03\x14\x03\x00\x00\x0e\x00nu\x0c=FA'
b'KE\x1b\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00 \x80\x80\x81\x00\x00\x00\x00afil'
b'ePK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x003\x00\x00'
b'\x00>\x00\x00\x00\x00\x00')
class DecryptionTests(unittest.TestCase):
"""Check that ZIP decryption works. Since the library does not
support encryption at the moment, we use a pre-generated encrypted
ZIP file."""
data = (
b'PK\x03\x04\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00\x1a\x00'
b'\x00\x00\x08\x00\x00\x00test.txt\xfa\x10\xa0gly|\xfa-\xc5\xc0=\xf9y'
b'\x18\xe0\xa8r\xb3Z}Lg\xbc\xae\xf9|\x9b\x19\xe4\x8b\xba\xbb)\x8c\xb0\xdbl'
b'PK\x01\x02\x14\x00\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00'
b'\x1a\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x01\x00 \x00\xb6\x81'
b'\x00\x00\x00\x00test.txtPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x006\x00'
b'\x00\x00L\x00\x00\x00\x00\x00' )
data2 = (
b'PK\x03\x04\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02'
b'\x00\x00\x04\x00\x15\x00zeroUT\t\x00\x03\xd6\x8b\x92G\xda\x8b\x92GUx\x04'
b'\x00\xe8\x03\xe8\x03\xc7<M\xb5a\xceX\xa3Y&\x8b{oE\xd7\x9d\x8c\x98\x02\xc0'
b'PK\x07\x08xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00PK\x01\x02\x17\x03'
b'\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00'
b'\x04\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00\x00\x00\x00ze'
b'roUT\x05\x00\x03\xd6\x8b\x92GUx\x00\x00PK\x05\x06\x00\x00\x00\x00\x01'
b'\x00\x01\x00?\x00\x00\x00[\x00\x00\x00\x00\x00' )
plain = b'zipfile.py encryption test'
plain2 = b'\x00'*512
def setUp(self):
with open(TESTFN, "wb") as fp:
fp.write(self.data)
self.zip = zipfile.ZipFile(TESTFN, "r")
with open(TESTFN2, "wb") as fp:
fp.write(self.data2)
self.zip2 = zipfile.ZipFile(TESTFN2, "r")
def tearDown(self):
self.zip.close()
os.unlink(TESTFN)
self.zip2.close()
os.unlink(TESTFN2)
def test_no_password(self):
# Reading the encrypted file without a password
# must raise a RuntimeError
self.assertRaises(RuntimeError, self.zip.read, "test.txt")
self.assertRaises(RuntimeError, self.zip2.read, "zero")
def test_bad_password(self):
self.zip.setpassword(b"perl")
self.assertRaises(RuntimeError, self.zip.read, "test.txt")
self.zip2.setpassword(b"perl")
self.assertRaises(RuntimeError, self.zip2.read, "zero")
@requires_zlib
def test_good_password(self):
self.zip.setpassword(b"python")
self.assertEqual(self.zip.read("test.txt"), self.plain)
self.zip2.setpassword(b"12345")
self.assertEqual(self.zip2.read("zero"), self.plain2)
def test_unicode_password(self):
self.assertRaises(TypeError, self.zip.setpassword, "unicode")
self.assertRaises(TypeError, self.zip.read, "test.txt", "python")
self.assertRaises(TypeError, self.zip.open, "test.txt", pwd="python")
self.assertRaises(TypeError, self.zip.extract, "test.txt", pwd="python")
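# A hedged sketch of the decryption API exercised above; `path`, `name`
# and `password` are hypothetical. Passwords must be bytes; a missing or
# wrong password makes read() raise RuntimeError.
def _read_encrypted_member(path, name, password):
    import zipfile
    with zipfile.ZipFile(path) as zf:
        zf.setpassword(password)   # default for subsequent reads
        return zf.read(name)       # per-call form: zf.read(name, pwd=password)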
class AbstractTestsWithRandomBinaryFiles:
@classmethod
def setUpClass(cls):
datacount = randint(16, 64)*1024 + randint(1, 1024)
cls.data = b''.join(struct.pack('<f', random()*randint(-1000, 1000))
for i in range(datacount))
def setUp(self):
# Make a source file with some lines
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
def make_test_archive(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression) as zipfp:
zipfp.write(TESTFN, "another.name")
zipfp.write(TESTFN, TESTFN)
def zip_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
testdata = zipfp.read(TESTFN)
self.assertEqual(len(testdata), len(self.data))
self.assertEqual(testdata, self.data)
self.assertEqual(zipfp.read("another.name"), self.data)
def test_read(self):
for f in get_files(self):
self.zip_test(f, self.compression)
def zip_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(256)
if not read_data:
break
zipdata1.append(read_data)
zipdata2 = []
with zipfp.open("another.name") as zipopen2:
while True:
read_data = zipopen2.read(256)
if not read_data:
break
zipdata2.append(read_data)
testdata1 = b''.join(zipdata1)
self.assertEqual(len(testdata1), len(self.data))
self.assertEqual(testdata1, self.data)
testdata2 = b''.join(zipdata2)
self.assertEqual(len(testdata2), len(self.data))
self.assertEqual(testdata2, self.data)
def test_open(self):
for f in get_files(self):
self.zip_open_test(f, self.compression)
def zip_random_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(randint(1, 1024))
if not read_data:
break
zipdata1.append(read_data)
testdata = b''.join(zipdata1)
self.assertEqual(len(testdata), len(self.data))
self.assertEqual(testdata, self.data)
def test_random_open(self):
for f in get_files(self):
self.zip_random_open_test(f, self.compression)
class StoredTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
unittest.TestCase):
compression = zipfile.ZIP_STORED
@requires_zlib
class DeflateTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
@requires_bz2
class Bzip2TestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
unittest.TestCase):
compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
unittest.TestCase):
compression = zipfile.ZIP_LZMA
# Provide the tell() method but not seek()
class Tellable:
def __init__(self, fp):
self.fp = fp
self.offset = 0
def write(self, data):
n = self.fp.write(data)
self.offset += n
return n
def tell(self):
return self.offset
def flush(self):
self.fp.flush()
class Unseekable:
def __init__(self, fp):
self.fp = fp
def write(self, data):
return self.fp.write(data)
def flush(self):
self.fp.flush()
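# A minimal sketch of why the wrappers above exist: ZipFile can also write
# to a stream that only supports write()/flush() (optionally tell()); it
# then records sizes in data descriptors instead of seeking back to patch
# the local headers. `stream` is any hypothetical write-only file object.
def _write_to_unseekable(stream):
    import zipfile
    with zipfile.ZipFile(Unseekable(stream), "w", zipfile.ZIP_STORED) as zf:
        zf.writestr("hello.txt", b"hello, world")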
class UnseekableTests(unittest.TestCase):
def test_writestr(self):
for wrapper in (lambda f: f), Tellable, Unseekable:
with self.subTest(wrapper=wrapper):
f = io.BytesIO()
f.write(b'abc')
bf = io.BufferedWriter(f)
with zipfile.ZipFile(wrapper(bf), 'w', zipfile.ZIP_STORED) as zipfp:
zipfp.writestr('ones', b'111')
zipfp.writestr('twos', b'222')
self.assertEqual(f.getvalue()[:5], b'abcPK')
with zipfile.ZipFile(f, mode='r') as zipf:
with zipf.open('ones') as zopen:
self.assertEqual(zopen.read(), b'111')
with zipf.open('twos') as zopen:
self.assertEqual(zopen.read(), b'222')
def test_write(self):
for wrapper in (lambda f: f), Tellable, Unseekable:
with self.subTest(wrapper=wrapper):
f = io.BytesIO()
f.write(b'abc')
bf = io.BufferedWriter(f)
with zipfile.ZipFile(wrapper(bf), 'w', zipfile.ZIP_STORED) as zipfp:
self.addCleanup(unlink, TESTFN)
with open(TESTFN, 'wb') as f2:
f2.write(b'111')
zipfp.write(TESTFN, 'ones')
with open(TESTFN, 'wb') as f2:
f2.write(b'222')
zipfp.write(TESTFN, 'twos')
self.assertEqual(f.getvalue()[:5], b'abcPK')
with zipfile.ZipFile(f, mode='r') as zipf:
with zipf.open('ones') as zopen:
self.assertEqual(zopen.read(), b'111')
with zipf.open('twos') as zopen:
self.assertEqual(zopen.read(), b'222')
@requires_zlib
class TestsWithMultipleOpens(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data1 = b'111' + getrandbytes(10000)
cls.data2 = b'222' + getrandbytes(10000)
def make_test_archive(self, f):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", zipfile.ZIP_DEFLATED) as zipfp:
zipfp.writestr('ones', self.data1)
zipfp.writestr('twos', self.data2)
def test_same_file(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
for f in get_files(self):
self.make_test_archive(f)
with zipfile.ZipFile(f, mode="r") as zipf:
with zipf.open('ones') as zopen1, zipf.open('ones') as zopen2:
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read()
data2 += zopen2.read()
self.assertEqual(data1, data2)
self.assertEqual(data1, self.data1)
def test_different_file(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
for f in get_files(self):
self.make_test_archive(f)
with zipfile.ZipFile(f, mode="r") as zipf:
with zipf.open('ones') as zopen1, zipf.open('twos') as zopen2:
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read()
data2 += zopen2.read()
self.assertEqual(data1, self.data1)
self.assertEqual(data2, self.data2)
def test_interleaved(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
for f in get_files(self):
self.make_test_archive(f)
with zipfile.ZipFile(f, mode="r") as zipf:
with zipf.open('ones') as zopen1, zipf.open('twos') as zopen2:
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read()
data2 += zopen2.read()
self.assertEqual(data1, self.data1)
self.assertEqual(data2, self.data2)
def test_read_after_close(self):
for f in get_files(self):
self.make_test_archive(f)
with contextlib.ExitStack() as stack:
with zipfile.ZipFile(f, 'r') as zipf:
zopen1 = stack.enter_context(zipf.open('ones'))
zopen2 = stack.enter_context(zipf.open('twos'))
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read()
data2 += zopen2.read()
self.assertEqual(data1, self.data1)
self.assertEqual(data2, self.data2)
def test_read_after_write(self):
for f in get_files(self):
with zipfile.ZipFile(f, 'w', zipfile.ZIP_DEFLATED) as zipf:
zipf.writestr('ones', self.data1)
zipf.writestr('twos', self.data2)
with zipf.open('ones') as zopen1:
data1 = zopen1.read(500)
self.assertEqual(data1, self.data1[:500])
with zipfile.ZipFile(f, 'r') as zipf:
data1 = zipf.read('ones')
data2 = zipf.read('twos')
self.assertEqual(data1, self.data1)
self.assertEqual(data2, self.data2)
def test_write_after_read(self):
for f in get_files(self):
with zipfile.ZipFile(f, "w", zipfile.ZIP_DEFLATED) as zipf:
zipf.writestr('ones', self.data1)
with zipf.open('ones') as zopen1:
zopen1.read(500)
zipf.writestr('twos', self.data2)
with zipfile.ZipFile(f, 'r') as zipf:
data1 = zipf.read('ones')
data2 = zipf.read('twos')
self.assertEqual(data1, self.data1)
self.assertEqual(data2, self.data2)
def test_many_opens(self):
# Verify that read() and open() promptly close the file descriptor,
# and don't rely on the garbage collector to free resources.
self.make_test_archive(TESTFN2)
with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
for x in range(100):
zipf.read('ones')
with zipf.open('ones') as zopen1:
pass
with open(os.devnull) as f:
self.assertLess(f.fileno(), 100)
def tearDown(self):
unlink(TESTFN2)
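# A hedged sketch of the guarantee tested above: two handles opened on the
# same archive read independently, each keeping its own position. The
# member names 'ones' and 'twos' mirror the hypothetical test archive.
def _interleaved_reads(path):
    import zipfile
    with zipfile.ZipFile(path) as zf:
        with zf.open('ones') as a, zf.open('twos') as b:
            head_a, head_b = a.read(100), b.read(100)   # interleaved reads
            return head_a + a.read(), head_b + b.read()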
class TestWithDirectory(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN2)
def test_extract_dir(self):
with zipfile.ZipFile(findfile("zipdir.zip")) as zipf:
zipf.extractall(TESTFN2)
self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a")))
self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a", "b")))
self.assertTrue(os.path.exists(os.path.join(TESTFN2, "a", "b", "c")))
def test_bug_6050(self):
# Extraction should succeed if directories already exist
os.mkdir(os.path.join(TESTFN2, "a"))
self.test_extract_dir()
def test_write_dir(self):
dirpath = os.path.join(TESTFN2, "x")
os.mkdir(dirpath)
mode = os.stat(dirpath).st_mode & 0xFFFF
with zipfile.ZipFile(TESTFN, "w") as zipf:
zipf.write(dirpath)
zinfo = zipf.filelist[0]
self.assertTrue(zinfo.filename.endswith("/x/"))
self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10)
zipf.write(dirpath, "y")
zinfo = zipf.filelist[1]
self.assertEqual(zinfo.filename, "y/")
self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10)
with zipfile.ZipFile(TESTFN, "r") as zipf:
zinfo = zipf.filelist[0]
self.assertTrue(zinfo.filename.endswith("/x/"))
self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10)
zinfo = zipf.filelist[1]
self.assertEqual(zinfo.filename, "y/")
self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10)
target = os.path.join(TESTFN2, "target")
os.mkdir(target)
zipf.extractall(target)
self.assertTrue(os.path.isdir(os.path.join(target, "y")))
self.assertEqual(len(os.listdir(target)), 2)
def test_writestr_dir(self):
os.mkdir(os.path.join(TESTFN2, "x"))
with zipfile.ZipFile(TESTFN, "w") as zipf:
zipf.writestr("x/", b'')
zinfo = zipf.filelist[0]
self.assertEqual(zinfo.filename, "x/")
self.assertEqual(zinfo.external_attr, (0o40775 << 16) | 0x10)
with zipfile.ZipFile(TESTFN, "r") as zipf:
zinfo = zipf.filelist[0]
self.assertTrue(zinfo.filename.endswith("x/"))
self.assertEqual(zinfo.external_attr, (0o40775 << 16) | 0x10)
target = os.path.join(TESTFN2, "target")
os.mkdir(target)
zipf.extractall(target)
self.assertTrue(os.path.isdir(os.path.join(target, "x")))
self.assertEqual(os.listdir(target), ["x"])
def tearDown(self):
rmtree(TESTFN2)
if os.path.exists(TESTFN):
unlink(TESTFN)
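# A minimal helper sketch decoding the external_attr checks above: for a
# directory entry the high 16 bits carry the Unix st_mode and the low
# 0x10 bit is the MS-DOS directory flag.
def _describe_dir_entry(zinfo):
    unix_mode = zinfo.external_attr >> 16
    is_msdos_dir = bool(zinfo.external_attr & 0x10)
    return oct(unix_mode), is_msdos_dir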
class AbstractUniversalNewlineTests:
@classmethod
def setUpClass(cls):
cls.line_gen = [bytes("Test of zipfile line %d." % i, "ascii")
for i in range(FIXEDTEST_SIZE)]
cls.seps = (b'\r', b'\r\n', b'\n')
cls.arcdata = {}
for n, s in enumerate(cls.seps):
cls.arcdata[s] = s.join(cls.line_gen) + s
def setUp(self):
self.arcfiles = {}
for n, s in enumerate(self.seps):
self.arcfiles[s] = '%s-%d' % (TESTFN, n)
with open(self.arcfiles[s], "wb") as f:
f.write(self.arcdata[s])
def make_test_archive(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression) as zipfp:
for fn in self.arcfiles.values():
zipfp.write(fn, fn)
def read_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with openU(zipfp, fn) as fp:
zipdata = fp.read()
self.assertEqual(self.arcdata[sep], zipdata)
def test_read(self):
for f in get_files(self):
self.read_test(f, self.compression)
def readline_read_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with openU(zipfp, fn) as zipopen:
data = b''
while True:
read = zipopen.readline()
if not read:
break
data += read
read = zipopen.read(5)
if not read:
break
data += read
self.assertEqual(data, self.arcdata[b'\n'])
def test_readline_read(self):
for f in get_files(self):
self.readline_read_test(f, self.compression)
def readline_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with openU(zipfp, fn) as zipopen:
for line in self.line_gen:
linedata = zipopen.readline()
self.assertEqual(linedata, line + b'\n')
def test_readline(self):
for f in get_files(self):
self.readline_test(f, self.compression)
def readlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with openU(zipfp, fn) as fp:
ziplines = fp.readlines()
for line, zipline in zip(self.line_gen, ziplines):
self.assertEqual(zipline, line + b'\n')
def test_readlines(self):
for f in get_files(self):
self.readlines_test(f, self.compression)
def iterlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with openU(zipfp, fn) as fp:
for line, zipline in zip(self.line_gen, fp):
self.assertEqual(zipline, line + b'\n')
def test_iterlines(self):
for f in get_files(self):
self.iterlines_test(f, self.compression)
def tearDown(self):
for sep, fn in self.arcfiles.items():
unlink(fn)
unlink(TESTFN)
unlink(TESTFN2)
class StoredUniversalNewlineTests(AbstractUniversalNewlineTests,
unittest.TestCase):
compression = zipfile.ZIP_STORED
@requires_zlib
class DeflateUniversalNewlineTests(AbstractUniversalNewlineTests,
unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
@requires_bz2
class Bzip2UniversalNewlineTests(AbstractUniversalNewlineTests,
unittest.TestCase):
compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaUniversalNewlineTests(AbstractUniversalNewlineTests,
unittest.TestCase):
compression = zipfile.ZIP_LZMA
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "faaeaaa012688aacb849c69eca85cf25",
"timestamp": "",
"source": "github",
"line_count": 2068,
"max_line_length": 92,
"avg_line_length": 39.749032882011605,
"alnum_prop": 0.573508838091994,
"repo_name": "juanyaw/python",
"id": "67e85704aabfee6e3bdf94293b01cdc79a7812f6",
"size": "82201",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "cpython/Lib/test/test_zipfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "470920"
},
{
"name": "Batchfile",
"bytes": "35551"
},
{
"name": "C",
"bytes": "16518323"
},
{
"name": "C#",
"bytes": "1231"
},
{
"name": "C++",
"bytes": "343272"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "Groff",
"bytes": "254942"
},
{
"name": "HTML",
"bytes": "130698"
},
{
"name": "JavaScript",
"bytes": "10616"
},
{
"name": "Makefile",
"bytes": "25026"
},
{
"name": "Objective-C",
"bytes": "1390263"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24911704"
},
{
"name": "R",
"bytes": "5378"
},
{
"name": "Shell",
"bytes": "437386"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import unittest
import testRObject
import testVector
import testArray
import testDataFrame
import testFormula
import testFunction
import testEnvironment
import testRobjects
import testMethods
import testPackages
# wrap this nicely so a warning is issued if no numpy present
import testNumpyConversions
def suite():
suite_RObject = testRObject.suite()
suite_Vector = testVector.suite()
suite_Array = testArray.suite()
suite_DataFrame = testDataFrame.suite()
suite_Function = testFunction.suite()
suite_Environment = testEnvironment.suite()
suite_Formula = testFormula.suite()
suite_Robjects = testRobjects.suite()
suite_NumpyConversions = testNumpyConversions.suite()
suite_Methods = testMethods.suite()
suite_Packages = testPackages.suite()
alltests = unittest.TestSuite([suite_RObject,
suite_Vector,
suite_Array,
suite_DataFrame,
suite_Function,
suite_Environment,
suite_Formula,
suite_Robjects,
suite_Methods,
suite_NumpyConversions,
suite_Packages
])
return alltests
def main():
r = unittest.TestResult()
suite().run(r)
return r
if __name__ == '__main__':
tr = unittest.TextTestRunner(verbosity=2)
tr.run(suite())
|
{
"content_hash": "86cfb63ea4e1ead9820d34b336983629",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 67,
"avg_line_length": 31.80392156862745,
"alnum_prop": 0.5561035758323057,
"repo_name": "lbouma/Cyclopath",
"id": "79c47a65f0fa9c126569ca56da5de2ff5e1b13e0",
"size": "1622",
"binary": false,
"copies": "1",
"ref": "refs/heads/release",
"path": "pyserver/bin/rpy2/robjects/tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "3369673"
},
{
"name": "ApacheConf",
"bytes": "46372"
},
{
"name": "C",
"bytes": "281248"
},
{
"name": "CSS",
"bytes": "36786"
},
{
"name": "Gnuplot",
"bytes": "14865"
},
{
"name": "HTML",
"bytes": "203213"
},
{
"name": "Java",
"bytes": "688800"
},
{
"name": "JavaScript",
"bytes": "60678"
},
{
"name": "M4",
"bytes": "35700"
},
{
"name": "Makefile",
"bytes": "8036"
},
{
"name": "PHP",
"bytes": "18399"
},
{
"name": "PLSQL",
"bytes": "451"
},
{
"name": "PLpgSQL",
"bytes": "1407944"
},
{
"name": "Perl",
"bytes": "669009"
},
{
"name": "Python",
"bytes": "5830046"
},
{
"name": "Shell",
"bytes": "639435"
}
],
"symlink_target": ""
}
|
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='lioncore',
version='0.1',
packages=find_packages(),
include_package_data=True,
license='BSD License',
description='Core components for the lionschool project',
long_description=README,
url='https://www.example.com/',
author='Leander Lismond',
# author_email='yourname@example.com', # no, creep
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.10',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
install_requires=[
'Django~=1.10'
]
)
|
{
"content_hash": "0a4c0eda8ee2286d1997319c2f21074b",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 78,
"avg_line_length": 33.26315789473684,
"alnum_prop": 0.615506329113924,
"repo_name": "Leo2807/lioncore",
"id": "6a64396736582ab17e9d54461fca108f97dabec7",
"size": "1264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9222"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from operator import attrgetter
from django.apps import apps
from django.core import checks
from django.db import connection, connections, router, transaction
from django.db.backends import utils
from django.db.models import signals, Q
from django.db.models.deletion import SET_NULL, SET_DEFAULT, CASCADE
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.lookups import IsNull
from django.db.models.related import RelatedObject, PathInfo
from django.db.models.query import QuerySet
from django.db.models.sql.datastructures import Col
from django.utils.encoding import smart_text
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import curry, cached_property
from django.core import exceptions
from django import forms
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
if isinstance(relation, six.string_types):
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
else:
# it's actually a model class
app_label = relation._meta.app_label
model_name = relation._meta.object_name
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_model returns None, it means that the related
# model isn't loaded yet, so we need to defer the relation until the class
# is prepared.
try:
model = cls._meta.apps.get_registered_model(app_label, model_name)
except LookupError:
key = (app_label, model_name)
value = (cls, field, operation)
cls._meta.apps._pending_lookups.setdefault(key, []).append(value)
else:
operation(field, model, cls)
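# A hedged sketch (never called here) of the three string forms described
# in the docstring above; 'someapp.AnotherModel' is a hypothetical target.
def _lazy_relation_examples():
    from django.db import models

    class Tag(models.Model):
        parent = models.ForeignKey('self', null=True)       # recursive
        twin = models.ForeignKey('Tag')                     # same app
        owner = models.ForeignKey('someapp.AnotherModel')   # other app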
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in sender._meta.apps._pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
class RelatedField(Field):
def check(self, **kwargs):
errors = super(RelatedField, self).check(**kwargs)
errors.extend(self._check_relation_model_exists())
errors.extend(self._check_referencing_to_swapped_model())
errors.extend(self._check_clashes())
return errors
def _check_relation_model_exists(self):
rel_is_missing = self.rel.to not in apps.get_models()
rel_is_string = isinstance(self.rel.to, six.string_types)
model_name = self.rel.to if rel_is_string else self.rel.to._meta.object_name
if rel_is_missing and (rel_is_string or not self.rel.to._meta.swapped):
return [
checks.Error(
("Field defines a relation with model '%s', which "
"is either not installed, or is abstract.") % model_name,
hint=None,
obj=self,
id='fields.E300',
)
]
return []
def _check_referencing_to_swapped_model(self):
if (self.rel.to not in apps.get_models() and
not isinstance(self.rel.to, six.string_types) and
self.rel.to._meta.swapped):
model = "%s.%s" % (
self.rel.to._meta.app_label,
self.rel.to._meta.object_name
)
return [
checks.Error(
("Field defines a relation with the model '%s', "
"which has been swapped out.") % model,
hint="Update the relation to point at 'settings.%s'." % self.rel.to._meta.swappable,
obj=self,
id='fields.E301',
)
]
return []
def _check_clashes(self):
""" Check accessor and reverse query name clashes. """
from django.db.models.base import ModelBase
errors = []
opts = self.model._meta
# `f.rel.to` may be a string instead of a model. Skip if model name is
# not resolved.
if not isinstance(self.rel.to, ModelBase):
return []
# If the field doesn't install backward relation on the target model (so
# `is_hidden` returns True), then there are no clashes to check and we
# can skip these fields.
if self.rel.is_hidden():
return []
try:
self.related
except AttributeError:
return []
# Consider that we are checking field `Model.foreign` and the models
# are:
#
# class Target(models.Model):
# model = models.IntegerField()
# model_set = models.IntegerField()
#
# class Model(models.Model):
# foreign = models.ForeignKey(Target)
# m2m = models.ManyToManyField(Target)
rel_opts = self.rel.to._meta
# rel_opts.object_name == "Target"
rel_name = self.related.get_accessor_name() # i. e. "model_set"
rel_query_name = self.related_query_name() # i. e. "model"
field_name = "%s.%s" % (opts.object_name,
self.name) # i. e. "Model.field"
# Check clashes between accessor or reverse query name of `field`
# and any other field name -- i. e. accessor for Model.foreign is
# model_set and it clashes with Target.model_set.
potential_clashes = rel_opts.fields + rel_opts.many_to_many
for clash_field in potential_clashes:
clash_name = "%s.%s" % (rel_opts.object_name,
clash_field.name) # i. e. "Target.model_set"
if clash_field.name == rel_name:
errors.append(
checks.Error(
"Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name),
hint=("Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'.") % (clash_name, field_name),
obj=self,
id='fields.E302',
)
)
if clash_field.name == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name),
hint=("Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'.") % (clash_name, field_name),
obj=self,
id='fields.E303',
)
)
# Check clashes between accessors/reverse query names of `field` and
# any other field accessor -- i. e. Model.foreign accessor clashes with
# Model.m2m accessor.
potential_clashes = rel_opts.get_all_related_many_to_many_objects()
potential_clashes += rel_opts.get_all_related_objects()
potential_clashes = (r for r in potential_clashes
if r.field is not self)
for clash_field in potential_clashes:
clash_name = "%s.%s" % ( # i. e. "Model.m2m"
clash_field.model._meta.object_name,
clash_field.field.name)
if clash_field.get_accessor_name() == rel_name:
errors.append(
checks.Error(
"Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name),
hint=("Add or change a related_name argument "
"to the definition for '%s' or '%s'.") % (field_name, clash_name),
obj=self,
id='fields.E304',
)
)
if clash_field.get_accessor_name() == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with reverse query name for '%s'." % (field_name, clash_name),
hint=("Add or change a related_name argument "
"to the definition for '%s' or '%s'.") % (field_name, clash_name),
obj=self,
id='fields.E305',
)
)
return errors
def db_type(self, connection):
'''By default a related field will not have a database column,
as it relates rows in another table'''
return None
def contribute_to_class(self, cls, name, virtual_only=False):
sup = super(RelatedField, self)
# Store the opts for related_query_name()
self.opts = cls._meta
if hasattr(sup, 'contribute_to_class'):
sup.contribute_to_class(cls, name, virtual_only=virtual_only)
if not cls._meta.abstract and self.rel.related_name:
related_name = self.rel.related_name % {
'class': cls.__name__.lower(),
'app_label': cls._meta.app_label.lower()
}
self.rel.related_name = related_name
other = self.rel.to
if isinstance(other, six.string_types) or other._meta.pk is None:
def resolve_related_class(field, model, cls):
field.rel.to = model
field.do_related_class(model, cls)
add_lazy_relation(cls, self, other, resolve_related_class)
else:
self.do_related_class(other, cls)
@property
def swappable_setting(self):
"""
Gets the setting that this is powered from for swapping, or None
if it's not swapped in / marked with swappable=False.
"""
if self.swappable:
# Work out string form of "to"
if isinstance(self.rel.to, six.string_types):
to_string = self.rel.to
else:
to_string = "%s.%s" % (
self.rel.to._meta.app_label,
self.rel.to._meta.object_name,
)
# See if anything swapped/swappable matches
for model in apps.get_models(include_swapped=True):
if model._meta.swapped:
if model._meta.swapped == to_string:
return model._meta.swappable
if ("%s.%s" % (model._meta.app_label, model._meta.object_name)) == to_string and model._meta.swappable:
return model._meta.swappable
return None
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.model_name + '_' + self.rel.to._meta.pk.name)
if self.verbose_name is None:
self.verbose_name = self.rel.to._meta.verbose_name
self.rel.set_field_name()
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
self.related = RelatedObject(other, cls, self)
if not cls._meta.abstract:
self.contribute_to_related_class(other, self.related)
def get_limit_choices_to(self):
"""Returns 'limit_choices_to' for this model field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.rel.limit_choices_to):
return self.rel.limit_choices_to()
return self.rel.limit_choices_to
def formfield(self, **kwargs):
"""Passes ``limit_choices_to`` to field being constructed.
Only passes it if there is a type that supports related fields.
This is a similar strategy used to pass the ``queryset`` to the field
being constructed.
"""
defaults = {}
if hasattr(self.rel, 'get_related_field'):
# If this is a callable, do not invoke it here. Just pass
# it in the defaults for when the form class will later be
# instantiated.
limit_choices_to = self.rel.limit_choices_to
defaults.update({
'limit_choices_to': limit_choices_to,
})
defaults.update(kwargs)
return super(RelatedField, self).formfield(**defaults)
def related_query_name(self):
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_query_name or self.rel.related_name or self.opts.model_name
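# A hedged illustration of related_query_name() above: with hypothetical
# models Poll and Choice (Choice.poll = ForeignKey(Poll)), the reverse
# lookup below spans the relation under the default lower-cased name.
def _polls_with_popular_choices():
    from polls.models import Poll    # hypothetical app
    return Poll.objects.filter(choice__votes__gt=100)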
class SingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class pointed to by a related field.
# In the example "place.restaurant", the restaurant attribute is a
# SingleRelatedObjectDescriptor instance.
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ReverseSingleRelatedObjectDescriptor`.
return type(
str('RelatedObjectDoesNotExist'),
(self.related.model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
# Gotcha: we return a `Manager` instance (i.e. not a `QuerySet`)!
return self.related.model._base_manager.db_manager(hints=hints)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
# Despite its name `get_queryset()` returns an instance of
# `Manager`, therefore we call `all()` to normalize to `QuerySet`.
queryset = self.get_queryset().all()
queryset._add_hints(instance=instances[0])
rel_obj_attr = attrgetter(self.related.field.attname)
instance_attr = lambda obj: obj._get_pk_val()
instances_dict = dict((instance_attr(inst), inst) for inst in instances)
query = {'%s__in' % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
rel_obj_cache_name = self.related.field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
related_pk = instance._get_pk_val()
if related_pk is None:
rel_obj = None
else:
params = {}
for lh_field, rh_field in self.related.field.related_fields:
params['%s__%s' % (self.related.field.name, rh_field.name)] = getattr(instance, rh_field.attname)
try:
rel_obj = self.get_queryset(instance=instance).get(**params)
except self.related.model.DoesNotExist:
rel_obj = None
else:
setattr(rel_obj, self.related.field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj
def __set__(self, instance, value):
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null is False:
raise ValueError(
'Cannot assign None: "%s.%s" does not allow null values.' % (
instance._meta.object_name,
self.related.get_accessor_name(),
)
)
elif value is not None and not isinstance(value, self.related.model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.opts.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
if None in related_pk:
raise ValueError(
'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
(value, instance._meta.object_name)
)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
def __init__(self, field_with_rel):
self.field = field_with_rel
self.cache_name = self.field.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `rel.to` might still be
# a string model reference.
return type(
str('RelatedObjectDoesNotExist'),
(self.field.rel.to.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
rel_mgr = self.field.rel.to._default_manager.db_manager(hints=hints)
# If the related manager indicates that it should be used for
# related fields, respect that.
if getattr(rel_mgr, 'use_for_related_fields', False):
# Gotcha: we return a `Manager` instance (i.e. not a `QuerySet`)!
return rel_mgr
else:
return QuerySet(self.field.rel.to, hints=hints)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
# Despite its name `get_queryset()` may return an instance of
# `Manager`, therefore we call `all()` to normalize to `QuerySet`.
queryset = self.get_queryset().all()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = dict((instance_attr(inst), inst) for inst in instances)
related_field = self.field.foreign_related_fields[0]
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
# solve a regression on 1.6 when the reverse manager is hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if self.field.rel.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.rel.multiple:
rel_obj_cache_name = self.field.related.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
val = self.field.get_local_related_value(instance)
if None in val:
rel_obj = None
else:
params = dict(
(rh_field.attname, getattr(instance, lh_field.attname))
for lh_field, rh_field in self.field.related_fields)
qs = self.get_queryset(instance=instance)
extra_filter = self.field.get_extra_descriptor_filter(instance)
if isinstance(extra_filter, dict):
params.update(extra_filter)
qs = qs.filter(**params)
else:
qs = qs.filter(extra_filter, **params)
# Assuming the database enforces foreign keys, this won't fail.
rel_obj = qs.get()
if not self.field.rel.multiple:
setattr(rel_obj, self.field.related.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
def __set__(self, instance, value):
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null is False:
raise ValueError(
'Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name)
)
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.rel.to._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.cache_name, None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
setattr(related, self.field.related.get_cache_name(), None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
pk = value._get_pk_val()
if pk is None:
raise ValueError(
'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
(value, self.field.rel.to._meta.object_name)
)
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
if value is not None and not self.field.rel.multiple:
setattr(value, self.field.related.get_cache_name(), instance)
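# A minimal sketch of the forward descriptor defined above: the first
# attribute access queries the database, later accesses hit the
# per-instance cache. `choice` is a hypothetical model instance with a
# ForeignKey named `poll`.
def _forward_fk_is_cached(choice):
    first = choice.poll     # one query; result stored under the cache name
    again = choice.poll     # served from the cache, no query
    return first is again   # True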
def create_foreign_related_manager(superclass, rel_field, rel_model):
class RelatedManager(superclass):
def __init__(self, instance):
super(RelatedManager, self).__init__()
self.instance = instance
self.core_filters = {'%s__exact' % rel_field.name: instance}
self.model = rel_model
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_foreign_related_manager(manager.__class__, rel_field, rel_model)
return manager_class(self.instance)
do_not_call_in_templates = True
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[rel_field.related_query_name()]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
qs = super(RelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
qs = qs.filter(**self.core_filters)
for field in rel_field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return qs.none()
qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}}
return qs
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(RelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = rel_field.get_local_related_value
instance_attr = rel_field.get_foreign_related_value
instances_dict = dict((instance_attr(inst), inst) for inst in instances)
query = {'%s__in' % rel_field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_field.name, instance)
cache_name = rel_field.related_query_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name
def add(self, *objs):
objs = list(objs)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" %
(self.model._meta.object_name, obj))
setattr(obj, rel_field.name, self.instance)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
# Update kwargs with the related object that this
# ForeignRelatedObjectsDescriptor knows about.
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel_field.null:
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
val = rel_field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
# Is obj actually part of this descriptor set?
if rel_field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, self.instance))
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{rel_field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, rel_field.name, None)
obj.save(update_fields=[rel_field.name])
_clear.alters_data = True
return RelatedManager
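# A hedged sketch of the reverse manager built above; `poll` and `choice`
# are hypothetical saved instances where Choice has a ForeignKey to Poll.
def _reverse_manager_usage(poll, choice):
    poll.choice_set.add(choice)               # sets choice.poll and saves it
    fresh = poll.choice_set.create(votes=0)   # saves with the FK pre-filled
    return fresh, poll.choice_set.count()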
class ForeignRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ForeignKey pointed at them by
# some other model. In the example "poll.choice_set", the choice_set
# attribute is a ForeignRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
return self
return self.related_manager_cls(instance)
def __set__(self, instance, value):
# Force evaluation of `value` in case it's a queryset whose
# value could be affected by `manager.clear()`. Refs #19816.
value = tuple(value)
manager = self.__get__(instance)
db = router.db_for_write(manager.model, instance=manager.instance)
with transaction.atomic(using=db, savepoint=False):
# If the foreign key can support nulls, then completely clear the related set.
# Otherwise, just move the named objects into the set.
if self.related.field.null:
manager.clear()
manager.add(*value)
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related model's default
# manager.
return create_foreign_related_manager(
self.related.model._default_manager.__class__,
self.related.field,
self.related.model,
)
def create_many_related_manager(superclass, rel):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
class ManyRelatedManager(superclass):
def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None,
source_field_name=None, target_field_name=None, reverse=False,
through=None, prefetch_cache_name=None):
super(ManyRelatedManager, self).__init__()
self.model = model
self.query_field_name = query_field_name
source_field = through._meta.get_field(source_field_name)
source_related_fields = source_field.related_fields
self.core_filters = {}
for lh_field, rh_field in source_related_fields:
self.core_filters['%s__%s' % (query_field_name, rh_field.name)] = getattr(instance, rh_field.attname)
self.instance = instance
self.symmetrical = symmetrical
self.source_field = source_field
self.target_field = through._meta.get_field(target_field_name)
self.source_field_name = source_field_name
self.target_field_name = target_field_name
self.reverse = reverse
self.through = through
self.prefetch_cache_name = prefetch_cache_name
self.related_val = source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, source_field_name))
# Even if this relation is not to the pk, we still require a pk value.
# The wish is that the instance has been already saved to DB,
# although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_many_related_manager(manager.__class__, rel)
return manager_class(
model=self.model,
query_field_name=self.query_field_name,
instance=self.instance,
symmetrical=self.symmetrical,
source_field_name=self.source_field_name,
target_field_name=self.target_field_name,
reverse=self.reverse,
through=self.through,
prefetch_cache_name=self.prefetch_cache_name,
)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
qs = super(ManyRelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
return qs._next_is_sticky().filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(ManyRelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = self.through._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select=dict(
('_prefetch_related_val_%s' % f.attname,
'%s.%s' % (qn(join_table), qn(f.column))) for f in fk.local_related_fields))
return (queryset,
lambda result: tuple(getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields),
lambda inst: tuple(getattr(inst, f.attname) for f in fk.foreign_related_fields),
False,
self.prefetch_cache_name)
def add(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use add() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use remove() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
clear.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = \
super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = (self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
}))
new_ids = new_ids - set(vals)
with transaction.atomic(using=db, savepoint=False):
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
target_model_qs = super(ManyRelatedManager, self).get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.related_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
return ManyRelatedManager
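# Illustrative sketch (not part of Django): typical calls on the many-to-many
# manager built above, assuming hypothetical Article/Publication models like
# those referenced in the descriptor comments below.
#
#     article.publications.add(publication)     # bulk-inserts join-table rows,
#                                                # sending pre_add/post_add signals
#     article.publications.remove(publication)  # deletes matching join-table rows
#     article.publications.clear()              # removes all rows for this instance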
class ManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField pointed at them by
# some other model (rather than having a ManyToManyField themselves).
# In the example "publication.article_set", the article_set attribute is a
# ManyRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related
# model's default manager.
return create_many_related_manager(
self.related.model._default_manager.__class__,
self.related.field.rel
)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
rel_model = self.related.model
manager = self.related_manager_cls(
model=rel_model,
query_field_name=self.related.field.name,
prefetch_cache_name=self.related.field.related_query_name(),
instance=instance,
symmetrical=False,
source_field_name=self.related.field.m2m_reverse_field_name(),
target_field_name=self.related.field.m2m_field_name(),
reverse=True,
through=self.related.field.rel.through,
)
return manager
def __set__(self, instance, value):
if not self.related.field.rel.through._meta.auto_created:
opts = self.related.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
# Force evaluation of `value` in case it's a queryset whose
# value could be affected by `manager.clear()`. Refs #19816.
value = tuple(value)
manager = self.__get__(instance)
db = router.db_for_write(manager.through, instance=manager.instance)
with transaction.atomic(using=db, savepoint=False):
manager.clear()
manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField defined in their
# model (rather than having another model pointed *at* them).
# In the example "article.publications", the publications attribute is a
# ReverseManyRelatedObjectsDescriptor instance.
def __init__(self, m2m_field):
self.field = m2m_field
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.field.rel.through
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related model's
# default manager.
return create_many_related_manager(
self.field.rel.to._default_manager.__class__,
self.field.rel
)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
manager = self.related_manager_cls(
model=self.field.rel.to,
query_field_name=self.field.related_query_name(),
prefetch_cache_name=self.field.name,
instance=instance,
symmetrical=self.field.rel.symmetrical,
source_field_name=self.field.m2m_field_name(),
target_field_name=self.field.m2m_reverse_field_name(),
reverse=False,
through=self.field.rel.through,
)
return manager
def __set__(self, instance, value):
if not self.field.rel.through._meta.auto_created:
opts = self.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
# Force evaluation of `value` in case it's a queryset whose
# value could be affected by `manager.clear()`. Refs #19816.
value = tuple(value)
manager = self.__get__(instance)
db = router.db_for_write(manager.through, instance=manager.instance)
with transaction.atomic(using=db, savepoint=False):
manager.clear()
manager.add(*value)
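# Note (illustrative, using the hypothetical models above): direct assignment
# such as `article.publications = [p1, p2]` routes through __set__, which runs
# clear() followed by add(p1, p2) inside a single atomic block.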
class ForeignObjectRel(object):
def __init__(self, field, to, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
self.field = field
self.to = to
self.related_name = related_name
self.related_query_name = related_query_name
self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
self.multiple = True
self.parent_link = parent_link
self.on_delete = on_delete
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name and self.related_name[-1] == '+'
def get_joining_columns(self):
return self.field.get_reverse_joining_columns()
def get_extra_restriction(self, where_class, alias, related_alias):
return self.field.get_extra_restriction(where_class, related_alias, alias)
def set_field_name(self):
"""
        Sets the related field's name. This is not available until later stages
        of app loading, so set_field_name is called from
        set_attributes_from_rel().
"""
# By default foreign object doesn't relate to any remote field (for
# example custom multicolumn joins currently have no remote field).
self.field_name = None
def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookup_type,
raw_value):
return self.field.get_lookup_constraint(constraint_class, alias, targets, sources,
lookup_type, raw_value)
class ManyToOneRel(ForeignObjectRel):
def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
super(ManyToOneRel, self).__init__(
field, to, related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name)
self.field_name = field_name
def get_related_field(self):
"""
Returns the Field in the 'to' object to which this relationship is
tied.
"""
data = self.to._meta.get_field_by_name(self.field_name)
if not data[2]:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return data[0]
def set_field_name(self):
self.field_name = self.field_name or self.to._meta.pk.name
class OneToOneRel(ManyToOneRel):
def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
super(OneToOneRel, self).__init__(field, to, field_name,
related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name)
self.multiple = False
class ManyToManyRel(object):
def __init__(self, to, related_name=None, limit_choices_to=None,
symmetrical=True, through=None, through_fields=None,
db_constraint=True, related_query_name=None):
if through and not db_constraint:
raise ValueError("Can't supply a through model and db_constraint=False")
if through_fields and not through:
raise ValueError("Cannot specify through_fields without a through model")
self.to = to
self.related_name = related_name
self.related_query_name = related_query_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.symmetrical = symmetrical
self.multiple = True
self.through = through
self.through_fields = through_fields
self.db_constraint = db_constraint
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name and self.related_name[-1] == '+'
def get_related_field(self):
"""
        Returns the field in the 'to' object to which this relationship is tied
(this is always the primary key on the target model). Provided for
symmetry with ManyToOneRel.
"""
return self.to._meta.pk
class ForeignObject(RelatedField):
requires_unique_target = True
generate_reverse_relation = True
related_accessor_class = ForeignRelatedObjectsDescriptor
def __init__(self, to, from_fields, to_fields, swappable=True, **kwargs):
self.from_fields = from_fields
self.to_fields = to_fields
self.swappable = swappable
if 'rel' not in kwargs:
kwargs['rel'] = ForeignObjectRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
super(ForeignObject, self).__init__(**kwargs)
def check(self, **kwargs):
errors = super(ForeignObject, self).check(**kwargs)
errors.extend(self._check_unique_target())
return errors
def _check_unique_target(self):
rel_is_string = isinstance(self.rel.to, six.string_types)
if rel_is_string or not self.requires_unique_target:
return []
        # Skip if the related fields cannot be resolved yet.
try:
self.foreign_related_fields
except FieldDoesNotExist:
return []
try:
self.related
except AttributeError:
return []
has_unique_field = any(rel_field.unique
for rel_field in self.foreign_related_fields)
if not has_unique_field and len(self.foreign_related_fields) > 1:
field_combination = ', '.join("'%s'" % rel_field.name
for rel_field in self.foreign_related_fields)
model_name = self.rel.to.__name__
return [
checks.Error(
"None of the fields %s on model '%s' have a unique=True constraint." % (field_combination, model_name),
hint=None,
obj=self,
id='fields.E310',
)
]
elif not has_unique_field:
field_name = self.foreign_related_fields[0].name
model_name = self.rel.to.__name__
return [
checks.Error(
("'%s.%s' must set unique=True "
"because it is referenced by a foreign key.") % (model_name, field_name),
hint=None,
obj=self,
id='fields.E311',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ForeignObject, self).deconstruct()
kwargs['from_fields'] = self.from_fields
kwargs['to_fields'] = self.to_fields
# Work out string form of "to"
if isinstance(self.rel.to, six.string_types):
kwargs['to'] = self.rel.to
else:
kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError("Cannot deconstruct a ForeignKey pointing to a model that is swapped in place of more than one model (%s and %s)" % (kwargs['to'].setting_name, swappable_setting))
# Set it
from django.db.migrations.writer import SettingsReference
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def resolve_related_fields(self):
if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
raise ValueError('Foreign Object from and to fields must be the same non-zero length')
if isinstance(self.rel.to, six.string_types):
raise ValueError('Related model %r cannot be resolved' % self.rel.to)
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (self if from_field_name == 'self'
else self.opts.get_field_by_name(from_field_name)[0])
to_field = (self.rel.to._meta.pk if to_field_name is None
else self.rel.to._meta.get_field_by_name(to_field_name)[0])
related_fields.append((from_field, to_field))
return related_fields
@property
def related_fields(self):
if not hasattr(self, '_related_fields'):
self._related_fields = self.resolve_related_fields()
return self._related_fields
@property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@property
def foreign_related_fields(self):
return tuple(rhs_field for lhs_field, rhs_field in self.related_fields)
def get_local_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.local_related_fields)
def get_foreign_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
opts = instance._meta
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
if field.primary_key:
possible_parent_link = opts.get_ancestor_link(field.model)
if (not possible_parent_link or
possible_parent_link.primary_key or
possible_parent_link.model._meta.abstract):
ret.append(instance.pk)
continue
ret.append(getattr(instance, field.attname))
return tuple(ret)
def get_attname_column(self):
attname, column = super(ForeignObject, self).get_attname_column()
return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source)
def get_reverse_joining_columns(self):
return self.get_joining_columns(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
"""
        Returns an extra filter condition for related object fetching when a
        user does 'instance.fieldname'; that is, the extra filter is used in
        the descriptor of the field.
The filter should be either a dict usable in .filter(**kwargs) call or
a Q-object. The condition will be ANDed together with the relation's
joining columns.
A parallel method is get_extra_restriction() which is used in
JOIN and subquery conditions.
"""
return {}
def get_extra_restriction(self, where_class, alias, related_alias):
"""
        Returns a condition used for joining and subquery pushdown. The
        condition is something that responds to the as_sql(qn, connection)
        method. Note that currently referring to both 'alias' and 'related_alias'
will not work in some conditions, like subquery pushdown.
A parallel method is get_extra_descriptor_filter() which is used in
instance.fieldname related object fetching.
"""
return None
def get_path_info(self):
"""
Get path from this field to the related model.
"""
opts = self.rel.to._meta
from_opts = self.model._meta
return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.rel.to._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
return pathinfos
def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookups,
raw_value):
from django.db.models.sql.where import SubqueryConstraint, AND, OR
root_constraint = constraint_class()
assert len(targets) == len(sources)
if len(lookups) > 1:
raise exceptions.FieldError('Relation fields do not support nested lookups')
lookup_type = lookups[0]
def get_normalized_value(value):
from django.db.models import Model
if isinstance(value, Model):
value_list = []
for source in sources:
# Account for one-to-one relations when sent a different model
while not isinstance(value, source.model) and source.rel:
source = source.rel.to._meta.get_field(source.rel.field_name)
value_list.append(getattr(value, source.attname))
return tuple(value_list)
elif not isinstance(value, tuple):
return (value,)
return value
is_multicolumn = len(self.related_fields) > 1
if (hasattr(raw_value, '_as_sql') or
hasattr(raw_value, 'get_compiler')):
root_constraint.add(SubqueryConstraint(alias, [target.column for target in targets],
[source.name for source in sources], raw_value),
AND)
elif lookup_type == 'isnull':
root_constraint.add(IsNull(Col(alias, targets[0], sources[0]), raw_value), AND)
elif (lookup_type == 'exact' or (lookup_type in ['gt', 'lt', 'gte', 'lte']
and not is_multicolumn)):
value = get_normalized_value(raw_value)
for target, source, val in zip(targets, sources, value):
lookup_class = target.get_lookup(lookup_type)
root_constraint.add(
lookup_class(Col(alias, target, source), val), AND)
elif lookup_type in ['range', 'in'] and not is_multicolumn:
values = [get_normalized_value(value) for value in raw_value]
value = [val[0] for val in values]
lookup_class = targets[0].get_lookup(lookup_type)
root_constraint.add(lookup_class(Col(alias, targets[0], sources[0]), value), AND)
elif lookup_type == 'in':
values = [get_normalized_value(value) for value in raw_value]
for value in values:
value_constraint = constraint_class()
for source, target, val in zip(sources, targets, value):
lookup_class = target.get_lookup('exact')
lookup = lookup_class(Col(alias, target, source), val)
value_constraint.add(lookup, AND)
root_constraint.add(value_constraint, OR)
else:
raise TypeError('Related Field got invalid lookup: %s' % lookup_type)
return root_constraint
@property
def attnames(self):
return tuple(field.attname for field in self.local_related_fields)
def get_defaults(self):
return tuple(field.get_default() for field in self.local_related_fields)
def contribute_to_class(self, cls, name, virtual_only=False):
super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# and swapped models don't get a related descriptor.
if not self.rel.is_hidden() and not related.model._meta.swapped:
setattr(cls, related.get_accessor_name(), self.related_accessor_class(related))
# While 'limit_choices_to' might be a callable, simply pass
# it along for later - this is too early because it's still
# model load time.
if self.rel.limit_choices_to:
cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
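# Illustrative sketch (hypothetical models; ForeignObject is mostly used
# internally): a relation joining on multiple columns instead of a single
# foreign-key column.
#
#     class Address(models.Model):
#         company = models.CharField(max_length=30)
#         customer_id = models.IntegerField()
#
#     class Contact(models.Model):
#         company = models.CharField(max_length=30)
#         customer_id = models.IntegerField()
#         address = ForeignObject(
#             Address, from_fields=['company', 'customer_id'],
#             to_fields=['company', 'customer_id'])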
class ForeignKey(ForeignObject):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('%(model)s instance with %(field)s %(value)r does not exist.')
}
description = _("Foreign Key (type determined by related field)")
def __init__(self, to, to_field=None, rel_class=ManyToOneRel,
db_constraint=True, **kwargs):
try:
to._meta.model_name
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
# For backwards compatibility purposes, we need to *try* and set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
if 'db_index' not in kwargs:
kwargs['db_index'] = True
self.db_constraint = db_constraint
kwargs['rel'] = rel_class(
self, to, to_field,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
super(ForeignKey, self).__init__(to, ['self'], [to_field], **kwargs)
def check(self, **kwargs):
errors = super(ForeignKey, self).check(**kwargs)
errors.extend(self._check_on_delete())
return errors
def _check_on_delete(self):
on_delete = getattr(self.rel, 'on_delete', None)
if on_delete == SET_NULL and not self.null:
return [
checks.Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=self,
id='fields.E320',
)
]
elif on_delete == SET_DEFAULT and not self.has_default():
return [
checks.Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=self,
id='fields.E321',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ForeignKey, self).deconstruct()
del kwargs['to_fields']
del kwargs['from_fields']
# Handle the simpler arguments
if self.db_index:
del kwargs['db_index']
else:
kwargs['db_index'] = False
if self.db_constraint is not True:
kwargs['db_constraint'] = self.db_constraint
if self.rel.on_delete is not CASCADE:
kwargs['on_delete'] = self.rel.on_delete
# Rel needs more work.
to_meta = getattr(self.rel.to, "_meta", None)
if self.rel.field_name and (not to_meta or (to_meta.pk and self.rel.field_name != to_meta.pk.name)):
kwargs['to_field'] = self.rel.field_name
return name, path, args, kwargs
@property
def related_field(self):
return self.foreign_related_fields[0]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.rel.to._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
return pathinfos
def validate(self, value, model_instance):
if self.rel.parent_link:
return
super(ForeignKey, self).validate(value, model_instance)
if value is None:
return
using = router.db_for_read(model_instance.__class__, instance=model_instance)
qs = self.rel.to._default_manager.using(using).filter(
**{self.rel.field_name: value}
)
qs = qs.complex_filter(self.get_limit_choices_to())
if not qs.exists():
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={
'model': self.rel.to._meta.verbose_name, 'pk': value,
'field': self.rel.field_name, 'value': value,
                }, # 'pk' is included for backwards compatibility
)
def get_attname(self):
return '%s_id' % self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_validator_unique_lookup_type(self):
return '%s__%s__exact' % (self.name, self.related_field.name)
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.rel.to):
return getattr(field_default, self.related_field.attname)
return field_default
def get_db_prep_save(self, value, connection):
if value is None or (value == '' and
(not self.related_field.empty_strings_allowed or
connection.features.interprets_empty_strings_as_nulls)):
return None
else:
return self.related_field.get_db_prep_save(value, connection=connection)
def value_to_string(self, obj):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# we have to check that the length of choices is *2*, not 1,
# because SelectFields always have an initial "blank" value.
if not self.blank and self.choices:
choice_list = self.get_choices_default()
if len(choice_list) == 2:
return smart_text(choice_list[1][0])
return super(ForeignKey, self).value_to_string(obj)
def contribute_to_related_class(self, cls, related):
super(ForeignKey, self).contribute_to_related_class(cls, related)
if self.rel.field_name is None:
self.rel.field_name = cls._meta.pk.name
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
if isinstance(self.rel.to, six.string_types):
raise ValueError("Cannot create form field for %r yet, because "
"its related model %r has not been loaded yet" %
(self.name, self.rel.to))
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.rel.to._default_manager.using(db),
'to_field_name': self.rel.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
def db_type(self, connection):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields, however, the only
        # thing we can do is make AutoField an IntegerField.
rel_field = self.related_field
if (isinstance(rel_field, AutoField) or
(not connection.features.related_fields_match_type and
isinstance(rel_field, (PositiveIntegerField,
PositiveSmallIntegerField)))):
return IntegerField().db_type(connection=connection)
return rel_field.db_type(connection=connection)
def db_parameters(self, connection):
return {"type": self.db_type(connection), "check": []}
class OneToOneField(ForeignKey):
"""
A OneToOneField is essentially the same as a ForeignKey, with the exception
    that it always carries a "unique" constraint with it and the reverse relation
always returns the object pointed to (since there will only ever be one),
rather than returning a list.
"""
related_accessor_class = SingleRelatedObjectDescriptor
description = _("One-to-one relationship")
def __init__(self, to, to_field=None, **kwargs):
kwargs['unique'] = True
super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(OneToOneField, self).deconstruct()
if "unique" in kwargs:
del kwargs['unique']
return name, path, args, kwargs
def formfield(self, **kwargs):
if self.rel.parent_link:
return None
return super(OneToOneField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.rel.to):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
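# Usage sketch (hypothetical models): a OneToOneField behaves like a unique
# ForeignKey whose reverse accessor returns a single object.
#
#     class Place(models.Model):
#         name = models.CharField(max_length=50)
#
#     class Restaurant(models.Model):
#         place = models.OneToOneField(Place)
#
#     restaurant.place      # forward: the related Place instance
#     place.restaurant      # reverse: a single Restaurant, not a manager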
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
managed = True
if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
to_model = field.rel.to
to = to_model.split('.')[-1]
def set_managed(field, model, cls):
field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
add_lazy_relation(klass, field, to_model, set_managed)
elif isinstance(field.rel.to, six.string_types):
to = klass._meta.object_name
to_model = klass
managed = klass._meta.managed
else:
to = field.rel.to._meta.object_name
to_model = field.rel.to
managed = klass._meta.managed or to_model._meta.managed
name = '%s_%s' % (klass._meta.object_name, field.name)
if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
from_ = 'from_%s' % to.lower()
to = 'to_%s' % to.lower()
else:
from_ = klass._meta.model_name
to = to.lower()
meta = type(str('Meta'), (object,), {
'db_table': field._get_m2m_db_table(klass._meta),
'managed': managed,
'auto_created': klass,
'app_label': klass._meta.app_label,
'db_tablespace': klass._meta.db_tablespace,
'unique_together': (from_, to),
'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
'apps': field.model._meta.apps,
})
# Construct and return the new class.
return type(str(name), (models.Model,), {
'Meta': meta,
'__module__': klass.__module__,
from_: models.ForeignKey(klass, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint),
to: models.ForeignKey(to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint)
})
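# Sketch of what the factory above generates (illustrative; the real class is
# built dynamically). For a field `publications` on a hypothetical Article
# model, the auto-created through model is roughly equivalent to:
#
#     class Article_publications(models.Model):
#         article = models.ForeignKey(Article, related_name='Article_publications+')
#         publication = models.ForeignKey(Publication, related_name='Article_publications+')
#         class Meta:
#             db_table = 'app_article_publications'
#             unique_together = ('article', 'publication')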
class ManyToManyField(RelatedField):
description = _("Many-to-many relationship")
def __init__(self, to, db_constraint=True, swappable=True, **kwargs):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
# Class names must be ASCII in Python 2.x, so we forcibly coerce it here to break early if there's a problem.
to = str(to)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = ManyToManyRel(to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
through=kwargs.pop('through', None),
through_fields=kwargs.pop('through_fields', None),
db_constraint=db_constraint,
)
self.swappable = swappable
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
super(ManyToManyField, self).__init__(**kwargs)
def check(self, **kwargs):
errors = super(ManyToManyField, self).check(**kwargs)
errors.extend(self._check_unique(**kwargs))
errors.extend(self._check_relationship_model(**kwargs))
errors.extend(self._check_ignored_options(**kwargs))
return errors
def _check_unique(self, **kwargs):
if self.unique:
return [
checks.Error(
'ManyToManyFields cannot be unique.',
hint=None,
obj=self,
id='fields.E330',
)
]
return []
def _check_ignored_options(self, **kwargs):
warnings = []
if self.null:
warnings.append(
checks.Warning(
'null has no effect on ManyToManyField.',
hint=None,
obj=self,
id='fields.W340',
)
)
if len(self._validators) > 0:
warnings.append(
checks.Warning(
'ManyToManyField does not support validators.',
hint=None,
obj=self,
id='fields.W341',
)
)
return warnings
def _check_relationship_model(self, from_model=None, **kwargs):
if hasattr(self.rel.through, '_meta'):
qualified_model_name = "%s.%s" % (
self.rel.through._meta.app_label, self.rel.through.__name__)
else:
qualified_model_name = self.rel.through
errors = []
if self.rel.through not in apps.get_models(include_auto_created=True):
# The relationship model is not installed.
errors.append(
checks.Error(
("Field specifies a many-to-many relation through model "
"'%s', which has not been installed.") %
qualified_model_name,
hint=None,
obj=self,
id='fields.E331',
)
)
else:
assert from_model is not None, \
"ManyToManyField with intermediate " \
"tables cannot be checked if you don't pass the model " \
"where the field is attached to."
# Set some useful local variables
to_model = self.rel.to
from_model_name = from_model._meta.object_name
if isinstance(to_model, six.string_types):
to_model_name = to_model
else:
to_model_name = to_model._meta.object_name
relationship_model_name = self.rel.through._meta.object_name
self_referential = from_model == to_model
# Check symmetrical attribute.
if (self_referential and self.rel.symmetrical and
not self.rel.through._meta.auto_created):
errors.append(
checks.Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=self,
id='fields.E332',
)
)
# Count foreign keys in intermediate model
if self_referential:
seen_self = sum(from_model == getattr(field.rel, 'to', None)
for field in self.rel.through._meta.fields)
if seen_self > 2 and not self.rel.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than two foreign keys "
"to '%s', which is ambiguous. You must specify "
"which two foreign keys Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
hint=("Use through_fields to specify which two "
"foreign keys Django should use."),
obj=self.rel.through,
id='fields.E333',
)
)
else:
# Count foreign keys in relationship model
seen_from = sum(from_model == getattr(field.rel, 'to', None)
for field in self.rel.through._meta.fields)
seen_to = sum(to_model == getattr(field.rel, 'to', None)
for field in self.rel.through._meta.fields)
if seen_from > 1 and not self.rel.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"from '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
hint=('If you want to create a recursive relationship, '
                                'use ManyToManyField("self", symmetrical=False, '
'through="%s").') % relationship_model_name,
obj=self,
id='fields.E334',
)
)
if seen_to > 1 and not self.rel.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"to '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, to_model_name),
hint=('If you want to create a recursive '
                                'relationship, use ManyToManyField("self", '
'symmetrical=False, through="%s").') % relationship_model_name,
obj=self,
id='fields.E335',
)
)
if seen_from == 0 or seen_to == 0:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it does not have a foreign key to '%s' or '%s'.") % (
self, from_model_name, to_model_name
),
hint=None,
obj=self.rel.through,
id='fields.E336',
)
)
# Validate `through_fields`
if self.rel.through_fields is not None:
# Validate that we're given an iterable of at least two items
# and that none of them is "falsy"
if not (len(self.rel.through_fields) >= 2 and
self.rel.through_fields[0] and self.rel.through_fields[1]):
errors.append(
checks.Error(
("Field specifies 'through_fields' but does not "
"provide the names of the two link fields that should be "
"used for the relation through model "
"'%s'.") % qualified_model_name,
hint=("Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"),
obj=self,
id='fields.E337',
)
)
# Validate the given through fields -- they should be actual
# fields on the through model, and also be foreign keys to the
# expected models
else:
assert from_model is not None, \
"ManyToManyField with intermediate " \
"tables cannot be checked if you don't pass the model " \
"where the field is attached to."
source, through, target = from_model, self.rel.through, self.rel.to
source_field_name, target_field_name = self.rel.through_fields[:2]
for field_name, related_model in ((source_field_name, source),
(target_field_name, target)):
possible_field_names = []
for f in through._meta.fields:
if hasattr(f, 'rel') and getattr(f.rel, 'to', None) == related_model:
possible_field_names.append(f.name)
if possible_field_names:
hint = ("Did you mean one of the following foreign "
"keys to '%s': %s?") % (related_model._meta.object_name,
', '.join(possible_field_names))
else:
hint = None
try:
field = through._meta.get_field(field_name)
except FieldDoesNotExist:
errors.append(
checks.Error(
("The intermediary model '%s' has no field '%s'.") % (
qualified_model_name, field_name),
hint=hint,
obj=self,
id='fields.E338',
)
)
else:
if not (hasattr(field, 'rel') and
getattr(field.rel, 'to', None) == related_model):
errors.append(
checks.Error(
"'%s.%s' is not a foreign key to '%s'." % (
through._meta.object_name, field_name,
related_model._meta.object_name),
hint=hint,
obj=self,
id='fields.E339',
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super(ManyToManyField, self).deconstruct()
# Handle the simpler arguments
if self.rel.db_constraint is not True:
kwargs['db_constraint'] = self.rel.db_constraint
if "help_text" in kwargs:
del kwargs['help_text']
# Rel needs more work.
if isinstance(self.rel.to, six.string_types):
kwargs['to'] = self.rel.to
else:
kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
if getattr(self.rel, 'through', None) is not None:
if isinstance(self.rel.through, six.string_types):
kwargs['through'] = self.rel.through
elif not self.rel.through._meta.auto_created:
kwargs['through'] = "%s.%s" % (self.rel.through._meta.app_label, self.rel.through._meta.object_name)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError("Cannot deconstruct a ManyToManyField pointing to a model that is swapped in place of more than one model (%s and %s)" % (kwargs['to'].setting_name, swappable_setting))
# Set it
from django.db.migrations.writer import SettingsReference
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def _get_path_info(self, direct=False):
"""
Called by both direct and indirect m2m traversal.
"""
pathinfos = []
int_model = self.rel.through
linkfield1 = int_model._meta.get_field_by_name(self.m2m_field_name())[0]
linkfield2 = int_model._meta.get_field_by_name(self.m2m_reverse_field_name())[0]
if direct:
join1infos = linkfield1.get_reverse_path_info()
join2infos = linkfield2.get_path_info()
else:
join1infos = linkfield2.get_reverse_path_info()
join2infos = linkfield1.get_path_info()
pathinfos.extend(join1infos)
pathinfos.extend(join2infos)
return pathinfos
def get_path_info(self):
return self._get_path_info(direct=True)
def get_reverse_path_info(self):
return self._get_path_info(direct=False)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"Function that can be curried to provide the m2m table name for this relation"
if self.rel.through is not None:
return self.rel.through._meta.db_table
elif self.db_table:
return self.db_table
else:
return utils.truncate_name('%s_%s' % (opts.db_table, self.name),
connection.ops.max_name_length())
def _get_m2m_attr(self, related, attr):
"Function that can be curried to provide the source accessor or DB column name for the m2m table"
cache_attr = '_m2m_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
if self.rel.through_fields is not None:
link_field_name = self.rel.through_fields[0]
else:
link_field_name = None
for f in self.rel.through._meta.fields:
if hasattr(f, 'rel') and f.rel and f.rel.to == related.model and \
(link_field_name is None or link_field_name == f.name):
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"Function that can be curried to provide the related accessor or DB column name for the m2m table"
cache_attr = '_m2m_reverse_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
if self.rel.through_fields is not None:
link_field_name = self.rel.through_fields[1]
else:
link_field_name = None
for f in self.rel.through._meta.fields:
if hasattr(f, 'rel') and f.rel and f.rel.to == related.parent_model:
if link_field_name is None and related.model == related.parent_model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
elif link_field_name is None or link_field_name == f.name:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_text(data)
def contribute_to_class(self, cls, name):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
self.rel.related_name = "%s_rel_+" % name
super(ManyToManyField, self).contribute_to_class(cls, name)
# The intermediate m2m model is not auto created if:
# 1) There is a manually specified intermediate, or
        # 2) The class owning the m2m field is abstract, or
# 3) The class owning the m2m field has been swapped out.
if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped:
self.rel.through = create_many_to_many_intermediary_model(self, cls)
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
# Set up the accessor for the m2m table name for the relation
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
# Populate some necessary rel arguments so that cross-app relations
# work correctly.
if isinstance(self.rel.through, six.string_types):
def resolve_through_model(field, model, cls):
field.rel.through = model
add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
def contribute_to_related_class(self, cls, related):
# Internal M2Ms (i.e., those with a related name ending with '+')
# and swapped models don't get a related descriptor.
if not self.rel.is_hidden() and not related.model._meta.swapped:
setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
# Set up the accessors for the column names on the m2m table
self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
self.m2m_target_field_name = lambda: get_m2m_rel().field_name
get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelMultipleChoiceField,
'queryset': self.rel.to._default_manager.using(db),
}
defaults.update(kwargs)
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
initial = defaults['initial']
if callable(initial):
initial = initial()
defaults['initial'] = [i._get_pk_val() for i in initial]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
def db_parameters(self, connection):
return {"type": None, "check": None}
|
{
"content_hash": "9699f0d61904dc49111708e546ed2e27",
"timestamp": "",
"source": "github",
"line_count": 2346,
"max_line_length": 228,
"avg_line_length": 45.21227621483376,
"alnum_prop": 0.5696345740468379,
"repo_name": "dhoffman34/django",
"id": "1dc5ff9b1db9ae0f9c8c5c4f8143d234052ab1ff",
"size": "106068",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/db/models/fields/related.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
import sys
import bz2
import shutil
import gzip as gz
from zipfile import ZipFile
from tarfile import TarFile
from rarfile import RarFile
change_coding = 'utf-8'
def getpath(file):
    """\
    Get the directory containing `file`; if it has no directory component,
    return the current working directory's path.
    file: the name of the file to process
    """
    # os.path.dirname() returns '' for a bare filename rather than raising,
    # so fall back to '.' explicitly.
    return os.path.dirname(file) or '.'
def zip_with_coding(zf_file):
    """Extract a ZipFile whose member names were mis-decoded by zipfile."""
    name_list = zf_file.namelist()
    # zipfile decodes non-UTF-8 member names as cp437; re-encode with cp437
    # and decode with the intended charset to recover the real names.
    name_tran_list = [item.encode('cp437').decode(change_coding) for item in name_list]
    # Create every target directory first.
    for name_temp in name_tran_list:
        dirname = os.path.dirname(name_temp)
        if dirname:
            os.makedirs(dirname, exist_ok=True)
    # Write each member under its recovered name. (The built-in zip() is
    # shadowed by the zip() function below, so iterate by index.)
    for number in range(len(name_list)):
        tran_name = name_tran_list[number]
        if tran_name.endswith('/'):
            continue  # directory entry, already created above
        with open(tran_name, 'wb') as f:
            f.write(zf_file.read(name_list[number]))
def zip(file, fix_coding=False):
    f = ZipFile(file)
    if not fix_coding:
        f.extractall(path=getpath(file))
    else:
        # Member names are mojibake; re-decode them manually.
        zip_with_coding(f)
    f.close()
def rar(file):
f = RarFile(file, charset = change_coding)
f.extractall(path = getpath(file))
f.close()
def tar(file):
f = TarFile(file, encoding = change_coding)
f.extractall(path = getpath(file))
f.close()
def bzip2(file):
    # Stream-decompress to '<file>.finish' in 1 KiB chunks.
    with bz2.open(file, 'rb') as zip_file:
        with open(file + '.finish', 'wb') as plain_file:
            shutil.copyfileobj(zip_file, plain_file, 1024)
def gzip(file):
    # Stream-decompress to '<file>.finish' in 1 KiB chunks.
    with gz.open(file, 'rb') as zip_file:
        with open(file + '.finish', 'wb') as plain_file:
            shutil.copyfileobj(zip_file, plain_file, 1024)
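# A minimal dispatch helper (a sketch, not part of the original module): route
# a file to the matching extractor above by its extension. The extension map
# is an assumption; extend it as needed.
def extract(file):
    handlers = {
        '.zip': zip,
        '.rar': rar,
        '.tar': tar,
        '.bz2': bzip2,
        '.gz': gzip,
    }
    ext = os.path.splitext(file)[1].lower()
    try:
        handlers[ext](file)
    except KeyError:
        print('unsupported archive type: %s' % file, file=sys.stderr)
if __name__ == '__main__':
    for name in sys.argv[1:]:
        extract(name)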
|
{
"content_hash": "479a69b33a289e76062eedc0dcf34717",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 89,
"avg_line_length": 25.469135802469136,
"alnum_prop": 0.5419292292777509,
"repo_name": "catonlyonce/Py-uncompress",
"id": "92b3355a5044ae59efd3bcf1aef38c6cf814142f",
"size": "2669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uncompress.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "64543"
}
],
"symlink_target": ""
}
|
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._available_ground_stations_operations import build_get_request, build_list_by_capability_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailableGroundStationsOperations:
"""AvailableGroundStationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.orbital.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_capability(
self,
capability: Union[str, "_models.CapabilityParameter"] = "EarthObservation",
**kwargs: Any
) -> AsyncIterable["_models.AvailableGroundStationListResult"]:
"""Returns list of available ground stations.
:param capability: Ground Station Capability. Default value is "EarthObservation".
:type capability: str or ~azure.mgmt.orbital.models.CapabilityParameter
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableGroundStationListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.orbital.models.AvailableGroundStationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-03-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableGroundStationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_capability_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
capability=capability,
template_url=self.list_by_capability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_capability_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
capability=capability,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AvailableGroundStationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_capability.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Orbital/availableGroundStations"} # type: ignore
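    # Illustrative usage sketch (client construction and the attribute name
    # `available_ground_stations` are assumptions, not defined in this file):
    #
    #   async for station in client.available_ground_stations.list_by_capability(
    #           capability="EarthObservation"):
    #       print(station.name)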
@distributed_trace_async
async def get(
self,
ground_station_name: str,
**kwargs: Any
) -> "_models.AvailableGroundStation":
"""Gets the specified available ground station.
:param ground_station_name: Ground Station name.
:type ground_station_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailableGroundStation, or the result of cls(response)
:rtype: ~azure.mgmt.orbital.models.AvailableGroundStation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableGroundStation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-03-01") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
ground_station_name=ground_station_name,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailableGroundStation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Orbital/availableGroundStations/{groundStationName}"} # type: ignore
|
{
"content_hash": "7235bfbe414482932f5f4c574291946a",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 149,
"avg_line_length": 43.1,
"alnum_prop": 0.6549747509212501,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d770f914a8e9fd6ed65853347eba4dc7853f770f",
"size": "7827",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/orbital/azure-mgmt-orbital/azure/mgmt/orbital/aio/operations/_available_ground_stations_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name = "ucldc-iiif",
version = "0.0.1",
description = ("International Image Interoperability Framework (IIIF) implementation code for the UCLDC project"),
long_description=read('README.md'),
author='Barbara Hui',
author_email='barbara.hui@ucop.edu',
dependency_links=[
'https://github.com/ucldc/pynux/archive/master.zip#egg=pynux',
'https://github.com/barbarahui/nuxeo-calisphere/archive/master.zip#egg=UCLDC-Deep-Harvester'
],
install_requires=[
'boto==2.49.0',
'pynux',
'python-magic==0.4.15',
'UCLDC-Deep-Harvester'
],
packages=['ucldc_iiif'],
test_suite='tests'
)
|
{
"content_hash": "070177b14afd18f66a3481f1e9467ee5",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 118,
"avg_line_length": 31.517241379310345,
"alnum_prop": 0.6575492341356673,
"repo_name": "barbarahui/ucldc-iiif",
"id": "2f0f88663011d4eebb3d274f9fc9324eba5fd874",
"size": "914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1894"
},
{
"name": "Python",
"bytes": "21477"
},
{
"name": "Shell",
"bytes": "5070"
}
],
"symlink_target": ""
}
|
import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs', sources=['sparsefuncs.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("random",
sources=["random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
{
"content_hash": "b57df42adfe8a7629c3c83d0915b98ce",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 70,
"avg_line_length": 34.64473684210526,
"alnum_prop": 0.5074060007595899,
"repo_name": "JT5D/scikit-learn",
"id": "e617b95e1aa135436c883d4a7488b60b4273bc1e",
"size": "2633",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/utils/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.db import models
# Create your models here.
class Ranking(models.Model):
nth = models.PositiveSmallIntegerField()
nyear = models.PositiveSmallIntegerField()
rank = models.PositiveSmallIntegerField()
previous_rank = models.PositiveSmallIntegerField(null=True)
first_appearance = models.PositiveSmallIntegerField(null=True)
first_rank = models.PositiveSmallIntegerField(null=True)
name = models.CharField(max_length=64, null=True, blank=True)
computer = models.CharField(max_length=256)
site = models.CharField(max_length=64)
manufacturer = models.CharField(max_length=32)
reference = models.CharField(max_length=8)
city = models.CharField(max_length=32, null=True, blank=True)
year = models.CharField(max_length=32)
segment = models.CharField(max_length=32)
total_cores = models.IntegerField()
accelerator_cores = models.IntegerField(null=True)
rmax = models.FloatField()
rpeak = models.FloatField()
efficiency = models.FloatField()
nmax = models.IntegerField(null=True)
nhalf = models.IntegerField(null=True)
power = models.FloatField(null=True)
mflops_watt = models.FloatField(null=True)
architecture = models.CharField(max_length=32, null=True, blank=True)
processor = models.CharField(max_length=32, null=True, blank=True)
processor_technology = models.CharField(max_length=32, null=True, blank=True)
processor_speed = models.PositiveSmallIntegerField(null=True)
operating_system = models.CharField(max_length=32, null=True, blank=True)
os_family = models.CharField(max_length=12, null=True, blank=True)
accelerator = models.CharField(max_length=32, null=True, blank=True)
cores_per_socket = models.PositiveSmallIntegerField(null=True)
processor_generation = models.CharField(max_length=32, null=True, blank=True)
system_model = models.CharField(max_length=32, null=True, blank=True)
system_family = models.CharField(max_length=32, null=True, blank=True)
interconnect_family = models.CharField(max_length=32, null=True, blank=True)
interconnect = models.CharField(max_length=32, null=True, blank=True)
region = models.CharField(max_length=32, null=True, blank=True)
def __unicode__(self):
return self.computer
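# Illustrative queries (assumed usage, not part of the original file):
#   Ranking.objects.filter(nth=45).order_by('rank')[:10]   # top 10 of list 45
#   Ranking.objects.filter(segment='Research').count()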
|
{
"content_hash": "34d07d2758f5543ff872ae27541eb352",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 48.22222222222222,
"alnum_prop": 0.7769585253456222,
"repo_name": "hustyuantao/hpc",
"id": "f7a35b4288351eb78fc99017dde237231893a8a7",
"size": "2170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "top100/models.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "70567"
},
{
"name": "JavaScript",
"bytes": "19761"
},
{
"name": "Python",
"bytes": "16576"
}
],
"symlink_target": ""
}
|
import argparse
import csv
from tables import parameters
from .. import pairs
from ..db import FragmentsDb
from ..frozen import FrozenSimilarityMatrix
from ..hdf5 import SimilarityMatrix
def make_similarities_parser(subparsers):
"""Creates a parser for similarities sub commands
Args:
        subparsers (argparse.ArgumentParser): Parser to which sub commands are added
"""
sc = subparsers.add_parser('similarities', help='Similarity matrix').add_subparsers()
similar_sc(sc)
merge_pairs_sc(sc)
simmatrix_export_sc(sc)
simmatrix_import_sc(sc)
simmatrix_filter_sc(sc)
similarity_freeze_sc(sc)
similarity_thaw_sc(sc)
fpneigh2tsv_sc(sc)
histogram_sc(sc)
def similar_sc(subparsers):
    sc_help = 'Find the fragments closest to the query based on the similarity matrix'
sc = subparsers.add_parser('similar', help=sc_help)
sc.add_argument('pairsdbfn', type=str, help='hdf5 similarity matrix file or base url of kripodb webservice')
sc.add_argument('query', type=str, help='Query fragment identifier')
sc.add_argument('--out', type=argparse.FileType('w'), default='-',
help='Output file tab delimited (query, hit, similarity score)')
sc.add_argument('--cutoff',
type=float,
default=0.55,
help='Similarity cutoff (default: %(default)s)')
sc.set_defaults(func=pairs.similar_run)
def merge_pairs_sc(subparsers):
sc = subparsers.add_parser('merge', help='Combine pairs files into a new file')
sc.add_argument('ins', help='Input pair file in hdf5_compact format', nargs='+')
sc.add_argument('out', help='Output pair file in hdf5_compact format')
sc.set_defaults(func=pairs.merge)
def simmatrix_export_sc(subparsers):
sc = subparsers.add_parser('export', help='Export similarity matrix to tab delimited file')
sc.add_argument('simmatrixfn', type=str, help='Compact hdf5 similarity matrix filename')
sc.add_argument('outputfile', type=argparse.FileType('w'),
help='Tab delimited output file, use - for stdout')
sc.add_argument('--no_header', action='store_true', help='Output no header (default: %(default)s)')
sc.add_argument('--frag1', action='store_true', help='Only output *frag1 fragments (default: %(default)s)')
pdbhelp = 'Only output fragments which are from pdb code in file, one pdb code per line (default: %(default)s)'
sc.add_argument('--pdb', type=argparse.FileType('r'), help=pdbhelp)
sc.set_defaults(func=simmatrix_export_run)
def load_pdb_filter_file(pdbs_file):
pdbs = set()
for line in pdbs_file:
pdbs.add(line.strip().lower())
return pdbs
def pdb_filter(rows, pdbs):
for row in rows:
if row[0][:4] in pdbs and row[1][:4] in pdbs:
yield row
def frag1_filter(rows):
for row in rows:
if row[0].endswith('frag1') and row[1].endswith('frag1'):
yield row
def simmatrix_export_run(simmatrixfn, outputfile, no_header, frag1, pdb):
"""Export similarity matrix to tab delimited file
Args:
simmatrixfn (str): (Compact) hdf5 similarity matrix filename
outputfile (file): Tab delimited output file
no_header (bool): Output no header
frag1 (bool): Only output \*frag1
pdb (str): Filename with pdb codes inside
"""
simmatrix = pairs.open_similarity_matrix(simmatrixfn)
if pdb:
pdbs = load_pdb_filter_file(pdb)
else:
pdbs = None
writer = csv.writer(outputfile, delimiter="\t", lineterminator='\n')
with_header = not no_header
if with_header:
writer.writerow(['frag_id1', 'frag_id2', 'score'])
if frag1 and pdb:
writer.writerows(pdb_filter(frag1_filter(simmatrix), pdbs))
elif frag1:
writer.writerows(frag1_filter(simmatrix))
elif pdb:
writer.writerows(pdb_filter(simmatrix, pdbs))
else:
writer.writerows(simmatrix)
simmatrix.close()
def simmatrix_import_sc(subparsers):
sc = subparsers.add_parser('import',
help='Import similarity matrix from tab delimited file',
description='''When input has been split into chunks,
use `--ignore_upper_triangle` flag for similarities between same chunk.
This prevents storing pair a->b also as b->a.''')
sc.add_argument('inputfile', type=argparse.FileType('r'),
help='Input file, use - for stdin')
sc.add_argument('fragmentsdb',
default='fragments.db',
help='Name of fragments db file (default: %(default)s)')
sc.add_argument('simmatrixfn', type=str, help='Compact hdf5 similarity matrix file, will overwrite file if it exists')
sc.add_argument('--inputformat',
choices=['tsv', 'fpneigh'],
default='fpneigh',
help='tab delimited (tsv) or fpneigh formatted input (default: %(default)s)')
# Have to ask, because inputfile can be stdin so can't do 2 passes through file
sc.add_argument('--nrrows',
type=int,
default=2**16,
help='Number of rows in inputfile (default: %(default)s)')
sc.add_argument('--ignore_upper_triangle',
action='store_true',
help='Ignore upper triangle (default: %(default)s)')
sc.set_defaults(func=simmatrix_import_run)
def simmatrix_import_run(inputfile, fragmentsdb, simmatrixfn, inputformat, nrrows, ignore_upper_triangle=False):
if inputformat == 'tsv':
simmatrix_import_tsv(inputfile, fragmentsdb, simmatrixfn, nrrows, ignore_upper_triangle)
elif inputformat == 'fpneigh':
simmatrix_importfpneigh_run(inputfile, fragmentsdb, simmatrixfn, nrrows, ignore_upper_triangle)
def simmatrix_import_tsv(inputfile, fragmentsdb, simmatrixfn, nrrows, ignore_upper_triangle=False):
frags = FragmentsDb(fragmentsdb)
label2id = frags.label2id().materialize()
simmatrix = SimilarityMatrix(simmatrixfn, 'w',
expectedlabelrows=len(label2id),
expectedpairrows=nrrows)
reader = csv.reader(inputfile, delimiter="\t")
# ignore header
next(reader)
# simmatrix wants score as float instead of str
def csv_iter(rows):
for row in rows:
if row[0] == row[1]:
continue
if ignore_upper_triangle and row[0] > row[1]:
continue
row[2] = float(row[2])
yield row
simmatrix.update(csv_iter(reader), label2id)
simmatrix.close()
def simmatrix_importfpneigh_run(inputfile, fragmentsdb, simmatrixfn, nrrows, ignore_upper_triangle=False):
frags = FragmentsDb(fragmentsdb)
label2id = frags.label2id().materialize()
simmatrix = SimilarityMatrix(simmatrixfn, 'w',
expectedlabelrows=len(label2id),
expectedpairrows=nrrows)
simmatrix.update(read_fpneighpairs_file(inputfile, ignore_upper_triangle), label2id)
simmatrix.close()
def simmatrix_filter_sc(subparsers):
sc = subparsers.add_parser('filter', help='Filter similarity matrix')
sc.add_argument('input', type=str,
help='Input hdf5 similarity matrix file')
sc.add_argument('output', type=str,
help='Output hdf5 similarity matrix file, will overwrite file if it exists')
group = sc.add_mutually_exclusive_group()
group.add_argument('--fragmentsdb',
help='Name of fragments db file, '
'fragments in it will be kept as well as their pair counter parts.')
group.add_argument('--skip', type=argparse.FileType('r'), help='File with fragment identifiers on each line to skip')
sc.set_defaults(func=simmatrix_filter)
def simmatrix_filter(input, output, fragmentsdb, skip):
simmatrix_in = SimilarityMatrix(input)
if fragmentsdb:
frags = FragmentsDb(fragmentsdb)
expectedlabelrows = len(frags)
labelsin = len(simmatrix_in.labels)
expectedpairrows = int(len(simmatrix_in.pairs) * (float(expectedlabelrows) / labelsin))
simmatrix_out = SimilarityMatrix(output,
'w',
expectedlabelrows=expectedlabelrows,
expectedpairrows=expectedpairrows,
)
frag_labels2keep = set(frags.id2label().values())
simmatrix_in.keep(simmatrix_out, frag_labels2keep)
if skip:
labels2skip = set()
for line in skip:
labels2skip.add(line.strip())
labelsin = len(simmatrix_in.labels)
expectedlabelrows = labelsin - len(labels2skip)
expectedpairrows = int(len(simmatrix_in.pairs) * (float(expectedlabelrows) / labelsin))
simmatrix_out = SimilarityMatrix(output,
'w',
expectedlabelrows=expectedlabelrows,
expectedpairrows=expectedpairrows,
)
simmatrix_in.skip(simmatrix_out, labels2skip)
simmatrix_in.close()
simmatrix_out.close()
def similarity_freeze_sc(subparsers):
sc = subparsers.add_parser('freeze', help='Optimize similarity matrix for reading')
sc.add_argument('in_fn', type=str, help='Input pairs file')
sc.add_argument('out_fn', type=str, help='Output array file, file is overwritten')
sc.add_argument('-f', '--frame_size', type=int, default=10**8, help='Size of frame (default: %(default)s)')
sc.add_argument('-m', '--memory', type=int, default=1, help='Memory cache in Gigabytes (default: %(default)s)')
sc.add_argument('-l', '--limit', type=int, help='Number of pairs to copy, None for no limit (default: %(default)s)')
sc.add_argument('-s', '--single_sided', action='store_true', help='Store half matrix (default: %(default)s)')
sc.set_defaults(func=similarity_freeze_run)
def similarity_freeze_run(in_fn, out_fn, frame_size, memory, limit, single_sided):
dm = SimilarityMatrix(in_fn, 'r')
parameters.CHUNK_CACHE_SIZE = memory * 1024 ** 3
parameters.CHUNK_CACHE_NELMTS = 2 ** 14
dfm = FrozenSimilarityMatrix(out_fn, 'w')
dfm.from_pairs(dm, frame_size, limit, single_sided)
dm.close()
dfm.close()
def similarity_thaw_sc(subparsers):
sc = subparsers.add_parser('thaw', help='Optimize similarity matrix for writing')
sc.add_argument('in_fn', type=str, help='Input packed frozen matrix file')
sc.add_argument('out_fn', type=str, help='Output pairs file, file is overwritten')
sc.add_argument('--nonzero_fraction',
type=float,
default=0.012,
help='Fraction of pairs which have score above threshold (default: %(default)s)')
sc.set_defaults(func=similarity_thaw_run)
def similarity_thaw_run(in_fn, out_fn, nonzero_fraction):
fsm = FrozenSimilarityMatrix(in_fn, 'r')
nr_scores = int(fsm.scores.shape[0] * fsm.scores.shape[1] * nonzero_fraction)
nr_labels = fsm.labels.shape[0]
sm = SimilarityMatrix(out_fn, 'w', expectedpairrows=nr_scores, expectedlabelrows=nr_labels)
fsm.to_pairs(sm)
sm.close()
fsm.close()
def read_fpneighpairs_file(inputfile, ignore_upper_triangle=False):
"""Read fpneigh formatted similarity matrix file.
Args:
inputfile (File): File object to read
ignore_upper_triangle (bool): Ignore upper triangle of input
Yields:
Tuple((Str,Str,Float)): List of (query fragment identifier, hit fragment identifier, similarity score)
"""
current_query = None
reader = csv.reader(inputfile, delimiter=' ', skipinitialspace=True)
for row in reader:
if len(row) == 2 and current_query != row[0]:
if ignore_upper_triangle and current_query > row[0]:
continue
yield (current_query, row[0], float(row[1]))
elif len(row) == 4:
current_query = row[3][:-1]
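# Format sketch inferred from the parser above (the exact header wording is an
# assumption): a four-token header line whose last token is the query id
# followed by ':', then two-token '<hit_id> <score>' lines, e.g.
#
#   Similar fragments of 2mlm_2W7_frag1:
#   2mlm_2W7_frag2 0.5877
#
# which yields the tuple ('2mlm_2W7_frag1', '2mlm_2W7_frag2', 0.5877).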
def fpneigh2tsv_sc(subparsers):
sc = subparsers.add_parser('fpneigh2tsv', help='Convert fpneigh formatted file to tab delimited file')
sc.add_argument('inputfile', type=argparse.FileType('r'),
help='Input file, use - for stdin')
sc.add_argument('outputfile', type=argparse.FileType('w'),
help='Tab delimited output file, use - for stdout')
sc.set_defaults(func=fpneigh2tsv_run)
def fpneigh2tsv_run(inputfile, outputfile):
reader = read_fpneighpairs_file(inputfile)
writer = csv.writer(outputfile, delimiter="\t", lineterminator='\n')
writer.writerow(['frag_id1', 'frag_id2', 'score'])
writer.writerows(reader)
def histogram_sc(subparsers):
sc = subparsers.add_parser('histogram', help='Distribution of similarity scores')
sc.add_argument('inputfile', type=str, help='Filename of similarity matrix hdf5 file')
sc.add_argument('outputfile', type=argparse.FileType('w'),
help='Tab delimited output file, use - for stdout')
sc.add_argument('-f', '--frame_size', type=int, default=10**8, help='Size of frame (default: %(default)s)')
sc.add_argument('-r', '--raw_score',
action='store_true',
help='Return raw score (16 bit integer) instead of fraction score')
sc.add_argument('-l', '--lower_triangle',
action='store_true',
help='Return scores from lower triangle else return scores from upper triangle')
sc.set_defaults(func=histogram)
def histogram(inputfile, outputfile, frame_size, raw_score, lower_triangle):
matrix = pairs.open_similarity_matrix(inputfile)
counts = matrix.count(frame_size=frame_size, raw_score=raw_score, lower_triangle=lower_triangle)
writer = csv.writer(outputfile, delimiter="\t", lineterminator='\n')
writer.writerow(['score', 'count'])
writer.writerows(counts)
matrix.close()
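# Illustrative CLI sketch (the top-level command name `kripodb` is an
# assumption; the sub-command names and flags are taken from the parsers
# defined above):
#
#   kripodb similarities similar sim_matrix.h5 SOME_FRAG_ID --cutoff 0.7
#   kripodb similarities freeze sim_matrix.h5 sim_matrix_frozen.h5
#   kripodb similarities histogram sim_matrix.h5 hist.tsv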
|
{
"content_hash": "d7701cbdfdac94f83def9756e39c004d",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 122,
"avg_line_length": 42.205357142857146,
"alnum_prop": 0.6321133911571821,
"repo_name": "3D-e-Chem/python-modified-tanimoto",
"id": "36764815fa60854d25f5cf6f5acb2f74c7f100fc",
"size": "14181",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kripodb/script/similarities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "107247"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('achat', '0006_achat_taxable'),
]
operations = [
migrations.AlterModelOptions(
name='achat',
options={'ordering': ['-date', '-pk']},
),
]
|
{
"content_hash": "bc9fb884719bf596b172b449afb1fbf8",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 51,
"avg_line_length": 20.294117647058822,
"alnum_prop": 0.5739130434782609,
"repo_name": "kilisimba/gluten-free-tax",
"id": "434626f2e5c596991b8f59107c5eb47c4736e936",
"size": "369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "achat/migrations/0007_auto_20150415_2258.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1802"
},
{
"name": "HTML",
"bytes": "7222"
},
{
"name": "JavaScript",
"bytes": "4022"
},
{
"name": "Python",
"bytes": "82074"
}
],
"symlink_target": ""
}
|
"""
This package contains all the tests of the nstl.sema subpackage.
"""
__all__ = ['test_scope']
if __name__ == "__main__":
pass
|
{
"content_hash": "cbc646b3e12ae8bec809038ab353c7e9",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 64,
"avg_line_length": 15.222222222222221,
"alnum_prop": 0.5766423357664233,
"repo_name": "ldionne/nstl-lang",
"id": "74a89645f9a9c82ca2f94dd32db8bd379ab776d6",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/sema/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "61512"
},
{
"name": "Python",
"bytes": "282833"
}
],
"symlink_target": ""
}
|
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.9.2"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import urllib
import base64
import os
import copy
import calendar
import time
import random
import errno
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
# prior to Python 2.5, these were separate modules
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
try:
import socks
except (ImportError, AttributeError):
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
ssl_SSLError = ssl.SSLError
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if disable_validation:
cert_reqs = ssl.CERT_NONE
else:
cert_reqs = ssl.CERT_REQUIRED
# We should be specifying SSL version 3 or TLS v1, but the ssl module
# doesn't expose the necessary knobs. So we need to go with the default
# of SSLv23.
return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
ssl_SSLError = None
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if not disable_validation:
raise CertificateValidationUnsupported(
"SSL certificate validation is not supported without "
"the ssl module installed. To avoid this error, install "
"the ssl module, or explicity disable validation.")
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = [
'Http', 'Response', 'ProxyInfo', 'HttpLib2Error', 'RedirectMissingLocation',
'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError',
'UnimplementedHmacDigestAuthOptionError',
'debuglevel', 'ProxiesUnavailableError']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 1
# Python 2.3 support
if sys.version_info < (2,4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise httplib.ResponseNotReady()
return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class NotSupportedOnThisPlatform(HttpLib2Error): pass
class CertificateHostnameMismatch(SSLHandshakeError):
def __init__(self, desc, host, cert):
HttpLib2Error.__init__(self, desc)
self.host = host
self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200, maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
try:
# Users can optionally provide a module that tells us where the CA_CERTS
# are located.
import ca_certs_locater
CA_CERTS = ca_certs_locater.get()
except ImportError:
# Default CA certificates file bundled with httplib2.
CA_CERTS = os.path.join(
os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
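# Illustration (added for exposition, not part of the original library):
# given a response dict
#   {'connection': 'x-internal', 'x-internal': '1', 'content-type': 'text/plain'}
# the header named by 'connection' is treated as hop-by-hop as well, so only
# ['content-type'] remains as an end-to-end header.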
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
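# A quick illustration (added for exposition, not part of the original library):
#   parse_uri("http://example.com/a/b?x=1#frag")
#   -> ('http', 'example.com', '/a/b', 'x=1', 'frag')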
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
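# Illustration (added for exposition): scheme and authority are lower-cased and
# an empty path becomes "/", e.g.
#   urlnorm("HTTP://Example.COM")
#   -> ('http', 'example.com', '/', 'http://example.com/')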
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme.match(filename):
if isinstance(filename,str):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,unicode):
filename=filename.encode('utf-8')
filesha1 = _sha(filename).hexdigest()
filename = re_url_scheme.sub("", filename)
filename = re_slash.sub(",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return ",".join((filename, filesha1))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    return dict([ (key.lower(), NORMALIZE_SPACE.sub(' ', value).strip())  for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
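def _example_parse_cache_control():
    # Minimal sketch (added for exposition, not part of the original library):
    # directives with a value keep it as a string and bare directives map to 1,
    # so this returns {'max-age': '3600', 'no-cache': 1}.
    return _parse_cache_control({'cache-control': 'max-age=3600, no-cache'})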
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
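def _example_parse_www_authenticate():
    # Minimal sketch (added for exposition, not part of the original library):
    # each challenge scheme maps to a dict of its auth-params, here
    # {'digest': {'realm': 'test', 'nonce': 'abc', 'qop': 'auth'}}.
    return _parse_www_authenticate(
        {'www-authenticate': 'Digest realm="test", nonce="abc", qop="auth"'})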
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
    Note that this algorithm is simpler than you might think
because we are operating as a private (non-shared) cache.
This lets us ignore 's-maxage'. We can also ignore
'proxy-invalidate' since we aren't a proxy.
    As a design decision we will never return a stale document
    as fresh, hence the non-implementation of 'max-stale'.
    This also lets us safely ignore 'must-revalidate'
since we operate as if every server has sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif cc.has_key('no-cache'):
retval = "TRANSPARENT"
elif cc_response.has_key('no-cache'):
retval = "STALE"
elif cc.has_key('only-if-cached'):
retval = "FRESH"
elif response_headers.has_key('date'):
date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if cc_response.has_key('max-age'):
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif response_headers.has_key('expires'):
expires = email.Utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if cc.has_key('max-age'):
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if cc.has_key('min-fresh'):
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
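def _example_entry_disposition():
    # Minimal sketch (added for exposition, not part of the original library):
    # a response stamped with the current date and max-age=3600 is "FRESH"
    # for the next hour; without any freshness information it would be "STALE".
    response = {'date': email.Utils.formatdate(time.time()),
                'cache-control': 'max-age=3600'}
    return _entry_disposition(response, {})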
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way that won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if cc.has_key('no-store') or cc_response.has_key('no-store'):
cache.delete(cachekey)
else:
info = email.Message.Message()
for key, value in response_headers.iteritems():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = "".join([status_header, header_str, content])
cache.set(cachekey, text)
def _cnonce():
    dig = _sha("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 10)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header. Over-ride this in sub-classes."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
        Over-ride this in sub-classes if necessary.
        Return TRUE if the request is to be retried, for
example Digest may return stale=true.
"""
return False
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (
self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)))
headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'])
if self.challenge.get('opaque'):
headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
self.challenge['nc'] += 1
def response(self, response, content):
if not response.has_key('authentication-info'):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if updated_challenge.has_key('nextnonce'):
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer (t.broyer@ltgt.net)"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']])
self.key = self.pwhashmod.new(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
lines = content.split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = file(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = file(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class AllHosts(object):
pass
class ProxyInfo(object):
"""Collect information required to use a proxy."""
bypass_hosts = ()
def __init__(self, proxy_type, proxy_host, proxy_port,
proxy_rdns=True, proxy_user=None, proxy_pass=None):
"""
Args:
proxy_type: The type of proxy server. This must be set to one of
socks.PROXY_TYPE_XXX constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
proxy_host='localhost', proxy_port=8000)
proxy_host: The hostname or IP address of the proxy server.
proxy_port: The port that the proxy server is running on.
proxy_rdns: If True (default), DNS queries will not be performed
locally, and instead, handed to the proxy to resolve. This is useful
if the network does not allow resolution of non-local names. In
httplib2 0.9 and earlier, this defaulted to False.
proxy_user: The username used to authenticate with the proxy server.
proxy_pass: The password used to authenticate with the proxy server.
"""
self.proxy_type = proxy_type
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_rdns = proxy_rdns
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port,
self.proxy_rdns, self.proxy_user, self.proxy_pass)
def isgood(self):
return (self.proxy_host != None) and (self.proxy_port != None)
def applies_to(self, hostname):
return not self.bypass_host(hostname)
def bypass_host(self, hostname):
"""Has this host been excluded from the proxy config"""
if self.bypass_hosts is AllHosts:
return True
bypass = False
for domain in self.bypass_hosts:
if hostname.endswith(domain):
bypass = True
return bypass
def proxy_info_from_environment(method='http'):
"""
Read proxy info from the environment variables.
"""
if method not in ['http', 'https']:
return
env_var = method + '_proxy'
url = os.environ.get(env_var, os.environ.get(env_var.upper()))
if not url:
return
pi = proxy_info_from_url(url, method)
no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', ''))
bypass_hosts = []
if no_proxy:
bypass_hosts = no_proxy.split(',')
# special case, no_proxy=* means all hosts bypassed
if no_proxy == '*':
bypass_hosts = AllHosts
pi.bypass_hosts = bypass_hosts
return pi
def proxy_info_from_url(url, method='http'):
"""
Construct a ProxyInfo from a URL (such as http_proxy env var)
"""
url = urlparse.urlparse(url)
username = None
password = None
port = None
if '@' in url[1]:
ident, host_port = url[1].split('@', 1)
if ':' in ident:
username, password = ident.split(':', 1)
else:
password = ident
else:
host_port = url[1]
if ':' in host_port:
host, port = host_port.split(':', 1)
else:
host = host_port
if port:
port = int(port)
else:
port = dict(https=443, http=80)[method]
proxy_type = 3 # socks.PROXY_TYPE_HTTP
return ProxyInfo(
proxy_type = proxy_type,
proxy_host = host,
proxy_port = port,
proxy_user = username or None,
proxy_pass = password or None,
)
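def _example_proxy_info_from_url():
    # Minimal sketch (added for exposition, not part of the original library):
    # credentials, host and port are parsed from the proxy URL; the port falls
    # back to 80 (http) or 443 (https) when absent. Returns
    # ('proxy.example.com', 3128, 'joe', 'secret').
    pi = proxy_info_from_url('http://joe:secret@proxy.example.com:3128')
    return (pi.proxy_host, pi.proxy_port, pi.proxy_user, pi.proxy_pass)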
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
"""
HTTPConnection subclass that supports timeouts
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
if self.proxy_info and socks is None:
raise ProxiesUnavailableError(
'Proxy support missing but proxy use was requested!')
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
host = proxy_host
port = proxy_port
else:
use_proxy = False
host = self.host
port = self.port
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if use_proxy:
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
self.sock = socket.socket(af, socktype, proto)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print "connect: (%s, %s) ************" % (self.host, self.port)
if use_proxy:
print "proxy: %s ************" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
self.sock.connect((self.host, self.port) + sa[2:])
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"""
This class allows communication via SSL.
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None,
ca_certs=None, disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port,
key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
if ca_certs is None:
ca_certs = CA_CERTS
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# The following two methods were adapted from https_wrapper.py, released
# with the Google Appengine SDK at
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
# under the following license:
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def _GetValidHostsForCert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName']
if x[0].lower() == 'dns']
else:
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _ValidateCertificateHostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._GetValidHostsForCert(cert)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
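# Illustrative sketch of the matching rule above: for a certificate carrying
# subjectAltName ('DNS', '*.example.com'), the glob becomes the regex
# '[^.]*\.example\.com', so 'www.example.com' validates while 'example.com'
# and 'a.b.example.com' do not (the wildcard cannot match across a dot).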
def connect(self):
"Connect to a host on a given (SSL) port."
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
host = proxy_host
port = proxy_port
else:
use_proxy = False
host = self.host
port = self.port
address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
for family, socktype, proto, canonname, sockaddr in address_info:
try:
if use_proxy:
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
self.sock = _ssl_wrap_socket(
sock, self.key_file, self.cert_file,
self.disable_ssl_certificate_validation, self.ca_certs)
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if not self.disable_ssl_certificate_validation:
cert = self.sock.getpeercert()
# Strip any port before matching against the certificate; maxsplit=0
# would leave the string unsplit and break the comparison.
hostname = self.host.split(':', 1)[0]
if not self._ValidateCertificateHostname(cert, hostname):
raise CertificateHostnameMismatch(
'Server presented certificate that does not match '
'host %s: %s' % (hostname, cert), hostname, cert)
except ssl_SSLError, e:
if sock:
sock.close()
if self.sock:
self.sock.close()
self.sock = None
# Unfortunately the ssl module doesn't seem to provide any way
# to get at more detailed error information, in particular
# whether the error is due to certificate validation or
# something else (such as SSL protocol mismatch).
if e.errno == ssl.SSL_ERROR_SSL:
raise SSLHandshakeError(e)
else:
raise
except (socket.timeout, socket.gaierror):
raise
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
SCHEME_TO_CONNECTION = {
'http': HTTPConnectionWithTimeout,
'https': HTTPSConnectionWithTimeout
}
# Use a different connection object for Google App Engine
try:
try:
from google.appengine.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
raise ImportError # Bail out; we're not actually running on App Engine.
from google.appengine.api.urlfetch import fetch
from google.appengine.api.urlfetch import InvalidURLError
except (ImportError, AttributeError):
from google3.apphosting.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
raise ImportError # Bail out; we're not actually running on App Engine.
from google3.apphosting.api.urlfetch import fetch
from google3.apphosting.api.urlfetch import InvalidURLError
def _new_fixed_fetch(validate_certificate):
def fixed_fetch(url, payload=None, method="GET", headers={},
allow_truncated=False, follow_redirects=True,
deadline=None):
if deadline is None:
deadline = socket.getdefaulttimeout() or 5
return fetch(url, payload=payload, method=method, headers=headers,
allow_truncated=allow_truncated,
follow_redirects=follow_redirects, deadline=deadline,
validate_certificate=validate_certificate)
return fixed_fetch
class AppEngineHttpConnection(httplib.HTTPConnection):
"""Use httplib on App Engine, but compensate for its weirdness.
The parameters key_file, cert_file, proxy_info, ca_certs, and
disable_ssl_certificate_validation are all dropped on the ground.
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
httplib.HTTPConnection.__init__(self, host, port=port,
strict=strict, timeout=timeout)
class AppEngineHttpsConnection(httplib.HTTPSConnection):
"""Same as AppEngineHttpConnection, but for HTTPS URIs."""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port,
key_file=key_file,
cert_file=cert_file, strict=strict,
timeout=timeout)
self._fetch = _new_fixed_fetch(
not disable_ssl_certificate_validation)
# Update the connection classes to use the Google App Engine specific ones.
SCHEME_TO_CONNECTION = {
'http': AppEngineHttpConnection,
'https': AppEngineHttpsConnection
}
except (ImportError, AttributeError):
pass
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None,
proxy_info=proxy_info_from_environment,
ca_certs=None, disable_ssl_certificate_validation=False):
"""If 'cache' is a string then it is used as a directory name for
a disk cache. Otherwise it must be an object that supports the
same interface as FileCache.
All timeouts are in seconds. If None is passed for timeout
then Python's default timeout for sockets will be used. See
for example the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
`proxy_info` may be:
- a callable that takes the http scheme ('http' or 'https') and
returns a ProxyInfo instance per request. By default, uses
proxy_info_from_environment.
- a ProxyInfo instance (static proxy config).
- None (proxy disabled).
ca_certs is the path of a file containing root CA certificates for SSL
server certificate validation. By default, a CA cert file bundled with
httplib2 is used.
If disable_ssl_certificate_validation is true, SSL cert validation will
not be performed.
"""
self.proxy_info = proxy_info
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, basestring):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT", "PATCH"]
# If 'follow_redirects' is True, and this is set to True then
# all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
# Keep Authorization: headers on a redirect.
self.forward_authorization_headers = False
def __getstate__(self):
state_dict = copy.copy(self.__dict__)
# In case request is augmented by some foreign object such as
# credentials which handle auth
if 'request' in state_dict:
del state_dict['request']
if 'connections' in state_dict:
del state_dict['connections']
return state_dict
def __setstate__(self, state):
self.__dict__.update(state)
self.connections = {}
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
i = 0
seen_bad_status_line = False
while i < RETRIES:
i += 1
try:
if hasattr(conn, 'sock') and conn.sock is None:
conn.connect()
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except ssl_SSLError:
conn.close()
raise
except socket.error, e:
err = 0
if hasattr(e, 'args'):
err = getattr(e, 'args')[0]
else:
err = e.errno
if err in (errno.ENETUNREACH, errno.EADDRNOTAVAIL) and i < RETRIES:
continue # retry on potentially transient socket errors
raise
except httplib.HTTPException:
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
if hasattr(conn, 'sock') and conn.sock is None:
if i < RETRIES-1:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
if i < RETRIES-1:
conn.close()
conn.connect()
continue
try:
response = conn.getresponse()
except httplib.BadStatusLine:
# If we get a BadStatusLine on the first try then that means
# the connection just went stale, so retry regardless of the
# number of RETRIES set.
if not seen_bad_status_line and i == 1:
i = 0
seen_bad_status_line = True
conn.close()
conn.connect()
continue
else:
conn.close()
raise
except (socket.error, httplib.HTTPException):
if i < RETRIES-1:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
else:
content = ""
if method == "HEAD":
conn.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if 'authorization' in headers and not self.forward_authorization_headers:
del headers['authorization']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = method
if response.status in [302, 303]:
if 'content-length' in headers:
del headers['content-length']
redirect_method = "GET"
body = None
(response, content) = self.request(
location, method=redirect_method,
body=body, headers=headers,
redirections=redirections - 1)
response.previous = old_response
else:
raise RedirectLimit("Redirected more times than redirection_limit allows.", response, content)
elif response.status in [200, 203] and method in ["GET", "HEAD"]:
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin with either
'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
etc. There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a
string object.
Any extra headers that are to be sent with the request should be
provided in the 'headers' dictionary.
The maximum number of redirects to follow before raising an
exception is 'redirections'. The default is 5.
The return value is a tuple of (response, content), the first
being an instance of the 'Response' class, the second being
a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
proxy_info = self._get_proxy_info(scheme, authority)
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = SCHEME_TO_CONNECTION[scheme]
certs = list(self.certificates.iter(authority))
if scheme == 'https':
if certs:
conn = self.connections[conn_key] = connection_type(
authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info)
conn.set_debuglevel(debuglevel)
if 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri.encode('utf-8')
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except (IndexError, ValueError):
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, None) != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
if redirections <= 0:
raise RedirectLimit("Redirected more times than redirection_limit allows.", {}, "")
(response, new_content) = self.request(
info['-x-permanent-redirect-url'], method='GET',
headers=headers, redirections=redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
# There seems to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if cc.has_key('only-if-cached'):
info['status'] = '504'
response = Response(info)
content = ""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = "Request Timeout"
response = Response({
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response({
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
def _get_proxy_info(self, scheme, authority):
"""Return a ProxyInfo instance (or None) based on the scheme
and authority.
"""
hostname, port = urllib.splitport(authority)
proxy_info = self.proxy_info
if callable(proxy_info):
proxy_info = proxy_info(scheme)
if (hasattr(proxy_info, 'applies_to')
and not proxy_info.applies_to(hostname)):
proxy_info = None
return proxy_info
class Response(dict):
"""An object more like email.Message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
def __init__(self, info):
# info is either an email.Message or
# an httplib.HTTPResponse object.
if isinstance(info, httplib.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.Message.Message):
for key, value in info.items():
self[key.lower()] = value
self.status = int(self['status'])
else:
for key, value in info.iteritems():
self[key.lower()] = value
self.status = int(self.get('status', self.status))
self.reason = self.get('reason', self.reason)
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError, name
|
{
"content_hash": "b10208b5e19734d9576a0cd330d29729",
"timestamp": "",
"source": "github",
"line_count": 1698,
"max_line_length": 235,
"avg_line_length": 41.957597173144876,
"alnum_prop": 0.5764836337095054,
"repo_name": "SWENG500-Team1/FitnessForSplunk",
"id": "7b9aa8426a121f049adf40412b5834b0dcf249ac",
"size": "71244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fitness_for_splunk/bin/httplib2/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "141"
},
{
"name": "CSS",
"bytes": "1022"
},
{
"name": "Go",
"bytes": "42675"
},
{
"name": "HTML",
"bytes": "31344"
},
{
"name": "JavaScript",
"bytes": "140687"
},
{
"name": "Python",
"bytes": "1570400"
},
{
"name": "Shell",
"bytes": "237"
}
],
"symlink_target": ""
}
|
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import warnings
import numpy as np
import pandas._libs.index as libindex
import pandas._libs.sparse as splib
from pandas._libs.sparse import BlockIndex, IntIndex
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import is_integer, is_scalar
from pandas.core.dtypes.generic import ABCSeries, ABCSparseSeries
from pandas.core.dtypes.missing import isna, notna
from pandas.core import generic
from pandas.core.arrays import SparseArray
from pandas.core.arrays.sparse import SparseAccessor
from pandas.core.index import Index
from pandas.core.internals import SingleBlockManager
import pandas.core.ops as ops
from pandas.core.series import Series
from pandas.core.sparse.scipy_sparse import (
_coo_to_sparse_series, _sparse_series_to_coo)
_shared_doc_kwargs = dict(axes='index', klass='SparseSeries',
axes_single_arg="{0, 'index'}",
optional_labels='', optional_axis='')
class SparseSeries(Series):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
.. versionchanged:: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
kind : {'block', 'integer'}
fill_value : float
Code for missing value. Default depends on dtype:
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseSeries objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
_subtyp = 'sparse_series'
def __init__(self, data=None, index=None, sparse_index=None, kind='block',
fill_value=None, name=None, dtype=None, copy=False,
fastpath=False):
# TODO: Most of this should be refactored and shared with Series
# 1. BlockManager -> array
# 2. Series.index, Series.name, index, name reconciliation
# 3. Implicit reindexing
# 4. Implicit broadcasting
# 5. Dict construction
if data is None:
data = []
elif isinstance(data, SingleBlockManager):
index = data.index
data = data.blocks[0].values
elif isinstance(data, (ABCSeries, ABCSparseSeries)):
index = data.index if index is None else index
dtype = data.dtype if dtype is None else dtype
name = data.name if name is None else name
if index is not None:
data = data.reindex(index)
elif isinstance(data, compat.Mapping):
data, index = Series()._init_dict(data, index=index)
elif is_scalar(data) and index is not None:
data = np.full(len(index), fill_value=data)
super(SparseSeries, self).__init__(
SparseArray(data,
sparse_index=sparse_index,
kind=kind,
dtype=dtype,
fill_value=fill_value,
copy=copy),
index=index, name=name,
copy=False, fastpath=fastpath
)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# avoid infinite recursion for other SparseSeries inputs
inputs = tuple(
x.values if isinstance(x, type(self)) else x
for x in inputs
)
result = self.values.__array_ufunc__(ufunc, method, *inputs, **kwargs)
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=result.fill_value,
copy=False).__finalize__(self)
def __array_wrap__(self, result, context=None):
"""
Gets called prior to a ufunc (and after)
See SparseArray.__array_wrap__ for detail.
"""
result = self.values.__array_wrap__(result, context=context)
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=result.fill_value,
copy=False).__finalize__(self)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.name = getattr(obj, 'name', None)
self.fill_value = getattr(obj, 'fill_value', None)
# unary ops
# TODO: See if this can be shared
def __pos__(self):
result = self.values.__pos__()
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=result.fill_value,
copy=False).__finalize__(self)
def __neg__(self):
result = self.values.__neg__()
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=result.fill_value,
copy=False).__finalize__(self)
def __invert__(self):
result = self.values.__invert__()
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=result.fill_value,
copy=False).__finalize__(self)
@property
def block(self):
warnings.warn("SparseSeries.block is deprecated.", FutureWarning,
stacklevel=2)
return self._data._block
@property
def fill_value(self):
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
self.values.fill_value = v
@property
def sp_index(self):
return self.values.sp_index
@property
def sp_values(self):
return self.values.sp_values
@property
def npoints(self):
return self.values.npoints
@classmethod
def from_array(cls, arr, index=None, name=None, copy=False,
fill_value=None, fastpath=False):
"""Construct SparseSeries from array.
.. deprecated:: 0.23.0
Use the pd.SparseSeries(..) constructor instead.
"""
warnings.warn("'from_array' is deprecated and will be removed in a "
"future version. Please use the pd.SparseSeries(..) "
"constructor instead.", FutureWarning, stacklevel=2)
return cls(arr, index=index, name=name, copy=copy,
fill_value=fill_value, fastpath=fastpath)
@property
def _constructor(self):
return SparseSeries
@property
def _constructor_expanddim(self):
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame
@property
def kind(self):
if isinstance(self.sp_index, BlockIndex):
return 'block'
elif isinstance(self.sp_index, IntIndex):
return 'integer'
def as_sparse_array(self, kind=None, fill_value=None, copy=False):
""" return my self as a sparse array, do not copy by default """
if fill_value is None:
fill_value = self.fill_value
if kind is None:
kind = self.kind
return SparseArray(self.values, sparse_index=self.sp_index,
fill_value=fill_value, kind=kind, copy=copy)
def __unicode__(self):
# currently, unicode is same as repr...fixes infinite loop
series_rep = Series.__unicode__(self)
rep = '{series}\n{index!r}'.format(series=series_rep,
index=self.sp_index)
return rep
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform a reduction operation """
return op(self.get_values(), skipna=skipna, **kwds)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
fill_value=self.fill_value, name=self.name)
def _unpickle_series_compat(self, state):
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
index, fill_value, sp_index = own_state[:3]
name = None
if len(own_state) > 3:
name = own_state[3]
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sp_index,
fill_value=fill_value, copy=False)
# recreate
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data)
self._set_axis(0, index)
self.name = name
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'sparse_time_series')
else:
object.__setattr__(self, '_subtyp', 'sparse_series')
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the SparseSeries by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis)
else:
return self._get_val_at(i)
def _get_val_at(self, loc):
""" forward to the array """
return self.values._get_val_at(loc)
def __getitem__(self, key):
# TODO: Document difference from Series.__getitem__, deprecate,
# and remove!
if is_integer(key) and key not in self.index:
return self._get_val_at(key)
else:
return super(SparseSeries, self).__getitem__(key)
def _get_values(self, indexer):
try:
return self._constructor(self._data.get_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
return self[indexer]
def _set_with_engine(self, key, value):
return self._set_value(key, value)
def abs(self):
"""
Return an object with absolute value taken. Only applicable to objects
that are all numeric
Returns
-------
abs: same type as caller
"""
return self._constructor(np.abs(self.values),
index=self.index).__finalize__(self)
def get(self, label, default=None):
"""
Returns value occupying requested label, default to specified
missing value if not present. Analogous to dict.get
Parameters
----------
label : object
Label value looking for
default : object, optional
Value to return if label not in index
Returns
-------
y : scalar
"""
if label in self.index:
loc = self.index.get_loc(label)
return self._get_val_at(loc)
else:
return default
def get_value(self, label, takeable=False):
"""
Retrieve single value at passed index label
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(label, takeable=takeable)
def _get_value(self, label, takeable=False):
loc = label if takeable is True else self.index.get_loc(label)
return self._get_val_at(loc)
_get_value.__doc__ = get_value.__doc__
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Series
Returns
-------
series : SparseSeries
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(label, value, takeable=takeable)
def _set_value(self, label, value, takeable=False):
values = self.to_dense()
# if the label doesn't exist, we will create a new object here
# and possibly change the index
new_values = values._set_value(label, value, takeable=takeable)
if new_values is not None:
values = new_values
new_index = values.index
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, new_index)
self._index = new_index
_set_value.__doc__ = set_value.__doc__
def _set_values(self, key, value):
# this might be inefficient as we have to recreate the sparse array
# rather than setting individual elements, but have to convert
# the passed slice/boolean that's in dense space into a sparse indexer
# not sure how to do that!
if isinstance(key, Series):
key = key.values
values = self.values.to_dense()
values[key] = libindex.convert_scalar(values, value)
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, self.index)
def to_dense(self):
"""
Convert SparseSeries to a Series.
Returns
-------
s : Series
"""
return Series(self.values.to_dense(), index=self.index,
name=self.name)
@property
def density(self):
return self.values.density
def copy(self, deep=True):
"""
Make a copy of the SparseSeries. Only the actual sparse values need to
be copied
"""
# TODO: https://github.com/pandas-dev/pandas/issues/22314
# We skip the block manager till that is resolved.
new_data = self.values.copy(deep=deep)
return self._constructor(new_data, sparse_index=self.sp_index,
fill_value=self.fill_value,
index=self.index.copy(),
name=self.name).__finalize__(self)
@Substitution(**_shared_doc_kwargs)
@Appender(generic.NDFrame.reindex.__doc__)
def reindex(self, index=None, method=None, copy=True, limit=None,
**kwargs):
# TODO: remove?
return super(SparseSeries, self).reindex(index=index, method=method,
copy=copy, limit=limit,
**kwargs)
def sparse_reindex(self, new_index):
"""
Conform sparse values to new SparseIndex
Parameters
----------
new_index : {BlockIndex, IntIndex}
Returns
-------
reindexed : SparseSeries
"""
if not isinstance(new_index, splib.SparseIndex):
raise TypeError("new index must be a SparseIndex")
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
values = SparseArray(values,
sparse_index=new_index,
fill_value=self.values.fill_value)
return self._constructor(values, index=self.index).__finalize__(self)
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum of non-NA/null values.
When performing the cumulative summation, any non-NA/null values will
be skipped. The resulting SparseSeries will preserve the locations of
NaN values, but the fill value will be `np.nan` regardless.
Parameters
----------
axis : {0}
Returns
-------
cumsum : SparseSeries
"""
nv.validate_cumsum(args, kwargs)
# Validate axis
if axis is not None:
self._get_axis_number(axis)
new_array = self.values.cumsum()
return self._constructor(
new_array, index=self.index,
sparse_index=new_array.sp_index).__finalize__(self)
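# Illustrative behaviour of cumsum (NaN locations survive, per the docstring):
#   SparseSeries([1.0, np.nan, 2.0]).cumsum()  ->  values [1.0, NaN, 3.0]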
# TODO: SparseSeries.isna is Sparse, while Series.isna is dense
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
arr = SparseArray(isna(self.values.sp_values),
sparse_index=self.values.sp_index,
fill_value=isna(self.fill_value))
return self._constructor(arr, index=self.index).__finalize__(self)
isnull = isna
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
arr = SparseArray(notna(self.values.sp_values),
sparse_index=self.values.sp_index,
fill_value=notna(self.fill_value))
return self._constructor(arr, index=self.index).__finalize__(self)
notnull = notna
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Analogous to Series.dropna. If fill_value=NaN, returns a dense Series
"""
# TODO: make more efficient
# Validate axis
self._get_axis_number(axis or 0)
dense_valid = self.to_dense().dropna()
if inplace:
raise NotImplementedError("Cannot perform inplace dropna"
" operations on a SparseSeries")
if isna(self.fill_value):
return dense_valid
else:
dense_valid = dense_valid[dense_valid != self.fill_value]
return dense_valid.to_sparse(fill_value=self.fill_value)
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
"""
if isinstance(other, SparseSeries):
other = other.to_dense()
dense_combined = self.to_dense().combine_first(other)
return dense_combined.to_sparse(fill_value=self.fill_value)
@Appender(SparseAccessor.to_coo.__doc__)
def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
A, rows, columns = _sparse_series_to_coo(self, row_levels,
column_levels,
sort_labels=sort_labels)
return A, rows, columns
@classmethod
@Appender(SparseAccessor.from_coo.__doc__)
def from_coo(cls, A, dense_index=False):
return _coo_to_sparse_series(A, dense_index=dense_index)
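# Illustrative round-trip through scipy.sparse (requires a MultiIndex on s):
#   A, rows, cols = s.to_coo(row_levels=(0,), column_levels=(1,))
#   s2 = SparseSeries.from_coo(A)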
# overwrite series methods with unaccelerated Sparse-specific versions
ops.add_flex_arithmetic_methods(SparseSeries)
ops.add_special_arithmetic_methods(SparseSeries)
|
{
"content_hash": "fd0f982e56c8ff031604fbc98175ddc3",
"timestamp": "",
"source": "github",
"line_count": 592,
"max_line_length": 79,
"avg_line_length": 34.65878378378378,
"alnum_prop": 0.5670630665756896,
"repo_name": "GuessWhoSamFoo/pandas",
"id": "db4d3e876dec57fd24f346a9740b545a2cd911d1",
"size": "20518",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/core/sparse/series.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406353"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "556"
},
{
"name": "Python",
"bytes": "14926624"
},
{
"name": "Shell",
"bytes": "29351"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
AF_CHOICES = (
(4, 'IPv4'),
(6, 'IPv6'),
)
# Prefix statuses
PREFIX_STATUS_CONTAINER = 0
PREFIX_STATUS_ACTIVE = 1
PREFIX_STATUS_RESERVED = 2
PREFIX_STATUS_DEPRECATED = 3
PREFIX_STATUS_CHOICES = (
(PREFIX_STATUS_CONTAINER, 'Container'),
(PREFIX_STATUS_ACTIVE, 'Active'),
(PREFIX_STATUS_RESERVED, 'Reserved'),
(PREFIX_STATUS_DEPRECATED, 'Deprecated')
)
# IP address statuses
IPADDRESS_STATUS_ACTIVE = 1
IPADDRESS_STATUS_RESERVED = 2
IPADDRESS_STATUS_DEPRECATED = 3
IPADDRESS_STATUS_DHCP = 5
IPADDRESS_STATUS_CHOICES = (
(IPADDRESS_STATUS_ACTIVE, 'Active'),
(IPADDRESS_STATUS_RESERVED, 'Reserved'),
(IPADDRESS_STATUS_DEPRECATED, 'Deprecated'),
(IPADDRESS_STATUS_DHCP, 'DHCP')
)
# IP address roles
IPADDRESS_ROLE_LOOPBACK = 10
IPADDRESS_ROLE_SECONDARY = 20
IPADDRESS_ROLE_ANYCAST = 30
IPADDRESS_ROLE_VIP = 40
IPADDRESS_ROLE_VRRP = 41
IPADDRESS_ROLE_HSRP = 42
IPADDRESS_ROLE_GLBP = 43
IPADDRESS_ROLE_CARP = 44
IPADDRESS_ROLE_CHOICES = (
(IPADDRESS_ROLE_LOOPBACK, 'Loopback'),
(IPADDRESS_ROLE_SECONDARY, 'Secondary'),
(IPADDRESS_ROLE_ANYCAST, 'Anycast'),
(IPADDRESS_ROLE_VIP, 'VIP'),
(IPADDRESS_ROLE_VRRP, 'VRRP'),
(IPADDRESS_ROLE_HSRP, 'HSRP'),
(IPADDRESS_ROLE_GLBP, 'GLBP'),
(IPADDRESS_ROLE_CARP, 'CARP'),
)
IPADDRESS_ROLES_NONUNIQUE = (
# IPAddress roles which are exempt from unique address enforcement
IPADDRESS_ROLE_ANYCAST,
IPADDRESS_ROLE_VIP,
IPADDRESS_ROLE_VRRP,
IPADDRESS_ROLE_HSRP,
IPADDRESS_ROLE_GLBP,
IPADDRESS_ROLE_CARP,
)
# VLAN statuses
VLAN_STATUS_ACTIVE = 1
VLAN_STATUS_RESERVED = 2
VLAN_STATUS_DEPRECATED = 3
VLAN_STATUS_CHOICES = (
(VLAN_STATUS_ACTIVE, 'Active'),
(VLAN_STATUS_RESERVED, 'Reserved'),
(VLAN_STATUS_DEPRECATED, 'Deprecated')
)
# Bootstrap CSS classes
STATUS_CHOICE_CLASSES = {
0: 'default',
1: 'primary',
2: 'info',
3: 'danger',
4: 'warning',
5: 'success',
}
ROLE_CHOICE_CLASSES = {
10: 'default',
20: 'primary',
30: 'warning',
40: 'success',
41: 'success',
42: 'success',
43: 'success',
44: 'success',
}
# IP protocols (for services)
IP_PROTOCOL_TCP = 6
IP_PROTOCOL_UDP = 17
IP_PROTOCOL_CHOICES = (
(IP_PROTOCOL_TCP, 'TCP'),
(IP_PROTOCOL_UDP, 'UDP'),
)
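# Illustrative use (a sketch): these tuples typically back Django model fields,
# e.g.
#   status = models.PositiveSmallIntegerField(
#       choices=PREFIX_STATUS_CHOICES, default=PREFIX_STATUS_ACTIVE)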
|
{
"content_hash": "07ae59dfcd3f55443fda4b9dfc3bf69c",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 70,
"avg_line_length": 23.59375,
"alnum_prop": 0.6635761589403973,
"repo_name": "lampwins/netbox",
"id": "eeb17eddd4b8d25441280ef6e5833de4f52bc492",
"size": "2288",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/ipam/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815169"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
}
|
import re
import unicodedata
from django.template.defaultfilters import stringfilter, register
from django.utils import six
from django.utils.functional import allow_lazy
from django.utils.safestring import mark_safe
def _slugify(value):
"""
A variant of django.utils.text.slugify that allows dots (.) in a slug field.
This method is a copy of django.utils.text.slugify.
"""
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub('[^\.^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub('[-\s]+', '-', value))
_slugify = allow_lazy(_slugify, six.text_type)
@register.filter(is_safe=True)
@stringfilter
def slugify_allow_dots(value):
return _slugify(value)
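# Illustrative template usage (the dot survives, unlike stock slugify):
#   {{ "Robot Framework v2.8.doc"|slugify_allow_dots }} -> robot-framework-v2.8.doc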
|
{
"content_hash": "3226ae53e938c9d5c786db55bb77d1d6",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 90,
"avg_line_length": 29.68,
"alnum_prop": 0.7008086253369272,
"repo_name": "stasiek/rfdocs",
"id": "912a832ebdfa98dcd855d68a1c6cd4ac5d95849d",
"size": "789",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rfdocs/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "241041"
},
{
"name": "HTML",
"bytes": "49232"
},
{
"name": "JavaScript",
"bytes": "230219"
},
{
"name": "Python",
"bytes": "154559"
}
],
"symlink_target": ""
}
|
import hashlib
from social_core.exceptions import AuthAlreadyAssociated, AuthException
def auto_logout(*args, **kwargs):
"""Do not compare current user with new one"""
return {'user': None}
def check_email_present(backend, uid, user=None, *args, **kwargs):
if not kwargs['details'].get('email'):
raise AuthException(backend, "Email wasn't provided by oauth provider")
def social_user(backend, uid, user=None, *args, **kwargs):
provider = backend.name
social = backend.strategy.storage.user.get_social_auth(provider, uid)
if social:
# can happen when user has multiple accounts with same email (apply email uniqueness strictly)
if user and social.user != user:
msg = 'This {0} account is already in use.'.format(provider)
raise AuthAlreadyAssociated(backend, msg)
elif not user:
user = social.user
return {'social': social,
'user': user,
'is_new': user is None,
'new_association': social is None}
def save_avatar(strategy, details, user=None, *args, **kwargs):
"""Get user avatar from social provider."""
if user:
backend_name = kwargs['backend'].__class__.__name__.lower()
response = kwargs.get('response', {})
avatar = None
if 'google-oauth2' in backend_name and response.get('image', {}).get('url'):
avatar = response['image']['url'].split('?')[0]
else:
avatar = 'http://www.gravatar.com/avatar/'
avatar += hashlib.md5(user.email.lower().encode('utf8')).hexdigest()
avatar += '?size=100'
if avatar and user.avatar != avatar:
user.avatar = avatar
strategy.storage.user.changed(user)
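# Illustrative wiring (a sketch): these steps slot into python-social-auth via
# the SOCIAL_AUTH_PIPELINE setting, e.g.
#   SOCIAL_AUTH_PIPELINE = (
#       'social_core.pipeline.social_auth.social_details',
#       'social_core.pipeline.social_auth.social_uid',
#       'scholars.authentication.pipeline.check_email_present',
#       'scholars.authentication.pipeline.social_user',
#       # ... remaining steps, including save_avatar above
#   )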
|
{
"content_hash": "872805a01709eed55e88caf7d292a866",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 102,
"avg_line_length": 35.14,
"alnum_prop": 0.614114968696642,
"repo_name": "crowd-course/scholars",
"id": "35166c5e9887d63bb9da0af6d8e3815f44c27d43",
"size": "1757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scholars/authentication/pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "418417"
},
{
"name": "HTML",
"bytes": "552468"
},
{
"name": "JavaScript",
"bytes": "531650"
},
{
"name": "Python",
"bytes": "222619"
}
],
"symlink_target": ""
}
|
from django.views.generic import TemplateView
from userprofiles.contrib.accountverification.models import AccountVerification
from userprofiles.settings import up_settings
class RegistrationActivateView(TemplateView):
template_name = 'userprofiles/registration_activate.html'
def get_context_data(self, **kwargs):
activation_key = kwargs['activation_key'].lower()
account = AccountVerification.objects.activate_user(activation_key)
return {
'account': account,
'expiration_days': up_settings.ACCOUNT_VERIFICATION_DAYS
}
registration_activate = RegistrationActivateView.as_view()
|
{
"content_hash": "1b9a17c200e3fc7096072d60817610fb",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 34.1578947368421,
"alnum_prop": 0.7411402157164869,
"repo_name": "tripathy/django-userprofiles",
"id": "00dd9fd937c31594ec4961093ffa9b27ffed66b9",
"size": "673",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "userprofiles/contrib/accountverification/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "7838"
},
{
"name": "Makefile",
"bytes": "212"
},
{
"name": "Python",
"bytes": "57337"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import get_request_site_address, encode
from frappe.model.document import Document
from six.moves.urllib.parse import quote
from frappe.website.router import resolve_route
from frappe.website.doctype.website_theme.website_theme import add_website_theme
class WebsiteSettings(Document):
def validate(self):
self.validate_top_bar_items()
self.validate_footer_items()
self.validate_home_page()
def validate_home_page(self):
if frappe.flags.in_install:
return
if self.home_page and not resolve_route(self.home_page):
frappe.msgprint(_("Invalid Home Page") + " (Standard pages - index, login, products, blog, about, contact)")
self.home_page = ''
def validate_top_bar_items(self):
"""validate url in top bar items"""
for top_bar_item in self.get("top_bar_items"):
if top_bar_item.parent_label:
parent_label_item = self.get("top_bar_items", {"label": top_bar_item.parent_label})
if not parent_label_item:
# invalid item
frappe.throw(_("{0} does not exist in row {1}").format(top_bar_item.parent_label, top_bar_item.idx))
elif not parent_label_item[0] or parent_label_item[0].url:
# parent cannot have url
frappe.throw(_("{0} in row {1} cannot have both URL and child items").format(top_bar_item.parent_label,
top_bar_item.idx))
def validate_footer_items(self):
"""validate url in top bar items"""
for footer_item in self.get("footer_items"):
if footer_item.parent_label:
parent_label_item = self.get("footer_items", {"label": footer_item.parent_label})
if not parent_label_item:
# invalid item
frappe.throw(_("{0} does not exist in row {1}").format(footer_item.parent_label, footer_item.idx))
elif not parent_label_item[0] or parent_label_item[0].url:
# parent cannot have url
frappe.throw(_("{0} in row {1} cannot have both URL and child items").format(footer_item.parent_label,
footer_item.idx))
def on_update(self):
self.clear_cache()
def clear_cache(self):
# make js and css
# clear web cache (for menus!)
frappe.clear_cache(user = 'Guest')
from frappe.website.render import clear_cache
clear_cache()
# clears role based home pages
frappe.clear_cache()
def get_website_settings():
hooks = frappe.get_hooks()
context = frappe._dict({
'top_bar_items': get_items('top_bar_items'),
'footer_items': get_items('footer_items'),
"post_login": [
{"label": _("My Account"), "url": "/me"},
{"label": _("Logout"), "url": "/?cmd=web_logout"}
]
})
settings = frappe.get_single("Website Settings")
for k in ["banner_html", "brand_html", "copyright", "twitter_share_via",
"facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
"disable_signup", "hide_footer_signup", "head_html", "title_prefix",
"navbar_search"]:
if hasattr(settings, k):
context[k] = settings.get(k)
if settings.address:
context["footer_address"] = settings.address
for k in ["facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
"disable_signup"]:
context[k] = int(context.get(k) or 0)
if frappe.request:
context.url = quote(str(get_request_site_address(full_address=True)), safe="/:")
context.encoded_title = quote(encode(context.title or ""), str(""))
for update_website_context in hooks.update_website_context or []:
frappe.get_attr(update_website_context)(context)
context.web_include_js = hooks.web_include_js or []
context.web_include_css = hooks.web_include_css or []
via_hooks = frappe.get_hooks("website_context")
for key in via_hooks:
context[key] = via_hooks[key]
if key not in ("top_bar_items", "footer_items", "post_login") \
and isinstance(context[key], (list, tuple)):
context[key] = context[key][-1]
add_website_theme(context)
if not context.get("favicon"):
context["favicon"] = "/assets/frappe/images/favicon.png"
if settings.favicon and settings.favicon != "attach_files:":
context["favicon"] = settings.favicon
return context
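# Illustrative extension point (hypothetical app name): an installed app can
# mutate this context from its hooks.py, e.g.
#   update_website_context = ["myapp.overrides.update_website_context"]
# where the referenced function receives the context dict and edits it in place.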
def get_items(parentfield):
all_top_items = frappe.db.sql("""\
select * from `tabTop Bar Item`
where parent='Website Settings' and parentfield= %s
order by idx asc""", parentfield, as_dict=1)
top_items = all_top_items[:]
# attach child items to top bar
for d in all_top_items:
if d['parent_label']:
for t in top_items:
if t['label']==d['parent_label']:
if not 'child_items' in t:
t['child_items'] = []
t['child_items'].append(d)
break
return top_items
@frappe.whitelist(allow_guest=True)
def is_chat_enabled():
return bool(frappe.db.get_single_value('Website Settings', 'chat_enable'))
|
{
"content_hash": "a387b425ceca40e0e2dc9a804b5766d0",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 111,
"avg_line_length": 32.73426573426573,
"alnum_prop": 0.6851100192266609,
"repo_name": "vjFaLk/frappe",
"id": "64558e751aa416bd3f39403cafa61bf1af971c2e",
"size": "4782",
"binary": false,
"copies": "1",
"ref": "refs/heads/parsimony-production",
"path": "frappe/website/doctype/website_settings/website_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "290337"
},
{
"name": "HTML",
"bytes": "179507"
},
{
"name": "JavaScript",
"bytes": "2179734"
},
{
"name": "Less",
"bytes": "146135"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "2774237"
},
{
"name": "SCSS",
"bytes": "15721"
},
{
"name": "Shell",
"bytes": "3875"
},
{
"name": "Vue",
"bytes": "95109"
}
],
"symlink_target": ""
}
|
from ingresso import ControllerIngresso
def IngressoMeia():
cod_sessao = int(input("Session code: "))
ControllerIngresso.IngressoMeia(cod_sessao)
def Ingresso():
cod_sessao = int(input("Session code: "))
ControllerIngresso.Ingresso(cod_sessao)
def ListarIngressoVendidos():
cod_sessao = int(input("Session code: "))
ControllerIngresso.ListarIngressoVendidos(cod_sessao)
def ListarIngresso():
ControllerIngresso.ListarIngresso()
def BuscaIngresso():
cod_ingresso = int(input("Ticket code: "))
ControllerIngresso.BuscaIngresso(cod_ingresso)
def RemoverIngresso():
cod_ingresso = int(input("Ticket code: "))
ControllerIngresso.RemoverIngresso(cod_ingresso)
def RemoverTodosIngresso():
ControllerIngresso.RemoverTodosIngresso()
|
{
"content_hash": "4f37bc32adb6a4ae1035d12bf682397e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 57,
"avg_line_length": 31.115384615384617,
"alnum_prop": 0.7330037082818294,
"repo_name": "ygorclima/apd",
"id": "15fc0d6860f7026b55efdb90d4d55037c60d8e53",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Ingresso/ingresso.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "27319"
}
],
"symlink_target": ""
}
|
"Functions that help with dynamically creating decorators for views."
# For backwards compatibility in Django 2.0.
from contextlib import ContextDecorator # noqa
from functools import WRAPPER_ASSIGNMENTS, update_wrapper, wraps
class classonlymethod(classmethod):
def __get__(self, instance, cls=None):
if instance is not None:
raise AttributeError("This method is available only on the class, not on instances.")
return super().__get__(instance, cls)
def method_decorator(decorator, name=''):
"""
Converts a function decorator into a method decorator
"""
# 'obj' can be a class or a function. If 'obj' is a function at the time it
# is passed to _dec, it will eventually be a method of the class it is
# defined on. If 'obj' is a class, the 'name' is required to be the name
# of the method that will be decorated.
def _dec(obj):
is_class = isinstance(obj, type)
if is_class:
if name and hasattr(obj, name):
func = getattr(obj, name)
if not callable(func):
raise TypeError(
"Cannot decorate '{0}' as it isn't a callable "
"attribute of {1} ({2})".format(name, obj, func)
)
else:
raise ValueError(
"The keyword argument `name` must be the name of a method "
"of the decorated class: {0}. Got '{1}' instead".format(
obj, name,
)
)
else:
func = obj
def decorate(function):
"""
Apply a list/tuple of decorators if decorator is one. Decorator
functions are applied so that the call order is the same as the
order in which they appear in the iterable.
"""
if hasattr(decorator, '__iter__'):
for dec in decorator[::-1]:
function = dec(function)
return function
return decorator(function)
def _wrapper(self, *args, **kwargs):
@decorate
def bound_func(*args2, **kwargs2):
return func.__get__(self, type(self))(*args2, **kwargs2)
# bound_func has the signature that 'decorator' expects i.e. no
# 'self' argument, but it is a closure over self so it can call
# 'func' correctly.
return bound_func(*args, **kwargs)
# In case 'decorator' adds attributes to the function it decorates, we
# want to copy those. We don't have access to bound_func in this scope,
# but we can cheat by using it on a dummy function.
@decorate
def dummy(*args, **kwargs):
pass
update_wrapper(_wrapper, dummy)
# Need to preserve any existing attributes of 'func', including the name.
update_wrapper(_wrapper, func)
if is_class:
setattr(obj, name, _wrapper)
return obj
return _wrapper
# Don't worry about making _dec look similar to a list/tuple as it's rather
# meaningless.
if not hasattr(decorator, '__iter__'):
update_wrapper(_dec, decorator)
# Change the name to aid debugging.
if hasattr(decorator, '__name__'):
_dec.__name__ = 'method_decorator(%s)' % decorator.__name__
else:
_dec.__name__ = 'method_decorator(%s)' % decorator.__class__.__name__
return _dec
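# A minimal usage sketch (hypothetical ProtectedView, assuming
# django.contrib.auth and django.views.generic are available):
#
#     from django.contrib.auth.decorators import login_required
#     from django.utils.decorators import method_decorator
#     from django.views.generic import View
#
#     @method_decorator(login_required, name='dispatch')
#     class ProtectedView(View):
#         ...  # dispatch() is now wrapped by login_required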
def decorator_from_middleware_with_args(middleware_class):
"""
Like decorator_from_middleware, but returns a function
that accepts the arguments to be passed to the middleware_class.
Use like::
cache_page = decorator_from_middleware_with_args(CacheMiddleware)
# ...
@cache_page(3600)
def my_view(request):
# ...
"""
return make_middleware_decorator(middleware_class)
def decorator_from_middleware(middleware_class):
"""
Given a middleware class (not an instance), returns a view decorator. This
lets you use middleware functionality on a per-view basis. The middleware
is created with no params passed.
"""
return make_middleware_decorator(middleware_class)()
# Unused, for backwards compatibility in Django 2.0.
def available_attrs(fn):
"""
Return the list of functools-wrappable attributes on a callable.
This was required as a workaround for http://bugs.python.org/issue3445
under Python 2.
"""
return WRAPPER_ASSIGNMENTS
def make_middleware_decorator(middleware_class):
def _make_decorator(*m_args, **m_kwargs):
middleware = middleware_class(*m_args, **m_kwargs)
def _decorator(view_func):
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
if hasattr(middleware, 'process_request'):
result = middleware.process_request(request)
if result is not None:
return result
if hasattr(middleware, 'process_view'):
result = middleware.process_view(request, view_func, args, kwargs)
if result is not None:
return result
try:
response = view_func(request, *args, **kwargs)
except Exception as e:
if hasattr(middleware, 'process_exception'):
result = middleware.process_exception(request, e)
if result is not None:
return result
raise
if hasattr(response, 'render') and callable(response.render):
if hasattr(middleware, 'process_template_response'):
response = middleware.process_template_response(request, response)
# Defer running of process_response until after the template
# has been rendered:
if hasattr(middleware, 'process_response'):
def callback(response):
return middleware.process_response(request, response)
response.add_post_render_callback(callback)
else:
if hasattr(middleware, 'process_response'):
return middleware.process_response(request, response)
return response
return _wrapped_view
return _decorator
return _make_decorator
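# The wrapper above replays the classic middleware contract around a single
# view: process_request -> process_view -> the view itself ->
# process_exception on error -> process_template_response/process_response
# on the way out.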
class classproperty:
def __init__(self, method=None):
self.fget = method
def __get__(self, instance, cls=None):
return self.fget(cls)
def getter(self, method):
self.fget = method
return self
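# A minimal usage sketch (hypothetical Config class):
#
#     class Config:
#         @classproperty
#         def label(cls):
#             return cls.__name__.lower()
#
#     Config.label    # -> 'config'; resolved on the class itself
#     Config().label  # -> 'config'; __get__ receives the owner class too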
|
{
"content_hash": "40e5dabf7ffa1362371b8e059a9202f3",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 97,
"avg_line_length": 38.63068181818182,
"alnum_prop": 0.5705250772172378,
"repo_name": "bak1an/django",
"id": "87a20cf23b7f07168c1948280e82d1a334eecbde",
"size": "6799",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/utils/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52098"
},
{
"name": "HTML",
"bytes": "174451"
},
{
"name": "JavaScript",
"bytes": "251434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11328645"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
"""Helpers for tests."""
import json
import pytest
from .common import MQTTMessage
from tests.async_mock import patch
from tests.common import load_fixture
@pytest.fixture(name="generic_data", scope="session")
def generic_data_fixture():
"""Load generic MQTT data and return it."""
return load_fixture("ozw/generic_network_dump.csv")
@pytest.fixture(name="fan_data", scope="session")
def fan_data_fixture():
"""Load fan MQTT data and return it."""
return load_fixture("ozw/fan_network_dump.csv")
@pytest.fixture(name="light_data", scope="session")
def light_data_fixture():
"""Load light dimmer MQTT data and return it."""
return load_fixture("ozw/light_network_dump.csv")
@pytest.fixture(name="light_new_ozw_data", scope="session")
def light_new_ozw_data_fixture():
"""Load light dimmer MQTT data and return it."""
return load_fixture("ozw/light_new_ozw_network_dump.csv")
@pytest.fixture(name="light_pure_rgb_dimmer_data", scope="session")
def light_pure_rgb_dimmer_data_fixture():
"""Load light rgb and dimmer MQTT data and return it."""
return load_fixture("ozw/light_pure_rgb_dimmer_dump.csv")
@pytest.fixture(name="light_no_rgb_data", scope="session")
def light_no_rgb_data_fixture():
"""Load light dimmer MQTT data and return it."""
return load_fixture("ozw/light_no_rgb_network_dump.csv")
@pytest.fixture(name="light_no_ww_data", scope="session")
def light_no_ww_data_fixture():
"""Load light dimmer MQTT data and return it."""
return load_fixture("ozw/light_no_ww_network_dump.csv")
@pytest.fixture(name="light_no_cw_data", scope="session")
def light_no_cw_data_fixture():
"""Load light dimmer MQTT data and return it."""
return load_fixture("ozw/light_no_cw_network_dump.csv")
@pytest.fixture(name="light_wc_data", scope="session")
def light_wc_only_data_fixture():
"""Load light dimmer MQTT data and return it."""
return load_fixture("ozw/light_wc_network_dump.csv")
@pytest.fixture(name="cover_data", scope="session")
def cover_data_fixture():
"""Load cover MQTT data and return it."""
return load_fixture("ozw/cover_network_dump.csv")
@pytest.fixture(name="cover_gdo_data", scope="session")
def cover_gdo_data_fixture():
"""Load cover_gdo MQTT data and return it."""
return load_fixture("ozw/cover_gdo_network_dump.csv")
@pytest.fixture(name="climate_data", scope="session")
def climate_data_fixture():
"""Load climate MQTT data and return it."""
return load_fixture("ozw/climate_network_dump.csv")
@pytest.fixture(name="lock_data", scope="session")
def lock_data_fixture():
"""Load lock MQTT data and return it."""
return load_fixture("ozw/lock_network_dump.csv")
@pytest.fixture(name="string_sensor_data", scope="session")
def string_sensor_fixture():
"""Load string sensor MQTT data and return it."""
return load_fixture("ozw/sensor_string_value_network_dump.csv")
@pytest.fixture(name="sent_messages")
def sent_messages_fixture():
"""Fixture to capture sent messages."""
sent_messages = []
with patch(
"homeassistant.components.mqtt.async_publish",
side_effect=lambda hass, topic, payload: sent_messages.append(
{"topic": topic, "payload": json.loads(payload)}
),
):
yield sent_messages
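# The *_msg fixtures below wrap recorded JSON payloads in MQTTMessage objects
# so each test can replay a realistic OZW MQTT message against the
# integration.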
@pytest.fixture(name="fan_msg")
async def fan_msg_fixture(hass):
"""Return a mock MQTT msg with a fan actuator message."""
fan_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/fan.json")
)
message = MQTTMessage(topic=fan_json["topic"], payload=fan_json["payload"])
message.encode()
return message
@pytest.fixture(name="light_msg")
async def light_msg_fixture(hass):
"""Return a mock MQTT msg with a light actuator message."""
light_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/light.json")
)
message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"])
message.encode()
return message
@pytest.fixture(name="light_no_rgb_msg")
async def light_no_rgb_msg_fixture(hass):
"""Return a mock MQTT msg with a light actuator message."""
light_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/light_no_rgb.json")
)
message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"])
message.encode()
return message
@pytest.fixture(name="light_rgb_msg")
async def light_rgb_msg_fixture(hass):
"""Return a mock MQTT msg with a light actuator message."""
light_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/light_rgb.json")
)
message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"])
message.encode()
return message
@pytest.fixture(name="light_pure_rgb_msg")
async def light_pure_rgb_msg_fixture(hass):
"""Return a mock MQTT msg with a pure rgb light actuator message."""
light_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/light_pure_rgb.json")
)
message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"])
message.encode()
return message
@pytest.fixture(name="switch_msg")
async def switch_msg_fixture(hass):
"""Return a mock MQTT msg with a switch actuator message."""
switch_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/switch.json")
)
message = MQTTMessage(topic=switch_json["topic"], payload=switch_json["payload"])
message.encode()
return message
@pytest.fixture(name="sensor_msg")
async def sensor_msg_fixture(hass):
"""Return a mock MQTT msg with a sensor change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/sensor.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="binary_sensor_msg")
async def binary_sensor_msg_fixture(hass):
"""Return a mock MQTT msg with a binary_sensor change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/binary_sensor.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="binary_sensor_alt_msg")
async def binary_sensor_alt_msg_fixture(hass):
"""Return a mock MQTT msg with a binary_sensor change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/binary_sensor_alt.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="cover_msg")
async def cover_msg_fixture(hass):
"""Return a mock MQTT msg with a cover level change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/cover.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="cover_gdo_msg")
async def cover_gdo_msg_fixture(hass):
"""Return a mock MQTT msg with a cover barrier state change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/cover_gdo.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="climate_msg")
async def climate_msg_fixture(hass):
"""Return a mock MQTT msg with a climate mode change message."""
sensor_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/climate.json")
)
message = MQTTMessage(topic=sensor_json["topic"], payload=sensor_json["payload"])
message.encode()
return message
@pytest.fixture(name="lock_msg")
async def lock_msg_fixture(hass):
"""Return a mock MQTT msg with a lock actuator message."""
lock_json = json.loads(
await hass.async_add_executor_job(load_fixture, "ozw/lock.json")
)
message = MQTTMessage(topic=lock_json["topic"], payload=lock_json["payload"])
message.encode()
return message
|
{
"content_hash": "91c90e60d79cc929ddb97b23b0495bc2",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 85,
"avg_line_length": 33.12,
"alnum_prop": 0.68756038647343,
"repo_name": "titilambert/home-assistant",
"id": "3e30b60129be1cea8268a019feb9381e6daf8d32",
"size": "8280",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/ozw/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
}
|
import json
from flask import Response
"""
Import shared GCP helper modules
"""
# TODO: Add pubsub to import list
from quiz.gcp import datastore, pubsub
# END TODO
"""
Gets list of questions from datastore
- Create query
- Filter on quiz
- Call the datastore helper to get back JSON
- Pretty print JSON
- Set header and return the response
"""
def get_questions(quiz_name):
questions = datastore.list_entities(quiz_name)
payload = {'questions': list(questions)}
payload = json.dumps(payload, indent=2, sort_keys=True)
response = Response(payload)
response.headers['Content-Type'] = 'application/json'
return response
"""
Grades submitted answers
- Get list of questions with correct answers from datastore
- Iterate through questions, find any submitted answers that match
- Count the number of questions that have at least one matching correct answer
- Compose and pretty print payload
- Compose and return response
"""
def get_grade(quiz_name, answers):
questions = datastore.list_entities(quiz_name, False)
    score = sum(
        1 for q in questions
        if any(answer['id'] == q['id'] and
               int(answer['answer']) == q['correctAnswer']
               for answer in answers)
    )
payload = {'correct': score, 'total': len(questions)}
payload = json.dumps(payload, indent=2, sort_keys=True)
for a in answers:
        # filter() returns an iterator on Python 3 and cannot be indexed;
        # take the first question whose id matches the submitted answer
        question = next(q for q in questions if q['id'] == a['id'])
print(question)
a['correct'] = question['correctAnswer']
a['quiz'] = question['quiz']
pubsub.publish_answer(a)
response = Response(payload)
response.headers['Content-Type'] = 'application/json'
return response
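# Shape sketch (field names taken from the code above, values hypothetical):
# get_grade('gcp', [{'id': 1, 'answer': '2'}, {'id': 2, 'answer': '0'}])
# marks a question correct when some submitted answer matches both its id
# and its correctAnswer, returning JSON such as {"correct": 1, "total": 2}.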
"""
Publish feedback
- Call pubsub helper
- Compose and return response
"""
def publish_feedback(feedback):
# TODO: Publish the feedback using your pubsub module, return the result
result = pubsub.publish_feedback(feedback)
response = Response(json.dumps(result, indent=2, sort_keys=True))
response.headers['Content-Type'] = 'application/json'
return response
# END TODO
|
{
"content_hash": "f4fa861e159a1433bbef4e5fbc791bf4",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 76,
"avg_line_length": 30.105263157894736,
"alnum_prop": 0.6429195804195804,
"repo_name": "GoogleCloudPlatform/training-data-analyst",
"id": "44a01944ffd3d8f64e2e6e1065bc5701b803e5dc",
"size": "2864",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "courses/developingapps/v1.3/python/pubsub-languageapi-spanner/bonus/quiz/api/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39536"
},
{
"name": "C#",
"bytes": "23445"
},
{
"name": "C++",
"bytes": "30926"
},
{
"name": "CSS",
"bytes": "53087"
},
{
"name": "Dockerfile",
"bytes": "90856"
},
{
"name": "Go",
"bytes": "93755"
},
{
"name": "HCL",
"bytes": "73891"
},
{
"name": "HTML",
"bytes": "2342167"
},
{
"name": "Java",
"bytes": "2441030"
},
{
"name": "JavaScript",
"bytes": "3957504"
},
{
"name": "Jinja",
"bytes": "257585"
},
{
"name": "Jsonnet",
"bytes": "5696"
},
{
"name": "Jupyter Notebook",
"bytes": "242016061"
},
{
"name": "Makefile",
"bytes": "12642"
},
{
"name": "PigLatin",
"bytes": "11558"
},
{
"name": "Pug",
"bytes": "457977"
},
{
"name": "Python",
"bytes": "18543833"
},
{
"name": "R",
"bytes": "68"
},
{
"name": "Scala",
"bytes": "27161"
},
{
"name": "Shell",
"bytes": "763259"
},
{
"name": "TypeScript",
"bytes": "66858"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class BoxpointsValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="boxpoints", parent_name="box", **kwargs):
super(BoxpointsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop(
"values", ["all", "outliers", "suspectedoutliers", False]
),
**kwargs,
)
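# The enumerated values select which sample points plotly draws beside the
# box: "all" shows every point, "outliers" only points beyond the whiskers,
# "suspectedoutliers" additionally highlights suspected outliers, and False
# hides the points entirely.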
|
{
"content_hash": "6c9e340951a139d0be47e7696f81398c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 77,
"avg_line_length": 37.357142857142854,
"alnum_prop": 0.5869980879541109,
"repo_name": "plotly/plotly.py",
"id": "448f9d16c4de2b149f229ac2f1852e565d139d6d",
"size": "523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/box/_boxpoints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from twisted.trial import unittest
from .. import logic
from .. import mathematics
from ...workspace import Workspace, Block
from .... import data
class CompareBlockTestCase (unittest.TestCase):
def setUp(self):
self.workspace = Workspace()
self.block: Block = logic.logic_compare(self.workspace, 1)
self.inputA = mathematics.math_number(self.workspace, 2)
self.inputA.setFieldValue('NUM', 10)
self.inputB = mathematics.math_number(self.workspace, 2)
self.inputB.setFieldValue('NUM', 20)
self.block.connectInput('A', self.inputA, "value")
self.block.connectInput('B', self.inputB, "value")
def test_compare_gt(self):
self.block.setFieldValue('OP', 'GT')
result = self.block.eval()
self.assertEqual(self.successResultOf(result), False)
return result
def test_compare_lt(self):
self.block.setFieldValue('OP', 'LT')
result = self.block.eval()
self.assertEqual(self.successResultOf(result), True)
return result
def test_compare_eq(self):
self.block.setFieldValue('OP', 'EQ')
result = self.block.eval()
self.assertEqual(self.successResultOf(result), False)
return result
def test_compare_neq(self):
self.block.setFieldValue('OP', 'NEQ')
result = self.block.eval()
self.assertEqual(self.successResultOf(result), True)
return result
class LexicalVariableCompareBlockTestCase (unittest.TestCase):
def setUp(self):
self.variable = data.Variable(int)
self.workspace = Workspace()
self.workspace.variables.add('global.global::test_var', self.variable)
self.block: Block = logic.lexical_variable_compare(self.workspace, 1)
self.block.setFieldValue('VAR', 'global.global::test_var')
def test_variable(self):
self.assertIs(self.block._getVariable(), self.variable)
def test_compare_eq(self):
self.variable.set(11)
self.block.setFieldValue('VALUE', 10)
self.block.setFieldValue('OP', 'EQ')
result = self.block.eval()
self.assertEqual(self.successResultOf(result), False)
return result
def test_compare_unit(self):
self.variable.set(11)
self.block.setFieldValue('VALUE', 10)
self.block.setFieldValue('UNIT', 100)
self.block.setFieldValue('OP', 'GT')
result = self.block.eval()
self.assertEqual(self.successResultOf(result), False)
return result
|
{
"content_hash": "3bb572cbe24597d73479141e22072dbc",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 78,
"avg_line_length": 31.8375,
"alnum_prop": 0.6497840596780526,
"repo_name": "richardingham/octopus",
"id": "f0454632d28021d2be9ef3aa115f41ae03d40b30",
"size": "2548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octopus/blocktopus/blocks/test/test_logic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43592"
},
{
"name": "Dockerfile",
"bytes": "2450"
},
{
"name": "Java",
"bytes": "80744"
},
{
"name": "JavaScript",
"bytes": "865827"
},
{
"name": "Less",
"bytes": "14374"
},
{
"name": "Python",
"bytes": "417950"
},
{
"name": "Shell",
"bytes": "1130"
}
],
"symlink_target": ""
}
|
import ftplib
import os # noqa: F401
import shutil
import time
import unittest
from configparser import ConfigParser
from os import environ
import requests
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import ThreadedFTPServer
import threading
import socket
from biokbase.workspace.client import Workspace as workspaceService
from mock import patch
from installed_clients.DataFileUtilClient import DataFileUtil
from kb_uploadmethods.Utils.UnpackFileUtil import UnpackFileUtil
from kb_uploadmethods.authclient import KBaseAuth as _KBaseAuth
from kb_uploadmethods.kb_uploadmethodsImpl import kb_uploadmethods
from kb_uploadmethods.kb_uploadmethodsServer import MethodContext
from installed_clients.AbstractHandleClient import AbstractHandle as HandleService
class kb_uploadmethods_unpack_Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.token = environ.get('KB_AUTH_TOKEN', None)
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('kb_uploadmethods'):
cls.cfg[nameval[0]] = nameval[1]
authServiceUrl = cls.cfg.get('auth-service-url',
"https://kbase.us/services/authorization/Sessions/Login")
auth_client = _KBaseAuth(authServiceUrl)
cls.user_id = auth_client.get_user(cls.token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': cls.token,
'user_id': cls.user_id,
'provenance': [
{'service': 'kb_uploadmethods',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL, token=cls.token)
cls.serviceImpl = kb_uploadmethods(cls.cfg)
cls.dfu = DataFileUtil(os.environ['SDK_CALLBACK_URL'], token=cls.token)
cls.hs = HandleService(url=cls.cfg['handle-service-url'],
token=cls.token)
cls.scratch = cls.cfg['scratch']
cls.shockURL = cls.cfg['shock-url']
cls.ftp_domain = socket.gethostbyname(socket.gethostname())
cls.ftp_port = 21
thread = threading.Thread(target=cls.start_ftp_service,
args=(cls.ftp_domain, cls.ftp_port))
thread.daemon = True
thread.start()
time.sleep(5)
small_file = os.path.join(cls.scratch, 'test.txt')
with open(small_file, "w") as f:
f.write("empty content")
cls.test_shock = cls.dfu.file_to_shock({'file_path': small_file, 'make_handle': True})
cls.handles_to_delete = []
cls.nodes_to_delete = []
cls.handles_to_delete.append(cls.test_shock['handle']['hid'])
cls.nodes_to_delete.append(cls.test_shock['shock_id'])
@classmethod
def start_ftp_service(cls, domain, port):
print('starting ftp service')
authorizer = DummyAuthorizer()
authorizer.add_anonymous(os.getcwd(), perm='elradfmwMT')
handler = FTPHandler
handler.authorizer = authorizer
address = (domain, port)
with ThreadedFTPServer(address, handler) as server:
server.serve_forever()
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
if hasattr(cls, 'nodes_to_delete'):
for node in cls.nodes_to_delete:
cls.delete_shock_node(node)
if hasattr(cls, 'handles_to_delete'):
cls.hs.delete_handles(cls.hs.hids_to_handles(cls.handles_to_delete))
print('Deleted handles ' + str(cls.handles_to_delete))
@classmethod
    def make_ref(cls, objinfo):
return str(objinfo[6]) + '/' + str(objinfo[0]) + '/' + str(objinfo[4])
@classmethod
def delete_shock_node(cls, node_id):
header = {'Authorization': 'Oauth {0}'.format(cls.token)}
requests.delete(cls.shockURL + '/node/' + node_id, headers=header,
allow_redirects=True)
print(('Deleted shock node ' + node_id))
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_kb_uploadmethods_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
def mock_file_to_staging(file_path_list, subdir_folder=None):
print('Mocking _file_to_staging')
print("Mocking uploaded files to staging area:\n{}".format('\n'.join(file_path_list)))
def mock_file_to_staging_direct(file_path_list, subdir_folder=''):
print('Mocking _file_to_staging_direct')
print("Mocking uploaded files to staging area:\n{}".format('\n'.join(file_path_list)))
def mock_download_staging_file(params):
print('Mocking DataFileUtilClient.download_staging_file')
print(params)
fq_filename = params.get('staging_file_subdir_path')
fq_path = os.path.join('/kb/module/work/tmp', fq_filename)
shutil.copy(os.path.join("data", fq_filename), fq_path)
return {'copy_file_path': fq_path}
def mock_file_to_shock(params):
print('Mocking DataFileUtilClient.file_to_shock')
print(params)
return kb_uploadmethods_unpack_Test().test_shock
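    # The tests below patch the network-facing helpers with the mocks above,
    # so staging uploads, Shock transfers and staging downloads run without
    # touching external services.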
@patch.object(UnpackFileUtil, "_file_to_staging_direct", side_effect=mock_file_to_staging_direct)
@patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
def test_unpack_web_file_direct_download_trailing_space(self, _file_to_staging_direct,
file_to_shock):
file_url = 'https://anl.box.com/shared/static/'
file_url += 'g0064wasgaoi3sax4os06paoyxay4l3r.zip '
params = {
'download_type': 'Direct Download',
'file_url': file_url,
'workspace_name': self.getWsName()
}
ref = self.getImpl().unpack_web_file(self.getContext(), params)
self.assertTrue('unpacked_file_path' in ref[0])
self.assertTrue('report_ref' in ref[0])
self.assertTrue('report_name' in ref[0])
self.assertEqual(6, len(ref[0].get('unpacked_file_path').split(',')))
for file_path in ref[0].get('unpacked_file_path').split(','):
self.assertRegex(
os.path.basename(file_path),
                r'file[1-6]\.txt')
@patch.object(UnpackFileUtil, "_file_to_staging_direct", side_effect=mock_file_to_staging_direct)
@patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
def test_unpack_web_file_direct_download_multiple_urls(self, _file_to_staging_direct,
file_to_shock):
file_url = ' https://anl.box.com/shared/static/'
file_url += 'g0064wasgaoi3sax4os06paoyxay4l3r.zip'
params = {
'download_type': 'Direct Download',
'workspace_name': self.getWsName(),
'urls_to_add_web_unpack': [
{
'file_url': file_url
},
{
'file_url': file_url
}
]
}
ref = self.getImpl().unpack_web_file(self.getContext(), params)
self.assertTrue('unpacked_file_path' in ref[0])
self.assertTrue('report_ref' in ref[0])
self.assertTrue('report_name' in ref[0])
self.assertEqual(12, len(ref[0].get('unpacked_file_path').split(',')))
for file_path in ref[0].get('unpacked_file_path').split(','):
self.assertRegex(
os.path.basename(file_path),
                r'file[1-6]\.txt')
@patch.object(UnpackFileUtil, "_file_to_staging_direct", side_effect=mock_file_to_staging_direct)
@patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
def test_unpack_web_file_dropbox(self, _file_to_staging_direct, file_to_shock):
params = {
'download_type': 'DropBox',
'file_url': 'https://www.dropbox.com/s/cbiywh2aihjxdf5/Archive.zip?dl=0',
'workspace_name': self.getWsName()
}
ref = self.getImpl().unpack_web_file(self.getContext(), params)
self.assertTrue('unpacked_file_path' in ref[0])
self.assertTrue('report_ref' in ref[0])
self.assertTrue('report_name' in ref[0])
self.assertEqual(6, len(ref[0].get('unpacked_file_path').split(',')))
for file_path in ref[0].get('unpacked_file_path').split(','):
self.assertRegex(
os.path.basename(file_path),
                r'file[1-6]\.txt')
@patch.object(UnpackFileUtil, "_file_to_staging_direct", side_effect=mock_file_to_staging_direct)
@patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
def test_unpack_web_file_ftp(self, _file_to_staging_direct, file_to_shock):
# copy test file to FTP
fq_filename = "Archive.zip"
with ftplib.FTP(self.ftp_domain) as ftp_connection:
ftp_connection.login('anonymous', 'anonymous@domain.com')
if fq_filename not in ftp_connection.nlst():
with open(os.path.join("data", fq_filename), 'rb') as fh:
ftp_connection.storbinary('STOR {}'.format(fq_filename), fh)
params = {
'download_type': 'FTP',
'file_url': 'ftp://{}/{} '.format(self.ftp_domain, fq_filename),
'workspace_name': self.getWsName()
}
ref = self.getImpl().unpack_web_file(self.getContext(), params)
self.assertTrue('unpacked_file_path' in ref[0])
self.assertTrue('report_ref' in ref[0])
self.assertTrue('report_name' in ref[0])
self.assertEqual(6, len(ref[0].get('unpacked_file_path').split(',')))
for file_path in ref[0].get('unpacked_file_path').split(','):
self.assertRegex(
os.path.basename(file_path),
                r'file[1-6]\.txt')
@unittest.skip("skip for now")
@patch.object(UnpackFileUtil, "_file_to_staging_direct", side_effect=mock_file_to_staging_direct)
@patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
def test_unpack_web_file_google_drive(self, _file_to_staging_direct, file_to_shock):
file_url = 'https://drive.google.com/open?id=0B0exSa7ebQ0qSlJiWEVWYU5rYWM'
params = {
'download_type': 'Google Drive',
'file_url': file_url,
'workspace_name': self.getWsName()
}
ref = self.getImpl().unpack_web_file(self.getContext(), params)
self.assertTrue('unpacked_file_path' in ref[0])
self.assertTrue('report_ref' in ref[0])
self.assertTrue('report_name' in ref[0])
self.assertEqual(6, len(ref[0].get('unpacked_file_path').split(',')))
for file_path in ref[0].get('unpacked_file_path').split(','):
self.assertRegex(
os.path.basename(file_path),
                r'file[1-6]\.txt')
@patch.object(DataFileUtil, "download_staging_file", side_effect=mock_download_staging_file)
@patch.object(UnpackFileUtil, "_file_to_staging_direct", side_effect=mock_file_to_staging_direct)
@patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
def test_unpack_staging_file(self, _file_to_staging_direct, download_staging_file,
file_to_shock):
params = {
'staging_file_subdir_path': 'Archive.zip',
'workspace_name': self.getWsName()
}
ref = self.getImpl().unpack_staging_file(self.getContext(), params)
self.assertTrue('unpacked_file_path' in ref[0])
self.assertTrue('report_ref' in ref[0])
self.assertTrue('report_name' in ref[0])
self.assertEqual(6, len(ref[0].get('unpacked_file_path').split(',')))
for file_path in ref[0].get('unpacked_file_path').split(','):
self.assertRegex(
os.path.basename(file_path),
                r'file[1-6]\.txt')
|
{
"content_hash": "e6daa5a6432e4c360e7ce8c6650549a9",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 101,
"avg_line_length": 44.074829931972786,
"alnum_prop": 0.5980089519987652,
"repo_name": "kbaseapps/kb_uploadmethods",
"id": "cdb09bf6ec9cf2ee3d3eafe32099362841bb0d1c",
"size": "12982",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/unpack_staging_and_web_file_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1293"
},
{
"name": "HTML",
"bytes": "23646"
},
{
"name": "Makefile",
"bytes": "2944"
},
{
"name": "Python",
"bytes": "1509565"
},
{
"name": "Ruby",
"bytes": "21352"
},
{
"name": "Shell",
"bytes": "932"
}
],
"symlink_target": ""
}
|
"""Test the Pentair ScreenLogic config flow."""
from unittest.mock import patch
from screenlogicpy import ScreenLogicError
from screenlogicpy.const import (
SL_GATEWAY_IP,
SL_GATEWAY_NAME,
SL_GATEWAY_PORT,
SL_GATEWAY_SUBTYPE,
SL_GATEWAY_TYPE,
)
from homeassistant import config_entries, setup
from homeassistant.components.dhcp import HOSTNAME, IP_ADDRESS
from homeassistant.components.screenlogic.config_flow import (
GATEWAY_MANUAL_ENTRY,
GATEWAY_SELECT_KEY,
)
from homeassistant.components.screenlogic.const import (
DEFAULT_SCAN_INTERVAL,
DOMAIN,
MIN_SCAN_INTERVAL,
)
from homeassistant.const import CONF_IP_ADDRESS, CONF_PORT, CONF_SCAN_INTERVAL
from tests.common import MockConfigEntry
async def test_flow_discovery(hass):
"""Test the flow works with basic discovery."""
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"homeassistant.components.screenlogic.config_flow.discovery.async_discover",
return_value=[
{
SL_GATEWAY_IP: "1.1.1.1",
SL_GATEWAY_PORT: 80,
SL_GATEWAY_TYPE: 12,
SL_GATEWAY_SUBTYPE: 2,
SL_GATEWAY_NAME: "Pentair: 01-01-01",
},
],
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
assert result["step_id"] == "gateway_select"
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={GATEWAY_SELECT_KEY: "00:c0:33:01:01:01"}
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Pentair: 01-01-01"
assert result2["data"] == {
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_flow_discover_none(hass):
"""Test when nothing is discovered."""
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"homeassistant.components.screenlogic.config_flow.discovery.async_discover",
return_value=[],
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
assert result["step_id"] == "gateway_entry"
async def test_flow_discover_error(hass):
"""Test when discovery errors."""
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"homeassistant.components.screenlogic.config_flow.discovery.async_discover",
side_effect=ScreenLogicError("Fake error"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
assert result["step_id"] == "gateway_entry"
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
) as mock_setup_entry, patch(
"homeassistant.components.screenlogic.config_flow.login.create_socket",
return_value=True,
), patch(
"homeassistant.components.screenlogic.config_flow.login.gateway_connect",
return_value="00-C0-33-01-01-01",
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "Pentair: 01-01-01"
assert result3["data"] == {
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_dhcp(hass):
"""Test DHCP discovery flow."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data={
HOSTNAME: "Pentair: 01-01-01",
IP_ADDRESS: "1.1.1.1",
},
)
assert result["type"] == "form"
assert result["step_id"] == "gateway_entry"
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
) as mock_setup_entry, patch(
"homeassistant.components.screenlogic.config_flow.login.create_socket",
return_value=True,
), patch(
"homeassistant.components.screenlogic.config_flow.login.gateway_connect",
return_value="00-C0-33-01-01-01",
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "Pentair: 01-01-01"
assert result3["data"] == {
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_manual_entry(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"homeassistant.components.screenlogic.config_flow.discovery.async_discover",
return_value=[
{
SL_GATEWAY_IP: "1.1.1.1",
SL_GATEWAY_PORT: 80,
SL_GATEWAY_TYPE: 12,
SL_GATEWAY_SUBTYPE: 2,
SL_GATEWAY_NAME: "Pentair: 01-01-01",
},
],
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
assert result["step_id"] == "gateway_select"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={GATEWAY_SELECT_KEY: GATEWAY_MANUAL_ENTRY}
)
assert result2["type"] == "form"
assert result2["errors"] == {}
assert result2["step_id"] == "gateway_entry"
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
) as mock_setup_entry, patch(
"homeassistant.components.screenlogic.config_flow.login.create_socket",
return_value=True,
), patch(
"homeassistant.components.screenlogic.config_flow.login.gateway_connect",
return_value="00-C0-33-01-01-01",
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "Pentair: 01-01-01"
assert result3["data"] == {
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
with patch(
"homeassistant.components.screenlogic.config_flow.discovery.async_discover",
return_value=[],
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.screenlogic.config_flow.login.create_socket",
return_value=None,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_IP_ADDRESS: "1.1.1.1",
CONF_PORT: 80,
},
)
assert result2["type"] == "form"
assert result2["errors"] == {CONF_IP_ADDRESS: "cannot_connect"}
async def test_option_flow(hass):
"""Test config flow options."""
entry = MockConfigEntry(domain=DOMAIN)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
), patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_SCAN_INTERVAL: 15},
)
assert result["type"] == "create_entry"
assert result["data"] == {CONF_SCAN_INTERVAL: 15}
async def test_option_flow_defaults(hass):
"""Test config flow options."""
entry = MockConfigEntry(domain=DOMAIN)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
), patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "create_entry"
assert result["data"] == {
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
}
async def test_option_flow_input_floor(hass):
"""Test config flow options."""
entry = MockConfigEntry(domain=DOMAIN)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.screenlogic.async_setup", return_value=True
), patch(
"homeassistant.components.screenlogic.async_setup_entry",
return_value=True,
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_SCAN_INTERVAL: 1}
)
assert result["type"] == "create_entry"
assert result["data"] == {
CONF_SCAN_INTERVAL: MIN_SCAN_INTERVAL,
}
|
{
"content_hash": "d381d8eb569c8bbea6d445abfb0a3cfd",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 84,
"avg_line_length": 32.87068965517241,
"alnum_prop": 0.6160503540519277,
"repo_name": "Danielhiversen/home-assistant",
"id": "a24ce36e7a1abc6327e95c3f437e1dfcae03dcd8",
"size": "11439",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/screenlogic/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "36870185"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
BOT_NAME = 'spojCrawler'
SPIDER_MODULES = ['crawler.spiders']
NEWSPIDER_MODULE = 'crawler.spiders'
ITEM_PIPELINES = {
'crawler.pipelines.UserscrawlerPipeline': 300,
}
LOG_LEVEL = 'INFO'
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'pt',
}
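# FIFO queues combined with a positive DEPTH_PRIORITY switch Scrapy from its
# default depth-first (LIFO) crawl order to breadth-first.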
DEPTH_PRIORITY = 1
SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleFifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.FifoMemoryQueue'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'recBot (+http://index-spojrec.rhcloud.com/)'
|
{
"content_hash": "23ee7f2178ee3a42db4916ceda3fc5d7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 80,
"avg_line_length": 27.772727272727273,
"alnum_prop": 0.7266775777414075,
"repo_name": "ederfmartins/spojrec",
"id": "3a4002080e456639e3169bdd549f747b6b8a7703",
"size": "873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crawler/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4330"
},
{
"name": "Python",
"bytes": "76966"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
}
|
"""plist module tests."""
import base64
import datetime
import pprint
import mox
import stubout
from google.apputils import app
from google.apputils import basetest
from simian.mac.munki import plist
class PlistModuleTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testUpdateIterable(self):
"""Test UpdateIterable()."""
d = {
'foo': [0, 1],
}
array_add = lambda a, v: a.append(v)
dict_add = lambda d, v: d.update(v)
plist.UpdateIterable(d, 'foo', 2, default=[], op=array_add)
self.assertEqual(d['foo'], [0, 1, 2])
plist.UpdateIterable(d, 'simple', 'hello')
self.assertEqual(d['simple'], 'hello')
plist.UpdateIterable(d, 'simple', 'hello', op=lambda d, v: v.upper())
self.assertEqual(d['simple'], 'HELLO')
plist.UpdateIterable(d, 'newd', default={})
self.assertEqual(d['newd'], {})
plist.UpdateIterable(d, 'newd', {'newv':1}, op=dict_add)
self.assertEqual(
d,
{
'foo': [0, 1, 2],
'simple': 'HELLO',
'newd': {'newv': 1},
},
)
def testEscapeString(self):
"""Test EscapeString()."""
self.mox.StubOutWithMock(plist.xml.sax.saxutils, 'escape')
plist.xml.sax.saxutils.escape('notescaped').AndReturn('escaped')
self.mox.ReplayAll()
self.assertEqual('escaped', plist.EscapeString('notescaped'))
self.mox.VerifyAll()
class ApplePlistTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.apl = plist.ApplePlist()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def assertPlistEquals(self, plist_dict):
"""Higher level assert for Plist equality testing.
Args:
plist_dict: dict, expected dictionary output from Plist()
"""
failure_str = '\n\nPlist output = \n%s\n\n!= Expected dictionary\n%s' % (
pprint.pformat(self.apl.GetContents()),
pprint.pformat(plist_dict),
)
    self.assertEqual(self.apl.GetContents(), plist_dict, failure_str)
def testParseInvalidPlist(self):
"""Test Parse() where the given plist is not even XML."""
self.apl._plist_xml = 'asdf'
self.mox.ReplayAll()
self.assertRaises(plist.MalformedPlistError, self.apl.Parse)
self.mox.VerifyAll()
def testParseDate(self):
"""Test _ParseDate()."""
mock_dt = 'dt'
date_str = '12345'
self.stubs.Set(
plist.datetime, 'datetime',
self.mox.CreateMock(plist.datetime.datetime))
plist.datetime.datetime.strptime(
date_str, plist.PLIST_DATE_FORMAT).AndReturn(mock_dt)
self.mox.ReplayAll()
self.assertEqual(self.apl._ParseDate(date_str), mock_dt)
self.mox.VerifyAll()
def testParseData(self):
"""Test _ParseData()."""
mock_data = 'decoded data'
data_str = 'b64 encoded data'
self.mox.StubOutWithMock(plist.base64, 'b64decode')
plist.base64.b64decode(data_str).AndReturn(mock_data)
self.mox.ReplayAll()
self.assertEqual(self.apl._ParseData(data_str), mock_data)
self.mox.VerifyAll()
def testValidateBasic(self):
"""Test _ValidateBasic."""
self.apl._plist = {'findthis': 12345}
config = {'findthis': int}
self.apl._ValidateBasic(config)
self.apl._plist = {'findthis': 'evil string'}
self.assertRaises(
plist.InvalidPlistError, self.apl._ValidateBasic, config)
self.apl._plist = {'unknown': 1}
self.assertRaises(
plist.InvalidPlistError, self.apl._ValidateBasic, config)
def testValidateInvalidPlists(self):
"""Test Validate() with None and empty plists."""
self.assertRaises(plist.PlistNotParsedError, self.apl.Validate)
def testValidateSuccessWithAddedHook(self):
"""Test Validate() with a success."""
self.apl._VALIDATE_BASIC_CONFIG = {'not empty': True}
self.mox.StubOutWithMock(
self.apl, '_ValidateBasic', self.mox.CreateMockAnything())
self.apl._ValidateBasic(self.apl._VALIDATE_BASIC_CONFIG).AndReturn(None)
self.apl._plist = {'something': 1}
self.apl.ValidateFoo = self.mox.CreateMockAnything()
self.apl.ValidateFoo().AndReturn(None)
self.apl.AddValidationHook(self.apl.ValidateFoo)
self.mox.ReplayAll()
self.apl.Validate()
self.mox.VerifyAll()
def testEncodeXmlError(self):
"""Test Validate() with invalid/unknown encoding."""
self.apl._plist_xml = 'crazy encoded string'
self.mox.StubOutWithMock(
self.apl, 'GetEncoding', self.mox.CreateMockAnything())
self.apl.GetEncoding().AndReturn('INVALID ENCODING!!!')
self.mox.ReplayAll()
self.assertRaises(plist.InvalidPlistError, self.apl.EncodeXml)
self.mox.VerifyAll()
def testGetContents(self):
"""Test GetContents()."""
self.assertRaises(plist.PlistNotParsedError, self.apl.GetContents)
self.apl._plist = None
self.assertEqual(None, self.apl.GetContents())
self.apl._plist = {}
self.assertEqual({}, self.apl.GetContents())
def PlistTest(self, plist_xml, plist_dict=None, exc=None):
"""Test invoking Parse().
Args:
plist_xml: str, XML document
plist_dict: dict, optional, expected dictionary output from Plist
exc: Exception, optional, expected exception when calling Parse()
"""
self.apl.LoadPlist(plist_xml)
if exc is not None:
self.assertRaises(exc, self.apl.Parse)
self.assertFalse(hasattr(self.apl, '_plist'))
else:
self.apl.Parse()
self.assertPlistEquals(plist_dict)
def testBasicParseChangesLost(self):
"""Test that Parse() will not wipe direct set values."""
self.apl.LoadPlist('<plist version="1.0"><dict></dict></plist>')
self.apl.Parse()
self.assertFalse('foo' in self.apl)
self.apl['foo'] = 'bar'
self.assertRaises(plist.PlistAlreadyParsedError, self.apl.Parse)
self.assertEqual(self.apl['foo'], 'bar')
def _testBasicEmptyDict(self):
"""Test a basic plist doc."""
self.apl.LoadPlist('<plist version="1.0">\n\n </plist>')
self.assertRaises(plist.InvalidPlistError, self.apl.Parse)
self.apl._plist = {}
self.assertEqual(None, self.apl.GetXml())
self.assertEqual(None, self.apl.GetXmlContent())
self.assertEqual(None, self.apl.GetEncoding())
def testBasicBroken(self):
"""Test a basic broken plist doc."""
self.PlistTest(
'<key>omg</key>',
exc=plist.MalformedPlistError)
def testBasicBrokenKey(self):
"""Test another type of broken plist, dict with value but no key."""
self.PlistTest(
'<string>omg no key</string>',
exc=plist.MalformedPlistError)
def testTypicalEmptyPlist(self):
"""Test an empty plist as it usually appears -- a dict item exists."""
xml = '%s<dict/>%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT)
self.PlistTest(xml, {})
def testEmptyPlist(self):
"""Test with a truly empty plist, a plist node with no contents."""
xml = '%s%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT)
self.PlistTest(xml, None)
def testBasicCdata(self):
"""Test with some simple CDATA."""
xml = ('%s<dict><key>cdata</key>'
'<string>line1\nline2\n</string></dict>%s') % (
plist.PLIST_HEAD, plist.PLIST_FOOT)
self.PlistTest(xml, {'cdata': 'line1\nline2\n'})
def testBasic(self):
"""Test with a plist that should parse OK."""
xml = ('%s <dict>\n <key>foo</key>\n <string>bar</string>\n '
'</dict>%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT))
self.PlistTest(xml, {'foo': 'bar'})
self.assertEqual(xml, self.apl.GetXml())
self.assertEqual(self.apl.GetEncoding(), 'utf-8')
self.assertEqual(
'<dict>\n <key>foo</key>\n <string>bar</string>\n</dict>',
self.apl.GetXmlContent())
def testBasicData(self):
xml = ('%s <dict>\n <key>foo</key>\n '
'<data>aGVsbG8gdGhlcmU=</data>\n '
'</dict>%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT))
self.PlistTest(xml, {'foo': 'hello there'})
def testBasicDataTwoLines(self):
xml = ('%s <dict>\n <key>foo</key>\n '
'<data>aGVs\nbG8gdGhlcmU=</data>\n '
'</dict>%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT))
self.PlistTest(xml, {'foo': 'hello there'})
def testBasicDataEmptyKey(self):
xml = ('%s <dict>\n <key></key>\n '
'<data>d2hhdCBhIGJ1ZyB0aGlzIHdhcw==</data>\n '
'</dict>%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT))
self.PlistTest(xml, {'': 'what a bug this was'})
def testBasicEmptyString(self):
xml = '%s<dict><key>foo</key><string></string><key>bar</key><string/></dict>%s' % (
plist.PLIST_HEAD, plist.PLIST_FOOT)
plist2 = plist.ApplePlist(xml)
plist2.Parse()
self.PlistTest(xml, {'foo': '', 'bar': ''})
def testBasicNested(self):
xml = ('%s <dict>\n <key>foo</key>\n <string>bar</string>\n '
'</dict>%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT))
xml2 = ('%s <dict>\n <key>subway</key>\n <string>BDFM</string>\n '
'</dict>%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT))
nested_xml = ('%s <dict>\n <key>foo</key>\n <dict>\n'
' <key>subway</key>\n'
' <string>BDFM</string>\n'
' </dict>\n'
' </dict>%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT))
plist2 = plist.ApplePlist(xml2)
plist2.Parse()
self.PlistTest(xml, {'foo': 'bar'})
self.apl.GetContents()['foo'] = plist2
self.assertEqual(nested_xml, self.apl.GetXml())
def testDictToXml(self):
"""Test DictToXml().
Tests integers, strings, booleans, None, as well as nested dicts/arrays.
"""
d = {'foo': [1, 'two', [False]],
'bar': {'foobar': None},
'outside': True,
'floattest': 12.3456,
}
out = ('<dict>\n '
'<key>bar</key>\n <dict>\n '
'<key>foobar</key>\n <string></string>\n </dict>\n '
'<key>floattest</key>\n <real>12.345600</real>\n '
'<key>foo</key>\n <array>\n <integer>1</integer>\n '
'<string>two</string>\n <array>\n <false/>\n </array>\n '
'</array>\n <key>outside</key>\n <true/>\n'
'</dict>')
    self.assertEqual(out, plist.DictToXml(d, indent_num=0))
def testSequenceToXml(self):
"""Test SequenceToXml()."""
seq = [[1, 2, 3], 4, 5, 6]
out = ('<array>\n <array>\n <integer>1</integer>\n '
'<integer>2</integer>\n <integer>3</integer>\n </array>\n '
'<integer>4</integer>\n <integer>5</integer>\n '
'<integer>6</integer>\n</array>')
    self.assertEqual(out, plist.SequenceToXml(seq, indent_num=0))
def testSequenceToXmlWhenAppleUid(self):
"""Test SequenceToXml()."""
seq = [plist.AppleUid(999)]
out = ('<array>\n <dict>'
'<key>CF$UID</key><integer>999</integer></dict>\n</array>')
    self.assertEqual(out, plist.SequenceToXml(seq, indent_num=0))
def testSequenceToXmlWhenAppleData(self):
seq = [plist.AppleData('hello')]
out = '<array>\n <data>aGVsbG8=</data>\n</array>'
    self.assertEqual(out, plist.SequenceToXml(seq, indent_num=0))
def testBinaryInvalid(self):
"""Test with a broken binary plist."""
plist_bin = "bplist00\x00\x00\x00otherstuff"
self.PlistTest(plist_bin, exc=plist.MalformedPlistError)
def testBinaryInvalidVersion(self):
"""Test with a broken binary plist that has a weird version."""
plist_bin = "bplist01\x00\x00hello"
self.PlistTest(plist_bin, exc=plist.BinaryPlistVersionError)
def testBinaryInvalidSlightly(self):
"""Test with a broken binary plist."""
plist_bin = base64.b64decode("""
YnBsaXN0MDDYAQIDBAUGBwgJCgsMDQ4PEFNYgjWK+lgulSlZJWlzUGlXaXNGYWxzZVZpc0RhdGFT
Zm9vV2lzVG9kYXlXaXNBcnJheRAJCSJASPXDCEQBAgP/U2JhcjNBtGtLgAAAAKMREhNRMVEyUTMI
GR0kKTE4PERMTk9UVVpeZ2ttbwAAAAAAAAEBAAAAAAAAABQAAAAAAAAAAAAAAAAAAABx""")
self.PlistTest(plist_bin, exc=plist.MalformedPlistError)
def testBinary(self):
"""Test with a binary plist."""
plist_bin = base64.b64decode("""
YnBsaXN0MDDYAQIDBAUGBwgJCgsMDQ4PEFNpczlWaXNUcnVlVGlzUGlXaXNGYWxzZVZpc0RhdGFT
Zm9vV2lzVG9kYXlXaXNBcnJheRAJCSJASPXDCEQBAgP/U2JhcjNBtGtLgAAAAKMREhNRMVEyUTMI
GR0kKTE4PERMTk9UVVpeZ2ttbwAAAAAAAAEBAAAAAAAAABQAAAAAAAAAAAAAAAAAAABx""")
plist_dict = {
'foo': 'bar',
'is9': 9,
'isArray': ['1', '2', '3'],
'isData': plist.AppleData('\x01\x02\x03\xff'),
'isFalse': False,
'isTrue': True,
'isPi': 3.1400001049041748,
'isToday': datetime.datetime(2011, 11, 10, 0, 0, tzinfo=plist.UTC()),
}
self.PlistTest(plist_bin, plist_dict)
def testBinaryNoneAndUid(self):
"""Test with a binary plist.
Note this test data includes a "null" value, which we translate
into "None" in Python.
Note also that expected isTrue is None in plist_dict. This is
intentional. In the process of creating test data for this unit test
with OSX defaults(1), I used a boolean data container for "isNone", set
it to True, and then hexedited it to becoming a null value. (See the
structure relationship in _BinLoadSimple to see why). Unfortunately
defaults had tried to save space and assigned isTrue and isNone to
the same value container, so isTrue also became None when I did this.
For the purposes of the test I left everything that way instead of
fighting defaults.
"""
plist_bin = base64.b64decode("""
YnBsaXN0MDDaAQIDBAUGBwgJCgsMDQ4PEAwSExdTaXM5VmlzVHJ1ZVVpc1VpZFRpc1BpVmlzRGF0
YVdpc0ZhbHNlVmlzTm9uZVNmb29XaXNBcnJheVdpc1RvZGF5EAkAgTA5I0AJHrhgAAAARAECA/8I
CVNiYXKjFBUWUTFRMlEzM0G0a0uAAAAACB0hKC4zOkJJTVVdX2BjbHFyc3d7fX+BAAAAAAAAAQEA
AAAAAAAAGAAAAAAAAAAAAAAAAAAAAIo=""")
plist_dict = {
'foo': 'bar',
'is9': 9,
'isUid': plist.AppleUid(12345),
'isArray': ['1', '2', '3'],
'isData': plist.AppleData('\x01\x02\x03\xff'),
'isFalse': False,
'isTrue': None, # see docstring
'isPi': 3.1400001049041748,
'isToday': datetime.datetime(2011, 11, 10, 0, 0, tzinfo=plist.UTC()),
'isNone': None, # see docstring
}
self.PlistTest(plist_bin, plist_dict)
def testIntegrationTestBinaryToXML(self):
"""Test binary load and XML output.
Integration test between binary load and XML output.
"""
plist_bin = base64.b64decode("""
YnBsaXN0MDDZAQIDBAUGBwgJCgsMDQ4PEBESU2lzOVZpc1RydWVVaXNVaWRUaXNQaVdpc0ZhbHNl
VmlzRGF0YVNmb29XaXNUb2RheVdpc0FycmF5EAkJgTA5IkBI9cMIRAECA/9TYmFyM0G0a0uAAAAA
oxMUFVExUTJRMwgbHyYsMTlARExUVldaX2BlaXJ2eHoAAAAAAAABAQAAAAAAAAAWAAAAAAAAAAAA
AAAAAAAAfA==""")
plist_xml = (
'<?xml version="1.0" encoding="UTF-8"?>\n'
'<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" '
'"http://www.apple.com/DTDs/PropertyList-1.0.dtd">\n'
'<plist version="1.0">\n'
' <dict>\n'
' <key>foo</key>\n'
' <string>bar</string>\n'
' <key>is9</key>\n'
' <integer>9</integer>\n'
' <key>isArray</key>\n'
' <array>\n'
' <string>1</string>\n'
' <string>2</string>\n'
' <string>3</string>\n'
' </array>\n'
' <key>isData</key>\n'
' <data>AQID/w==</data>\n'
' <key>isFalse</key>\n'
' <false/>\n'
' <key>isPi</key>\n'
' <real>3.140000</real>\n'
' <key>isToday</key>\n'
' <date>2011-11-10T00:00:00Z</date>\n'
' <key>isTrue</key>\n'
' <true/>\n'
' <key>isUid</key>\n'
' <dict><key>CF$UID</key><integer>12345</integer></dict>\n'
' </dict>\n'
'</plist>\n'
)
self.apl.LoadPlist(plist_bin)
self.apl.Parse()
self.assertEqual(plist_xml, self.apl.GetXml())
def testGetXmlWithTypicalEmptyPlist(self):
"""Test GetXml() with a typical empty plist."""
plist_xml = '%s <dict>\n </dict>%s' % (
plist.PLIST_HEAD, plist.PLIST_FOOT)
self.apl._plist = {}
self.assertEqual(plist_xml, self.apl.GetXml())
def testGetXmlWithEmptyPlist(self):
"""Test GetXml() with empty plist."""
self.apl._plist = None
empty_plist_xml = '%s%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT)
self.assertEqual(empty_plist_xml, self.apl.GetXml())
def testGetXmlWithNoPlistProperty(self):
"""Test GetXml() in the case that a _plist property does not exist."""
if hasattr(self.apl, '_plist'):
del(self.apl._plist)
empty_plist_xml = '%s%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT)
self.assertEqual(empty_plist_xml, self.apl.GetXml())
def testGetXmlWithUnparsedPlistXml(self):
"""Test GetXml() with an unparsed plist_xml."""
if hasattr(self.apl, '_plist'):
del(self.apl._plist)
plist_xml = '%s<dict><key>foo</key><string>bar</string>%s' % (
plist.PLIST_HEAD, plist.PLIST_FOOT)
self.apl._plist_xml = plist_xml
self.assertEqual(plist_xml, self.apl.GetXml())
def testGetXmlWithUnparsedButEmptyPlistXml(self):
"""Test GetXml() with an unparsed plist_xml."""
if hasattr(self.apl, '_plist'):
del(self.apl._plist)
self.apl._plist_xml = None
empty_plist_xml = '%s%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT)
self.assertEqual(empty_plist_xml, self.apl.GetXml())
def testGetXmlXmlDocFalseWithUnparsedPlistXml(self):
"""Test GetXml() with an unparsed plist_xml."""
if hasattr(self.apl, '_plist'):
del(self.apl._plist)
content_xml = '<dict>\n <key>foo</key>\n <string>bar</string>\n</dict>'
plist_xml = ''.join([plist.PLIST_HEAD, content_xml, plist.PLIST_FOOT])
self.apl._plist_xml = plist_xml
self.assertEqual(content_xml, self.apl.GetXml(xml_doc=False))
def testLessBasic(self):
"""Test with a more complex plist that should parse OK."""
plist_xml = """
<plist>
<dict>
<key>receipts</key>
<array>
<dict>
<key>foo</key>
<string>bar</string>
</dict>
<dict>
<key>zoo</key>
<string>omg</string>
<key>floattest</key>
<real>123.456</real>
</dict>
<dict>
<key>hoo</key>
<date>2010-10-21T16:30:32Z</date>
</dict>
</array>
</dict>
</plist>
"""
plist_dict = {
'receipts': [
{'foo': 'bar'},
{'zoo': 'omg', 'floattest': 123.456},
{'hoo': plist.datetime.datetime(2010, 10, 21, 16, 30, 32)},
]
}
self.PlistTest(plist_xml, plist_dict)
def testReal(self):
"""Test with a real plist from Munki."""
plist_xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//XXXXX Computer//DTD PLIST 1.0//EN" "http://www.aaaaa.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>RestartAction</key>
<string>RequireRestart</string>
<key>catalogs</key>
<array>
<string>production</string>
</array>
<key>description</key>
<string>Includes PhotoI 08.</string>
<key>display_name</key>
<string>PhotoI 08</string>
<key>installed_size</key>
<integer>534412</integer>
<key>installer_item_location</key>
<string>apps/LifeI08/LifeI08.dmg</string>
<key>installer_item_size</key>
<integer>3708030</integer>
<key>minimum_os_version</key>
<string>10.4.0</string>
<key>name</key>
<string>LifeI08_PhotoI</string>
<key>installer_choices_xml</key>
<array>
<dict>
<key>attributeSetting</key>
<false/>
<key>choiceAttribute</key>
<string>selected</string>
<key>choiceIdentifier</key>
<string>BandRecorder</string>
</dict>
<dict>
<key>attributeSetting</key>
<false/>
<key>choiceAttribute</key>
<string>selected</string>
<key>choiceIdentifier</key>
<string>TunesI</string>
</dict>
<dict>
<key>attributeSetting</key>
<true/>
<key>choiceAttribute</key>
<string>selected</string>
<key>choiceIdentifier</key>
<string>PhotoI</string>
</dict>
<dict>
<key>attributeSetting</key>
<false/>
<key>choiceAttribute</key>
<string>selected</string>
<key>choiceIdentifier</key>
<string>MovieI</string>
</dict>
<dict>
<key>attributeSetting</key>
<false/>
<key>choiceAttribute</key>
<string>selected</string>
<key>choiceIdentifier</key>
<string>DVDI</string>
</dict>
<dict>
<key>attributeSetting</key>
<false/>
<key>choiceAttribute</key>
<string>selected</string>
<key>choiceIdentifier</key>
<string>DVDIExtraContent</string>
</dict>
<dict>
<key>attributeSetting</key>
<false/>
<key>choiceAttribute</key>
<string>selected</string>
<key>choiceIdentifier</key>
<string>WebI</string>
</dict>
<dict>
<key>attributeSetting</key>
<false/>
<key>choiceAttribute</key>
<string>selected</string>
<key>choiceIdentifier</key>
<string>BandRecorderExtraContent</string>
</dict>
<dict>
<key>attributeSetting</key>
<false/>
<key>choiceAttribute</key>
<string>selected</string>
<key>choiceIdentifier</key>
<string>LifeISoundEffects</string>
</dict>
</array>
<key>receipts</key>
<array>
<dict>
<key>installed_size</key>
<integer>139130</integer>
<key>packageid</key>
<string>com.aaaaa.pkg.PhotoI</string>
<key>version</key>
<string>8.0.0.0.0</string>
</dict>
<dict>
<key>installed_size</key>
<integer>392900</integer>
<key>packageid</key>
<string>com.aaaaa.pkg.PhotoIContent</string>
<key>version</key>
<string>8.0.0.0.0</string>
</dict>
<dict>
<key>installed_size</key>
<integer>10</integer>
<key>packageid</key>
<string>com.aaaaa.pkg.LifeI08</string>
<key>version</key>
<string>8.0.0.0.0</string>
</dict>
<dict>
<key>installed_size</key>
<integer>10</integer>
<key>packageid</key>
<string>com.aaaaa.pkg.LifeICookie</string>
<key>version</key>
<string>8.0.0.0.0</string>
</dict>
<dict>
<key>installed_size</key>
<integer>321</integer>
<key>packageid</key>
<string>com.aaaaa.pkg.XXXXXIntermediateCodec</string>
<key>version</key>
<string>1.2.0.0.0</string>
</dict>
<dict>
<key>installed_size</key>
<integer>2041</integer>
<key>packageid</key>
<string>com.aaaaa.pkg.LifeIMediaBrowser</string>
<key>version</key>
<string>2.0.0.0.0</string>
</dict>
</array>
<key>uninstall_method</key>
<string>removepackages</string>
<key>uninstallable</key>
<true/>
<key>version</key>
<string>7.0.0.0.0</string>
</dict>
</plist>
"""
plist_dict = {
'RestartAction':
'RequireRestart',
'catalogs':
[
'production',
],
'description':
'Includes PhotoI 08.',
'display_name':
'PhotoI 08',
'installed_size':
534412,
'installer_item_location':
'apps/LifeI08/LifeI08.dmg',
'installer_item_size':
3708030,
'minimum_os_version':
'10.4.0',
'name':
'LifeI08_PhotoI',
'installer_choices_xml':
[
{
'attributeSetting':
False,
'choiceAttribute':
'selected',
'choiceIdentifier':
'BandRecorder',
},
{
'attributeSetting':
False,
'choiceAttribute':
'selected',
'choiceIdentifier':
'TunesI',
},
{
'attributeSetting':
True,
'choiceAttribute':
'selected',
'choiceIdentifier':
'PhotoI',
},
{
'attributeSetting':
False,
'choiceAttribute':
'selected',
'choiceIdentifier':
'MovieI',
},
{
'attributeSetting':
False,
'choiceAttribute':
'selected',
'choiceIdentifier':
'DVDI',
},
{
'attributeSetting':
False,
'choiceAttribute':
'selected',
'choiceIdentifier':
'DVDIExtraContent',
},
{
'attributeSetting':
False,
'choiceAttribute':
'selected',
'choiceIdentifier':
'WebI',
},
{
'attributeSetting':
False,
'choiceAttribute':
'selected',
'choiceIdentifier':
'BandRecorderExtraContent',
},
{
'attributeSetting':
False,
'choiceAttribute':
'selected',
'choiceIdentifier':
'LifeISoundEffects',
},
],
'receipts':
[
{
'installed_size':
139130,
'packageid':
'com.aaaaa.pkg.PhotoI',
'version':
'8.0.0.0.0',
},
{
'installed_size':
392900,
'packageid':
'com.aaaaa.pkg.PhotoIContent',
'version':
'8.0.0.0.0',
},
{
'installed_size':
10,
'packageid':
'com.aaaaa.pkg.LifeI08',
'version':
'8.0.0.0.0',
},
{
'installed_size':
10,
'packageid':
'com.aaaaa.pkg.LifeICookie',
'version':
'8.0.0.0.0',
},
{
'installed_size':
321,
'packageid':
'com.aaaaa.pkg.XXXXXIntermediateCodec',
'version':
'1.2.0.0.0',
},
{
'installed_size':
2041,
'packageid':
'com.aaaaa.pkg.LifeIMediaBrowser',
'version':
'2.0.0.0.0',
},
],
'uninstall_method':
'removepackages',
'uninstallable':
True,
'version':
'7.0.0.0.0',
}
self.PlistTest(plist_xml, plist_dict)
def testSetContentsBasic(self):
"""Test SetContents() with basic input."""
self.mox.StubOutWithMock(self.apl, 'Validate')
d = {'foo': 'bar'}
self.apl.Validate().AndReturn(None)
self.mox.ReplayAll()
self.apl.SetContents(d)
self.assertEqual(self.apl._plist, d)
self.mox.VerifyAll()
def testSetContentsNestedXml(self):
"""Test SetContents() with evil nested XML."""
d = {'foo': '<xml>up up up and away</xml>'}
self.apl.SetContents(d)
self.assertEqual(self.apl._plist, d, str(self.apl._plist))
def testEqual(self):
"""Tests Equal()."""
pl = plist.ApplePlist()
pl._plist = {'foo': 1, 'bar': True}
other = plist.ApplePlist()
other._plist = {'foo': 1, 'bar': True}
self.assertTrue(pl.Equal(other))
    self.assertEqual(pl, other)
self.assertFalse(pl != other)
def testEqualWithIgnoreKeysReturningTrue(self):
"""Tests Equal() with ignore_keys, returning True."""
pl = plist.ApplePlist()
pl._plist = {'foo': 1, 'bar': True}
other = plist.ApplePlist()
other._plist = {'foo': 1, 'bar': False}
self.assertTrue(pl.Equal(other, ignore_keys=['bar']))
def testEqualWithIgnoreKeysReturningFalse(self):
"""Tests Equal() false."""
pl = plist.ApplePlist()
pl._plist = {'foo': 1, 'bar': True}
other = plist.ApplePlist()
other._plist = {'foo': 2, 'bar': True}
self.assertFalse(pl.Equal(other, ignore_keys=['bar']))
class MunkiPlistTest(mox.MoxTestBase):
"""Test MunkiPlist class."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.munki = plist.MunkiPlist()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testEmpty(self):
"""Test various forms of empty plists."""
xml = '%s<dict/>%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT)
self.munki.LoadPlist(xml)
self.munki.Parse()
xml = '%s%s' % (plist.PLIST_HEAD, plist.PLIST_FOOT)
self.munki.LoadPlist(xml)
self.assertRaises(plist.InvalidPlistError, self.munki.Parse)
class MunkiManifestPlistTest(mox.MoxTestBase):
"""Test MunkiManifestPlist class."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.munki = plist.MunkiManifestPlist()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testParseSuccess(self):
"""Test Parse() with valid Manifest plist."""
self.munki.LoadPlist(
'<plist><dict><key>catalogs</key><array><string>hello</string></array>'
'<key>managed_installs</key><array><string>hello</string></array>'
'</dict></plist>')
self.munki.Parse()
def testParseMissingCatalogsList(self):
"""Test Parse() with missing catalogs."""
self.munki.LoadPlist(
'<plist><dict><key>managed_installs</key><array></array>'
'</dict></plist>')
self.assertRaises(
plist.InvalidPlistError, self.munki.Parse)
def testParseInvalidCatalogsList(self):
"""Test Parse() with an invalid catalogs list."""
self.munki.LoadPlist(
'<plist><dict><key>catalogs</key><string>hello</string>'
'<key>managed_installs</key><array><string>hello</string></array>'
'</dict></plist>')
self.assertRaises(
plist.InvalidPlistError, self.munki.Parse)
class MunkiPackageInfoPlistTest(mox.MoxTestBase):
"""Test MunkiPackageInfoPlist class."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.munki = plist.MunkiPackageInfoPlist()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testParseSuccess(self):
"""Test Parse() with valid Package Info plist."""
self.munki.LoadPlist(
'<plist><dict><key>catalogs</key><array><string>hello</string></array>'
'<key>installer_item_hash</key><string>foo hash</string>'
'<key>installer_item_location</key><string>good location</string>'
'<key>name</key><string>fooname</string>'
'</dict></plist>')
self.munki.Parse()
def testParseMissingName(self):
"""Test Parse() with missing name."""
self.munki.LoadPlist(
'<plist><dict><key>catalogs</key><array><string>hello</string></array>'
'<key>installer_item_hash</key><string>foo hash</string>'
'<key>installer_item_location</key><string>foo hash</string>'
'</dict></plist>')
self.assertRaises(
plist.InvalidPlistError, self.munki.Parse)
  def testParseMissingInstallerItemHash(self):
    """Test Parse() with missing installer_item_hash."""
self.munki.LoadPlist(
'<plist><dict><key>catalogs</key><array><string>hello</string></array>'
'<key>name</key><string>fooname</string>'
'<key>installer_item_location</key><string>foo hash</string>'
'</dict></plist>')
self.assertRaises(
plist.InvalidPlistError, self.munki.Parse)
def testParseMissingInstallerItemLocation(self):
"""Test Parse() with missing installer_item_location."""
self.munki.LoadPlist(
'<plist><dict><key>catalogs</key><array><string>hello</string></array>'
'<key>installer_item_hash</key><string>foo hash</string>'
'<key>name</key><string>fooname</string>'
'</dict></plist>')
self.assertRaises(
plist.InvalidPlistError, self.munki.Parse)
def testParseInvalidCatalogsList(self):
"""Test Parse() with an invalid catalogs list."""
self.munki.LoadPlist(
'<plist><dict><key>catalogs</key><string>hello</string>'
'<key>name</key><string>fooname</string>'
'<key>installer_item_hash</key><string>foo hash</string>'
'<key>installer_item_location</key><string>good location</string>'
'</dict></plist>')
self.assertRaises(
plist.InvalidPlistError, self.munki.Parse)
def testValidateForceInstallAfterDate(self):
"""Tests _ValidateForceInstallAfterDate() with a valid date."""
self.munki._plist = {'force_install_after_date': datetime.datetime.utcnow()}
self.munki._ValidateForceInstallAfterDate()
def testValidateForceInstallAfterDateWithInvalidDate(self):
"""Tests _ValidateForceInstallAfterDate() with a dash in the name."""
# force_install_after_date here is a <string> not a <date>
self.munki._plist = {'force_install_after_date': '2013-07-10T13:00:00Z'}
self.assertRaises(
plist.InvalidPlistError, self.munki._ValidateForceInstallAfterDate)
def testInstallerItemLocation(self):
"""Tests _ValidateInstallerItemLocation()."""
self.munki._plist = {'installer_item_location': 'file.dmg'}
self.munki._ValidateInstallerItemLocation()
def testInstallerItemLocationWithPath(self):
"""Tests _ValidateInstallerItemLocation()."""
self.munki._plist = {'installer_item_location': 'path/to/file.dmg'}
self.assertRaises(
plist.InvalidPlistError, self.munki._ValidateInstallerItemLocation)
def testValidateInstallsFilePath(self):
"""Tests _ValidateInstallsFilePath() with valid file path."""
self.munki._plist = {
'installs': [
{'type': 'file', 'path': '/path/to/file'},
],
}
self.munki._ValidateInstallsFilePath()
def testValidateInstallsFilePathWhenNotFileType(self):
"""Tests _ValidateInstallsFilePath() with no file type install."""
self.munki._plist = {
'installs': [
{'type': 'not_file', 'something': 'else'},
],
}
self.munki._ValidateInstallsFilePath()
def testValidateInstallsFilePathWhenNoInstalls(self):
"""Tests _ValidateInstallsFilePath() with no file type install."""
self.munki._plist = {}
self.munki._ValidateInstallsFilePath()
def testValidateInstallsFilePathWhenInvalidPath(self):
"""Tests _ValidateInstallsFilePath() with invalid path."""
self.munki._plist = {
'installs': [
{'type': 'file', 'path': '\nzomg'},
],
}
self.assertRaises(
plist.InvalidPlistError, self.munki._ValidateInstallsFilePath)
def testValidateInstallsFilePathWhenMissingPath(self):
"""Tests _ValidateInstallsFilePath() with missing path."""
self.munki._plist = {
'installs': [
{'type': 'file'}, # 'path': missing!
],
}
self.assertRaises(
plist.InvalidPlistError, self.munki._ValidateInstallsFilePath)
def testValidateInstallsIsList(self):
"""Tests _ValidateInstallsFilePath() with a non-list installs value."""
self.munki._plist = {'installs': 'this is not a list!'}
self.assertRaises(
plist.InvalidPlistError, self.munki._ValidateInstallsFilePath)
def testValidateName(self):
"""Tests _ValidateName() with a valid name."""
self.munki._plist = {'name': 'fooname'}
self.munki._ValidateName()
def testValidateNameWithDash(self):
"""Tests _ValidateName() with a dash in the name."""
self.munki._plist = {'name': 'fooname-zomg'}
self.assertRaises(plist.InvalidPlistError, self.munki._ValidateName)
def testGetPackageName(self):
"""Tests the _GetPackageName()."""
name = 'foo pkg name'
self.munki._plist = {'name': name}
self.mox.ReplayAll()
self.assertEqual(name, self.munki.GetPackageName())
self.mox.VerifyAll()
def testGetPackageNameWhereNameNotInPkginfo(self):
"""Tests the _GetPackageName() where name not in pkginfo."""
self.munki._plist = {}
self.mox.ReplayAll()
self.assertRaises(plist.PlistError, self.munki.GetPackageName)
self.mox.VerifyAll()
def testGetMunkiNameWithDisplayName(self):
"""Tests the _GetMunkiName() with display_name."""
display_name = 'FooPackage'
version = '1.2.3'
munki_name = '%s-%s' % (display_name, version)
self.munki._plist = {'display_name': display_name, 'version': version}
self.mox.ReplayAll()
self.assertEqual(munki_name, self.munki.GetMunkiName())
self.mox.VerifyAll()
def testGetMunkiNameWithoutDisplayName(self):
"""Tests the _GetMunkiName() without display_name."""
name = 'FooPackage'
version = '1.2.3'
munki_name = '%s-%s' % (name, version)
self.munki._plist = {'name': name, 'version': version}
self.mox.ReplayAll()
self.assertEqual(munki_name, self.munki.GetMunkiName())
self.mox.VerifyAll()
def testGetMunkiNameWithoutVersion(self):
"""Tests the _GetMunkiName() without version."""
self.munki._plist = {'name': 'foo'}
self.mox.ReplayAll()
self.assertRaises(plist.InvalidPlistError, self.munki.GetMunkiName)
self.mox.VerifyAll()
def testGetMunkiNameWithoutNameOrDisplayName(self):
"""Tests the _GetMunkiName() without name or display_name."""
self.munki._plist = {'version': 'fooversion'}
self.mox.ReplayAll()
self.assertRaises(plist.InvalidPlistError, self.munki.GetMunkiName)
self.mox.VerifyAll()
def testSetDescription(self):
"""Test SetDescription()."""
self.munki._plist = {}
self.munki.SetDescription('foo')
self.assertEqual(self.munki._plist['description'], 'foo')
def testSetDisplayName(self):
"""Test SetDisplayName()."""
self.munki._plist = {}
self.munki.SetDisplayName('foo')
self.assertEqual(self.munki._plist['display_name'], 'foo')
def testSetUnattendedInstall(self):
"""Test SetUnattendedInstall()."""
self.munki._plist = {}
self.munki.SetUnattendedInstall(True)
self.assertTrue(self.munki._plist['unattended_install'])
self.assertTrue(self.munki._plist['forced_install'])
self.munki.SetUnattendedInstall(False)
self.assertTrue('unattended_install' not in self.munki._plist)
self.assertTrue('forced_install' not in self.munki._plist)
self.munki.SetUnattendedInstall(False)
self.assertTrue('unattended_install' not in self.munki._plist)
self.assertTrue('forced_install' not in self.munki._plist)
def testSetUnattendedUninstall(self):
"""Test SetUnattendedUninstall()."""
self.munki._plist = {}
self.munki.SetUnattendedUninstall(True)
self.assertTrue(self.munki._plist['unattended_uninstall'])
self.assertTrue(self.munki._plist['forced_uninstall'])
self.munki.SetUnattendedUninstall(False)
self.assertTrue('unattended_uninstall' not in self.munki._plist)
self.assertTrue('forced_uninstall' not in self.munki._plist)
self.munki.SetUnattendedUninstall(False)
self.assertTrue('unattended_uninstall' not in self.munki._plist)
self.assertTrue('forced_uninstall' not in self.munki._plist)
def testSetCatalogs(self):
"""Test SetCatalogs()."""
self.munki._plist = {}
# Note: here we are also testing the changed flag
self.assertFalse(self.munki._changed)
self.munki.SetCatalogs(['hi'])
self.assertEqual(self.munki._plist['catalogs'], ['hi'])
self.assertTrue(self.munki._changed)
def testHasChanged(self):
"""Test HasChanged()."""
self.munki._changed = True
self.assertTrue(self.munki.HasChanged())
self.assertFalse(self.munki._changed)
def testSetChanged(self):
"""Test SetChanged()."""
self.munki.SetChanged()
self.assertTrue(self.munki._changed)
def testSetChangedFalse(self):
"""Test SetChanged(False)."""
self.munki.SetChanged(False)
self.assertFalse(self.munki._changed)
def testSetChangedInvalid(self):
"""Test SetChanged(not bool)."""
self.assertRaises(ValueError, self.munki.SetChanged, 'zomg')
def testEq(self):
"""Test __eq__."""
other = plist.MunkiPackageInfoPlist()
other._plist = {'foo': 1}
self.munki._plist = {'foo': 1}
self.assertFalse(id(other._plist) == id(self.munki._plist))
self.assertTrue(self.munki == other)
self.assertFalse(self.munki == {'foo': 1})
self.assertFalse(self.munki == self)
other._plist = {'foo': 2}
self.assertFalse(self.munki == other)
def testContains(self):
"""Test __contains__."""
self.munki._plist = {'foo': None}
self.assertTrue('foo' in self.munki)
self.assertFalse('bar' in self.munki)
def testSetitem(self):
"""Test __setitem__."""
self.munki._plist = {}
self.assertFalse('foo' in self.munki)
self.munki['foo'] = 123
self.assertTrue('foo' in self.munki)
self.assertEqual(self.munki['foo'], 123)
self.assertTrue(self.munki._changed)
def testDelitem(self):
"""Test __delitem__."""
self.munki._plist = {'foo': True}
self.assertTrue('foo' in self.munki)
del(self.munki['foo'])
self.assertTrue('foo' not in self.munki)
def testGet(self):
"""Test get()."""
self.munki._plist = {'foo': 123}
self.assertEqual(123, self.munki.get('foo'))
self.assertEqual(None, self.munki.get('bar'))
self.assertEqual(456, self.munki.get('bar', 456))
def testSet(self):
"""Test set()."""
self.munki._plist = {'foo': 123}
self.munki.set('foo', 456)
self.assertEqual(self.munki._plist['foo'], 456)
def testEqualIgnoringManifestsAndCatalogs(self):
"""Tests EqualIgnoringManifestsAndCatalogs()."""
pkginfo = plist.MunkiPackageInfoPlist()
pkginfo._plist = {
'manifests': ['stable', 'testing'],
'catalogs': ['stable', 'testing'],
'foo': True,
}
other = plist.MunkiPackageInfoPlist()
other._plist = {
'manifests': ['unstable'], 'catalogs': ['unstable'], 'foo': True}
self.assertTrue(pkginfo.EqualIgnoringManifestsAndCatalogs(other))
def testEqualIgnoringManifestsAndCatalogsFalse(self):
"""Tests EqualIgnoringManifestsAndCatalogs() false."""
pkginfo = plist.MunkiPackageInfoPlist()
pkginfo._plist = {
'manifests': ['stable', 'testing'],
'catalogs': ['stable', 'testing'],
'foo': True,
}
other = plist.MunkiPackageInfoPlist()
other._plist = {
'manifests': ['unstable'], 'catalogs': ['unstable'], 'foo': False}
self.assertFalse(pkginfo.EqualIgnoringManifestsAndCatalogs(other))
def main(unused_argv):
basetest.main()
if __name__ == '__main__':
app.run()
|
{
"content_hash": "93d029121962694cf1dfff040c62359d",
"timestamp": "",
"source": "github",
"line_count": 1297,
"max_line_length": 111,
"avg_line_length": 32.55050115651503,
"alnum_prop": 0.6176986119664598,
"repo_name": "googlearchive/simian",
"id": "04b08d114a53272aee8279e0759af44feedb0ecb",
"size": "42840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/simian/mac/munki/plist_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "38259"
},
{
"name": "HTML",
"bytes": "97532"
},
{
"name": "JavaScript",
"bytes": "34498"
},
{
"name": "Makefile",
"bytes": "8173"
},
{
"name": "Python",
"bytes": "1422429"
},
{
"name": "Shell",
"bytes": "13277"
}
],
"symlink_target": ""
}
|
import json
from pprint import pprint
def get_duration_pitches():
segment_list = []
with open('paint_it_black.json') as data_file:
data = json.load(data_file)
segments = data["segments"]
for segment in segments:
segment_list.append((segment["duration"], segment["pitches"]))
    return segment_list
def get_duration_highest_pitch():
segment_list = []
with open('paint_it_black.json') as data_file:
data = json.load(data_file)
segments = data["segments"]
for segment in segments:
if segment["loudness_max"] > -25:
pitches_list = []
for pitch in segment["pitches"]:
if pitch > 0.5:
pitches_list.append(segment["pitches"].index(pitch))
segment_list.append((segment["start"],segment["duration"], pitches_list))
#pprint(segment_list)
return segment_list
def get_duration_real_tones():
tonelist = ["C", "C#","D", "D#","E","F","F#","G","G#","A","A#","B"]
newlist = []
    for start, duration, pitch_indexes in get_duration_highest_pitch():
        # map each pitch-class index (0-11) onto its note name
        tones = [tonelist[index] for index in pitch_indexes]
        newlist.append((start, duration, tones))
pprint(newlist)
return newlist
get_duration_real_tones()
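# A minimal sketch of the JSON shape this script expects (field names come
# from the code above; the numbers are invented for illustration):
#
#   {"segments": [
#       {"start": 0.0,
#        "duration": 0.25,
#        "loudness_max": -7.5,
#        "pitches": [0.1, 0.9, 0.2, 0.1, 0.1, 0.1,
#                    0.1, 0.6, 0.1, 0.1, 0.1, 0.1]}
#   ]}
#
# For that segment, indexes 1 and 7 exceed the 0.5 threshold, so the output
# tuple would be (0.0, 0.25, ['C#', 'G']).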
|
{
"content_hash": "76b69aa2338daf55272ed41e2d88fd21",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 77,
"avg_line_length": 25.795454545454547,
"alnum_prop": 0.6607929515418502,
"repo_name": "HackOutWest15/synthetic-music-group",
"id": "3b53f8d6c53a33c5239bc30f61be4c54036824e7",
"size": "1135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "read_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8803"
}
],
"symlink_target": ""
}
|
from sys import exit
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import tox
errno = tox.cmdline(self.test_args)
exit(errno)
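# With the cmdclass mapping passed to setup() below, ``python setup.py test``
# delegates to tox, which builds the package and runs the test suite in
# isolated virtualenvs.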
long_description = u'\n\n'.join((
open('README.rst').read(),
open('CHANGELOG').read()
))
setup(
name='dpaste',
version='2.10',
    description='dpaste is a Django-based pastebin. It\'s intended to run '
                'separately, but it\'s also possible to install it into an '
                'existing Django project like a regular app.',
long_description=long_description,
author='Martin Mahner',
author_email='martin@mahner.org',
url='https://github.com/bartTC/dpaste/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Framework :: Django',
],
packages=find_packages(),
package_data={
'dpaste': ['static/*.*', 'templates/*.*'],
'docs': ['*'],
},
include_package_data=True,
install_requires=[
'django>=1.4',
'django-mptt>=0.7.4',
'pygments>=1.6',
'requests>=2.0.0',
],
tests_require=[
'tox>=1.6.1'
],
cmdclass={
'test': Tox
},
)
|
{
"content_hash": "1ddd87a57bcb16f7cefa491afe291113",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 75,
"avg_line_length": 27.967741935483872,
"alnum_prop": 0.583044982698962,
"repo_name": "SanketDG/dpaste",
"id": "ae3eb0ad5f9d74b177e9c78378d3fabeb83e0042",
"size": "1756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7499"
},
{
"name": "HTML",
"bytes": "19216"
},
{
"name": "Nginx",
"bytes": "2813"
},
{
"name": "Python",
"bytes": "79520"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
}
|
"""Tools for working with MongoDB `ObjectIds
<http://dochub.mongodb.org/core/objectids>`_.
"""
import binascii
import calendar
import datetime
try:
import hashlib
_md5func = hashlib.md5
except ImportError: # for Python < 2.5
import md5
_md5func = md5.new
import os
import random
import socket
import struct
import threading
import time
from bson.errors import InvalidId
from bson.py3compat import (PY3, b, binary_type, text_type,
bytes_from_hex, string_types)
from bson.tz_util import utc
EMPTY = b("")
ZERO = b("\x00")
def _machine_bytes():
"""Get the machine portion of an ObjectId.
"""
machine_hash = _md5func()
if PY3:
# gethostname() returns a unicode string in python 3.x
# while update() requires a byte string.
machine_hash.update(socket.gethostname().encode())
else:
# Calling encode() here will fail with non-ascii hostnames
machine_hash.update(socket.gethostname())
return machine_hash.digest()[0:3]
def _raise_invalid_id(oid):
raise InvalidId(
"%r is not a valid ObjectId, it must be a 12-byte input"
" of type %r or a 24-character hex string" % (
oid, binary_type.__name__))
class ObjectId(object):
"""A MongoDB ObjectId.
"""
_inc = random.randint(0, 0xFFFFFF)
_inc_lock = threading.Lock()
_machine_bytes = _machine_bytes()
__slots__ = ('__id')
_type_marker = 7
def __init__(self, oid=None):
"""Initialize a new ObjectId.
An ObjectId is a 12-byte unique identifier consisting of:
- a 4-byte value representing the seconds since the Unix epoch,
- a 3-byte machine identifier,
- a 2-byte process id, and
- a 3-byte counter, starting with a random value.
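        (A small decoding sketch appended at the end of this module shows how
        these four fields can be unpacked from the binary value.)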
By default, ``ObjectId()`` creates a new unique identifier. The
optional parameter `oid` can be an :class:`ObjectId`, or any 12
:class:`bytes` or, in Python 2, any 12-character :class:`str`.
For example, the 12 bytes b'foo-bar-quux' do not follow the ObjectId
specification but they are acceptable input::
>>> ObjectId(b'foo-bar-quux')
ObjectId('666f6f2d6261722d71757578')
`oid` can also be a :class:`unicode` or :class:`str` of 24 hex digits::
>>> ObjectId('0123456789ab0123456789ab')
ObjectId('0123456789ab0123456789ab')
>>>
>>> # A u-prefixed unicode literal:
        >>> ObjectId(u'0123456789ab0123456789ab')
ObjectId('0123456789ab0123456789ab')
Raises :class:`~bson.errors.InvalidId` if `oid` is not 12 bytes nor
24 hex digits, or :class:`TypeError` if `oid` is not an accepted type.
:Parameters:
- `oid` (optional): a valid ObjectId.
.. versionadded:: 1.2.1
The `oid` parameter can be a ``unicode`` instance (that contains
24 hexadecimal digits).
.. mongodoc:: objectids
"""
if oid is None:
self.__generate()
else:
self.__validate(oid)
@classmethod
def from_datetime(cls, generation_time):
"""Create a dummy ObjectId instance with a specific generation time.
This method is useful for doing range queries on a field
containing :class:`ObjectId` instances.
.. warning::
It is not safe to insert a document containing an ObjectId
generated using this method. This method deliberately
eliminates the uniqueness guarantee that ObjectIds
generally provide. ObjectIds generated with this method
should be used exclusively in queries.
`generation_time` will be converted to UTC. Naive datetime
instances will be treated as though they already contain UTC.
An example using this helper to get documents where ``"_id"``
was generated before January 1, 2010 would be:
>>> gen_time = datetime.datetime(2010, 1, 1)
>>> dummy_id = ObjectId.from_datetime(gen_time)
>>> result = collection.find({"_id": {"$lt": dummy_id}})
:Parameters:
- `generation_time`: :class:`~datetime.datetime` to be used
as the generation time for the resulting ObjectId.
.. versionchanged:: 1.8
Properly handle timezone aware values for
`generation_time`.
.. versionadded:: 1.6
"""
if generation_time.utcoffset() is not None:
generation_time = generation_time - generation_time.utcoffset()
ts = calendar.timegm(generation_time.timetuple())
oid = struct.pack(">i", int(ts)) + ZERO * 8
return cls(oid)
@classmethod
def is_valid(cls, oid):
"""Checks if a `oid` string is valid or not.
:Parameters:
- `oid`: the object id to validate
.. versionadded:: 2.3
"""
if not oid:
return False
try:
ObjectId(oid)
return True
except (InvalidId, TypeError):
return False
def __generate(self):
"""Generate a new value for this ObjectId.
"""
oid = EMPTY
# 4 bytes current time
oid += struct.pack(">i", int(time.time()))
# 3 bytes machine
oid += ObjectId._machine_bytes
# 2 bytes pid
oid += struct.pack(">H", os.getpid() % 0xFFFF)
# 3 bytes inc
ObjectId._inc_lock.acquire()
oid += struct.pack(">i", ObjectId._inc)[1:4]
ObjectId._inc = (ObjectId._inc + 1) % 0xFFFFFF
ObjectId._inc_lock.release()
self.__id = oid
def __validate(self, oid):
"""Validate and use the given id for this ObjectId.
Raises TypeError if id is not an instance of
(:class:`basestring` (:class:`str` or :class:`bytes`
in python 3), ObjectId) and InvalidId if it is not a
valid ObjectId.
:Parameters:
- `oid`: a valid ObjectId
"""
if isinstance(oid, ObjectId):
self.__id = oid.__id
elif isinstance(oid, string_types):
if len(oid) == 12:
if isinstance(oid, binary_type):
self.__id = oid
else:
_raise_invalid_id(oid)
elif len(oid) == 24:
try:
self.__id = bytes_from_hex(oid)
except (TypeError, ValueError):
_raise_invalid_id(oid)
else:
_raise_invalid_id(oid)
else:
raise TypeError("id must be an instance of (%s, %s, ObjectId), "
"not %s" % (binary_type.__name__,
text_type.__name__, type(oid)))
@property
def binary(self):
"""12-byte binary representation of this ObjectId.
"""
return self.__id
@property
def generation_time(self):
"""A :class:`datetime.datetime` instance representing the time of
generation for this :class:`ObjectId`.
The :class:`datetime.datetime` is timezone aware, and
represents the generation time in UTC. It is precise to the
second.
.. versionchanged:: 1.8
Now return an aware datetime instead of a naive one.
.. versionadded:: 1.2
"""
t = struct.unpack(">i", self.__id[0:4])[0]
return datetime.datetime.fromtimestamp(t, utc)
def __getstate__(self):
"""return value of object for pickling.
needed explicitly because __slots__() defined.
"""
return self.__id
def __setstate__(self, value):
"""explicit state set from pickling
"""
        # Provide backwards compatibility with OIDs
# pickled with pymongo-1.9 or older.
if isinstance(value, dict):
oid = value["_ObjectId__id"]
else:
oid = value
# ObjectIds pickled in python 2.x used `str` for __id.
# In python 3.x this has to be converted to `bytes`
# by encoding latin-1.
if PY3 and isinstance(oid, text_type):
self.__id = oid.encode('latin-1')
else:
self.__id = oid
def __str__(self):
if PY3:
return binascii.hexlify(self.__id).decode()
return binascii.hexlify(self.__id)
def __repr__(self):
return "ObjectId('%s')" % (str(self),)
def __eq__(self, other):
if isinstance(other, ObjectId):
return self.__id == other.__id
return NotImplemented
def __ne__(self, other):
if isinstance(other, ObjectId):
return self.__id != other.__id
return NotImplemented
def __lt__(self, other):
if isinstance(other, ObjectId):
return self.__id < other.__id
return NotImplemented
def __le__(self, other):
if isinstance(other, ObjectId):
return self.__id <= other.__id
return NotImplemented
def __gt__(self, other):
if isinstance(other, ObjectId):
return self.__id > other.__id
return NotImplemented
def __ge__(self, other):
if isinstance(other, ObjectId):
return self.__id >= other.__id
return NotImplemented
def __hash__(self):
"""Get a hash value for this :class:`ObjectId`.
.. versionadded:: 1.1
"""
return hash(self.__id)
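# A minimal decoding sketch (a hypothetical helper, not part of the original
# module): it unpacks the four fields described in ObjectId.__init__'s
# docstring straight from the 12-byte binary value, mirroring __generate().
def _decode_objectid_fields(oid):
    """Return (generation_time, machine_hex, pid, counter) for `oid`."""
    raw = ObjectId(oid).binary
    timestamp = struct.unpack(">i", raw[0:4])[0]        # 4-byte timestamp
    machine_hex = binascii.hexlify(raw[4:7])            # 3-byte machine id
    pid = struct.unpack(">H", raw[7:9])[0]              # 2-byte process id
    counter = struct.unpack(">i", ZERO + raw[9:12])[0]  # 3-byte counter
    return (datetime.datetime.fromtimestamp(timestamp, utc),
            machine_hex, pid, counter)
# Usage: _decode_objectid_fields(ObjectId()) returns a 4-tuple whose first
# element equals the ObjectId's generation_time property.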
|
{
"content_hash": "7b82cc544cb1452c758020e550cc213c",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 79,
"avg_line_length": 30.545161290322582,
"alnum_prop": 0.5676417784348928,
"repo_name": "initNirvana/Easyphotos",
"id": "bb38497a2a28ce324462fe80113d6dbc461349a7",
"size": "10048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python3.4/site-packages/bson/objectid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "13653"
},
{
"name": "HTML",
"bytes": "129191"
},
{
"name": "JavaScript",
"bytes": "1401324"
},
{
"name": "Python",
"bytes": "11874458"
},
{
"name": "Shell",
"bytes": "3668"
},
{
"name": "Smarty",
"bytes": "21402"
}
],
"symlink_target": ""
}
|
from django.db import models
from core.utils import create_slug
from core.models import BaseModel
from payments.models import Customer
class ReactionEvent(BaseModel):
customer = models.ForeignKey(Customer)
name = models.CharField(max_length=255)
url = models.CharField(max_length=2048, blank=True, null=True)
location = models.CharField(max_length=128, blank=True, null=True)
slug = models.CharField(max_length=255, unique=True)
phone_number = models.CharField(max_length=100, null=True, blank=True)
event_date = models.DateField()
def save(self, *args, **kwargs):
if not self.slug or self.slug == '':
self.slug = create_slug(ReactionEvent, unicode(self))
super(ReactionEvent, self).save(*args, **kwargs)
def __unicode__(self):
return "%s on %s" % (self.name, str(self.event_date))
class Reaction(BaseModel):
event = models.ForeignKey(ReactionEvent)
slug = models.CharField(max_length=255, unique=True)
phone_number = models.CharField(max_length=128)
message = models.CharField(max_length=1024)
received_timestamp = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
if not self.slug or self.slug == '':
self.slug = create_slug(Reaction, unicode(self))
super(Reaction, self).save(*args, **kwargs)
def __unicode__(self):
return "%s" % self.message
|
{
"content_hash": "cbd43e648aee1efd0f25d6c58442e727",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 36.38461538461539,
"alnum_prop": 0.6730091613812544,
"repo_name": "makaimc/txt2react",
"id": "12ee2e4ea6640d81a09e883cca5f2e5f5361536b",
"size": "1419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reactions/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "173651"
},
{
"name": "JavaScript",
"bytes": "23283"
},
{
"name": "Python",
"bytes": "28690"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from tenancy.models import Tenant, TenantGroup
#
# Tenant groups
#
class TenantGroupSerializer(serializers.ModelSerializer):
class Meta:
model = TenantGroup
fields = ['id', 'name', 'slug']
class TenantGroupNestedSerializer(TenantGroupSerializer):
class Meta(TenantGroupSerializer.Meta):
pass
#
# Tenants
#
class TenantSerializer(serializers.ModelSerializer):
group = TenantGroupNestedSerializer()
class Meta:
model = Tenant
fields = ['id', 'name', 'slug', 'group', 'comments']
class TenantNestedSerializer(TenantSerializer):
class Meta(TenantSerializer.Meta):
fields = ['id', 'name', 'slug']
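# Illustrative response shape for TenantSerializer (values invented):
#
#   {
#       "id": 7,
#       "name": "Example Corp",
#       "slug": "example-corp",
#       "group": {"id": 2, "name": "Customers", "slug": "customers"},
#       "comments": ""
#   }
#
# Declaring group = TenantGroupNestedSerializer() renders the related group
# inline instead of as a bare primary key.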
|
{
"content_hash": "ec7141ed9fdd8ee1a55db10872209411",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 60,
"avg_line_length": 18.789473684210527,
"alnum_prop": 0.6904761904761905,
"repo_name": "rfdrake/netbox",
"id": "30a4a3ca1a80ca98f211dba5018f340ba20fe9fc",
"size": "714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netbox/tenancy/api/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "157535"
},
{
"name": "HTML",
"bytes": "328897"
},
{
"name": "JavaScript",
"bytes": "12423"
},
{
"name": "Nginx",
"bytes": "774"
},
{
"name": "Python",
"bytes": "593223"
},
{
"name": "Shell",
"bytes": "3080"
}
],
"symlink_target": ""
}
|
import os
import datetime
import re
import codecs
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.conf import settings
from sqp import models as sqp_models
class Migration(DataMigration):
def forwards(self, orm):
try:
sql = 'ALTER TABLE sqp_item DROP INDEX unique_name;'
db.execute_many(sql)
print "unique_name index dropped"
except:
print "unique_name index not dropped (most likely already deleted)"
log_text = ''
Q_BASE_DIR = settings.PROJECT_DIR + '/data/questions_jorge/'
files = []
r,d,files = os.walk(Q_BASE_DIR).next()
#looking for russian A and B chars
item_regex = re.compile(ur'^(P\.)?[A-ZАВ]{1}[0-9]{1,3}([A-Za-z]{1,3})?(\.)?$')
text_area_regex = re.compile(ur'\{[A-Z]+\}')
q_regex = re.compile(ur'Q{1}[0-9]{1,4}')
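        # Sketch of the per-file layout the regexes above expect (the key
        # "B1" and all text are illustrative). Lines after a key default to
        # the QUESTION area until a {MARKER} line switches it; {ANSWERS 1}
        # is folded into ANSWERS and {ANSWERS 2} is skipped:
        #
        #   B1
        #   {INTRO}
        #   ...introduction text...
        #   {QUESTION}
        #   ...request-for-answer text...
        #   {ANSWERS}
        #   ...answer categories...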
for file_name in files:
file_log_text = []
CREATED_ITEMS = 0
CREATED_QUESTIONS = 0
EDITED_QUESTIONS = 0
NOT_EDITED = 0
SKIPPED_AREAS = 0
IMPORTED_LINES = 0
SKIPPED_LINES = []
#utf-8-sig to get rid of the utf-8 BOM /ufeff
#http://stackoverflow.com/questions/9228202/tokenizing-unicode-using-nltk
file = codecs.open(Q_BASE_DIR + file_name, "r", "utf-8-sig")
            if '.txt' not in file_name:
continue
print "NOW CHECKING file %s" % file.name
round_name, country_iso, language_iso = file_name.replace('.txt', '').split('_')
language = sqp_models.Language.objects.get(iso=language_iso)
country = sqp_models.Country.objects.get(iso=country_iso)
round_name = round_name.replace('ESS', 'ESS Round ')
study = sqp_models.Study.objects.get(name=round_name)
key = None
questions = {}
text_areas = ['INTRO',
'QUESTION',
'ANSWERS',
'TRASH']
line_number = 0
for line in file:
line_number += 1
#Get rid of any Q13 Q12 crap
if q_regex.match(line):
line = re.sub(q_regex, '', line).strip()
key = None
if item_regex.match(line.strip()):
key = item_regex.match(line.strip()).group(0)
#russian chars
key = key.replace(u'\u0410', 'A')
key = key.replace(u'\u0412', 'B')
#P.
key = key.replace('P.', '')
key = key.replace(' ', '')
#Trailing .
key = key.replace('.', '')
questions[key] = {'INTRO' : '',
'QUESTION' : '',
'ANSWERS' : '',
'found_text_areas' : []
}
current_text_area = 'QUESTION'
continue
elif key and text_area_regex.match(line):
match = text_area_regex.match(line).group(0)
current_text_area = match.replace('{', '').replace('}', '')
if current_text_area == 'ANSWERS 1':
                        current_text_area = 'ANSWERS'
elif current_text_area == 'ANSWERS 2':
SKIPPED_AREAS += 1
continue
if current_text_area in questions[key]['found_text_areas']:
current_text_area = 'TRASH'
else:
questions[key]['found_text_areas'].append(current_text_area)
if current_text_area not in text_areas:
raise Exception('Unrecognized text area "%s"' % current_text_area)
continue
#Only take the first occurence of QUESTION / INTRO / ANSWERS
if key and current_text_area != 'TRASH':
questions[key][current_text_area] += line
IMPORTED_LINES += 1
elif line.strip() != '':
SKIPPED_LINES.append({'line_number' : line_number,
'content': line})
n = 0
for key in questions:
                n += 1
#if n > 10:break
#print "NOW SAVING question %s" % key
try:
item, i_was_created = sqp_models.Item.objects.get_or_create(admin=key, study=study)
if i_was_created:
CREATED_ITEMS += 1
except:
                    print '!!!!!!!!!!BAD KEY!!!!!!!!!!!!!!! %s' % key
                    raise  # re-raise the original exception with its traceback
question, q_was_created = sqp_models.Question.objects.get_or_create(item=item, country=country, language=language)
if q_was_created:
CREATED_QUESTIONS += 1
if question.rfa_text or question.introduction_text or question.answer_text:
NOT_EDITED += 1
else:
question.introduction_text = questions[key]['INTRO'].strip()
question.rfa_text = questions[key]['QUESTION'].strip()
question.answer_text = questions[key]['ANSWERS'].strip()
if q_was_created:
question.imported_from = 'jorge-created'
else:
question.imported_from = 'jorge-existing'
question.save(create_suggestions = False)
EDITED_QUESTIONS += 1
file_log_text.append('%s %s %s new items:%s, total qs:%s, created qs:%s, edited qs:%s, not edited qs:%s, skipped keys:%s' % \
(country_iso, language_iso, round_name,
CREATED_ITEMS, len(questions), CREATED_QUESTIONS, EDITED_QUESTIONS, NOT_EDITED, SKIPPED_AREAS))
file_log_text.append('LINES SKIPPED %s / IMPORTED %s' % (len(SKIPPED_LINES), IMPORTED_LINES))
if SKIPPED_LINES:
file_log_text.append('SKIPPED_LINES')
for l in SKIPPED_LINES:
file_log_text.append(' %s: %s' % (l['line_number'], l['content'].replace('\n', '')))
file_log_text.append('IMPORTED ITEMS: %s' % ','.join(questions.keys()))
file_log_text.append('------------------------------------------------------------------------')
print '\n'.join(file_log_text)
print
log_text += '\n'.join(file_log_text) + '\n\n\n'
log_file = codecs.open('/tmp/jorge_import.log', 'w', "utf-8-sig")
log_file.write(log_text)
log_file.close()
print "LOG STORED AT '/tmp/jorge_import.log'"
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sqp.branch': {
'Meta': {'ordering': "('label__characteristic__name', 'label__id')", 'object_name': 'Branch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Label']"}),
'to_characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"})
},
'sqp.characteristic': {
'Meta': {'ordering': "['name']", 'object_name': 'Characteristic'},
'auto_fill_suggestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'desc': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'suggestion': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'validation_rules': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sqp.ValidationRule']", 'null': 'True', 'blank': 'True'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Widget']"})
},
'sqp.characteristicset': {
'Meta': {'ordering': "['id']", 'object_name': 'CharacteristicSet'},
'branches': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.Branch']", 'symmetrical': 'False'}),
'coders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sqp.coding': {
'Meta': {'ordering': "['user', 'characteristic']", 'object_name': 'Coding'},
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'choice': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'seconds_taken': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'sqp.codingchange': {
'Meta': {'object_name': 'CodingChange'},
'change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'change_by_user_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'change_type': ('django.db.models.fields.IntegerField', [], {}),
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'coding_change_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.CodingChangeGroup']"}),
'coding_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'coding_user_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'error_occured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_value': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'new_value_by_related_country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']", 'null': 'True', 'blank': 'True'}),
'new_value_by_related_lang': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']", 'null': 'True', 'blank': 'True'}),
'processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processing_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'question_id': ('django.db.models.fields.IntegerField', [], {})
},
'sqp.codingchangegroup': {
'Meta': {'ordering': "['id']", 'object_name': 'CodingChangeGroup'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sqp.codingsuggestion': {
'Meta': {'object_name': 'CodingSuggestion'},
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'explanation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'sqp.completion': {
'Meta': {'object_name': 'Completion'},
'authorized': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'characteristic_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.CharacteristicSet']"}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'out_of_date': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'potential_improvements': ('sqp.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
'predictions': ('sqp.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'sqp.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_three': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'sqp.faq': {
'Meta': {'object_name': 'FAQ'},
'answer': ('django.db.models.fields.TextField', [], {}),
'asker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.TextField', [], {})
},
'sqp.history': {
'Meta': {'object_name': 'History'},
'action_description': ('django.db.models.fields.TextField', [], {}),
'action_type': ('django.db.models.fields.IntegerField', [], {}),
'actor': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {}),
'object_model': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '170'}),
'previous_values': ('django.db.models.fields.TextField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'sqp.item': {
'Meta': {'ordering': "('study', 'admin_letter', 'admin_number', 'id')", 'object_name': 'Item'},
'admin': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'admin_letter': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'admin_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_item_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Study']"})
},
'sqp.itemgroup': {
'Meta': {'object_name': 'ItemGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.Item']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'sqp.label': {
'Meta': {'ordering': "('characteristic__name', 'id')", 'object_name': 'Label'},
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'compute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'True'", 'max_length': '150'})
},
'sqp.language': {
'Meta': {'ordering': "('name',)", 'object_name': 'Language'},
'coders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'iso2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sqp.parameter': {
'Meta': {'ordering': "['order']", 'object_name': 'Parameter'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.View']", 'through': "orm['sqp.Prediction']", 'symmetrical': 'False'})
},
'sqp.prediction': {
'Meta': {'object_name': 'Prediction'},
'function_name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'paramater': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Parameter']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.View']"})
},
'sqp.question': {
'Meta': {'ordering': "('item__study', 'country', 'language', 'item__admin_letter', 'item__admin_number', 'item__id')", 'object_name': 'Question'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_question_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported_from': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'introduction_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Item']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']"}),
'rel': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rel_hi': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rel_lo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'relz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'relz_se': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rfa_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'val': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'val_hi': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'val_lo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'valz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'valz_se': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'sqp.questionbulkassignments': {
'Meta': {'object_name': 'QuestionBulkAssignments'},
'assignments': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.UserQuestion']", 'symmetrical': 'False', 'blank': 'True'}),
'can_edit_details': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_edit_text': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']", 'null': 'True'}),
'has_been_run': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.ItemGroup']", 'null': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']", 'null': 'True'}),
'last_run_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'})
},
'sqp.questionbulkcreation': {
'Meta': {'object_name': 'QuestionBulkCreation'},
'copy_text_from_study': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Study']", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']"}),
'created_questions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.Question']", 'symmetrical': 'False', 'blank': 'True'}),
'has_been_run': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.ItemGroup']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']"}),
'last_run_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'sqp.study': {
'Meta': {'ordering': "('name',)", 'object_name': 'Study'},
'coders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_study_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'})
},
'sqp.usedcountry': {
'Meta': {'ordering': "['name']", 'object_name': 'UsedCountry', 'db_table': "'vw_country_question'"},
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'sqp.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'default_characteristic_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.CharacteristicSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trusted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'sqp.userquestion': {
'Meta': {'object_name': 'UserQuestion'},
'can_edit_details': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_edit_text': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'sqp.validationrule': {
'Meta': {'object_name': 'ValidationRule'},
'failure_message': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'rule': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '7'})
},
'sqp.view': {
'Meta': {'ordering': "['order']", 'object_name': 'View'},
'expects': ('django.db.models.fields.CharField', [], {'default': "'tuple'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '140'})
},
'sqp.widget': {
'Meta': {'object_name': 'Widget'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['sqp']
|
{
"content_hash": "36cb6b59102949c2b5e5d6205eff6d8e",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 182,
"avg_line_length": 64.91489361702128,
"alnum_prop": 0.5191412651589643,
"repo_name": "recsm/SQP",
"id": "2e4b0776aa562371b0b5e9a2d2fded13ab91c59c",
"size": "30530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqp/migrations/0044_import_questions_jorge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "284413"
},
{
"name": "HTML",
"bytes": "581366"
},
{
"name": "JavaScript",
"bytes": "583584"
},
{
"name": "Makefile",
"bytes": "1104"
},
{
"name": "Python",
"bytes": "2144259"
},
{
"name": "Shell",
"bytes": "221"
}
],
"symlink_target": ""
}
|
import xbmcaddon
import notification
import xbmc
import source
class Service(object):
def __init__(self):
self.database = source.Database()
self.database.initialize(self.onInit)
def onInit(self, success):
if success:
self.database.updateChannelAndProgramListCaches(self.onCachesUpdated)
else:
self.database.close()
def onCachesUpdated(self):
if ADDON.getSetting('notifications.enabled') == 'true':
n = notification.Notification(self.database, ADDON.getAddonInfo('path'))
n.scheduleNotifications()
self.database.close(None)
try:
ADDON = xbmcaddon.Addon(id = 'script.tvguide')
if ADDON.getSetting('cache.data.on.xbmc.startup') == 'true':
Service()
except source.SourceNotConfiguredException:
pass # ignore
except Exception, ex:
    xbmc.log('[script.tvguide] Uncaught exception in service.py: %s' % str(ex), xbmc.LOGDEBUG)
|
{
"content_hash": "f3646d2d8e7a5f975280a9d3ac830d42",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 94,
"avg_line_length": 29,
"alnum_prop": 0.6677115987460815,
"repo_name": "aplicatii-romanesti/allinclusive-kodi-pi",
"id": "31b88eb95b8876cbd6c3ca3dfe7435e6adff0904",
"size": "1774",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".kodi/addons/script.tvguide/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6178"
},
{
"name": "Python",
"bytes": "8657978"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
}
|
import time
from datetime import timedelta, datetime
from logging import getLogger
from .compat import TO_UNICODE
from .constants import is_timestamp_type_name
from .converter import (SnowflakeConverter, ZERO_EPOCH)
from .sfbinaryformat import (binary_to_python, SnowflakeBinaryFormat)
from .sfdatetime import (SnowflakeDateTimeFormat, SnowflakeDateTime)
logger = getLogger(__name__)
def format_sftimestamp(fmt, value, fraction_of_nanoseconds):
    sf_datetime = SnowflakeDateTime(value, nanosecond=fraction_of_nanoseconds)
return fmt.format(sf_datetime) if fmt else TO_UNICODE(sf_datetime)
class SnowflakeConverterSnowSQL(SnowflakeConverter):
"""
Snowflake Converter for SnowSQL.
Format data instead of just converting the values into native
Python objects.
"""
def __init__(self, **kwargs):
super(SnowflakeConverterSnowSQL, self).__init__(
use_sfbinaryformat=True)
logger.info('initialized')
def _get_format(self, type_name):
"""
Gets the format
"""
fmt = None
if type_name == u'DATE':
fmt = self._parameters.get(u'DATE_OUTPUT_FORMAT')
if not fmt:
fmt = u'YYYY-MM-DD'
elif type_name == u'TIME':
fmt = self._parameters.get(u'TIME_OUTPUT_FORMAT')
elif type_name + u'_OUTPUT_FORMAT' in self._parameters:
fmt = self._parameters[type_name + u'_OUTPUT_FORMAT']
if not fmt:
fmt = self._parameters[u'TIMESTAMP_OUTPUT_FORMAT']
elif type_name == u'BINARY':
fmt = self._parameters.get(u'BINARY_OUTPUT_FORMAT')
return fmt
#
# FROM Snowflake to Python objects
#
def to_python_method(self, type_name, column):
ctx = column.copy()
if ctx.get('scale'):
ctx['max_fraction'] = int(10 ** ctx['scale'])
ctx['zero_fill'] = '0' * (9 - ctx['scale'])
fmt = None
if is_timestamp_type_name(type_name):
fmt = SnowflakeDateTimeFormat(
self._get_format(type_name),
datetime_class=SnowflakeDateTime)
elif type_name == u'BINARY':
fmt = SnowflakeBinaryFormat(self._get_format(type_name))
logger.debug('Type: %s, Format: %s', type_name, fmt)
ctx['fmt'] = fmt
converters = [u'_{type_name}_to_python'.format(type_name=type_name)]
for conv in converters:
try:
return getattr(self, conv)(ctx)
except AttributeError:
pass
self.logger.warn("No column converter found for type: %s", type_name)
return None # Skip conversion
def _BOOLEAN_to_python(self, ctx):
"""
No conversion for SnowSQL
"""
return lambda value: "True" if value in (u'1', u"True") else u"False"
def _FIXED_to_python(self, ctx):
"""
No conversion for SnowSQL
"""
return None
def _REAL_to_python(self, ctx):
"""
No conversion for SnowSQL
"""
return None
def _BINARY_to_python(self, ctx):
"""
BINARY to a string formatted by BINARY_OUTPUT_FORMAT
"""
return lambda value: ctx['fmt'].format(binary_to_python(value))
def _DATE_to_python(self, ctx):
"""
DATE to datetime
No timezone is attached.
"""
def conv(value):
t = datetime.utcfromtimestamp(int(value) * 86400).date()
return ctx['fmt'].format(SnowflakeDateTime(t, nanosecond=0))
return conv
def _TIMESTAMP_TZ_to_python(self, ctx):
"""
TIMESTAMP TZ to datetime
The timezone offset is piggybacked.
"""
scale = ctx['scale']
max_fraction = ctx.get('max_fraction')
def conv0(encoded_value):
value, tz = encoded_value.split()
microseconds = float(value)
tzinfo = SnowflakeConverter._generate_tzinfo_from_tzoffset(
int(tz) - 1440)
t = datetime.fromtimestamp(microseconds, tz=tzinfo)
if scale == 0:
fraction_of_nanoseconds = 0
else:
fraction_of_nanoseconds = int(value[-scale:])
if value[0] == '-':
fraction_of_nanoseconds = max_fraction - fraction_of_nanoseconds
return format_sftimestamp(ctx['fmt'], t, fraction_of_nanoseconds)
def conv(encoded_value):
value, tz = encoded_value.split()
microseconds = float(value[0:-scale + 6])
tzinfo = SnowflakeConverter._generate_tzinfo_from_tzoffset(
int(tz) - 1440)
t = datetime.fromtimestamp(microseconds, tz=tzinfo)
if scale == 0:
fraction_of_nanoseconds = 0
else:
fraction_of_nanoseconds = int(value[-scale:])
if value[0] == '-':
fraction_of_nanoseconds = max_fraction - fraction_of_nanoseconds
return format_sftimestamp(ctx['fmt'], t, fraction_of_nanoseconds)
return conv if scale > 6 else conv0
def _TIMESTAMP_LTZ_to_python(self, ctx):
def conv(value):
t, _, fraction_of_nanoseconds = self._pre_TIMESTAMP_LTZ_to_python(
value, ctx)
return format_sftimestamp(ctx['fmt'], t, fraction_of_nanoseconds)
return conv
def _TIMESTAMP_NTZ_to_python(self, ctx):
"""
TIMESTAMP NTZ to Snowflake Formatted String
No timezone info is attached.
"""
def conv(value):
microseconds, _, fraction_of_nanoseconds = \
self._extract_timestamp(value, ctx)
try:
t = ZERO_EPOCH + timedelta(seconds=(microseconds))
except OverflowError:
self.logger.debug(
"OverflowError in converting from epoch time to datetime:"
" %s(ms). Falling back to use struct_time.",
microseconds)
t = time.gmtime(microseconds)
return format_sftimestamp(ctx['fmt'], t, fraction_of_nanoseconds)
return conv
_TIME_to_python = _TIMESTAMP_NTZ_to_python
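# Usage sketch (illustrative only, not part of the original module; how
# self._parameters gets populated is an assumption here, as is the output
# format name, which follows the '<TYPE>_OUTPUT_FORMAT' lookup above):
#   converter = SnowflakeConverterSnowSQL()
#   converter._parameters = {
#       u'TIMESTAMP_NTZ_OUTPUT_FORMAT': u'YYYY-MM-DD HH24:MI:SS'}
#   conv = converter.to_python_method('TIMESTAMP_NTZ', {'scale': 0})
#   conv(u'1484400000')  # epoch seconds in, formatted timestamp string out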
|
{
"content_hash": "d00fc2cd522674db00c9d40a7898d343",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 84,
"avg_line_length": 33.58064516129032,
"alnum_prop": 0.5726865193723983,
"repo_name": "mayfield/snowflake-connector-python",
"id": "bab90cba26cf0efddeae5259b2ed5d0338654573",
"size": "6368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "converter_snowsql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "617787"
}
],
"symlink_target": ""
}
|
"""
Hello world AWS lambda function
"""
def handler(event, context):
message = 'Hello World!'
return { 'message' : message }
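# Illustrative local smoke test (not part of the deployed function); the empty
# event dict and None context are assumptions, since handler() ignores both.
if __name__ == '__main__':
    print(handler({}, None))  # -> {'message': 'Hello World!'}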
|
{
"content_hash": "1e43d8ee0eebc7fe8339bbabf07f53f0",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 36,
"avg_line_length": 19.428571428571427,
"alnum_prop": 0.6323529411764706,
"repo_name": "xmementoit/practiseSamples",
"id": "3da56190b66ee71f387c1d24ec0b45795b04f3c1",
"size": "201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aws/awsLambda/src/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "45184"
},
{
"name": "C++",
"bytes": "112972"
},
{
"name": "CMake",
"bytes": "36339"
},
{
"name": "Dockerfile",
"bytes": "2070"
},
{
"name": "Elixir",
"bytes": "11390"
},
{
"name": "HCL",
"bytes": "3797"
},
{
"name": "HTML",
"bytes": "5012"
},
{
"name": "Java",
"bytes": "50515"
},
{
"name": "JavaScript",
"bytes": "4310"
},
{
"name": "Makefile",
"bytes": "246247"
},
{
"name": "Puppet",
"bytes": "11017"
},
{
"name": "Python",
"bytes": "89102"
},
{
"name": "QML",
"bytes": "6620"
},
{
"name": "QMake",
"bytes": "3547"
},
{
"name": "Ruby",
"bytes": "324"
},
{
"name": "Scala",
"bytes": "60"
},
{
"name": "Shell",
"bytes": "14949"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import hashlib
from pathlib import Path
if __name__ not in ("__main__", "__mp_main__"):
raise SystemExit(
"This file is intended to be executed as an executable program. You cannot use it as a module."
f"To execute this script, run ./{__file__} [FILE] ..."
)
AIRFLOW_SOURCES_ROOT = Path(__file__).parents[3].resolve()
BREEZE_SOURCES_ROOT = AIRFLOW_SOURCES_ROOT / "dev" / "breeze"
def get_package_setup_metadata_hash() -> str:
"""
    Retrieve the hash of the setup.py, setup.cfg, and pyproject.toml files.
    This is used to determine whether we need to upgrade Breeze because some
    setup files changed. The blake2b algorithm will not be flagged by security
    checkers as insecure (in Python 3.9 and above we could pass
    `usedforsecurity=False` to silence such checks, but for now it is better
    to simply use a more secure algorithm).
"""
try:
the_hash = hashlib.new("blake2b")
the_hash.update((BREEZE_SOURCES_ROOT / "setup.py").read_bytes())
the_hash.update((BREEZE_SOURCES_ROOT / "setup.cfg").read_bytes())
the_hash.update((BREEZE_SOURCES_ROOT / "pyproject.toml").read_bytes())
return the_hash.hexdigest()
except FileNotFoundError as e:
return f"Missing file {e.filename}"
def process_breeze_readme():
breeze_readme = BREEZE_SOURCES_ROOT / "README.md"
lines = breeze_readme.read_text().splitlines(keepends=True)
result_lines = []
for line in lines:
if line.startswith("Package config hash:"):
line = f"Package config hash: {get_package_setup_metadata_hash()}\n"
result_lines.append(line)
breeze_readme.write_text("".join(result_lines))
if __name__ == "__main__":
process_breeze_readme()
|
{
"content_hash": "d22239de9dad59e06393185dfd2a9ffb",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 103,
"avg_line_length": 37.170212765957444,
"alnum_prop": 0.659988551803091,
"repo_name": "nathanielvarona/airflow",
"id": "ffa368477c563e2cd382b2a537d00da291cf6a32",
"size": "2554",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/ci/pre_commit/pre_commit_update_breeze_config_hash.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from rest.models import *
class CourtSerializer(serializers.Serializer):
class Meta:
resource_name = 'courts'
class CitationSerializer(serializers.ModelSerializer):
class Meta:
model = Citations
resource_name = 'citations'
class ViolationSerializer(serializers.ModelSerializer):
class Meta:
model = Violations
resource_name = 'violations'
class WarrantSerializer(serializers.ModelSerializer):
class Meta:
model = Warrants
resource_name = 'warrants'
class CitationViolationSerializer(serializers.ModelSerializer):
violations = ViolationSerializer(many=True, read_only=True)
class Meta:
model = Citations
resource_name = 'citations'
fields = ('citation_number', 'citation_date', 'first_name', 'last_name', 'date_of_birth', 'defendant_address', 'defendant_city', 'defendant_state', 'drivers_license_number', 'court_date', 'court_location', 'court_address', 'violations')
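# Illustrative note (not part of the original module): because 'violations' is
# declared above with many=True and read_only=True, serializing a Citations
# instance nests the full list of related violation records inside each
# citation, rather than just their primary keys.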
|
{
"content_hash": "bf40776656787f4e7bfb1ab40044e8da",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 244,
"avg_line_length": 35.06896551724138,
"alnum_prop": 0.7069813176007866,
"repo_name": "xHeliotrope/injustice_dropper",
"id": "c6b6ac2762724e254dc02c93ffb98db73060ec6b",
"size": "1017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "injustice/rest/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44276"
},
{
"name": "HTML",
"bytes": "119284"
},
{
"name": "JavaScript",
"bytes": "106617"
},
{
"name": "Python",
"bytes": "27418152"
},
{
"name": "Shell",
"bytes": "3684"
}
],
"symlink_target": ""
}
|
"""
PlugIn for Nexus OS driver
"""
import logging
from quantum.common import exceptions as exc
from quantum.openstack.common import importutils
from quantum.plugins.cisco.common import cisco_constants as const
from quantum.plugins.cisco.common import cisco_credentials_v2 as cred
from quantum.plugins.cisco.common import cisco_exceptions as cisco_exc
from quantum.plugins.cisco.common import config as conf
from quantum.plugins.cisco.db import network_db_v2 as cdb
from quantum.plugins.cisco.db import nexus_db_v2 as nxos_db
from quantum.plugins.cisco.l2device_plugin_base import L2DevicePluginBase
LOG = logging.getLogger(__name__)
class NexusPlugin(L2DevicePluginBase):
"""Nexus PlugIn Main Class."""
_networks = {}
def __init__(self):
"""Extract configuration parameters from the configuration file."""
self._client = importutils.import_object(conf.CISCO.nexus_driver)
LOG.debug(_("Loaded driver %s"), conf.CISCO.nexus_driver)
self._nexus_switches = conf.get_nexus_dictionary()
self.credentials = {}
def get_credential(self, nexus_ip):
if nexus_ip not in self.credentials:
_nexus_username = cred.Store.get_username(nexus_ip)
_nexus_password = cred.Store.get_password(nexus_ip)
self.credentials[nexus_ip] = {
'username': _nexus_username,
'password': _nexus_password
}
return self.credentials[nexus_ip]
def get_all_networks(self, tenant_id):
"""Get all networks.
Returns a dictionary containing all <network_uuid, network_name> for
the specified tenant.
"""
LOG.debug(_("NexusPlugin:get_all_networks() called"))
return self._networks.values()
def create_network(self, tenant_id, net_name, net_id, vlan_name, vlan_id,
host, instance):
"""Create network.
Create a VLAN in the appropriate switch/port, and configure the
appropriate interfaces for this VLAN.
"""
LOG.debug(_("NexusPlugin:create_network() called"))
# Grab the switch IP and port for this host
for switch_ip, attr in self._nexus_switches:
if str(attr) == str(host):
port_id = self._nexus_switches[switch_ip, attr]
break
else:
raise cisco_exc.NexusComputeHostNotConfigured(host=host)
# Check if this network is already in the DB
binding = nxos_db.get_port_vlan_switch_binding(
port_id, vlan_id, switch_ip)
vlan_created = False
vlan_enabled = False
if not binding:
_nexus_ip = switch_ip
_nexus_ports = (port_id,)
_nexus_ssh_port = \
self._nexus_switches[switch_ip, 'ssh_port']
_nexus_creds = self.get_credential(_nexus_ip)
_nexus_username = _nexus_creds['username']
_nexus_password = _nexus_creds['password']
# Check for vlan/switch binding
vbinding = nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)
if not vbinding:
# Create vlan and trunk vlan on the port
self._client.create_vlan(
vlan_name, str(vlan_id), _nexus_ip,
_nexus_username, _nexus_password,
_nexus_ports, _nexus_ssh_port, vlan_id)
vlan_created = True
else:
# Only trunk vlan on the port
man = self._client.nxos_connect(_nexus_ip,
int(_nexus_ssh_port),
_nexus_username,
_nexus_password)
self._client.enable_vlan_on_trunk_int(man,
port_id,
vlan_id)
vlan_enabled = True
try:
nxos_db.add_nexusport_binding(port_id, str(vlan_id),
switch_ip, instance)
except Exception as e:
try:
# Add binding failed, roll back any vlan creation/enabling
if vlan_created:
self._client.delete_vlan(
str(vlan_id), _nexus_ip,
_nexus_username, _nexus_password,
_nexus_ports, _nexus_ssh_port)
if vlan_enabled:
self._client.disable_vlan_on_trunk_int(man,
port_id,
vlan_id)
finally:
# Raise the original exception
raise e
new_net_dict = {const.NET_ID: net_id,
const.NET_NAME: net_name,
const.NET_PORTS: {},
const.NET_VLAN_NAME: vlan_name,
const.NET_VLAN_ID: vlan_id}
self._networks[net_id] = new_net_dict
return new_net_dict
def delete_network(self, tenant_id, net_id, **kwargs):
"""Delete network.
Deletes the VLAN in all switches, and removes the VLAN configuration
from the relevant interfaces.
"""
LOG.debug(_("NexusPlugin:delete_network() called"))
def get_network_details(self, tenant_id, net_id, **kwargs):
"""Return the details of a particular network."""
LOG.debug(_("NexusPlugin:get_network_details() called"))
network = self._get_network(tenant_id, net_id)
return network
def update_network(self, tenant_id, net_id, **kwargs):
"""Update the properties of a particular Virtual Network."""
LOG.debug(_("NexusPlugin:update_network() called"))
def get_all_ports(self, tenant_id, net_id, **kwargs):
"""Get all ports.
This is probably not applicable to the Nexus plugin.
Delete if not required.
"""
LOG.debug(_("NexusPlugin:get_all_ports() called"))
def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs):
"""Create port.
This is probably not applicable to the Nexus plugin.
Delete if not required.
"""
LOG.debug(_("NexusPlugin:create_port() called"))
def delete_port(self, device_id, vlan_id):
"""Delete port.
Delete port bindings from the database and scan whether the network
is still required on the interfaces trunked.
"""
LOG.debug(_("NexusPlugin:delete_port() called"))
# Delete DB row for this port
row = nxos_db.get_nexusvm_binding(vlan_id, device_id)
if row:
nxos_db.remove_nexusport_binding(row['port_id'], row['vlan_id'],
row['switch_ip'],
row['instance_id'])
# Check for any other bindings with the same vlan_id and switch_ip
bindings = nxos_db.get_nexusvlan_binding(
row['vlan_id'], row['switch_ip'])
if not bindings:
try:
# Delete this vlan from this switch
_nexus_ip = row['switch_ip']
_nexus_ports = (row['port_id'],)
_nexus_ssh_port = (self._nexus_switches[_nexus_ip,
'ssh_port'])
_nexus_creds = self.get_credential(_nexus_ip)
_nexus_username = _nexus_creds['username']
_nexus_password = _nexus_creds['password']
self._client.delete_vlan(
str(row['vlan_id']), _nexus_ip,
_nexus_username, _nexus_password,
_nexus_ports, _nexus_ssh_port)
except Exception as e:
# The delete vlan operation on the Nexus failed,
# so this delete_port request has failed. For
# consistency, roll back the Nexus database to what
# it was before this request.
try:
nxos_db.add_nexusport_binding(row['port_id'],
row['vlan_id'],
row['switch_ip'],
row['instance_id'])
finally:
# Raise the original exception
raise e
return row['instance_id']
def update_port(self, tenant_id, net_id, port_id, port_state, **kwargs):
"""Update port.
This is probably not applicable to the Nexus plugin.
Delete if not required.
"""
LOG.debug(_("NexusPlugin:update_port() called"))
def get_port_details(self, tenant_id, net_id, port_id, **kwargs):
"""Get port details.
This is probably not applicable to the Nexus plugin.
Delete if not required.
"""
LOG.debug(_("NexusPlugin:get_port_details() called"))
def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id,
**kwargs):
"""Plug interfaces.
This is probably not applicable to the Nexus plugin.
Delete if not required.
"""
LOG.debug(_("NexusPlugin:plug_interface() called"))
def unplug_interface(self, tenant_id, net_id, port_id, **kwargs):
"""Unplug interface.
This is probably not applicable to the Nexus plugin.
Delete if not required.
"""
LOG.debug(_("NexusPlugin:unplug_interface() called"))
def _get_vlan_id_for_network(self, tenant_id, network_id, context,
base_plugin_ref):
"""Obtain the VLAN ID given the Network ID."""
vlan = cdb.get_vlan_binding(network_id)
return vlan.vlan_id
def _get_network(self, tenant_id, network_id, context, base_plugin_ref):
"""Get the Network ID."""
network = base_plugin_ref._get_network(context, network_id)
if not network:
raise exc.NetworkNotFound(net_id=network_id)
return {const.NET_ID: network_id, const.NET_NAME: network.name,
const.NET_PORTS: network.ports}
|
{
"content_hash": "fc644ed95cb0de710d8c525cb76a22b4",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 78,
"avg_line_length": 41.13833992094862,
"alnum_prop": 0.5326671790930054,
"repo_name": "yamt/neutron",
"id": "a5873965aa2aba1e6b171f378c40ad49284e8294",
"size": "11249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantum/plugins/cisco/nexus/cisco_nexus_plugin_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "4078056"
},
{
"name": "Shell",
"bytes": "10023"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
from pynlpl.formats import folia
tweets_doc = folia.Document(id="tweetlist1")
tweets_doc.declare(folia.Event, "https://raw.githubusercontent.com/ahurriyetoglu/sinfexfolia/master/sinfex-event.foliaset.xml")
tweets_doc.declare(folia.Entity, "https://raw.githubusercontent.com/ahurriyetoglu/sinfexfolia/master/sinfex-entities.foliaset.xml")
textbody = tweets_doc.append(folia.Text)
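# tweets_df is assumed to be defined elsewhere (e.g. a pandas DataFrame of
# tweets with a 'text' column); only the first 250 tweets are converted below.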
for tw in tweets_df.text.values[:250]:
tweet = folia.Event(tweets_doc, cls='tweet',generate_id_in=textbody)
for t in tw.split():
tweet.append(folia.Word, text=t) #,space=space)
#tweet.append(folia.Word, text="",space=space)
textbody.append(tweet)
tweets_doc.save("tst.folia.xml")
|
{
"content_hash": "fb3b2d1747ba79f79bdc82dda2999f84",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 131,
"avg_line_length": 42.9375,
"alnum_prop": 0.7496360989810772,
"repo_name": "ahurriyetoglu/sinfexfolia",
"id": "323d6470eaa099b981701b1cdd3a1b8e54162075",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "create_folia.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "687"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
from django.utils import six
# The models definitions below used to crash. Generating models dynamically
# at runtime is a bad idea because it pollutes the app registry. This doesn't
# integrate well with the test suite but at least it prevents regressions.
class CustomBaseModel(models.base.ModelBase):
pass
class MyModel(six.with_metaclass(CustomBaseModel, models.Model)):
"""Model subclass with a custom base using six.with_metaclass."""
# This is done to ensure that for Python2 only, defining metaclasses
# still does not fail to create the model.
if six.PY2:
class MyPython2Model(models.Model):
"""Model subclass with a custom base using __metaclass__."""
__metaclass__ = CustomBaseModel
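# Illustrative check (not part of the original test fixture): with_metaclass
# makes CustomBaseModel the metaclass, i.e. type(MyModel) is CustomBaseModel.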
|
{
"content_hash": "7864c48cc7ab7589cf1d969efcd565a3",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 32,
"alnum_prop": 0.74875,
"repo_name": "cloudera/hue",
"id": "7a6b1145162f5bcf76e6093db08e7d2cf16b38e9",
"size": "800",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Django-1.11.29/tests/base/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
import os
from django.core.management.base import BaseCommand, CommandError
from raven import Client
class SentryCommand(BaseCommand):
def execute(self, *args, **options):
try:
return super(SentryCommand, self).execute(*args, **options)
except Exception as e:
if not isinstance(e, CommandError):
if 'SENTRY_DSN' in os.environ:
dsn = os.environ['SENTRY_DSN']
else:
raise
sentry = Client(dsn)
sentry.get_ident(sentry.captureException())
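# Usage sketch (illustrative, not part of the original module; the command
# name and do_export helper are hypothetical): subclass SentryCommand instead
# of BaseCommand so uncaught errors are reported when SENTRY_DSN is set.
# class ExportData(SentryCommand):
#     def handle(self, *args, **options):
#         do_export()  # any uncaught non-CommandError here goes to Sentry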
|
{
"content_hash": "b6f587f68284aa22746320733d12d176",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 30.894736842105264,
"alnum_prop": 0.575809199318569,
"repo_name": "kyleconroy/speakers",
"id": "aeaf153e1c5c92bee0210fcba512170d4ed6d4de",
"size": "587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cfp/management/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "66276"
},
{
"name": "HTML",
"bytes": "85548"
},
{
"name": "JavaScript",
"bytes": "4576"
},
{
"name": "Makefile",
"bytes": "262"
},
{
"name": "Python",
"bytes": "137640"
}
],
"symlink_target": ""
}
|
from bambou import NURESTFetcher
class NUSSHKeysFetcher(NURESTFetcher):
""" Represents a NUSSHKeys fetcher
Notes:
            This fetcher makes it possible to fetch NUSSHKey objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUSSHKey class that is managed.
Returns:
.NUSSHKey: the managed class
"""
from .. import NUSSHKey
return NUSSHKey
|
{
"content_hash": "970ac1ed861869bcae0806642ac9c137",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 59,
"avg_line_length": 19.28,
"alnum_prop": 0.5809128630705395,
"repo_name": "nuagenetworks/vspk-python",
"id": "91ffae199ad820c1e5a7dbbbfee3d985c36cc160",
"size": "2093",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vspk/v5_0/fetchers/nusshkeys_fetcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12909327"
}
],
"symlink_target": ""
}
|
"""
An example plugin for spockbot
Demonstrates the following functionality:
- Receiving chat messages
- Sending chat commands
- Using inventory
- Moving to location
- Triggering a periodic event using a timer
- Registering for an event upon startup
- Placing blocks
- Reading blocks
"""
import logging
# Import any modules that you need in your plugin
from spockbot.mcdata import blocks
from spockbot.plugins.base import PluginBase, pl_announce
from spockbot.plugins.tools.event import EVENT_UNREGISTER
from spockbot.vector import Vector3
__author__ = 'Cosmo Harrigan, Morgan Creekmore'
logger = logging.getLogger('spockbot')
# The bot will walk to this starting position. Set it to a sensible
# location for your world file. The format is: (x, y, z)
TARGET_COORDINATES = Vector3(10, 2, 10)
# Required class decorator
@pl_announce('ExamplePlugin')
class ExamplePlugin(PluginBase):
# Require other plugins that you want use later in the plugin
requires = ('Movement', 'Timers', 'World', 'ClientInfo', 'Inventory',
'Interact', 'Chat')
# Example of registering an event handler
# Packet event types are enumerated here:
# https://github.com/SpockBotMC/SpockBot/blob/master/spockbot/mcp
# /mcdata.py#L213
# There are other events that can be used that are emitted by other plugins
events = {
# This event will be triggered when a chat message is received
# from the server
'PLAY<Chat Message': 'chat_event_handler',
# This event will be triggered after authentication when the bot
# joins the game
'client_join_game': 'perform_initial_actions',
# This event is triggered once the inventory plugin has the
# full inventory
'inventory_synced': 'hold_block',
}
def __init__(self, ploader, settings):
# Used to init the PluginBase
super(ExamplePlugin, self).__init__(ploader, settings)
# Example of registering a timer that triggers a method periodically
frequency = 5 # Number of seconds between triggers
self.timers.reg_event_timer(frequency, self.periodic_event_handler)
def perform_initial_actions(self, name, data):
"""Sends a chat message, then moves to target coordinates."""
# Send a chat message
self.chat.chat('Bot active')
# Walk to target coordinates
self.movement.move_to(*TARGET_COORDINATES)
def chat_event_handler(self, name, data):
"""Called when a chat message occurs in the game"""
logger.info('Chat message received: {0}'.format(data))
def hold_block(self, name, data):
# Search the hotbar for cobblestone
slot = self.inventory.find_slot(4, self.inventory.window.hotbar_slots)
# Switch to slot with cobblestone
if slot is not None:
self.inventory.select_active_slot(slot)
# Switch to first slot because there is no cobblestone in hotbar
else:
self.inventory.select_active_slot(0)
# Return EVENT_UNREGISTER to unregister the event handler
return EVENT_UNREGISTER
def periodic_event_handler(self):
"""Triggered every 5 seconds by a timer"""
logger.info('My position: {0} pitch: {1} yaw: {2}'.format(
self.clientinfo.position,
self.clientinfo.position.pitch,
self.clientinfo.position.yaw))
# Place a block in front of the player
self.interact.place_block(
self.clientinfo.position + Vector3(-1, 0, -1))
# Read a block under the player
block_pos = self.clientinfo.position.floor()
block_id, meta = self.world.get_block(*block_pos)
block_at = blocks.get_block(block_id, meta)
self.chat.chat('Found block %s at %s' % (
block_at.display_name, block_pos))
|
{
"content_hash": "f9a5a5a1edde26f0f461d27dbaa51986",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 36.92307692307692,
"alnum_prop": 0.6703125,
"repo_name": "Gjum/SpockBot",
"id": "fa7d3664b1a5650224e5b2d49d32c8a5f0f31def",
"size": "3840",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/basic/example_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "290360"
}
],
"symlink_target": ""
}
|
import os
import re
import codecs
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open
return codecs.open(os.path.join(here, *parts), 'r').read()
def read_filelines(filename):
with open(filename) as f:
return map(lambda l: l.strip(), f.readlines())
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
VERSION = find_version('robotx', '__init__.py')
DESC = 'Toolset for automation development with Robot Framework'
LDESC = read('README.rst')
DEPS = read_filelines('requires.txt')
EXECUTE = {'console_scripts': ['robotx=robotx.core:execute']}
setup(
name='robotx',
version=VERSION,
author='Xin Gao',
author_email='fdumpling@gmail.com',
url='https://github.com/idumpling/robotx/',
license='MIT',
description=DESC,
long_description=LDESC,
install_requires=DEPS,
packages=find_packages(),
include_package_data=True,
package_dir={'robotx': 'robotx'},
entry_points=EXECUTE,
)
|
{
"content_hash": "74fcd88f967f4c568d89332534ac7b62",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 68,
"avg_line_length": 26.313725490196077,
"alnum_prop": 0.6549925484351714,
"repo_name": "chuijiaolianying/robotx",
"id": "bebeceec76de694ee1575bf710fb3b530b5f51ae",
"size": "1365",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1433960"
},
{
"name": "Python",
"bytes": "100729"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
import os
from cacheback import __version__
PACKAGE_DIR = os.path.abspath(os.path.dirname(__file__))
os.chdir(PACKAGE_DIR)
setup(name='django-cacheback',
version=__version__,
url='https://github.com/codeinthehole/django-cacheback',
author="David Winterbottom",
author_email="david.winterbottom@gmail.com",
description=("Caching library for Django that uses Celery "
"to refresh cache items asynchronously"),
long_description=open(os.path.join(PACKAGE_DIR, 'README.rst')).read(),
license='MIT',
packages=find_packages(exclude=["sandbox*", "tests*"]),
include_package_data=True,
install_requires=[
'django>=1.3,<1.9',
'django-celery>=3.0',
'celery<3.2',
'six',
],
# See http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=['Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python']
)
|
{
"content_hash": "0dc577d502edb3a0c276620948e17156",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 37.06060606060606,
"alnum_prop": 0.588716271463614,
"repo_name": "bharling/django-cacheback",
"id": "4277891dc662c4a255401ff76688fde8da63d2d4",
"size": "1246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "172"
},
{
"name": "Makefile",
"bytes": "302"
},
{
"name": "Python",
"bytes": "43326"
},
{
"name": "Shell",
"bytes": "334"
}
],
"symlink_target": ""
}
|
import pygraph.classes.graph
import pygraph.classes.exceptions
import sys
import logging
import md5
logging.basicConfig()
logger = logging.getLogger('guiraldelli.master.unification.algorithms')
logger.setLevel(logging.INFO)
def graphs_unification(graphs_universe):
    '''Compose a final graph from the selected edges of the graphs
    belonging to the universe of graphs.'''
logger = logging.getLogger('guiraldelli.master.unification.algorithms.graphs_unification')
logger.setLevel(logging.INFO)
final_graph = pygraph.classes.graph.graph()
final_graph.add_nodes(vertices(graphs_universe))
for one_vertix in final_graph.nodes():
for other_vertix in final_graph.nodes():
if one_vertix != other_vertix:
minimum_weight = minimum_weighted_edge(one_vertix, other_vertix, graphs_universe)
logger.debug('For edge (%s, %s) minimum weight is %s.', one_vertix, other_vertix, minimum_weight)
if minimum_weight != None:
try:
final_graph.add_edge((one_vertix, other_vertix), wt=minimum_weight)
except pygraph.classes.exceptions.AdditionError:
pass
else:
pass
return final_graph
def vertices(graphs_universe):
    '''Compose the list of vertices to be added to the final graph
    from all the vertices existing in the graphs of the universe of graphs.'''
vertices = list()
for graph in graphs_universe:
for vertix in graph.nodes():
if vertix not in vertices:
vertices.append(vertix)
else:
pass
return vertices
def minimum_weighted_edge(one_vertix, other_vertix, graphs_universe):
    '''Find the minimum-weighted edge and return the minimum weight to
    compose a new edge in the final graph.'''
logger = logging.getLogger('guiraldelli.master.unification.algorithms.minimum_weighted_edge')
logger.setLevel(logging.WARN)
inspection_edge = (one_vertix, other_vertix)
minimum_weight = None
for graph in graphs_universe:
if graph.has_edge(inspection_edge):
if minimum_weight == None or graph.edge_weight(inspection_edge) < minimum_weight:
minimum_weight = graph.edge_weight(inspection_edge)
logger.info("Selected edge from the graph %s.", md5.new(str(graph)).hexdigest())
# NOTE: there is no need to receive the edge
else:
pass
else:
pass
logger.debug('For edge (%s, %s) minimum weight is %s.', one_vertix, other_vertix, minimum_weight)
return minimum_weight
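# Illustrative usage sketch (not part of the original module): unify two small
# graphs that share one edge; the unified edge keeps the minimum weight. Uses
# only the pygraph API already exercised above (add_nodes/add_edge/edge_weight).
if __name__ == '__main__':
    g1 = pygraph.classes.graph.graph()
    g1.add_nodes(['a', 'b'])
    g1.add_edge(('a', 'b'), wt=3)
    g2 = pygraph.classes.graph.graph()
    g2.add_nodes(['a', 'b'])
    g2.add_edge(('a', 'b'), wt=1)
    unified = graphs_unification([g1, g2])
    print(unified.edge_weight(('a', 'b')))  # prints 1, the minimum weight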
|
{
"content_hash": "db437b534dede8444c00c95387abc5a1",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 113,
"avg_line_length": 43.354838709677416,
"alnum_prop": 0.6469494047619048,
"repo_name": "guiraldelli/MSc",
"id": "7341b901c46405766958df81162349ff14493d2c",
"size": "2688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "haplotype_network/guiraldelli/master/unification/algorithms.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "62804"
}
],
"symlink_target": ""
}
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
url(r'^', include('indaba.talks.urls', namespace = 'talks')),
url(r'^', include('indaba.accounts.urls', namespace='accounts')),
]
|
{
"content_hash": "91d1565b8d29d5976cd9fa77c596c0a0",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 41.80769230769231,
"alnum_prop": 0.6954921803127875,
"repo_name": "PyZim/indaba",
"id": "4a079d00be42452cfcd46cb3733f8992f4f236d0",
"size": "1087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indaba/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49353"
},
{
"name": "HTML",
"bytes": "15929"
},
{
"name": "JavaScript",
"bytes": "69850"
},
{
"name": "Makefile",
"bytes": "2270"
},
{
"name": "Python",
"bytes": "28371"
}
],
"symlink_target": ""
}
|
import copy
import json
import re
from crispy_forms.bootstrap import InlineField
from crispy_forms.helper import FormHelper
from crispy_forms import layout as crispy
from crispy_forms import bootstrap as twbscrispy
from corehq.apps.style import crispy as hqcrispy
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from datetime import timedelta, datetime, time, date
from django.conf import settings
from django.core.exceptions import ValidationError
from django.forms.fields import *
from django.forms.forms import Form
from django import forms
from django.forms import Field, Widget
from corehq.apps.accounting.utils import domain_is_on_trial
from corehq.apps.casegroups.models import CommCareCaseGroup
from corehq.apps.casegroups.dbaccessors import get_case_groups_in_domain
from corehq.apps.locations.models import SQLLocation
from corehq.apps.locations.util import get_locations_from_ids
from corehq.apps.reminders.event_handlers import TRIAL_MAX_EMAILS
from corehq.apps.reminders.util import DotExpandedDict, get_form_list
from corehq.apps.groups.models import Group
from corehq.apps.hqwebapp.crispy import ErrorsOnlyField
from corehq.apps.style.crispy import FieldWithHelpBubble, B3MultiField
from corehq.apps.users.forms import SupplyPointSelectWidget
from corehq import toggles
from corehq.util.timezones.conversions import UserTime
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.decorators.memoized import memoized
from .models import (
REPEAT_SCHEDULE_INDEFINITELY,
CaseReminderEvent,
RECIPIENT_USER,
RECIPIENT_CASE,
RECIPIENT_SURVEY_SAMPLE,
RECIPIENT_OWNER,
RECIPIENT_LOCATION,
MATCH_EXACT,
MATCH_REGEX,
MATCH_ANY_VALUE,
EVENT_AS_SCHEDULE,
EVENT_AS_OFFSET,
CaseReminderHandler,
FIRE_TIME_DEFAULT,
FIRE_TIME_CASE_PROPERTY,
METHOD_SMS,
METHOD_SMS_CALLBACK,
METHOD_SMS_SURVEY,
METHOD_IVR_SURVEY,
METHOD_EMAIL,
CASE_CRITERIA,
QUESTION_RETRY_CHOICES,
SurveyKeyword,
RECIPIENT_PARENT_CASE,
RECIPIENT_SUBCASE,
FIRE_TIME_RANDOM,
SEND_NOW,
SEND_LATER,
RECIPIENT_USER_GROUP,
UI_SIMPLE_FIXED,
UI_COMPLEX,
RECIPIENT_ALL_SUBCASES,
RECIPIENT_CASE_OWNER_LOCATION_PARENT,
DAY_MON,
DAY_TUE,
DAY_WED,
DAY_THU,
DAY_FRI,
DAY_SAT,
DAY_SUN,
DAY_ANY,
)
from dateutil.parser import parse
from django.utils.translation import ugettext as _, ugettext_noop, ugettext_lazy
from corehq.apps.app_manager.models import Form as CCHQForm
from dimagi.utils.django.fields import TrimmedCharField
from corehq.util.timezones.utils import get_timezone_for_user
from langcodes import get_name as get_language_name
ONE_MINUTE_OFFSET = time(0, 1)
NO_RESPONSE = "none"
YES_OR_NO = (
("Y", ugettext_lazy("Yes")),
("N", ugettext_lazy("No")),
)
NOW_OR_LATER = (
(SEND_NOW, ugettext_lazy("Now")),
(SEND_LATER, ugettext_lazy("Later")),
)
CONTENT_CHOICES = (
(METHOD_SMS, ugettext_lazy("SMS")),
(METHOD_SMS_SURVEY, ugettext_lazy("SMS Survey")),
)
KEYWORD_CONTENT_CHOICES = (
(METHOD_SMS, ugettext_lazy("SMS")),
(METHOD_SMS_SURVEY, ugettext_lazy("SMS Survey")),
(NO_RESPONSE, ugettext_lazy("No Response")),
)
KEYWORD_RECIPIENT_CHOICES = (
(RECIPIENT_USER_GROUP, ugettext_lazy("Mobile Worker Group")),
(RECIPIENT_OWNER, ugettext_lazy("The case's owner")),
)
ONE_TIME_RECIPIENT_CHOICES = (
(RECIPIENT_USER_GROUP, ugettext_lazy("Mobile Worker Group")),
(RECIPIENT_SURVEY_SAMPLE, ugettext_lazy("Case Group")),
)
EVENT_CHOICES = (
(EVENT_AS_OFFSET, ugettext_lazy("Offset-based")),
(EVENT_AS_SCHEDULE, ugettext_lazy("Schedule-based"))
)
EMAIL_TRIAL_MESSAGE = ugettext_lazy("You are currently on a trial plan. "
"You are allowed to send %(limit)s reminder emails after which you will "
"not be able to send anymore reminder emails unless you upgrade your "
"plan.")
def add_field_choices(form, field_name, choice_tuples):
choices = copy.copy(form.fields[field_name].choices)
choices.extend(choice_tuples)
form.fields[field_name].choices = choices
def user_group_choices(domain):
ids = Group.ids_by_domain(domain)
return [(doc['_id'], doc['name'])
for doc in iter_docs(Group.get_db(), ids)]
def case_group_choices(domain):
return [(group._id, group.name)
for group in get_case_groups_in_domain(domain)]
def form_choices(domain):
available_forms = get_form_list(domain)
return [(form['code'], form['name']) for form in available_forms]
def validate_integer(value, error_msg, nonnegative=False):
try:
assert value is not None
value = int(value)
if nonnegative:
assert value >= 0
return value
except (ValueError, AssertionError):
raise ValidationError(error_msg)
def validate_date(value):
date_regex = re.compile('^\d\d\d\d-\d\d-\d\d$')
if not isinstance(value, basestring) or date_regex.match(value) is None:
raise ValidationError(_('Dates must be in YYYY-MM-DD format.'))
try:
return parse(value).date()
except Exception:
raise ValidationError(_('Invalid date given.'))
def validate_time(value):
if isinstance(value, time):
return value
error_msg = _("Please enter a valid time from 00:00 to 23:59.")
time_regex = re.compile("^\d{1,2}:\d\d(:\d\d){0,1}$")
if not isinstance(value, basestring) or time_regex.match(value) is None:
raise ValidationError(error_msg)
try:
return parse(value).time()
except Exception:
raise ValidationError(error_msg)
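# Illustrative examples for the two validators above (not part of the
# original module):
#   validate_date('2015-01-31')  -> datetime.date(2015, 1, 31)
#   validate_time('8:30')        -> datetime.time(8, 30)
#   validate_time('25:00')       -> raises ValidationError (parse() fails)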
def validate_form_unique_id(form_unique_id, domain):
error_msg = _('Invalid form chosen.')
try:
form = CCHQForm.get_form(form_unique_id)
app = form.get_app()
except Exception:
raise ValidationError(error_msg)
if app.domain != domain:
raise ValidationError(error_msg)
return form_unique_id
def clean_group_id(group_id, expected_domain):
error_msg = _('Invalid selection.')
if not group_id:
raise ValidationError(error_msg)
try:
group = Group.get(group_id)
except Exception:
raise ValidationError(error_msg)
if group.doc_type != 'Group' or group.domain != expected_domain:
raise ValidationError(error_msg)
return group_id
def clean_case_group_id(group_id, expected_domain):
error_msg = _('Invalid selection.')
if not group_id:
raise ValidationError(error_msg)
try:
group = CommCareCaseGroup.get(group_id)
except Exception:
raise ValidationError(error_msg)
if group.doc_type != 'CommCareCaseGroup' or group.domain != expected_domain:
raise ValidationError(error_msg)
return group_id
MATCH_TYPE_CHOICES = (
(MATCH_ANY_VALUE, ugettext_noop("exists.")),
(MATCH_EXACT, ugettext_noop("equals")),
(MATCH_REGEX, ugettext_noop("matches regular expression")),
)
START_REMINDER_ALL_CASES = 'start_all_cases'
START_REMINDER_ON_CASE_DATE = 'case_date'
START_REMINDER_ON_CASE_PROPERTY = 'case_property'
START_REMINDER_ON_DAY_OF_WEEK = 'day_of_week'
START_DATE_OFFSET_BEFORE = 'offset_before'
START_DATE_OFFSET_AFTER = 'offset_after'
START_PROPERTY_OFFSET_DELAY = 'offset_delay'
START_PROPERTY_OFFSET_IMMEDIATE = 'offset_immediate'
START_PROPERTY_ALL_CASES_VALUE = '_id'
EVENT_TIMING_IMMEDIATE = 'immediate'
REPEAT_TYPE_NO = 'no_repeat'
REPEAT_TYPE_INDEFINITE = 'indefinite'
REPEAT_TYPE_SPECIFIC = 'specific'
STOP_CONDITION_CASE_PROPERTY = 'case_property'
class BaseScheduleCaseReminderForm(forms.Form):
"""
This form creates a new CaseReminder. It is the most basic version, no advanced options (like language).
"""
nickname = forms.CharField(
label=ugettext_noop("Name"),
error_messages={
'required': ugettext_noop("Please enter a name for this reminder."),
}
)
# Fieldset: Send Options
# simple has start_condition_type = CASE_CRITERIA by default
case_type = forms.CharField(
required=False,
label=ugettext_noop("Send For Case Type"),
)
start_reminder_on = forms.ChoiceField(
label=ugettext_noop("Send Reminder For"),
required=False,
choices=(
(START_REMINDER_ALL_CASES, ugettext_noop("All Cases")),
(START_REMINDER_ON_CASE_PROPERTY, ugettext_noop("Only Cases in Following State")),
),
)
## send options > start_reminder_on = case_date
start_property = forms.CharField(
required=False,
label=ugettext_noop("Enter a Case Property"),
)
start_match_type = forms.ChoiceField(
required=False,
choices=MATCH_TYPE_CHOICES,
)
# only shows up if start_match_type != MATCH_ANY_VALUE
start_value = forms.CharField(
required=False,
label=ugettext_noop("Value")
)
# this is a UI control that determines how start_offset is calculated (0 or an integer)
start_property_offset_type = forms.ChoiceField(
required=False,
choices=(
(START_PROPERTY_OFFSET_IMMEDIATE, ugettext_noop("Immediately")),
(START_PROPERTY_OFFSET_DELAY, ugettext_noop("Delay By")),
(START_REMINDER_ON_CASE_DATE, ugettext_noop("Date in Case")),
(START_REMINDER_ON_DAY_OF_WEEK, ugettext_noop("Specific Day of Week")),
)
)
# becomes start_offset
start_property_offset = forms.IntegerField(
required=False,
initial=1,
)
## send options > start_reminder_on = case_property
start_date = forms.CharField(
required=False,
label=ugettext_noop("Enter a Case Property"),
)
start_date_offset_type = forms.ChoiceField(
required=False,
choices=(
(START_DATE_OFFSET_BEFORE, ugettext_noop("Before Date By")),
(START_DATE_OFFSET_AFTER, ugettext_noop("After Date By")),
)
)
start_day_of_week = forms.ChoiceField(
required=False,
choices=(
(DAY_SUN, ugettext_noop("Sunday")),
(DAY_MON, ugettext_noop("Monday")),
(DAY_TUE, ugettext_noop("Tuesday")),
(DAY_WED, ugettext_noop("Wednesday")),
(DAY_THU, ugettext_noop("Thursday")),
(DAY_FRI, ugettext_noop("Friday")),
(DAY_SAT, ugettext_noop("Saturday")),
)
)
# becomes start_offset
start_date_offset = forms.IntegerField(
required=False,
initial=0,
)
# Fieldset: Recipient
recipient = forms.ChoiceField(
choices=(
(RECIPIENT_CASE, ugettext_noop("Case")),
(RECIPIENT_OWNER, ugettext_noop("Case Owner")),
(RECIPIENT_USER, ugettext_noop("Last User Who Modified Case")),
(RECIPIENT_USER_GROUP, ugettext_noop("Mobile Worker Group")),
(RECIPIENT_ALL_SUBCASES, ugettext_noop("All Child Cases")),
(RECIPIENT_SUBCASE, ugettext_noop("Specific Child Case")),
(RECIPIENT_PARENT_CASE, ugettext_noop("Parent Case")),
),
)
## recipient = RECIPIENT_SUBCASE
recipient_case_match_property = forms.CharField(
label=ugettext_noop("Enter a Case Property"),
required=False
)
recipient_case_match_type = forms.ChoiceField(
required=False,
choices=MATCH_TYPE_CHOICES,
)
recipient_case_match_value = forms.CharField(
label=ugettext_noop("Value"),
required=False
)
## recipient = RECIPIENT_USER_GROUP
user_group_id = ChoiceField(
required=False,
label=ugettext_noop("Mobile Worker Group"),
)
# Fieldset: Message Content
method = forms.ChoiceField(
label=ugettext_noop("Send"),
choices=(
(METHOD_SMS, ugettext_noop("SMS")),
),
)
global_timeouts = forms.CharField(
label=ugettext_noop("Timeouts"),
required=False,
)
default_lang = forms.ChoiceField(
required=False,
label=ugettext_noop("Default Language"),
choices=(
('en', ugettext_noop("English (en)")),
)
)
event_timing = forms.ChoiceField(
label=ugettext_noop("Time of Day"),
)
event_interpretation = forms.ChoiceField(
label=ugettext_noop("Schedule Type"),
initial=EVENT_AS_OFFSET,
choices=EVENT_CHOICES,
widget=forms.HiddenInput # validate as choice, but don't show the widget.
)
# contains a string-ified JSON object of events
events = forms.CharField(
required=False,
widget=forms.HiddenInput
)
# Fieldset: Repeat
repeat_type = forms.ChoiceField(
required=False,
label=ugettext_noop("Repeat Reminder"),
initial=REPEAT_TYPE_NO,
choices=(
(REPEAT_TYPE_NO, ugettext_noop("No")), # reminder_type = ONE_TIME
(REPEAT_TYPE_INDEFINITE, ugettext_noop("Indefinitely")), # reminder_type = DEFAULT, max_iteration_count = -1
(REPEAT_TYPE_SPECIFIC, ugettext_noop("Specific Number of Times")),
)
)
# shown if repeat_type != 'no_repeat'
schedule_length = forms.IntegerField(
required=False,
label=ugettext_noop("Repeat Every"),
)
# shown if repeat_type == 'specific' (0 if no_repeat, -1 if indefinite)
max_iteration_count = forms.IntegerField(
required=False,
label=ugettext_noop("Number of Times"),
)
# shown if repeat_type != 'no_repeat'
stop_condition = forms.ChoiceField(
required=False,
label="",
choices=(
('', ugettext_noop('(none)')),
(STOP_CONDITION_CASE_PROPERTY, ugettext_noop('Based on Case Property')),
)
)
until = forms.CharField(
required=False,
label=ugettext_noop("Enter a Case Property"),
)
# Advanced Toggle
submit_partial_forms = forms.BooleanField(
required=False,
label=ugettext_noop("Submit Partial Forms"),
)
include_case_side_effects = forms.BooleanField(
required=False,
label=ugettext_noop("Include Case Changes for Partial Forms"),
)
# only show if SMS_SURVEY or IVR_SURVEY is chosen
max_question_retries = forms.ChoiceField(
required=False,
choices=((n, n) for n in QUESTION_RETRY_CHOICES)
)
force_surveys_to_use_triggered_case = forms.BooleanField(
required=False,
label=ugettext_noop("For Surveys, force answers to affect "
"case sending the survey."),
)
use_custom_content_handler = BooleanField(
required=False,
label=ugettext_noop("Use Custom Content Handler")
)
custom_content_handler = TrimmedCharField(
required=False,
label=ugettext_noop("Please Specify Custom Content Handler")
)
def __init__(self, data=None, is_previewer=False,
domain=None, is_edit=False, can_use_survey=False,
use_custom_content_handler=False,
custom_content_handler=None,
available_languages=None, *args, **kwargs
):
available_languages = available_languages or ['en']
self.available_languages = available_languages
self.initial_event = {
'day_num': 0,
'fire_time_type': FIRE_TIME_DEFAULT,
'subject': dict([(l, '') for l in available_languages]),
'message': dict([(l, '') for l in available_languages]),
}
if 'initial' not in kwargs:
kwargs['initial'] = {
'event_timing': self._format_event_timing_choice(EVENT_AS_OFFSET,
FIRE_TIME_DEFAULT, EVENT_TIMING_IMMEDIATE),
'events': json.dumps([self.initial_event])
}
if is_edit:
max_iteration_count = kwargs['initial']['max_iteration_count']
if max_iteration_count == 1:
repeat_type = REPEAT_TYPE_NO
elif max_iteration_count == REPEAT_SCHEDULE_INDEFINITELY:
repeat_type = REPEAT_TYPE_INDEFINITE
else:
repeat_type = REPEAT_TYPE_SPECIFIC
kwargs['initial']['repeat_type'] = repeat_type
super(BaseScheduleCaseReminderForm, self).__init__(data, *args, **kwargs)
self.domain = domain
self.is_edit = is_edit
self.is_previewer = is_previewer
self.fields['user_group_id'].choices = Group.choices_by_domain(self.domain)
self.fields['default_lang'].choices = [(l, l) for l in available_languages]
if can_use_survey:
add_field_choices(self, 'method', [
(METHOD_SMS_SURVEY, _('SMS Survey')),
])
if is_previewer and can_use_survey:
add_field_choices(self, 'method', [
(METHOD_IVR_SURVEY, _('IVR Survey')),
(METHOD_SMS_CALLBACK, _('SMS Expecting Callback')),
])
add_field_choices(self, 'method', [
(METHOD_EMAIL, _('Email')),
])
if toggles.ABT_REMINDER_RECIPIENT.enabled(self.domain):
add_field_choices(self, 'recipient', [
(
RECIPIENT_CASE_OWNER_LOCATION_PARENT,
_("The case owner's location's parent location")
),
])
from corehq.apps.reminders.views import RemindersListView
self.helper = FormHelper()
self.helper.label_class = 'col-sm-2 col-md-2 col-lg-2'
self.helper.field_class = 'col-sm-4 col-md-4 col-lg-4'
self.helper.layout = crispy.Layout(
crispy.Fieldset(
_("Basic Information"),
crispy.Field(
'nickname',
css_class='input-large',
),
),
self.section_start,
self.section_recipient,
self.section_message,
self.section_repeat,
self.section_advanced,
hqcrispy.FormActions(
twbscrispy.StrictButton(
_("Update Reminder") if is_edit else _("Create Reminder"),
css_class='btn-primary',
type='submit',
),
crispy.HTML('<a href="%s" class="btn btn-default">%s</a>' % (
reverse(RemindersListView.urlname, args=[self.domain]),
_("Cancel")
)),
)
)
@property
def ui_type(self):
raise NotImplementedError("You must specify a ui_type for the reminder")
@property
def section_start(self):
return crispy.Fieldset(
_('Start'),
crispy.HTML(
'<p style="padding: 0; margin-bottom: 1.5em;">'
'<i class="fa fa-info-circle"></i> %s</p>' % _(
"Choose what will cause this reminder to be sent"
),
),
*self.section_start_fields
)
@property
def section_start_fields(self):
return [
FieldWithHelpBubble(
'case_type',
css_class="input-xlarge",
data_bind="value: case_type, autocompleteSelect2: available_case_types",
placeholder=_("Enter a Case Type"),
help_bubble_text=_(
"Choose which case type this reminder will be "
"sent out for."
),
),
FieldWithHelpBubble(
'start_reminder_on',
data_bind="value: start_reminder_on",
css_class="input-xlarge",
help_bubble_text=("Reminders can either start based on a date in a case property "
"or if the case is in a particular state (ex: case property 'high_risk' "
"is equal to 'yes')")
),
crispy.Div(
hqcrispy.B3MultiField(
_("When Case Property"),
crispy.Div(
InlineField(
'start_property',
css_class="input-xlarge",
data_bind="autocompleteSelect2: getAvailableCaseProperties",
),
css_class='col-sm-6'
),
crispy.Div(
InlineField(
'start_match_type',
data_bind="value: start_match_type",
),
css_class='col-sm-4'
),
crispy.Div(
InlineField(
'start_value',
style="margin-left: 5px;",
data_bind="visible: isStartMatchValueVisible",
),
css_class='col-sm-2'
),
field_class='col-md-8 col-sm-8'
),
data_bind="visible: isStartReminderCaseProperty",
),
crispy.Div(
hqcrispy.B3MultiField(
_("Day of Reminder"),
crispy.Div(
InlineField(
'start_property_offset_type',
data_bind="value: start_property_offset_type",
css_class="input-xlarge"
),
css_class='col-sm-6'
),
crispy.Div(
InlineField(
'start_property_offset',
css_class='input-mini',
style="margin-left: 5px;"
),
css_class='col-sm-3',
data_bind="visible: isStartPropertyOffsetVisible"
),
crispy.Div(
crispy.HTML('day(s)'),
css_class="help-inline col-sm-1",
data_bind="visible: isStartPropertyOffsetVisible"
),
crispy.Div(
InlineField(
'start_day_of_week',
css_class='input-medium',
),
css_class='col-sm-4',
data_bind="visible: isStartDayOfWeekVisible"
),
field_class='col-md-8 col-sm-8'
),
),
crispy.Div(
crispy.Field(
'start_date',
placeholder=_("Enter Case Property"),
css_class="input-xlarge",
data_bind="autocompleteSelect2: getAvailableCaseProperties",
),
hqcrispy.B3MultiField(
"",
crispy.Div(
InlineField(
'start_date_offset_type',
css_class="input-xlarge",
),
css_class='col-sm-4'
),
crispy.Div(
InlineField(
'start_date_offset',
css_class='input-mini',
),
crispy.HTML('<p class="help-inline">day(s)</p>'),
style='display: inline; margin-left: 5px;',
css_class='col-sm-2'
),
field_class='col-lg-8 col-md-8'
),
data_bind="visible: isStartReminderCaseDate"
),
]
@property
def section_recipient(self):
return crispy.Fieldset(
_("Recipient"),
FieldWithHelpBubble(
'recipient',
data_bind="value: recipient",
help_bubble_text=("The contact related to the case that reminder should go to. The Case "
"Owners are any mobile workers for which the case appears on their phone. "
"For cases with child or parent cases, you can also send the message to those "
"contacts. "),
css_class="input-xlarge",
),
hqcrispy.B3MultiField(
_("When Case Property"),
crispy.Div(
InlineField(
'recipient_case_match_property',
css_class="input-xlarge",
data_bind="autocompleteSelect2: getAvailableSubcaseProperties",
),
css_class='col-sm-6'
),
crispy.Div(
InlineField(
'recipient_case_match_type',
data_bind="value: recipient_case_match_type",
),
css_class='col-sm-4'
),
crispy.Div(
InlineField(
'recipient_case_match_value',
data_bind="visible: isRecipientCaseValueVisible",
),
css_class='col-sm-2'
),
data_bind="visible: isRecipientSubcase",
field_class='col-sm-8'
),
crispy.Div(
crispy.Field(
'user_group_id',
css_class="input-xlarge",
),
data_bind="visible: isRecipientGroup"
),
)
@property
def section_message(self):
return crispy.Fieldset(
_("Message Content") if self.ui_type == UI_SIMPLE_FIXED else _("Schedule"),
*self.section_message_fields
)
@property
def section_message_fields(self):
return [
FieldWithHelpBubble(
'method',
data_bind="value: method",
help_bubble_text=("Send a single SMS message or an interactive SMS survey. "
"SMS surveys are designed in the Surveys or Application "
"section. "),
css_class="input-xlarge",
),
crispy.Field('event_interpretation', data_bind="value: event_interpretation"),
hqcrispy.HiddenFieldWithErrors('events', data_bind="value: events"),
]
@property
def timing_fields(self):
return [
hqcrispy.B3MultiField(
_("Time of Day"),
InlineField(
'event_timing',
data_bind="value: event_timing",
css_class="col-sm-6",
),
crispy.Div(
style="display: inline;",
data_bind="template: {name: 'event-fire-template', foreach: eventObjects}",
css_class="col-sm-6"
),
css_id="timing_block",
help_bubble_text=("This controls when the message will be sent. The Time in Case "
"option is useful, for example, if the recipient has chosen a "
"specific time to receive the message."),
field_class='col-md-4 col-lg-4'
),
crispy.Div(
style="display: inline;",
data_bind="template: {name: 'event-general-template', foreach: eventObjects}"
)
]
@property
def section_repeat(self):
return crispy.Fieldset(
_("Repeat"),
crispy.Field(
'repeat_type',
data_bind="value: repeat_type",
css_class="input-xlarge",
),
crispy.Div(
crispy.Field(
'max_iteration_count',
css_class="input-medium",
),
data_bind="visible: isMaxIterationCountVisible",
),
hqcrispy.B3MultiField(
_("Repeat Every"),
crispy.Div(
InlineField(
'schedule_length',
css_class="input-medium",
),
css_class="col-sm-6"
),
crispy.Div(
crispy.HTML('<p class="help-inline">day(s)</p>'),
css_class="col-sm-1"
),
field_class="col-md-8 col-lg-8",
data_bind="visible: isScheduleLengthVisible"
),
)
@property
def section_advanced(self):
fields = [
hqcrispy.B3MultiField(
_("Additional Stop Condition"),
crispy.Div(
InlineField(
'stop_condition',
data_bind="value: stop_condition",
),
css_class="col-sm-6",
),
crispy.Div(
InlineField(
'until',
css_class="input-large",
data_bind="autocompleteSelect2: getAvailableCaseProperties",
),
css_class="col-sm-6",
data_bind="visible: isUntilVisible",
),
help_bubble_text=_("Reminders can be stopped after a date set in the case, or if a particular "
"case property is set to OK. Choose either a case property that is a date or "
"a case property that is going to be set to OK. Reminders will always stop if "
"the start condition is no longer true or if the case that triggered the "
"reminder is closed."),
css_id="stop-condition-group",
field_class='col-md-8 col-lg-8'
),
crispy.Div(
hqcrispy.B3MultiField(
_("Default Language"),
InlineField(
'default_lang',
data_bind="options: available_languages, "
"value: default_lang, "
"optionsText: 'name', optionsValue: 'langcode'",
css_class="input-xlarge",
),
),
data_bind="visible: showDefaultLanguageOption",
),
crispy.Div(
FieldWithHelpBubble(
'global_timeouts',
data_bind="value: global_timeouts",
placeholder="e.g. 30,60,180",
help_bubble_text=_(
"Will repeat the last message or question if the "
"user does not respond. Specify each interval "
"(in minutes) separated by a comma. "
"After the last interval, the survey will be closed. "
),
),
data_bind="visible: isGlobalTimeoutsVisible",
),
crispy.Div(
FieldWithHelpBubble(
'max_question_retries',
help_bubble_text=_("For IVR surveys, the number of times a person can provide an invalid "
"answer before the call will hang up. ")
),
data_bind="visible: isMaxQuestionRetriesVisible",
),
crispy.Div(
FieldWithHelpBubble(
'submit_partial_forms',
data_bind="checked: submit_partial_forms",
help_bubble_text=_(
"For surveys, this will let forms be saved even if "
"the survey has not been completed and the user is "
"not responding."
),
),
data_bind="visible: isPartialSubmissionsVisible",
),
crispy.Div(
FieldWithHelpBubble(
'include_case_side_effects',
help_bubble_text=_("When submitting a partial survey, this controls whether the corresponding "
"case should be created, updated or closed. This is may not be safe to do if "
"the form has not been completed. ")
),
data_bind="visible: isPartialSubmissionsVisible() && submit_partial_forms()",
),
crispy.Div(
twbscrispy.PrependedText('force_surveys_to_use_triggered_case', ''),
data_bind="visible: isForceSurveysToUsedTriggeredCaseVisible",
),
]
if self.is_previewer:
fields.append(
crispy.Div(
InlineField(
twbscrispy.PrependedText(
'use_custom_content_handler', '',
data_bind="checked: use_custom_content_handler"
),
css_class='col-sm-6'
),
crispy.Div(
crispy.Field(
'custom_content_handler',
),
data_bind="visible: use_custom_content_handler"
)
)
)
return hqcrispy.FieldsetAccordionGroup(
_("Advanced Options"),
*fields,
active=False
)
@property
def current_values(self):
current_values = {
'is_trial_project': domain_is_on_trial(self.domain),
'email_trial_message': EMAIL_TRIAL_MESSAGE % {'limit': TRIAL_MAX_EMAILS},
}
for field_name in self.fields.keys():
current_values[field_name] = self[field_name].value()
return current_values
@property
def relevant_choices(self):
return {
'MATCH_ANY_VALUE': MATCH_ANY_VALUE,
'START_REMINDER_ON_CASE_PROPERTY': START_REMINDER_ON_CASE_PROPERTY,
'START_REMINDER_ON_CASE_DATE': START_REMINDER_ON_CASE_DATE,
'START_REMINDER_ON_DAY_OF_WEEK': START_REMINDER_ON_DAY_OF_WEEK,
'RECIPIENT_CASE': RECIPIENT_CASE,
'RECIPIENT_SUBCASE': RECIPIENT_SUBCASE,
'RECIPIENT_USER_GROUP': RECIPIENT_USER_GROUP,
'METHOD_SMS': METHOD_SMS,
'METHOD_SMS_CALLBACK': METHOD_SMS_CALLBACK,
'METHOD_SMS_SURVEY': METHOD_SMS_SURVEY,
'METHOD_IVR_SURVEY': METHOD_IVR_SURVEY,
'METHOD_EMAIL': METHOD_EMAIL,
'START_PROPERTY_OFFSET_DELAY': START_PROPERTY_OFFSET_DELAY,
'START_PROPERTY_OFFSET_IMMEDIATE': START_PROPERTY_OFFSET_IMMEDIATE,
'FIRE_TIME_DEFAULT': FIRE_TIME_DEFAULT,
'FIRE_TIME_CASE_PROPERTY': FIRE_TIME_CASE_PROPERTY,
'FIRE_TIME_RANDOM': FIRE_TIME_RANDOM,
'EVENT_AS_OFFSET': EVENT_AS_OFFSET,
'EVENT_AS_SCHEDULE': EVENT_AS_SCHEDULE,
'UI_SIMPLE_FIXED': UI_SIMPLE_FIXED,
'UI_COMPLEX': UI_COMPLEX,
'EVENT_TIMING_IMMEDIATE': EVENT_TIMING_IMMEDIATE,
'REPEAT_TYPE_NO': REPEAT_TYPE_NO,
'REPEAT_TYPE_INDEFINITE': REPEAT_TYPE_INDEFINITE,
'REPEAT_TYPE_SPECIFIC': REPEAT_TYPE_SPECIFIC,
'STOP_CONDITION_CASE_PROPERTY': STOP_CONDITION_CASE_PROPERTY,
}
@staticmethod
def _format_event_timing_choice(event_interpretation, fire_time_type, special=None):
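        """Serialize (event interpretation, fire time type, special flag)
        into the JSON string used as an event_timing choice value."""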
return json.dumps({
'event_interpretation': event_interpretation,
'fire_time_type': fire_time_type,
'special': special,
})
def clean_case_type(self):
# todo check start_condition type when we get to the complex form
case_property = self.cleaned_data['case_type'].strip()
if not case_property:
raise ValidationError(_("Please specify a case type."))
return case_property
def clean_default_lang(self):
if len(self.available_languages) == 1:
return self.available_languages[0]
else:
return self.cleaned_data["default_lang"]
def clean_start_property(self):
start_reminder_on = self.cleaned_data['start_reminder_on']
if start_reminder_on == START_REMINDER_ON_CASE_PROPERTY:
start_property = self.cleaned_data['start_property'].strip()
if not start_property:
raise ValidationError(_(
"Please enter a case property for the match criteria."
))
return start_property
if start_reminder_on == START_REMINDER_ALL_CASES:
return START_PROPERTY_ALL_CASES_VALUE
return None
def clean_start_match_type(self):
start_reminder_on = self.cleaned_data['start_reminder_on']
if start_reminder_on == START_REMINDER_ON_CASE_PROPERTY:
return self.cleaned_data['start_match_type']
if start_reminder_on == START_REMINDER_ALL_CASES:
return MATCH_ANY_VALUE
return None
def clean_start_value(self):
if (self.cleaned_data['start_reminder_on'] == START_REMINDER_ON_CASE_PROPERTY
and self.cleaned_data['start_match_type'] != MATCH_ANY_VALUE):
start_value = self.cleaned_data['start_value'].strip()
if not start_value:
raise ValidationError(_(
"You must specify a value for the case property "
"match criteria."
))
return start_value
return None
def clean_start_property_offset(self):
if (self.cleaned_data['start_property_offset_type'] ==
START_PROPERTY_OFFSET_IMMEDIATE):
return 0
elif (self.cleaned_data['start_property_offset_type'] ==
START_PROPERTY_OFFSET_DELAY):
start_property_offset = self.cleaned_data['start_property_offset']
if start_property_offset < 0:
raise ValidationError(_("Please enter a non-negative number."))
return start_property_offset
else:
return None
def clean_start_day_of_week(self):
if self.cleaned_data['start_property_offset_type'] == START_REMINDER_ON_DAY_OF_WEEK:
day_of_week = self.cleaned_data['start_day_of_week']
try:
day_of_week = int(day_of_week)
assert day_of_week >= 0 and day_of_week <= 6
return day_of_week
except (ValueError, TypeError, AssertionError):
raise ValidationError(_("Please choose a day of the week."))
return DAY_ANY
def clean_start_date(self):
if (self.cleaned_data['start_property_offset_type'] ==
START_REMINDER_ON_CASE_DATE):
start_date = self.cleaned_data['start_date'].strip()
if not start_date:
raise ValidationError(_(
"You must specify a case property that will provide the "
"start date."
))
return start_date
return None
def clean_start_date_offset(self):
if (self.cleaned_data['start_property_offset_type'] ==
START_REMINDER_ON_CASE_DATE):
start_date_offset = self.cleaned_data['start_date_offset']
if start_date_offset < 0:
raise ValidationError("Please enter a positive number.")
if self.cleaned_data['start_date_offset_type'] == START_DATE_OFFSET_BEFORE:
return -start_date_offset
return start_date_offset
return None
def clean_user_group_id(self):
if self.cleaned_data['recipient'] == RECIPIENT_USER_GROUP:
value = self.cleaned_data['user_group_id']
return clean_group_id(value, self.domain)
else:
return None
def clean_recipient_case_match_property(self):
if self.cleaned_data['recipient'] == RECIPIENT_SUBCASE:
case_property = self.cleaned_data['recipient_case_match_property'].strip()
if not case_property:
raise ValidationError(_(
"You must specify a case property for the case's "
"child case."
))
return case_property
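        # Matching any value of the built-in '_id' property effectively
        # selects every child case (mirrored in compute_initial below).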
if self.cleaned_data['recipient'] == RECIPIENT_ALL_SUBCASES:
return '_id'
return None
def clean_recipient_case_match_type(self):
if self.cleaned_data['recipient'] == RECIPIENT_SUBCASE:
return self.cleaned_data['recipient_case_match_type']
if self.cleaned_data['recipient'] == RECIPIENT_ALL_SUBCASES:
return MATCH_ANY_VALUE
return None
def clean_recipient_case_match_value(self):
if (self.cleaned_data['recipient'] == RECIPIENT_SUBCASE
and self.cleaned_data['recipient_case_match_type'] != MATCH_ANY_VALUE):
value = self.cleaned_data['recipient_case_match_value'].strip()
if not value:
raise ValidationError(_("You must provide a value."))
return value
return None
def _clean_timeouts(self, value):
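        """Parse a comma-separated string of positive integers (minutes),
        e.g. "30,60,180", into a list of ints."""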
if value:
timeouts_str = value.split(",")
timeouts_int = []
for t in timeouts_str:
try:
t = int(t.strip())
assert t > 0
timeouts_int.append(t)
except (ValueError, AssertionError):
raise ValidationError(_(
"Timeout intervals must be a list of positive "
"numbers separated by commas."
))
return timeouts_int
return []
def clean_global_timeouts(self):
method = self.cleaned_data['method']
if (self.ui_type == UI_SIMPLE_FIXED and
method in (METHOD_SMS_CALLBACK, METHOD_SMS_SURVEY, METHOD_IVR_SURVEY)):
return self._clean_timeouts(self.cleaned_data['global_timeouts'])
else:
return []
def clean_translated_field(self, translations, default_lang):
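        """Drop empty translations and require that a message exists for
        the default language."""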
for lang, msg in translations.items():
if msg:
msg = msg.strip()
if not msg:
del translations[lang]
else:
translations[lang] = msg
if default_lang not in translations:
default_lang_name = (get_language_name(default_lang) or
default_lang)
raise ValidationError(_("Please provide messages for the "
"default language (%(language)s) or change the default "
"language at the bottom of the page.") %
{"language": default_lang_name})
return translations
def clean_events(self):
method = self.cleaned_data['method']
try:
events = json.loads(self.cleaned_data['events'])
except ValueError:
raise ValidationError(_(
"A valid JSON object was not passed in the events input."
))
default_lang = self.cleaned_data["default_lang"]
has_fire_time_case_property = False
for event in events:
eventForm = CaseReminderEventForm(
data=event,
)
if not eventForm.is_valid():
raise ValidationError(_(
"Your event form didn't turn out quite right."
))
event.update(eventForm.cleaned_data)
# the reason why we clean the following fields here instead of eventForm is so that
# we can utilize the ValidationErrors for this field.
# clean subject:
if method == METHOD_EMAIL:
event['subject'] = self.clean_translated_field(
event.get('subject', {}), default_lang)
else:
event['subject'] = {}
# clean message:
if method in (METHOD_SMS, METHOD_SMS_CALLBACK, METHOD_EMAIL):
event['message'] = self.clean_translated_field(
event.get('message', {}), default_lang)
else:
event['message'] = {}
# clean form_unique_id:
if method in (METHOD_SMS, METHOD_SMS_CALLBACK, METHOD_EMAIL):
event['form_unique_id'] = None
else:
form_unique_id = event.get('form_unique_id')
if not form_unique_id:
raise ValidationError(_(
"Please create a form for the survey first, "
"and then create the reminder."
))
validate_form_unique_id(form_unique_id, self.domain)
fire_time_type = event['fire_time_type']
# clean fire_time:
if fire_time_type == FIRE_TIME_CASE_PROPERTY:
event['fire_time'] = None
has_fire_time_case_property = True
elif event['is_immediate']:
event['fire_time'] = ONE_MINUTE_OFFSET
else:
event['fire_time'] = validate_time(event['fire_time'])
# clean fire_time_aux:
if fire_time_type != FIRE_TIME_CASE_PROPERTY:
event['fire_time_aux'] = None
elif not event.get('fire_time_aux'):
raise ValidationError(_(
"Please enter the case property from which to pull "
"the time."
))
# clean time_window_length:
time_window_length = event['time_window_length']
if fire_time_type != FIRE_TIME_RANDOM:
event['time_window_length'] = None
elif not (0 < time_window_length < 1440):
raise ValidationError(_(
"Window Length must be greater than 0 and less "
"than 1440 minutes."
))
# clean day_num:
if self.ui_type == UI_SIMPLE_FIXED or event['is_immediate']:
event['day_num'] = 0
else:
event['day_num'] = validate_integer(event['day_num'],
_('Day must be specified and must be a non-negative number.'),
nonnegative=True)
# clean callback_timeout_intervals:
if (method == METHOD_SMS_CALLBACK
or method == METHOD_IVR_SURVEY
or method == METHOD_SMS_SURVEY):
if self.ui_type == UI_SIMPLE_FIXED:
value = self.cleaned_data.get('global_timeouts', [])
else:
value = self._clean_timeouts(event["callback_timeout_intervals"])
event['callback_timeout_intervals'] = value
else:
event['callback_timeout_intervals'] = []
# delete all data that was just UI based:
del event['message_data'] # this is only for storing the stringified version of message
del event['subject_data']
del event['is_immediate']
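        # For fixed schedules (and when fire times don't come from case
        # properties), order events chronologically by their absolute
        # minute offset within the schedule.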
event_interpretation = self.cleaned_data["event_interpretation"]
if (event_interpretation == EVENT_AS_SCHEDULE and
not has_fire_time_case_property):
event_time = lambda e: (
(1440 * e['day_num']) +
(60 * e['fire_time'].hour) +
e['fire_time'].minute)
events.sort(key=event_time)
return events
def get_min_schedule_length(self):
"""
Only meant to be called when the event_interpretation is
EVENT_AS_SCHEDULE. This will return the minimum allowed value for
schedule_length.
"""
max_day_num = 0
for event in self.cleaned_data.get("events", []):
day_num = event['day_num']
if day_num > max_day_num:
max_day_num = day_num
return max_day_num + 1
def clean_schedule_length(self):
event_interpretation = self.cleaned_data["event_interpretation"]
if self.cleaned_data['repeat_type'] == REPEAT_TYPE_NO:
if event_interpretation == EVENT_AS_SCHEDULE:
return self.get_min_schedule_length()
else:
return 1
value = self.cleaned_data['schedule_length']
if event_interpretation == EVENT_AS_OFFSET and value < 0:
raise ValidationError("Please enter a non-negative number.")
elif event_interpretation == EVENT_AS_SCHEDULE:
min_value = self.get_min_schedule_length()
if value < min_value:
raise ValidationError("This must be at least %s based on the "
"schedule defined above." % min_value)
return value
def clean_max_iteration_count(self):
repeat_type = self.cleaned_data['repeat_type']
if repeat_type == REPEAT_TYPE_NO:
return 1
if repeat_type == REPEAT_TYPE_INDEFINITE:
return REPEAT_SCHEDULE_INDEFINITELY
max_iteration_count = self.cleaned_data['max_iteration_count']
if max_iteration_count <= 0:
raise ValidationError(_(
"Please enter a number that is 1 or greater."
))
return max_iteration_count
def clean_until(self):
if self.cleaned_data['stop_condition'] == STOP_CONDITION_CASE_PROPERTY:
value = self.cleaned_data['until'].strip()
if not value:
raise ValidationError(_(
"You must specify a case property for the stop condition."
))
return value
return None
def clean_max_question_retries(self):
value = self.cleaned_data['max_question_retries']
try:
value = int(value)
except ValueError:
raise ValidationError(_(
"Max question retries must be an integer."
))
return value
def clean_use_custom_content_handler(self):
if self.is_previewer:
return self.cleaned_data["use_custom_content_handler"]
else:
return None
def clean_custom_content_handler(self):
if self.is_previewer:
value = self.cleaned_data["custom_content_handler"]
if self.cleaned_data["use_custom_content_handler"]:
if value in settings.ALLOWED_CUSTOM_CONTENT_HANDLERS:
return value
else:
raise ValidationError(_("Invalid custom content handler."))
else:
return None
else:
return None
def save(self, reminder_handler):
if not isinstance(reminder_handler, CaseReminderHandler):
raise ValueError(_(
"You must save to a CaseReminderHandler object!"
))
events = self.cleaned_data['events']
event_objects = []
for event in events:
new_event = CaseReminderEvent()
for prop, val in event.items():
setattr(new_event, prop, val)
event_objects.append(new_event)
reminder_handler.events = event_objects
fields = [
'nickname',
'case_type',
'start_property',
'start_match_type',
'start_value',
'start_date',
'start_day_of_week',
'recipient',
'user_group_id',
'recipient_case_match_property',
'recipient_case_match_type',
'recipient_case_match_value',
'method',
'event_interpretation',
'schedule_length',
'max_iteration_count',
'until',
'submit_partial_forms',
'include_case_side_effects',
'default_lang',
'max_question_retries',
'force_surveys_to_use_triggered_case',
]
if self.is_previewer:
fields.append('custom_content_handler')
for field in fields:
value = self.cleaned_data[field]
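            # "All Child Cases" is persisted as a subcase recipient whose
            # match criteria ('_id' matches any value) select every child.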
if field == 'recipient' and value == RECIPIENT_ALL_SUBCASES:
value = RECIPIENT_SUBCASE
setattr(reminder_handler, field, value)
start_property_offset = self.cleaned_data['start_property_offset']
start_date_offset = self.cleaned_data['start_date_offset']
reminder_handler.start_offset = (start_property_offset or
start_date_offset or 0)
reminder_handler.ui_type = self.ui_type
reminder_handler.domain = self.domain
reminder_handler.start_condition_type = CASE_CRITERIA
# If any of the scheduling information has changed, have it recalculate
# the schedule for each reminder instance
if reminder_handler._id:
old_definition = CaseReminderHandler.get(reminder_handler._id)
save_kwargs = {
"schedule_changed": reminder_handler.schedule_has_changed(old_definition),
"prev_definition": old_definition,
}
else:
save_kwargs = {}
reminder_handler.save(**save_kwargs)
@classmethod
def compute_initial(cls, reminder_handler, available_languages):
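        """Build the initial data dict for this form from an existing
        CaseReminderHandler, so the edit view is pre-populated."""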
initial = {}
fields = cls.__dict__['base_fields'].keys()
for field in fields:
try:
current_val = getattr(reminder_handler, field, Ellipsis)
if field == 'events':
events_json = []
for event in current_val:
event_json = event.to_json()
if not event_json.get("message", None):
event_json["message"] = {}
if not event_json.get("subject", None):
event_json["subject"] = {}
for langcode in available_languages:
if langcode not in event_json["message"]:
event_json["message"][langcode] = ""
if langcode not in event_json["subject"]:
event_json["subject"][langcode] = ""
timeouts = [str(i) for i in
event_json["callback_timeout_intervals"]]
event_json["callback_timeout_intervals"] = ", ".join(
timeouts)
events_json.append(event_json)
current_val = json.dumps(events_json)
if (field == 'recipient'
and reminder_handler.recipient_case_match_property == '_id'
and reminder_handler.recipient_case_match_type == MATCH_ANY_VALUE
):
current_val = RECIPIENT_ALL_SUBCASES
if current_val is not Ellipsis:
initial[field] = current_val
                if field == 'custom_content_handler' and current_val is not None:
initial['use_custom_content_handler'] = True
except AttributeError:
pass
if (initial['start_property'] == START_PROPERTY_ALL_CASES_VALUE
and initial['start_match_type'] == MATCH_ANY_VALUE):
initial['start_reminder_on'] = START_REMINDER_ALL_CASES
del initial['start_property']
del initial['start_match_type']
else:
initial['start_reminder_on'] = START_REMINDER_ON_CASE_PROPERTY
if reminder_handler.start_date is None:
initial['start_property_offset_type'] = (
START_PROPERTY_OFFSET_IMMEDIATE
if reminder_handler.start_offset == 0
else START_PROPERTY_OFFSET_DELAY)
initial['start_property_offset'] = reminder_handler.start_offset
else:
initial['start_property_offset_type'] = START_REMINDER_ON_CASE_DATE
initial['start_date_offset_type'] = (
START_DATE_OFFSET_BEFORE
if reminder_handler.start_offset < 0
else START_DATE_OFFSET_AFTER)
initial['start_date_offset'] = abs(reminder_handler.start_offset)
if reminder_handler.start_day_of_week != DAY_ANY:
initial['start_property_offset_type'] = START_REMINDER_ON_DAY_OF_WEEK
if (len(reminder_handler.events) == 1 and
reminder_handler.event_interpretation == EVENT_AS_OFFSET and
reminder_handler.events[0].day_num == 0 and
reminder_handler.events[0].fire_time == ONE_MINUTE_OFFSET):
sends_immediately = True
else:
sends_immediately = False
if len(reminder_handler.events) > 0:
initial['event_timing'] = cls._format_event_timing_choice(
reminder_handler.event_interpretation,
reminder_handler.events[0].fire_time_type,
(EVENT_TIMING_IMMEDIATE if sends_immediately and
reminder_handler.ui_type == UI_SIMPLE_FIXED else None),
)
if reminder_handler.until:
initial['stop_condition'] = STOP_CONDITION_CASE_PROPERTY
return initial
class SimpleScheduleCaseReminderForm(BaseScheduleCaseReminderForm):
def __init__(self, *args, **kwargs):
super(SimpleScheduleCaseReminderForm, self).__init__(*args, **kwargs)
event_timing_choices = (
((EVENT_AS_OFFSET, FIRE_TIME_DEFAULT, EVENT_TIMING_IMMEDIATE),
_("Immediately When Triggered")),
((EVENT_AS_SCHEDULE, FIRE_TIME_DEFAULT, None),
_("At a Specific Time")),
((EVENT_AS_OFFSET, FIRE_TIME_DEFAULT, None),
_("Delay After Start")),
((EVENT_AS_SCHEDULE, FIRE_TIME_CASE_PROPERTY, None),
_("Time Specific in Case")),
((EVENT_AS_SCHEDULE, FIRE_TIME_RANDOM, None),
_("Random Time in Window")),
)
event_timing_choices = [(self._format_event_timing_choice(e[0][0], e[0][1], e[0][2]), e[1])
for e in event_timing_choices]
self.fields['event_timing'].choices = event_timing_choices
@property
def ui_type(self):
return UI_SIMPLE_FIXED
@property
def section_start_fields(self):
start_fields = super(SimpleScheduleCaseReminderForm, self).section_start_fields
start_fields.extend(self.timing_fields)
return start_fields
@property
def section_message_fields(self):
message_fields = super(SimpleScheduleCaseReminderForm, self).section_message_fields
message_fields.append(
crispy.Div(data_bind="template: {name: 'event-template', foreach: eventObjects}")
)
return message_fields
@property
def timing_fields(self):
return [
hqcrispy.B3MultiField(
_("Time of Day"),
crispy.Div(
InlineField(
'event_timing',
data_bind="value: event_timing"
),
css_class='col-sm-6'
),
crispy.Div(
style="display: inline;",
data_bind="template: {name: 'event-fire-template', foreach: eventObjects}",
css_class="col-sm-6"
),
css_id="timing_block",
help_bubble_text=_("This controls when the message will be sent. The Time in Case "
"option is useful, for example, if the recipient has chosen a "
"specific time to receive the message."),
field_class='col-md-8 col-lg-8'
),
crispy.Div(
style="display: inline;",
data_bind="template: {name: 'event-general-template', foreach: eventObjects}"
)
]
class ComplexScheduleCaseReminderForm(BaseScheduleCaseReminderForm):
def __init__(self, *args, **kwargs):
super(ComplexScheduleCaseReminderForm, self).__init__(*args, **kwargs)
event_timing_choices = (
((EVENT_AS_SCHEDULE, FIRE_TIME_DEFAULT, None),
_("At a Specific Time")),
((EVENT_AS_OFFSET, FIRE_TIME_DEFAULT, None),
_("Delay After Start By")),
((EVENT_AS_SCHEDULE, FIRE_TIME_CASE_PROPERTY, None),
_("Time Specific in Case")
),
((EVENT_AS_SCHEDULE, FIRE_TIME_RANDOM, None),
_("Random Time in Window")),
)
event_timing_choices = [(self._format_event_timing_choice(e[0][0], e[0][1], e[0][2]), e[1])
for e in event_timing_choices]
self.fields['event_timing'].choices = event_timing_choices
@property
def ui_type(self):
return UI_COMPLEX
@property
def section_message_fields(self):
fields = super(ComplexScheduleCaseReminderForm, self).section_message_fields
fields = fields[:1] + self.timing_fields + fields[1:]
fields.append(crispy.Div(template='reminders/partial/complex_message_table.html'))
return fields
@property
def timing_fields(self):
return [
hqcrispy.B3MultiField(
_("Time of Day"),
crispy.Div(
InlineField(
'event_timing',
data_bind="value: event_timing",
css_class="input-xlarge",
),
css_class='col-sm-6'
),
field_class='col-md-4 col-lg-4',
css_id="timing_block",
help_bubble_text=_("This controls when the message will be sent. The Time in Case "
"option is useful, for example, if the recipient has chosen a "
"specific time to receive the message.")
),
]
class CaseReminderEventForm(forms.Form):
"""
This form creates or modifies a CaseReminderEvent.
"""
fire_time_type = forms.ChoiceField(
choices=(
(FIRE_TIME_DEFAULT, ugettext_noop("Default")),
(FIRE_TIME_CASE_PROPERTY, ugettext_noop("Case Property")), # not valid when method != EVENT_AS_SCHEDULE
(FIRE_TIME_RANDOM, ugettext_noop("Random")), # not valid when method != EVENT_AS_SCHEDULE
),
widget=forms.HiddenInput, # don't actually display this widget to the user for now, but validate as choice
)
# EVENT_AS_OFFSET: number of days after last fire
# EVENT_AS_SCHEDULE: number of days since the current event cycle began
day_num = forms.IntegerField(
required=False,
widget=forms.HiddenInput,
)
# EVENT_AS_OFFSET: number of HH:MM:SS after last fire
# EVENT_AS_SCHEDULE: time of day
fire_time = forms.TimeField(
required=False,
label=ugettext_noop("HH:MM:SS"),
)
# method must be EVENT_AS_SCHEDULE
fire_time_aux = forms.CharField(
required=False,
label=ugettext_noop("Enter a Case Property"),
)
time_window_length = forms.IntegerField(
label=ugettext_noop("Window Length (minutes)"),
required=False
)
# subject is visible when the method of the reminder is METHOD_EMAIL
# value will be a dict of {langcode: message}
subject_data = forms.CharField(
required=False,
widget=forms.HiddenInput,
)
# message is visible when the method of the reminder is (METHOD_SMS, METHOD_SMS_CALLBACK, METHOD_EMAIL)
# value will be a dict of {langcode: message}
message_data = forms.CharField(
required=False,
widget=forms.HiddenInput,
)
# form_unique_id is visible when the method of the reminder is SMS_SURVEY or IVR_SURVEY
form_unique_id = forms.CharField(
required=False,
label=ugettext_noop("Survey"),
)
callback_timeout_intervals = forms.CharField(
required=False,
)
def __init__(self, ui_type=None, *args, **kwargs):
super(CaseReminderEventForm, self).__init__(*args, **kwargs)
self.ui_type = ui_type
self.helper_fire_time = FormHelper()
self.helper_fire_time.form_tag = False
self.helper_fire_time.layout = crispy.Layout(
crispy.Div(
template="reminders/partial/fire_time_field.html",
),
crispy.Div(
InlineField(
'fire_time_aux',
data_bind="value: fire_time_aux, attr: {id: ''}",
css_class="input-large",
),
css_class="help-inline",
data_bind="visible: isFireTimeAuxVisible",
),
)
# Note the following is only used for the Simple UI.
# The Complex UI goes off the template: reminders/partial/complex_message_table.html
self.helper = FormHelper()
self.helper.label_class = 'col-sm-2 col-md-2 col-lg-2'
self.helper.field_class = 'col-sm-4 col-md-4 col-lg-4'
self.helper.form_tag = False
self.helper.layout = crispy.Layout(
crispy.Field('subject_data', data_bind="value: subject_data, attr: {id: ''}"),
crispy.Field('message_data', data_bind="value: message_data, attr: {id: ''}"),
crispy.Div(data_bind="template: {name: 'event-message-template', foreach: messageTranslations}, "
"visible: isMessageVisible"),
crispy.Div(
crispy.Field(
'form_unique_id',
data_bind="value: form_unique_id, attr: {id: ''}",
css_class="input-xxlarge",
),
data_bind="visible: isSurveyVisible",
),
)
self.helper_general = FormHelper()
self.helper_general.form_tag = False
self.helper_general.label_class = 'col-sm-2 col-md-2 col-lg-2'
self.helper_general.field_class = 'col-sm-4 col-md-4 col-lg-4'
self.helper_general.layout = crispy.Layout(
crispy.Div(
crispy.Field('time_window_length', data_bind="value: time_window_length, attr: {id: ''}"),
data_bind="visible: isWindowLengthVisible",
),
crispy.Field('fire_time_type', data_bind="value: fire_time_type, attr: {id: ''}"),
crispy.Field('day_num', data_bind="value: day_num, attr: {id: ''}"),
)
class CaseReminderEventMessageForm(forms.Form):
"""
This form specifies the UI for messages in CaseReminderEventForm.
"""
langcode = forms.CharField(
required=False,
widget=forms.HiddenInput
)
subject = forms.CharField(
required=False,
widget=forms.Textarea
)
message = forms.CharField(
required=False,
widget=forms.Textarea
)
def __init__(self, *args, **kwargs):
super(CaseReminderEventMessageForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.label_class = 'col-sm-3 col-md-2 col-lg-2'
self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
self.helper.form_tag = False
self.helper.layout = crispy.Layout(
crispy.Field('langcode', data_bind="value: langcode"),
hqcrispy.B3MultiField(
'%s <span data-bind="visible: languageLabel()">'
'(<span data-bind="text:languageLabel"></span>)</span>' %
_("Message"),
crispy.Div(
InlineField(
'subject',
data_bind="value: subject, valueUpdate: 'keyup'",
css_class="input-xlarge",
rows="2",
),
css_class='col-sm-4',
data_bind="visible: $parent.isEmailSelected()"
),
crispy.Div(
InlineField(
'message',
data_bind="value: message, valueUpdate: 'keyup'",
css_class="input-xlarge",
rows="2",
),
css_class='col-sm-4'
),
crispy.Div(
style="padding-top: 25px; padding-left: 5px; clear: both",
data_bind="template: { name: 'event-message-length-template' },"
" visible: !$parent.isEmailSelected()"
),
),
)
def clean_selection(value):
if value == "" or value is None:
raise ValidationError(_("Please make a selection."))
else:
return value
class RecordListWidget(Widget):
# When initialized, expects to be passed attrs={"input_name" : < first dot-separated name of all related records in the html form >}
def value_from_datadict(self, data, files, name, *args, **kwargs):
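        # e.g. {"named_args.0.name": "N", "named_args.0.xpath": "/x"} with
        # input_name "named_args" yields [{"name": "N", "xpath": "/x"}]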
input_name = self.attrs["input_name"]
raw = {}
for key in data:
if key.startswith(input_name + "."):
raw[key] = data[key]
data_dict = DotExpandedDict(raw)
data_list = []
if len(data_dict) > 0:
for key in sorted(data_dict[input_name].iterkeys()):
data_list.append(data_dict[input_name][key])
return data_list
def render(self, name, value, attrs=None):
return render_to_string('reminders/partial/record_list_widget.html', {
'value': value,
'name': name,
})
class RecordListField(Field):
required = None
label = None
initial = None
widget = None
help_text = None
# When initialized, expects to be passed kwarg input_name, which is the first dot-separated name of all related records in the html form
def __init__(self, *args, **kwargs):
input_name = kwargs.pop('input_name')
kwargs['widget'] = RecordListWidget(attrs={"input_name" : input_name})
super(RecordListField, self).__init__(*args, **kwargs)
def clean(self, value):
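        # Per-record validation is handled by the consuming form
        # (e.g. KeywordForm.clean_named_args), so pass the value through.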
return value
class KeywordForm(Form):
_cchq_domain = None
_sk_id = None
keyword = CharField(label=ugettext_noop("Keyword"))
description = TrimmedCharField(label=ugettext_noop("Description"))
override_open_sessions = BooleanField(
required=False,
initial=False,
label=ugettext_noop("Override open SMS Surveys"),
)
allow_keyword_use_by = ChoiceField(
required=False,
label=ugettext_noop("Allow Keyword Use By"),
initial='any',
choices=(
('any', ugettext_noop("Both Mobile Workers and Cases")),
('users', ugettext_noop("Mobile Workers Only")),
('cases', ugettext_noop("Cases Only")),
)
)
sender_content_type = ChoiceField(
label=ugettext_noop("Send to Sender"),
)
sender_message = TrimmedCharField(
required=False,
label=ugettext_noop("Message"),
)
sender_form_unique_id = ChoiceField(
required=False,
label=ugettext_noop("Survey"),
)
other_recipient_content_type = ChoiceField(
required=False,
label=ugettext_noop("Notify Another Person"),
initial=NO_RESPONSE,
)
other_recipient_type = ChoiceField(
required=False,
initial=False,
label=ugettext_noop("Recipient"),
choices=KEYWORD_RECIPIENT_CHOICES,
)
other_recipient_id = ChoiceField(
required=False,
label=ugettext_noop("Group Name"),
)
other_recipient_message = TrimmedCharField(
required=False,
label=ugettext_noop("Message"),
)
other_recipient_form_unique_id = ChoiceField(
required=False,
label=ugettext_noop("Survey"),
)
process_structured_sms = BooleanField(
required=False,
label=ugettext_noop("Process incoming keywords as a Structured Message"),
)
structured_sms_form_unique_id = ChoiceField(
required=False,
label=ugettext_noop("Survey"),
)
use_custom_delimiter = BooleanField(
required=False,
label=ugettext_noop("Use Custom Delimiter"),
)
delimiter = TrimmedCharField(
required=False,
label=ugettext_noop("Please Specify Delimiter"),
)
use_named_args_separator = BooleanField(
required=False,
label=ugettext_noop("Use Joining Character"),
)
use_named_args = BooleanField(
required=False,
label=ugettext_noop("Use Named Answers"),
)
named_args_separator = TrimmedCharField(
required=False,
label=ugettext_noop("Please Specify Joining Characcter"),
)
named_args = RecordListField(
input_name="named_args",
initial=[],
)
def __init__(self, *args, **kwargs):
if 'domain' in kwargs:
self._cchq_domain = kwargs.pop('domain')
self.process_structured_sms = False
if 'process_structured' in kwargs:
self.process_structured_sms = kwargs.pop('process_structured')
super(KeywordForm, self).__init__(*args, **kwargs)
self.fields['sender_content_type'].choices = self.content_type_choices
self.fields['other_recipient_content_type'].choices = self.content_type_choices
self.fields['other_recipient_id'].choices = self.group_choices
self.fields['sender_form_unique_id'].choices = self.form_choices
self.fields['other_recipient_form_unique_id'].choices = self.form_choices
self.fields['structured_sms_form_unique_id'].choices = self.form_choices
from corehq.apps.reminders.views import KeywordsListView
self.helper = FormHelper()
self.helper.form_class = "form form-horizontal"
self.helper.label_class = 'col-sm-3 col-md-2'
self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
layout_fields = [
crispy.Fieldset(
_('Basic Information'),
crispy.Field(
'keyword',
data_bind="value: keyword, "
"valueUpdate: 'afterkeydown', "
"event: {keyup: updateExampleStructuredSMS}",
),
crispy.Field(
'description',
data_bind="text: description",
),
),
]
if self.process_structured_sms:
layout_fields.append(
crispy.Fieldset(
_("Structured Message Options"),
crispy.Field(
'structured_sms_form_unique_id',
data_bind="value: structured_sms_form_unique_id",
),
hqcrispy.B3MultiField(
_("Delimiters"),
crispy.Div(
crispy.Div(
InlineField(
twbscrispy.PrependedText('use_custom_delimiter', '',
data_bind="checked: use_custom_delimiter, "
"click: updateExampleStructuredSMS"),
block_css_class="span2",
),
css_class='col-md-4 col-lg-4'
),
crispy.Div(
InlineField(
'delimiter',
data_bind="value: delimiter, "
"valueUpdate: 'afterkeydown', "
"event: {keyup: updateExampleStructuredSMS},"
"visible: use_custom_delimiter",
block_css_class="span4",
),
css_class='col-md-4 col-lg-4'
)
),
),
hqcrispy.B3MultiField(
_("Named Answers"),
crispy.Div(
InlineField(
twbscrispy.PrependedText('use_named_args', '',
data_bind="checked: use_named_args, "
"click: updateExampleStructuredSMS"),
),
css_class='col-md-4 col-lg-4'
),
ErrorsOnlyField('named_args'),
crispy.Div(
data_bind="template: {"
" name: 'ko-template-named-args', "
" data: $data"
"}, "
"visible: use_named_args",
),
),
hqcrispy.B3MultiField(
_("Joining Characters"),
crispy.Div(
crispy.Div(
InlineField(
twbscrispy.PrependedText(
'use_named_args_separator', '',
data_bind="checked: use_named_args_separator, "
"click: updateExampleStructuredSMS"
),
),
css_class='col-md-4 col-lg-4'
),
crispy.Div(
InlineField(
'named_args_separator',
data_bind="value: named_args_separator, "
"valueUpdate: 'afterkeydown', "
"event: {keyup: updateExampleStructuredSMS},"
"visible: useJoiningCharacter",
),
css_class='col-md-6 col-lg-4'
)
),
data_bind="visible: use_named_args",
),
hqcrispy.B3MultiField(
_("Example Structured Message"),
crispy.HTML('<pre style="background: white;" '
'data-bind="text: example_structured_sms">'
'</pre>'),
),
),
)
layout_fields.extend([
crispy.Fieldset(
_("Response"),
crispy.Field(
'sender_content_type',
data_bind="value: sender_content_type",
),
crispy.Div(
crispy.Field(
'sender_message',
data_bind="text: sender_message",
),
data_bind="visible: isMessageSMS",
),
crispy.Div(
crispy.Field(
'sender_form_unique_id',
data_bind="value: sender_form_unique_id"
),
data_bind="visible: isMessageSurvey",
),
crispy.Field(
'other_recipient_content_type',
data_bind="value: other_recipient_content_type",
),
hqcrispy.B3MultiField(
"",
crispy.Div(
crispy.HTML(
'<h4 style="margin-bottom: 20px;">%s</h4>'
% _("Recipient Information"),
),
crispy.Field(
'other_recipient_type',
data_bind="value: other_recipient_type",
),
crispy.Div(
crispy.Field(
'other_recipient_id',
data_bind="value: other_recipient_id",
),
data_bind="visible: showRecipientGroup",
),
crispy.Div(
crispy.Field(
'other_recipient_message',
data_bind="value: other_recipient_message",
),
data_bind="visible: other_recipient_content_type() == 'sms'",
),
crispy.Div(
crispy.Field(
'other_recipient_form_unique_id',
data_bind="value: other_recipient_form_unique_id",
),
data_bind="visible: other_recipient_content_type() == 'survey'",
),
css_class="well",
data_bind="visible: notify_others",
),
),
),
crispy.Fieldset(
_("Advanced Options"),
twbscrispy.PrependedText(
'override_open_sessions', '',
data_bind="checked: override_open_sessions",
),
'allow_keyword_use_by',
),
hqcrispy.FormActions(
twbscrispy.StrictButton(
_("Save Keyword"),
css_class='btn-primary',
type='submit',
),
crispy.HTML('<a href="%s" class="btn btn-default">Cancel</a>'
% reverse(KeywordsListView.urlname, args=[self._cchq_domain]))
),
])
self.helper.layout = crispy.Layout(*layout_fields)
@property
def content_type_choices(self):
return KEYWORD_CONTENT_CHOICES
@property
@memoized
def group_choices(self):
group_ids = Group.ids_by_domain(self._cchq_domain)
groups = []
for group_doc in iter_docs(Group.get_db(), group_ids):
groups.append((group_doc['_id'], group_doc['name']))
return groups
@property
@memoized
def form_choices(self):
return form_choices(self._cchq_domain)
@property
def current_values(self):
values = {}
for field_name in self.fields.keys():
values[field_name] = self[field_name].value()
return values
def clean_keyword(self):
value = self.cleaned_data.get("keyword")
if value is not None:
value = value.strip().upper()
if value is None or value == "":
raise ValidationError(_("This field is required."))
if len(value.split()) > 1:
raise ValidationError(_("Keyword should be one word."))
duplicate = SurveyKeyword.get_keyword(self._cchq_domain, value)
if duplicate is not None and duplicate._id != self._sk_id:
raise ValidationError(_("Keyword already exists."))
return value
def clean_sender_message(self):
value = self.cleaned_data.get("sender_message")
if self.cleaned_data.get("sender_content_type") == METHOD_SMS:
if value is None or value == "":
raise ValidationError(_("This field is required."))
return value
else:
return None
def clean_sender_form_unique_id(self):
value = self.cleaned_data.get("sender_form_unique_id")
if self.cleaned_data.get("sender_content_type") == METHOD_SMS_SURVEY:
if value is None:
raise ValidationError(_(
"Please create a form first, and then add a keyword "
"for it."
))
validate_form_unique_id(value, self._cchq_domain)
return value
else:
return None
def clean_other_recipient_message(self):
value = self.cleaned_data.get("other_recipient_message")
if self.cleaned_data.get("other_recipient_content_type") == METHOD_SMS:
if value is None or value == "":
raise ValidationError(_("This field is required."))
return value
else:
return None
def clean_other_recipient_form_unique_id(self):
value = self.cleaned_data.get("other_recipient_form_unique_id")
if self.cleaned_data.get("other_recipient_content_type") == METHOD_SMS_SURVEY:
if value is None:
raise ValidationError(_(
"Please create a form first, and then "
"add a keyword for it."
))
validate_form_unique_id(value, self._cchq_domain)
return value
else:
return None
def clean_structured_sms_form_unique_id(self):
value = self.cleaned_data.get("structured_sms_form_unique_id")
if self.process_structured_sms:
if value is None:
raise ValidationError(_(
"Please create a form first, and then add a "
"keyword for it."
))
validate_form_unique_id(value, self._cchq_domain)
return value
else:
return None
def clean_delimiter(self):
value = self.cleaned_data.get("delimiter", None)
if self.process_structured_sms and self.cleaned_data["use_custom_delimiter"]:
if value is None or value == "":
raise ValidationError(_("This field is required."))
return value
else:
return None
def clean_named_args(self):
if self.process_structured_sms and self.cleaned_data["use_named_args"]:
use_named_args_separator = self.cleaned_data["use_named_args_separator"]
value = self.cleaned_data.get("named_args")
data_dict = {}
for d in value:
name = d["name"].strip().upper()
xpath = d["xpath"].strip()
if name == "" or xpath == "":
raise ValidationError(_(
"Name and xpath are both required fields."
))
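                # Without a joining character, incoming answers are matched by
                # name prefix, so overlapping names would be ambiguous.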
for k, v in data_dict.items():
if (not use_named_args_separator
and (k.startswith(name) or name.startswith(k))
):
raise ValidationError(
_("Cannot have two names overlap: ") + "(%s, %s)"
% (k, name)
)
if use_named_args_separator and k == name:
raise ValidationError(
_("Cannot use the same name twice: ") + name
)
if v == xpath:
raise ValidationError(
_("Cannot reference the same xpath twice: ") + xpath
)
data_dict[name] = xpath
return data_dict
else:
return {}
def clean_named_args_separator(self):
value = self.cleaned_data["named_args_separator"]
if (self.process_structured_sms
and self.cleaned_data["use_named_args"]
and self.cleaned_data["use_named_args_separator"]
):
if value is None or value == "":
raise ValidationError(_("This field is required."))
if value == self.cleaned_data["delimiter"]:
raise ValidationError(_(
"Delimiter and joining character cannot be the same."
))
return value
else:
return None
def clean_other_recipient_type(self):
if self.cleaned_data['other_recipient_content_type'] == NO_RESPONSE:
return None
value = self.cleaned_data["other_recipient_type"]
if value == RECIPIENT_OWNER:
if self.cleaned_data['allow_keyword_use_by'] != 'cases':
raise ValidationError(_(
"In order to send to the case's owner you must restrict "
"keyword initiation only to cases."
))
return value
def clean_other_recipient_id(self):
if self.cleaned_data['other_recipient_content_type'] == NO_RESPONSE:
return None
value = self.cleaned_data["other_recipient_id"]
recipient_type = self.cleaned_data.get("other_recipient_type", None)
if recipient_type == RECIPIENT_USER_GROUP:
try:
g = Group.get(value)
assert g.doc_type == "Group"
assert g.domain == self._cchq_domain
except Exception:
raise ValidationError("Invalid Group.")
return value
else:
return None
class BroadcastForm(Form):
recipient_type = ChoiceField(
required=True,
label=ugettext_lazy('Recipient'),
choices=ONE_TIME_RECIPIENT_CHOICES,
)
timing = ChoiceField(
required=True,
label=ugettext_lazy('Timing'),
choices=NOW_OR_LATER,
)
date = CharField(
required=False,
label=ugettext_lazy('Date'),
)
time = CharField(
required=False,
label=ugettext_lazy('Time'),
)
datetime = DateTimeField(
required=False,
)
case_group_id = ChoiceField(
required=False,
label=ugettext_lazy('Case Group'),
)
user_group_id = ChoiceField(
required=False,
label=ugettext_lazy('Mobile Worker Group'),
)
location_ids = CharField(
label='Location(s)',
required=False,
)
include_child_locations = BooleanField(
required=False,
label=ugettext_lazy('Also send to users at child locations'),
)
content_type = ChoiceField(
label=ugettext_lazy('Send'),
choices=((METHOD_SMS, ugettext_lazy("SMS Message")),)
)
subject = TrimmedCharField(
required=False,
label=ugettext_lazy('Subject'),
widget=forms.Textarea,
)
message = TrimmedCharField(
required=False,
label=ugettext_lazy('Message'),
widget=forms.Textarea,
)
form_unique_id = ChoiceField(
required=False,
label=ugettext_lazy('Survey'),
)
def __init__(self, *args, **kwargs):
if 'domain' not in kwargs or 'can_use_survey' not in kwargs:
raise Exception('Expected kwargs: domain, can_use_survey')
self.domain = kwargs.pop('domain')
self.can_use_survey = kwargs.pop('can_use_survey')
super(BroadcastForm, self).__init__(*args, **kwargs)
if self.can_use_survey:
add_field_choices(self, 'content_type', [
(METHOD_SMS_SURVEY, _('SMS Survey')),
])
add_field_choices(self, 'content_type', [
(METHOD_EMAIL, _('Email')),
])
if toggles.BROADCAST_TO_LOCATIONS.enabled(self.domain):
add_field_choices(self, 'recipient_type', [
(RECIPIENT_LOCATION, _('Location')),
])
self.fields['form_unique_id'].choices = form_choices(self.domain)
self.fields['case_group_id'].choices = case_group_choices(self.domain)
self.fields['user_group_id'].choices = user_group_choices(self.domain)
self.fields['location_ids'].widget = SupplyPointSelectWidget(
domain=self.domain,
multiselect=True,
)
self.helper = FormHelper()
self.helper.form_class = 'form form-horizontal'
self.helper.label_class = 'col-sm-2 col-md-2 col-lg-2'
self.helper.field_class = 'col-sm-10 col-md-3 col-lg-3'
from corehq.apps.reminders.views import BroadcastListView
layout_fields = [
crispy.Fieldset(
_('Recipient'),
*self.crispy_recipient_fields
),
crispy.Fieldset(
_('Timing'),
*self.crispy_timing_fields
),
crispy.Fieldset(
_('Content'),
*self.crispy_content_fields
),
hqcrispy.FormActions(
twbscrispy.StrictButton(
_("Save"),
css_class='btn-primary',
type='submit',
),
crispy.HTML('<a href="%s" class="btn btn-default">Cancel</a>'
% reverse(BroadcastListView.urlname, args=[self.domain]))
),
]
self.helper.layout = crispy.Layout(*layout_fields)
@property
def crispy_recipient_fields(self):
return [
crispy.Field(
'recipient_type',
data_bind="value: recipient_type",
),
crispy.Div(
crispy.Field(
'case_group_id',
data_bind='value: case_group_id',
),
data_bind='visible: showCaseGroupSelect',
),
crispy.Div(
crispy.Field(
'user_group_id',
data_bind='value: user_group_id',
),
data_bind='visible: showUserGroupSelect',
),
crispy.Div(
crispy.Field(
'location_ids',
),
twbscrispy.PrependedText(
'include_child_locations', ''
),
data_bind='visible: showLocationSelect',
),
]
@property
def crispy_timing_fields(self):
return [
crispy.Field(
'timing',
data_bind='value: timing',
),
crispy.Div(
B3MultiField(
_("Date and Time"),
crispy.Div(
InlineField(
'date',
data_bind='value: date'
),
css_class='col-sm-6'
),
crispy.Div(
template='reminders/partial/time_picker.html'
),
),
ErrorsOnlyField('time'),
ErrorsOnlyField('datetime'),
data_bind='visible: showDateAndTimeSelect',
),
]
@property
def crispy_content_fields(self):
return [
crispy.Field(
'content_type',
data_bind='value: content_type',
),
crispy.Div(
crispy.Field(
'subject',
data_bind='value: subject',
style='height: 50px;',
),
data_bind='visible: showSubject',
),
crispy.Div(
crispy.Field(
'message',
data_bind='value: message',
style='height: 50px;',
),
data_bind='visible: showMessage',
),
crispy.Div(
crispy.Field(
'form_unique_id',
data_bind='value: form_unique_id',
),
data_bind='visible: showSurveySelect',
),
]
@property
def project_timezone(self):
return get_timezone_for_user(None, self.domain)
def clean_date(self):
if self.cleaned_data.get('timing') == SEND_NOW:
return None
else:
value = self.cleaned_data.get('date')
return validate_date(value)
def clean_time(self):
if self.cleaned_data.get('timing') == SEND_NOW:
return None
else:
value = self.cleaned_data.get('time')
return validate_time(value)
def clean_datetime(self):
utcnow = datetime.utcnow()
if self.cleaned_data.get('timing') == SEND_NOW:
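            # "Send now" is scheduled one minute ahead of the current UTC time.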
value = utcnow + timedelta(minutes=1)
else:
dt = self.cleaned_data.get('date')
tm = self.cleaned_data.get('time')
if not isinstance(dt, date) or not isinstance(tm, time):
# validation didn't pass on the date or time fields
return None
value = datetime.combine(dt, tm)
value = UserTime(value, self.project_timezone).server_time().done().replace(tzinfo=None)
if value < utcnow:
raise ValidationError(_('Date and time cannot occur in the past.'))
return value
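    # Illustrative conversion (assumed project timezone 'US/Eastern', not part
    # of the original form): a user-entered 2015-06-01 12:00 becomes the naive
    # UTC instant 2015-06-01 16:00 after server_time() (EDT is UTC-4).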
def clean_case_group_id(self):
if self.cleaned_data.get('recipient_type') == RECIPIENT_SURVEY_SAMPLE:
value = self.cleaned_data.get('case_group_id')
return clean_case_group_id(value, self.domain)
else:
return None
def clean_user_group_id(self):
if self.cleaned_data.get('recipient_type') == RECIPIENT_USER_GROUP:
value = self.cleaned_data.get('user_group_id')
return clean_group_id(value, self.domain)
else:
return None
def clean_subject(self):
value = None
if self.cleaned_data.get('content_type') == METHOD_EMAIL:
value = self.cleaned_data.get('subject')
if not value:
raise ValidationError('This field is required.')
return value
def clean_message(self):
value = None
if self.cleaned_data.get('content_type') in (METHOD_SMS, METHOD_EMAIL):
value = self.cleaned_data.get('message')
if not value:
raise ValidationError('This field is required.')
return value
def clean_form_unique_id(self):
if self.cleaned_data.get('content_type') == METHOD_SMS_SURVEY:
value = self.cleaned_data.get('form_unique_id')
return validate_form_unique_id(value, self.domain)
else:
return None
def clean_location_ids(self):
if self.cleaned_data.get('recipient_type') != RECIPIENT_LOCATION:
return []
value = self.cleaned_data.get('location_ids')
if not isinstance(value, basestring) or value.strip() == '':
raise ValidationError(_('Please choose at least one location'))
location_ids = [location_id.strip() for location_id in value.split(',')]
try:
locations = get_locations_from_ids(location_ids, self.domain)
except SQLLocation.DoesNotExist:
raise ValidationError(_('One or more of the locations was not found.'))
return [location.location_id for location in locations]
@property
def current_values(self):
values = {
'is_trial_project': domain_is_on_trial(self.domain),
'email_trial_message': EMAIL_TRIAL_MESSAGE % {'limit': TRIAL_MAX_EMAILS},
}
for field_name in self.fields.keys():
values[field_name] = self[field_name].value()
return values
|
{
"content_hash": "ea409045f769e8ebb2345ceefb48b352",
"timestamp": "",
"source": "github",
"line_count": 2668,
"max_line_length": 140,
"avg_line_length": 37.643178410794604,
"alnum_prop": 0.5201131113589295,
"repo_name": "qedsoftware/commcare-hq",
"id": "389285a4f3c106ced532199ca0627c20d92b1d02",
"size": "100432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/reminders/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
}
|
def get_peak_range(pre_line):
line = pre_line.strip()
colon = line.find(':')
hyphen = line.find('-')
chro = line[:colon]
start = int(line[(colon+1):hyphen])
end = int(line[(hyphen+1):])
return (chro,start,end)
import sys
import math
print 'Expanding peaks!'
if len(sys.argv)<3:
sys.exit('Requires list of peaks and window size!')
peakfile = sys.argv[1]
window_size = float(sys.argv[2])
win_edge = int(math.ceil(window_size/2))
outfile = open(peakfile+'.exp','w')
for line in open(peakfile,'r'):
[chro,start,end] = get_peak_range(line)
for i in range(start-win_edge,end+win_edge):
outline=chro+"\t"+str(i)+"\t"+str(i+1)+"\n"
outfile.write(outline)
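# Worked example (illustrative, not in the original script): a peak line
# 'chr1:100-200' with window size 10 gives win_edge = 5, so single-base
# BED rows are written for positions 95 through 204 inclusive.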
|
{
"content_hash": "86f305065f6a6d7cf4169e0c6a053d52",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 55,
"avg_line_length": 26.22222222222222,
"alnum_prop": 0.6257062146892656,
"repo_name": "corcra/tf2",
"id": "c4e127ae01c2b52db8290bd52ad46ed90811db2e",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "preprocess/expand_peak.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3771"
},
{
"name": "R",
"bytes": "23536"
},
{
"name": "Shell",
"bytes": "2627"
}
],
"symlink_target": ""
}
|
"""Support for the MaryTTS service."""
import asyncio
import logging
import re
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
SUPPORT_LANGUAGES = [
'de', 'en-GB', 'en-US', 'fr', 'it', 'lb', 'ru', 'sv', 'te', 'tr'
]
SUPPORT_CODEC = [
'aiff', 'au', 'wav'
]
CONF_VOICE = 'voice'
CONF_CODEC = 'codec'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 59125
DEFAULT_LANG = 'en-US'
DEFAULT_VOICE = 'cmu-slt-hsmm'
DEFAULT_CODEC = 'wav'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): cv.string,
vol.Optional(CONF_CODEC, default=DEFAULT_CODEC): vol.In(SUPPORT_CODEC)
})
async def async_get_engine(hass, config):
"""Set up MaryTTS speech component."""
return MaryTTSProvider(hass, config)
class MaryTTSProvider(Provider):
"""MaryTTS speech api provider."""
def __init__(self, hass, conf):
"""Init MaryTTS TTS service."""
self.hass = hass
self._host = conf.get(CONF_HOST)
self._port = conf.get(CONF_PORT)
self._codec = conf.get(CONF_CODEC)
self._voice = conf.get(CONF_VOICE)
self._language = conf.get(CONF_LANG)
self.name = 'MaryTTS'
@property
def default_language(self):
"""Return the default language."""
return self._language
@property
def supported_languages(self):
"""Return list of supported languages."""
return SUPPORT_LANGUAGES
async def async_get_tts_audio(self, message, language, options=None):
"""Load TTS from MaryTTS."""
websession = async_get_clientsession(self.hass)
actual_language = re.sub('-', '_', language)
try:
with async_timeout.timeout(10, loop=self.hass.loop):
url = 'http://{}:{}/process?'.format(self._host, self._port)
audio = self._codec.upper()
if audio == 'WAV':
audio = 'WAVE'
url_param = {
'INPUT_TEXT': message,
'INPUT_TYPE': 'TEXT',
'AUDIO': audio,
'VOICE': self._voice,
'OUTPUT_TYPE': 'AUDIO',
'LOCALE': actual_language
}
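                # Resulting request (illustrative, default config): GET
                # http://localhost:59125/process?INPUT_TEXT=...&INPUT_TYPE=TEXT
                # &AUDIO=WAVE&VOICE=cmu-slt-hsmm&OUTPUT_TYPE=AUDIO&LOCALE=en_US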
request = await websession.get(url, params=url_param)
if request.status != 200:
_LOGGER.error("Error %d on load url %s",
request.status, request.url)
return (None, None)
data = await request.read()
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Timeout for MaryTTS API")
return (None, None)
return (self._codec, data)
|
{
"content_hash": "dd8b766e0a8b98f31c01b0740cb1a2d4",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 77,
"avg_line_length": 30.471698113207548,
"alnum_prop": 0.591640866873065,
"repo_name": "jnewland/home-assistant",
"id": "294383cb4dd99e94afdd97ec3192cb7cf325b19f",
"size": "3230",
"binary": false,
"copies": "5",
"ref": "refs/heads/ci",
"path": "homeassistant/components/marytts/tts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15240512"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17862"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from models import FriendshipRequest, Friendship, UserBlocks
class FriendshipRequestAdmin(admin.ModelAdmin):
date_hierarchy = 'created'
list_display = ('from_user', 'to_user', 'accepted', 'created')
list_filter = ('accepted',)
actions = ('accept_friendship', 'decline_friendship', 'cancel_friendship')
def accept_friendship(self, request, queryset):
for friendship_request in queryset:
friendship_request.accept()
accept_friendship.short_description = _(u'Accept selected friendship ' \
u'requests')
def decline_friendship(self, request, queryset):
for friendship_request in queryset:
friendship_request.decline()
decline_friendship.short_description = _(u'Decline selected friendship ' \
u'requests')
def cancel_friendship(self, request, queryset):
for friendship_request in queryset:
friendship_request.cancel()
cancel_friendship.short_description = _(u'Cancel selected friendship ' \
u'requests')
admin.site.register(FriendshipRequest, FriendshipRequestAdmin)
class FriendshipAdmin(admin.ModelAdmin):
list_display = ('user', 'friend_count', 'friend_summary')
admin.site.register(Friendship, FriendshipAdmin)
class UserBlocksAdmin(admin.ModelAdmin):
list_display = ('user', 'block_count', 'block_summary')
admin.site.register(UserBlocks, UserBlocksAdmin)
|
{
"content_hash": "30a1d8f3cfa2ab12721d23fc0d3b7f35",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 40.92307692307692,
"alnum_prop": 0.6666666666666666,
"repo_name": "softak/webfaction_demo",
"id": "1e1bd791fe304e7309e942d0a725eb62c9625a24",
"size": "1596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/friends/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "33283"
},
{
"name": "JavaScript",
"bytes": "984889"
},
{
"name": "Python",
"bytes": "8055804"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
}
|
import datetime
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import DateTime, Date, String, Integer, Float, Boolean
from intranet3.models import Base
class TimeEntry(Base):
__tablename__ = 'time_entry'
id = Column(Integer, primary_key=True, index=True)
user_id = Column(Integer, ForeignKey('user.id'), nullable=False, index=True)
date = Column(Date, nullable=False, index=True)
time = Column(Float, nullable=False)
description = Column(String, nullable=False)
added_ts = Column(DateTime, nullable=False, default=datetime.datetime.now)
modified_ts = Column(DateTime, nullable=False, default=datetime.datetime.now)
timer_ts = Column(DateTime)
ticket_id = Column(Integer, nullable=True, index=True)
project_id = Column(Integer, ForeignKey('project.id'), nullable=False, index=True)
# TODO: task
deleted = Column(Boolean, nullable=False, default=False, index=True)
frozen = Column(Boolean, nullable=False, default=False, index=True)
def to_dict(self):
entry = {
'id': self.id,
'desc': self.description,
'added': self.added_ts.strftime("%d.%m.%Y"),
'modified': self.modified_ts.strftime("%d.%m.%Y"),
'ticket_id': self.ticket_id,
'time': self.time,
'project': None
}
if self.project:
entry.update({
'project': {
'client_name': self.project.client.name,
'project_name': self.project.name,
}
})
return entry
|
{
"content_hash": "2f5f24aa0d4de3045d0bb3e8d28cf601",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 86,
"avg_line_length": 32.72,
"alnum_prop": 0.6008557457212714,
"repo_name": "stxnext/intranet-open",
"id": "a9284cfe1b5c5c7ee27fe0fee24671d3200e941d",
"size": "1636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/intranet3/intranet3/models/times.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "209248"
},
{
"name": "JavaScript",
"bytes": "67808"
},
{
"name": "Python",
"bytes": "535298"
},
{
"name": "SQL",
"bytes": "5168"
}
],
"symlink_target": ""
}
|
import numpy as np
import pytest
import scipy.stats as st
from sklearn.decomposition import PCA
from Starfish.emulator._utils import (
get_w_hat,
get_phi_squared,
get_altered_prior_factors,
Gamma,
)
class TestEmulatorUtils:
@pytest.fixture
def grid_setup(self, mock_hdf5_interface):
fluxes = np.array(list(mock_hdf5_interface.fluxes))
# Normalize to an average of 1 to remove uninteresting correlation
fluxes /= fluxes.mean(1, keepdims=True)
# Center and whiten
flux_mean = fluxes.mean(0)
fluxes -= flux_mean
flux_std = fluxes.std(0)
fluxes /= flux_std
# Perform PCA using sklearn
default_pca_kwargs = dict(n_components=0.99, svd_solver="full")
pca = PCA(**default_pca_kwargs)
weights = pca.fit_transform(fluxes)
eigenspectra = pca.components_
yield eigenspectra, fluxes
def test_altered_lambda_xi(self, grid_setup):
a_p, b_p = get_altered_prior_factors(*grid_setup)
assert np.isfinite(a_p)
assert np.isfinite(b_p)
def test_w_hat(self, grid_setup):
eigs, fluxes = grid_setup
w_hat = get_w_hat(eigs, fluxes)
assert len(w_hat) == len(fluxes) * len(eigs)
assert np.all(np.isfinite(w_hat))
def test_phi_squared(self, grid_setup):
eigs, fluxes = grid_setup
M = len(fluxes)
m = len(eigs)
phi2 = get_phi_squared(eigs, M)
assert phi2.shape == (M * m, M * m)
assert np.all(np.isfinite(phi2))
@pytest.mark.parametrize("params", [(1, 0.001), (2, 0.075)])
def test_gamma_dist(self, params):
a, b = params
mine = Gamma(a, b)
theirs = st.gamma(a, scale=1 / b)
x = np.linspace(1e-6, 1e4)
assert np.allclose(mine.logpdf(x), theirs.logpdf(x))
|
{
"content_hash": "76362a90c73eeb83ba28ff8602e33900",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 74,
"avg_line_length": 31.620689655172413,
"alnum_prop": 0.6063249727371864,
"repo_name": "iancze/Starfish",
"id": "137c5cac7b2c73ffdaac8e016c85ea648122b40b",
"size": "1834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_emulator/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "201674"
},
{
"name": "TeX",
"bytes": "1842"
}
],
"symlink_target": ""
}
|
import logging
import warnings
import inspect
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm import joinedload, aliased
from sqlalchemy.sql.expression import desc, ColumnElement
from sqlalchemy import Boolean, Table, func, or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql.expression import cast
from sqlalchemy import Unicode
from flask import flash
from flask_admin._compat import string_types, text_type
from flask_admin.babel import gettext, ngettext, lazy_gettext
from flask_admin.model import BaseModelView
from flask_admin.model.form import create_editable_list_form
from flask_admin.actions import action
from flask_admin._backwards import ObsoleteAttr
from flask_admin.contrib.sqla import form, filters as sqla_filters, tools
from .typefmt import DEFAULT_FORMATTERS
from .ajax import create_ajax_loader
# Set up logger
log = logging.getLogger("flask-admin.sqla")
class ModelView(BaseModelView):
"""
SQLAlchemy model view
Usage sample::
admin = Admin()
admin.add_view(ModelView(User, db.session))
"""
column_auto_select_related = ObsoleteAttr('column_auto_select_related',
'auto_select_related',
True)
"""
Enable automatic detection of displayed foreign keys in this view
and perform automatic joined loading for related models to improve
query performance.
Please note that detection is not recursive: if `__unicode__` method
of related model uses another model to generate string representation, it
will still make separate database call.
"""
column_select_related_list = ObsoleteAttr('column_select_related',
'list_select_related',
None)
"""
List of parameters for SQLAlchemy `subqueryload`. Overrides `column_auto_select_related`
property.
For example::
class PostAdmin(ModelView):
column_select_related_list = ('user', 'city')
You can also use properties::
class PostAdmin(ModelView):
column_select_related_list = (Post.user, Post.city)
Please refer to the `subqueryload` on list of possible values.
"""
column_display_all_relations = ObsoleteAttr('column_display_all_relations',
'list_display_all_relations',
False)
"""
Controls if list view should display all relations, not only many-to-one.
"""
column_searchable_list = ObsoleteAttr('column_searchable_list',
'searchable_columns',
None)
"""
Collection of the searchable columns.
Example::
class MyModelView(ModelView):
column_searchable_list = ('name', 'email')
You can also pass columns::
class MyModelView(ModelView):
column_searchable_list = (User.name, User.email)
The following search rules apply:
- If you enter ``ZZZ`` in the UI search field, it will generate ``ILIKE '%ZZZ%'``
statement against searchable columns.
- If you enter multiple words, each word will be searched separately, but
only rows that contain all words will be displayed. For example, searching
for ``abc def`` will find all rows that contain ``abc`` and ``def`` in one or
more columns.
- If you prefix your search term with ``^``, it will find all rows
that start with ``^``. So, if you entered ``^ZZZ`` then ``ILIKE 'ZZZ%'`` will be used.
- If you prefix your search term with ``=``, it will perform an exact match.
For example, if you entered ``=ZZZ``, the statement ``ILIKE 'ZZZ'`` will be used.
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of
:class:`flask_admin.contrib.sqla.filters.BaseSQLAFilter` classes.
Filters will be grouped by name when displayed in the drop-down.
For example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
or::
from flask_admin.contrib.sqla.filters import BooleanEqualFilter
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(column=User.name, name='Name'),)
or::
from flask_admin.contrib.sqla.filters import BaseSQLAFilter
class FilterLastNameBrown(BaseSQLAFilter):
def apply(self, query, value, alias=None):
if value == '1':
return query.filter(self.column == "Brown")
else:
return query.filter(self.column != "Brown")
def operation(self):
return 'is Brown'
class MyModelView(BaseModelView):
column_filters = [
FilterLastNameBrown(
User.last_name, 'Last Name', options=(('1', 'Yes'), ('0', 'No'))
)
]
"""
model_form_converter = form.AdminModelConverter
"""
Model form conversion class. Use this to implement custom field conversion logic.
For example::
class MyModelConverter(AdminModelConverter):
pass
class MyAdminView(ModelView):
model_form_converter = MyModelConverter
"""
inline_model_form_converter = form.InlineModelConverter
"""
Inline model conversion class. If you need some kind of post-processing for inline
forms, you can customize behavior by doing something like this::
class MyInlineModelConverter(InlineModelConverter):
def post_process(self, form_class, info):
form_class.value = wtf.StringField('value')
return form_class
class MyAdminView(ModelView):
inline_model_form_converter = MyInlineModelConverter
"""
filter_converter = sqla_filters.FilterConverter()
"""
Field to filter converter.
Override this attribute to use non-default converter.
"""
fast_mass_delete = False
"""
If set to `False` and user deletes more than one model using built in action,
all models will be read from the database and then deleted one by one
giving SQLAlchemy a chance to manually cleanup any dependencies (many-to-many
relationships, etc).
If set to `True`, will run a ``DELETE`` statement which is somewhat faster,
but may leave corrupted data if you forget to configure ``DELETE
CASCADE`` for your model.
"""
inline_models = None
"""
Inline related-model editing for models with parent-child relations.
Accepts enumerable with one of the following possible values:
1. Child model class::
class MyModelView(ModelView):
inline_models = (Post,)
2. Child model class and additional options::
class MyModelView(ModelView):
inline_models = [(Post, dict(form_columns=['title']))]
3. Django-like ``InlineFormAdmin`` class instance::
class MyInlineModelForm(InlineFormAdmin):
form_columns = ('title', 'date')
class MyModelView(ModelView):
inline_models = (MyInlineModelForm(MyInlineModel),)
You can customize the generated field name by:
1. Using the `form_name` property as a key to the options dictionary::
class MyModelView(ModelView):
inline_models = ((Post, dict(form_label='Hello')))
2. Using forward relation name and `column_labels` property::
class Model1(Base):
pass
class Model2(Base):
# ...
model1 = relation(Model1, backref='models')
class MyModel1View(Base):
inline_models = (Model2,)
column_labels = {'models': 'Hello'}
"""
column_type_formatters = DEFAULT_FORMATTERS
form_choices = None
"""
Map choices to form fields
Example::
class MyModelView(BaseModelView):
form_choices = {'my_form_field': [
('db_value', 'display_value'),
]}
"""
form_optional_types = (Boolean,)
"""
List of field types that should be optional if column is not nullable.
Example::
class MyModelView(BaseModelView):
form_optional_types = (Boolean, Unicode)
"""
def __init__(self, model, session,
name=None, category=None, endpoint=None, url=None, static_folder=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor.
:param model:
Model class
:param session:
SQLAlchemy session
:param name:
View name. If not set, defaults to the model name
:param category:
Category name
:param endpoint:
Endpoint name. If not set, defaults to the model name
:param url:
Base URL. If not set, defaults to '/admin/' + endpoint
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self.session = session
self._search_fields = None
self._filter_joins = dict()
self._sortable_joins = dict()
if self.form_choices is None:
self.form_choices = {}
super(ModelView, self).__init__(model, name, category, endpoint, url, static_folder,
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
# Primary key
self._primary_key = self.scaffold_pk()
if self._primary_key is None:
raise Exception('Model %s does not have primary key.' % self.model.__name__)
# Configuration
if not self.column_select_related_list:
self._auto_joins = self.scaffold_auto_joins()
else:
self._auto_joins = self.column_select_related_list
# Internal API
def _get_model_iterator(self, model=None):
"""
Return property iterator for the model
"""
if model is None:
model = self.model
return model._sa_class_manager.mapper.iterate_properties
def _apply_path_joins(self, query, joins, path, inner_join=True):
"""
Apply join path to the query.
:param query:
Query to add joins to
:param joins:
List of current joins. Used to avoid joining on same relationship more than once
:param path:
Path to be joined
        :param inner_join:
            Use an inner join if `True`, otherwise an outer join
"""
last = None
if path:
for item in path:
key = (inner_join, item)
alias = joins.get(key)
if key not in joins:
if not isinstance(item, Table):
alias = aliased(item.property.mapper.class_)
fn = query.join if inner_join else query.outerjoin
if last is None:
query = fn(item) if alias is None else fn(alias, item)
else:
prop = getattr(last, item.key)
query = fn(prop) if alias is None else fn(alias, prop)
joins[key] = alias
last = alias
return query, joins, last
# Scaffolding
def scaffold_pk(self):
"""
Return the primary key name(s) from a model
        If the model has a single primary key, returns a string; otherwise returns a tuple
"""
return tools.get_primary_key(self.model)
def get_pk_value(self, model):
"""
Return the primary key value from a model object.
If there are multiple primary keys, they're encoded into string representation.
"""
if isinstance(self._primary_key, tuple):
return tools.iterencode(getattr(model, attr) for attr in self._primary_key)
else:
return tools.escape(getattr(model, self._primary_key))
def scaffold_list_columns(self):
"""
Return a list of columns from the model.
"""
columns = []
for p in self._get_model_iterator():
if hasattr(p, 'direction'):
if self.column_display_all_relations or p.direction.name == 'MANYTOONE':
columns.append(p.key)
elif hasattr(p, 'columns'):
if len(p.columns) > 1:
filtered = tools.filter_foreign_columns(self.model.__table__, p.columns)
if len(filtered) > 1:
warnings.warn('Can not convert multiple-column properties (%s.%s)' % (self.model, p.key))
continue
column = filtered[0]
else:
column = p.columns[0]
if column.foreign_keys:
continue
if not self.column_display_pk and column.primary_key:
continue
columns.append(p.key)
return columns
def scaffold_sortable_columns(self):
"""
Return a dictionary of sortable columns.
Key is column name, value is sort column/field.
"""
columns = dict()
for p in self._get_model_iterator():
if hasattr(p, 'columns'):
# Sanity check
if len(p.columns) > 1:
# Multi-column properties are not supported
continue
column = p.columns[0]
# Can't sort on primary or foreign keys by default
if column.foreign_keys:
continue
if not self.column_display_pk and column.primary_key:
continue
columns[p.key] = column
return columns
def get_sortable_columns(self):
"""
Returns a dictionary of the sortable columns. Key is a model
field name and value is sort column (for example - attribute).
If `column_sortable_list` is set, will use it. Otherwise, will call
`scaffold_sortable_columns` to get them from the model.
"""
self._sortable_joins = dict()
if self.column_sortable_list is None:
return self.scaffold_sortable_columns()
else:
result = dict()
for c in self.column_sortable_list:
if isinstance(c, tuple):
column, path = tools.get_field_with_path(self.model, c[1])
column_name = c[0]
else:
column, path = tools.get_field_with_path(self.model, c)
column_name = text_type(c)
if path and hasattr(path[0], 'property'):
self._sortable_joins[column_name] = path
elif path:
raise Exception("For sorting columns in a related table, "
"column_sortable_list requires a string "
"like '<relation name>.<column name>'. "
"Failed on: {0}".format(c))
else:
# column is in same table, use only model attribute name
if getattr(column, 'key', None) is not None:
column_name = column.key
else:
column_name = text_type(c)
# column_name must match column_name used in `get_list_columns`
result[column_name] = column
return result
def get_list_columns(self):
"""
Returns a list of tuples with the model field name and formatted
field name. If `column_list` was set, returns it. Otherwise calls
`scaffold_list_columns` to generate the list from the model.
"""
if self.column_list is None:
columns = self.scaffold_list_columns()
# Filter excluded columns
if self.column_exclude_list:
columns = [c for c in columns
if c not in self.column_exclude_list]
return [(c, self.get_column_name(c)) for c in columns]
else:
columns = []
for c in self.column_list:
column, path = tools.get_field_with_path(self.model, c)
if path:
# column is in another table, use full path
column_name = text_type(c)
else:
# column is in same table, use only model attribute name
if getattr(column, 'key', None) is not None:
column_name = column.key
else:
column_name = text_type(c)
visible_name = self.get_column_name(column_name)
# column_name must match column_name in `get_sortable_columns`
columns.append((column_name, visible_name))
return columns
def init_search(self):
"""
Initialize search. Returns `True` if search is supported for this
view.
For SQLAlchemy, this will initialize internal fields: list of
column objects used for filtering, etc.
"""
if self.column_searchable_list:
self._search_fields = []
for p in self.column_searchable_list:
attr, joins = tools.get_field_with_path(self.model, p)
if not attr:
raise Exception('Failed to find field for search field: %s' % p)
for column in tools.get_columns_for_field(attr):
self._search_fields.append((column, joins))
return bool(self.column_searchable_list)
def scaffold_filters(self, name):
"""
Return list of enabled filters
"""
attr, joins = tools.get_field_with_path(self.model, name)
if attr is None:
raise Exception('Failed to find field for filter: %s' % name)
# Figure out filters for related column
if hasattr(attr, 'property') and hasattr(attr.property, 'direction'):
filters = []
for p in self._get_model_iterator(attr.property.mapper.class_):
if hasattr(p, 'columns'):
# TODO: Check for multiple columns
column = p.columns[0]
if column.foreign_keys or column.primary_key:
continue
visible_name = '%s / %s' % (self.get_column_name(attr.prop.table.name),
self.get_column_name(p.key))
type_name = type(column.type).__name__
flt = self.filter_converter.convert(type_name,
column,
visible_name)
if flt:
table = column.table
if joins:
self._filter_joins[column] = joins
elif tools.need_join(self.model, table):
self._filter_joins[column] = [table]
filters.extend(flt)
return filters
else:
is_hybrid_property = isinstance(attr, ColumnElement)
if is_hybrid_property:
column = attr
else:
columns = tools.get_columns_for_field(attr)
if len(columns) > 1:
                    raise Exception('Can not filter on more than one column for %s' % name)
column = columns[0]
# Join not needed for hybrid properties
if (not is_hybrid_property and tools.need_join(self.model, column.table) and
name not in self.column_labels):
visible_name = '%s / %s' % (
self.get_column_name(column.table.name),
self.get_column_name(column.name)
)
else:
if not isinstance(name, string_types):
visible_name = self.get_column_name(name.property.key)
else:
visible_name = self.get_column_name(name)
type_name = type(column.type).__name__
flt = self.filter_converter.convert(
type_name,
column,
visible_name,
options=self.column_choices.get(name),
)
if joins:
self._filter_joins[column] = joins
elif not is_hybrid_property and tools.need_join(self.model, column.table):
self._filter_joins[column] = [column.table]
return flt
def handle_filter(self, filter):
if isinstance(filter, sqla_filters.BaseSQLAFilter):
column = filter.column
# hybrid_property joins are not supported yet
if (isinstance(column, InstrumentedAttribute) and
tools.need_join(self.model, column.table)):
self._filter_joins[column] = [column.table]
return filter
def scaffold_form(self):
"""
Create form from the model.
"""
converter = self.model_form_converter(self.session, self)
form_class = form.get_form(self.model, converter,
base_class=self.form_base_class,
only=self.form_columns,
exclude=self.form_excluded_columns,
field_args=self.form_args,
extra_fields=self.form_extra_fields)
if self.inline_models:
form_class = self.scaffold_inline_form_models(form_class)
return form_class
def scaffold_list_form(self, widget=None, validators=None):
"""
Create form for the `index_view` using only the columns from
`self.column_editable_list`.
:param widget:
WTForms widget class. Defaults to `XEditableWidget`.
:param validators:
`form_args` dict with only validators
{'name': {'validators': [required()]}}
"""
converter = self.model_form_converter(self.session, self)
form_class = form.get_form(self.model, converter,
base_class=self.form_base_class,
only=self.column_editable_list,
field_args=validators)
return create_editable_list_form(self.form_base_class, form_class,
widget)
def scaffold_inline_form_models(self, form_class):
"""
Contribute inline models to the form
:param form_class:
Form class
"""
inline_converter = self.inline_model_form_converter(self.session,
self,
self.model_form_converter)
for m in self.inline_models:
form_class = inline_converter.contribute(self.model, form_class, m)
return form_class
def scaffold_auto_joins(self):
"""
Return a list of joined tables by going through the
displayed columns.
"""
if not self.column_auto_select_related:
return []
relations = set()
for p in self._get_model_iterator():
if hasattr(p, 'direction'):
# Check if it is pointing to same model
if p.mapper.class_ == self.model:
continue
if p.direction.name in ['MANYTOONE', 'MANYTOMANY']:
relations.add(p.key)
joined = []
for prop, name in self._list_columns:
if prop in relations:
joined.append(getattr(self.model, prop))
return joined
# AJAX foreignkey support
def _create_ajax_loader(self, name, options):
return create_ajax_loader(self.model, self.session, name, name, options)
# Database-related API
def get_query(self):
"""
Return a query for the model type.
If you override this method, don't forget to override `get_count_query` as well.
This method can be used to set a "persistent filter" on an index_view.
Example::
class MyView(ModelView):
def get_query(self):
return super(MyView, self).get_query().filter(User.username == current_user.username)
"""
return self.session.query(self.model)
def get_count_query(self):
"""
        Return the count query for the model type
A ``query(self.model).count()`` approach produces an excessive
subquery, so ``query(func.count('*'))`` should be used instead.
See commit ``#45a2723`` for details.
"""
return self.session.query(func.count('*')).select_from(self.model)
def _order_by(self, query, joins, sort_joins, sort_field, sort_desc):
"""
Apply order_by to the query
:param query:
Query
        :param joins:
Current joins
:param sort_joins:
Sort joins (properties or tables)
:param sort_field:
Sort field
:param sort_desc:
Ascending or descending
"""
if sort_field is not None:
# Handle joins
query, joins, alias = self._apply_path_joins(query, joins, sort_joins, inner_join=False)
column = sort_field if alias is None else getattr(alias, sort_field.key)
if sort_desc:
query = query.order_by(desc(column))
else:
query = query.order_by(column)
return query, joins
def _get_default_order(self):
order = super(ModelView, self)._get_default_order()
if order is not None:
field, direction = order
attr, joins = tools.get_field_with_path(self.model, field)
return attr, joins, direction
return None
def _apply_sorting(self, query, joins, sort_column, sort_desc):
if sort_column is not None:
if sort_column in self._sortable_columns:
sort_field = self._sortable_columns[sort_column]
sort_joins = self._sortable_joins.get(sort_column)
query, joins = self._order_by(query, joins, sort_joins, sort_field, sort_desc)
else:
order = self._get_default_order()
if order:
sort_field, sort_joins, sort_desc = order
query, joins = self._order_by(query, joins, sort_joins, sort_field, sort_desc)
return query, joins
def _apply_search(self, query, count_query, joins, count_joins, search):
"""
Apply search to a query.
"""
terms = search.split(' ')
for term in terms:
if not term:
continue
stmt = tools.parse_like_term(term)
filter_stmt = []
count_filter_stmt = []
for field, path in self._search_fields:
query, joins, alias = self._apply_path_joins(query, joins, path, inner_join=False)
count_alias = None
if count_query is not None:
count_query, count_joins, count_alias = self._apply_path_joins(count_query,
count_joins,
path,
inner_join=False)
column = field if alias is None else getattr(alias, field.key)
filter_stmt.append(cast(column, Unicode).ilike(stmt))
                if count_query is not None:
column = field if count_alias is None else getattr(count_alias, field.key)
count_filter_stmt.append(cast(column, Unicode).ilike(stmt))
query = query.filter(or_(*filter_stmt))
if count_query is not None:
count_query = count_query.filter(or_(*count_filter_stmt))
return query, count_query, joins, count_joins
def _apply_filters(self, query, count_query, joins, count_joins, filters):
for idx, flt_name, value in filters:
flt = self._filters[idx]
alias = None
count_alias = None
# Figure out joins
if isinstance(flt, sqla_filters.BaseSQLAFilter):
path = self._filter_joins.get(flt.column, [])
query, joins, alias = self._apply_path_joins(query, joins, path, inner_join=False)
if count_query is not None:
count_query, count_joins, count_alias = self._apply_path_joins(
count_query,
count_joins,
path,
inner_join=False)
            # Clean the value with .clean() and apply the filter
clean_value = flt.clean(value)
try:
query = flt.apply(query, clean_value, alias)
except TypeError:
spec = inspect.getargspec(flt.apply)
if len(spec.args) == 3:
warnings.warn('Please update your custom filter %s to include additional `alias` parameter.' % repr(flt))
else:
raise
query = flt.apply(query, clean_value)
if count_query is not None:
try:
count_query = flt.apply(count_query, clean_value, count_alias)
except TypeError:
count_query = flt.apply(count_query, clean_value)
return query, count_query, joins, count_joins
def _apply_pagination(self, query, page, page_size):
if page_size is None:
page_size = self.page_size
if page_size:
query = query.limit(page_size)
if page and page_size:
query = query.offset(page * page_size)
return query
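    # Sketch of the resulting paging (illustrative values): with page_size=20
    # and page=2 this yields ``query.limit(20).offset(40)``, i.e. rows 41-60.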
def get_list(self, page, sort_column, sort_desc, search, filters,
execute=True, page_size=None):
"""
Return records from the database.
:param page:
Page number
:param sort_column:
Sort column name
:param sort_desc:
Descending or ascending sort
:param search:
Search query
:param execute:
Execute query immediately? Default is `True`
:param filters:
List of filter tuples
:param page_size:
Number of results. Defaults to ModelView's page_size. Can be
            overridden to change the page_size limit. Removing the page_size
limit requires setting page_size to 0 or False.
"""
# Will contain join paths with optional aliased object
joins = {}
count_joins = {}
query = self.get_query()
count_query = self.get_count_query() if not self.simple_list_pager else None
# Ignore eager-loaded relations (prevent unnecessary joins)
# TODO: Separate join detection for query and count query?
if hasattr(query, '_join_entities'):
for entity in query._join_entities:
for table in entity.tables:
joins[table] = None
# Apply search criteria
if self._search_supported and search:
query, count_query, joins, count_joins = self._apply_search(query,
count_query,
joins,
count_joins,
search)
# Apply filters
if filters and self._filters:
query, count_query, joins, count_joins = self._apply_filters(query,
count_query,
joins,
count_joins,
filters)
# Calculate number of rows if necessary
count = count_query.scalar() if count_query else None
# Auto join
for j in self._auto_joins:
query = query.options(joinedload(j))
# Sorting
query, joins = self._apply_sorting(query, joins, sort_column, sort_desc)
# Pagination
query = self._apply_pagination(query, page, page_size)
# Execute if needed
if execute:
query = query.all()
return count, query
def get_one(self, id):
"""
Return a single model by its id.
:param id:
Model id
"""
return self.session.query(self.model).get(tools.iterdecode(id))
# Error handler
def handle_view_exception(self, exc):
if isinstance(exc, IntegrityError):
flash(gettext('Integrity error. %(message)s', message=text_type(exc)), 'error')
return True
return super(ModelView, self).handle_view_exception(exc)
# Model handlers
def create_model(self, form):
"""
Create model from form.
:param form:
Form instance
"""
try:
model = self.model()
form.populate_obj(model)
self.session.add(model)
self._on_model_change(form, model, True)
self.session.commit()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to create record. %(error)s', error=str(ex)), 'error')
log.exception('Failed to create record.')
self.session.rollback()
return False
else:
self.after_model_change(form, model, True)
return model
def update_model(self, form, model):
"""
Update model from form.
:param form:
Form instance
:param model:
Model instance
"""
try:
form.populate_obj(model)
self._on_model_change(form, model, False)
self.session.commit()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to update record. %(error)s', error=str(ex)), 'error')
log.exception('Failed to update record.')
self.session.rollback()
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model.
:param model:
Model to delete
"""
try:
self.on_model_delete(model)
self.session.flush()
self.session.delete(model)
self.session.commit()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete record. %(error)s', error=str(ex)), 'error')
log.exception('Failed to delete record.')
self.session.rollback()
return False
else:
self.after_model_delete(model)
return True
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
try:
query = tools.get_query_for_ids(self.get_query(), self.model, ids)
if self.fast_mass_delete:
count = query.delete(synchronize_session=False)
else:
count = 0
for m in query.all():
if self.delete_model(m):
count += 1
self.session.commit()
flash(ngettext('Record was successfully deleted.',
'%(count)s records were successfully deleted.',
count,
count=count))
except Exception as ex:
if not self.handle_view_exception(ex):
raise
flash(gettext('Failed to delete records. %(error)s', error=str(ex)), 'error')
|
{
"content_hash": "68a099071f3730bcb4d32766e82597de",
"timestamp": "",
"source": "github",
"line_count": 1115,
"max_line_length": 125,
"avg_line_length": 34.7372197309417,
"alnum_prop": 0.5247857069090158,
"repo_name": "jschneier/flask-admin",
"id": "323ca294fb8b4b363d0ffeea68efc368ac52e4ab",
"size": "38732",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flask_admin/contrib/sqla/view.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "120"
},
{
"name": "HTML",
"bytes": "94849"
},
{
"name": "JavaScript",
"bytes": "30692"
},
{
"name": "Makefile",
"bytes": "5587"
},
{
"name": "Python",
"bytes": "595108"
},
{
"name": "Shell",
"bytes": "1316"
}
],
"symlink_target": ""
}
|
import defs
import thirdparty.gzipinputstream as gzis
import logging
import numbers
import os
import platform
import re
import socket
import ssl
import sys
import vector3
if sys.version_info >= (3, 0):
import urllib.parse
import urllib.request
import urllib.error
else:
import urllib2
import urllib
import urlparse
log = logging.getLogger("util")
USER_AGENT = '{}/{}'.format(defs.name, defs.version)
# Match a float such as "33", "-33", "-33.1"
_rgxstr_float = r'[-+]?\d+(?:\.\d+)?'
# Match a set of coords such as "[33, -45.6, 78.910]"
_rgxstr_coords = r'^\[\s*(?P<x>{0})\s*[,/]\s*(?P<y>{0})\s*[,/]\s*(?P<z>{0})\s*\](?:=(?P<name>.+))?$'.format(_rgxstr_float)
# Compile the regex for faster execution later
_regex_coords = re.compile(_rgxstr_coords)
def parse_coords(sysname):
rx_match = _regex_coords.match(sysname)
if rx_match is not None:
# If it matches, make a fake system and station at those coordinates
try:
cx = float(rx_match.group('x'))
cy = float(rx_match.group('y'))
cz = float(rx_match.group('z'))
name = rx_match.group('name') if rx_match.group('name') is not None else sysname
return (cx, cy, cz, name)
except Exception as ex:
log.debug("Failed to parse manual system: {}".format(ex))
return None
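# Illustrative examples (not from the original module):
# parse_coords('[1.0, -2.5, 3]=Sol') -> (1.0, -2.5, 3.0, 'Sol')
# parse_coords('Sol') -> None (no coordinate syntax to match)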
def open_url(url, allow_gzip = True):
response = None
headers = {'User-Agent': USER_AGENT}
if allow_gzip:
headers['Accept-Encoding'] = 'gzip'
if sys.version_info >= (3, 0):
# Specify our own user agent as Cloudflare doesn't seem to like the urllib one
request = urllib.request.Request(url, headers=headers)
try:
response = urllib.request.urlopen(request)
except urllib.error.HTTPError as err:
log.error("Error {0} opening {1}: {2}".format(err.code, url, err.reason))
return None
else:
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# If we're on OSX with OpenSSL 0.9.x, manually specify preferred ciphers so CloudFlare can negotiate successfully
if platform.system() == 'Darwin' and ssl.OPENSSL_VERSION_INFO[0] < 1:
sslctx.set_ciphers("ECCdraft:HIGH:!aNULL")
# Specify our own user agent as Cloudflare doesn't seem to like the urllib one
request = urllib2.Request(url, headers=headers)
try:
response = urllib2.urlopen(request, context=sslctx)
except urllib2.HTTPError as err:
log.error("Error {0} opening {1}: {2}".format(err.code, url, err.reason))
return None
if response.info().get('Content-Encoding') == 'gzip':
try:
return gzis.GzipInputStream(response)
except:
log.error("Error decompressing {0}".format(url))
return None
else:
return response
def read_stream_line(stream):
try:
if sys.version_info >= (3, 0):
return stream.readline().decode("utf-8")
else:
return stream.readline()
except socket.error as e:
if e.errno == socket.errno.ECONNRESET:
log.warning("Received ECONNRESET while reading line from socket-based stream")
return None
else:
raise
def read_stream(stream, limit = None):
try:
if sys.version_info >= (3, 0):
return stream.read(limit).decode("utf-8")
else:
if limit is None and not isinstance(stream, gzis.GzipInputStream):
limit = -1
return stream.read(limit)
except socket.error as e:
if e.errno == socket.errno.ECONNRESET:
log.warning("Received ECONNRESET while reading from socket-based stream")
return None
else:
raise
def read_from_url(url):
return read_stream(open_url(url))
def path_to_url(path):
if sys.version_info >= (3, 0):
return urllib.parse.urljoin('file:', urllib.request.pathname2url(os.path.abspath(path)))
else:
return urlparse.urljoin('file:', urllib.pathname2url(os.path.abspath(path)))
def is_interactive():
return hasattr(sys, 'ps1')
def is_str(s):
if sys.version_info >= (3, 0):
return isinstance(s, str)
else:
return isinstance(s, basestring)
def download_file(url, file):
if sys.version_info >= (3, 0):
urllib.request.urlretrieve(url, file)
else:
urllib2.urlretrieve(url, file)
def string_bool(s):
return s.lower() in ("yes", "true", "1")
def hex2str(s):
return ''.join(chr(int(s[i:i+2], 16)) for i in range(0, len(s), 2))
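# For example (illustrative, not in the original module):
# hex2str('414243') == 'ABC', decoding each hex pair to one character.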
# 32-bit hashing algorithm found at http://papa.bretmulvey.com/post/124027987928/hash-functions
# Seemingly originally by Bob Jenkins <bob_jenkins-at-burtleburtle.net> in the 1990s
def jenkins32(key):
key += (key << 12)
key &= 0xFFFFFFFF
key ^= (key >> 22)
key += (key << 4)
key &= 0xFFFFFFFF
key ^= (key >> 9)
key += (key << 10)
key &= 0xFFFFFFFF
key ^= (key >> 2)
key += (key << 7)
key &= 0xFFFFFFFF
key ^= (key >> 12)
return key
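# Sketch of the invariant (our note, not original commentary): every step is
# masked with 0xFFFFFFFF, so jenkins32(key) always fits in 32 bits; e.g.
# jenkins32(0) == 0 and jenkins32(2**40) < 2**32.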
# Grabs the value from the first N bits, then return a right-shifted remainder
def unpack_and_shift(value, bits):
return (value >> bits, value & (2**bits-1))
# Shifts existing data left by N bits and adds a new value into the "empty" space
def pack_and_shift(value, new_data, bits):
return (value << bits) + (new_data & (2**bits-1))
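# Round-trip sketch (illustrative values, not from the original source):
# pack_and_shift(0b101, 0b101, 3) == 0b101101 and
# unpack_and_shift(0b101101, 3) == (0b101, 0b101)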
# Interleaves two values, starting at least significant bit
# e.g. (0b1111, 0b0000) --> (0b01010101)
def interleave(val1, val2, maxbits):
output = 0
for i in range(0, maxbits//2 + 1):
output |= ((val1 >> i) & 1) << (i*2)
for i in range(0, maxbits//2 + 1):
output |= ((val2 >> i) & 1) << (i*2 + 1)
return output & (2**maxbits - 1)
# Deinterleaves two values, starting at least significant bit
# e.g. (0b00110010) --> (0b0100, 0b0101)
def deinterleave(val, maxbits):
out1 = 0
out2 = 0
for i in range(0, maxbits, 2):
out1 |= ((val >> i) & 1) << (i//2)
for i in range(1, maxbits, 2):
out2 |= ((val >> i) & 1) << (i//2)
return (out1, out2)
def get_as_position(v):
if v is None:
return None
# If it's already a vector, all is OK
if isinstance(v, vector3.Vector3):
return v
if hasattr(v, "position"):
return v.position
if hasattr(v, "centre"):
return v.centre
if hasattr(v, "system"):
return get_as_position(v.system)
try:
if len(v) == 3 and all([isinstance(i, numbers.Number) for i in v]):
return vector3.Vector3(v[0], v[1], v[2])
except:
pass
return None
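# Illustrative behaviour (not from the original module): a bare 3-tuple of
# numbers such as (1.0, 2.0, 3.0) is promoted to vector3.Vector3(1.0, 2.0, 3.0);
# anything unrecognised yields None.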
|
{
"content_hash": "66339b6015019e835f305ac332bfb926",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 122,
"avg_line_length": 29.38388625592417,
"alnum_prop": 0.6456451612903226,
"repo_name": "KayJohnston/jackies-map",
"id": "68da84140a7c464e2b3bcd7b06411a1437b83a91",
"size": "6200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2613268"
}
],
"symlink_target": ""
}
|
import itertools
import re
import requests
from tuneharvest.common import Link
from collections.abc import Callable, Iterable
from argparse import Namespace
TOPIC_RE = re.compile(r'.+/t/[^\./]+')
def register(subparsers):
from_discourse_parser = subparsers.add_parser('discourse', help='Read links from discourse')
from_discourse_parser.set_defaults(action=from_discourse)
from_discourse_parser.add_argument('url', help='URL of discourse topic')
def _topic_path(path: str, num: int = 0):
return '{}/{}.json'.format(TOPIC_RE.match(path).group(0), num)
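# Illustrative example with a hypothetical forum URL (not from the source):
# _topic_path('https://forum.example/t/some-topic/123', 4)
# -> 'https://forum.example/t/some-topic/4.json'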
def _posts(path: str):
last_post = -1
posts_seen = 1
while posts_seen > 0:
posts_seen = 0
data = requests.get(_topic_path(path, last_post + 1)).json()
for post in data['post_stream']['posts']:
if post['post_number'] > last_post:
posts_seen += 1
last_post = post['post_number']
yield post
def _link_urls(path: str):
for post in _posts(path):
for link in post.get('link_counts', ()):
yield link['url']
def from_discourse(args: Namespace) -> Iterable:
for url in _link_urls(args.url):
yield Link(media=url)
|
{
"content_hash": "58a85ea3d9d5ae3aea7fd549de4a7a85",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 96,
"avg_line_length": 26.434782608695652,
"alnum_prop": 0.6274671052631579,
"repo_name": "kcsaff/tuneharvest",
"id": "d3077e6a56892c79e6548a32dd14646a39678187",
"size": "1216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tuneharvest/sources/discourse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "173"
},
{
"name": "Python",
"bytes": "26144"
}
],
"symlink_target": ""
}
|
from sanic.response import text
from sanic.router import RouteExists
import pytest
@pytest.mark.parametrize(
"method,attr, expected",
[
("get", "text", "OK1 test"),
("post", "text", "OK2 test"),
("put", "text", "OK2 test"),
("delete", "status", 405),
],
)
def test_overload_dynamic_routes(app, method, attr, expected):
@app.route("/overload/<param>", methods=["GET"])
async def handler1(request, param):
return text("OK1 " + param)
@app.route("/overload/<param>", methods=["POST", "PUT"])
async def handler2(request, param):
return text("OK2 " + param)
request, response = getattr(app.test_client, method)("/overload/test")
assert getattr(response, attr) == expected
def test_overload_dynamic_routes_exist(app):
@app.route("/overload/<param>", methods=["GET"])
async def handler1(request, param):
return text("OK1 " + param)
@app.route("/overload/<param>", methods=["POST", "PUT"])
async def handler2(request, param):
return text("OK2 " + param)
    # if this doesn't raise an error, then at least the below should happen:
# assert response.text == 'Duplicated'
with pytest.raises(RouteExists):
@app.route("/overload/<param>", methods=["PUT", "DELETE"])
async def handler3(request, param):
return text("Duplicated")
|
{
"content_hash": "3f725f796e1167090d31606b4c3547f9",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 32.02325581395349,
"alnum_prop": 0.6172839506172839,
"repo_name": "lixxu/sanic",
"id": "6a5c57c68cf28f2267377acc4354c0bdfbc4b187",
"size": "1377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_dynamic_routes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "529"
},
{
"name": "Go",
"bytes": "482"
},
{
"name": "HTML",
"bytes": "1173"
},
{
"name": "Makefile",
"bytes": "1517"
},
{
"name": "Python",
"bytes": "482115"
},
{
"name": "Shell",
"bytes": "462"
}
],
"symlink_target": ""
}
|
from typing import Any, Callable, Optional
from zerver.models import get_user_profile_by_id
from zerver.lib.rate_limiter import client, max_api_calls, max_api_window, \
RateLimitedUser
from django.core.management.base import BaseCommand, CommandParser
from django.conf import settings
import logging
import time
class Command(BaseCommand):
help = """Checks redis to make sure our rate limiting system hasn't grown a bug and left redis with a bunch of data
Usage: ./manage.py [--trim] check_redis"""
def add_arguments(self, parser):
# type: (CommandParser) -> None
parser.add_argument('-t', '--trim',
dest='trim',
default=False,
action='store_true',
help="Actually trim excess")
def _check_within_range(self, key, count_func, trim_func=None):
# type: (str, Callable[[], int], Optional[Callable[[str, int], None]]) -> None
user_id = int(key.split(':')[1])
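        # Key shape assumed by this parse (mirrors the wildcards in handle()),
        # e.g. an illustrative key 'ratelimit:42:api:list' gives user_id == 42.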
try:
user = get_user_profile_by_id(user_id)
except Exception:
user = None
entity = RateLimitedUser(user)
max_calls = max_api_calls(entity)
age = int(client.ttl(key))
if age < 0:
logging.error("Found key with age of %s, will never expire: %s" % (age, key,))
count = count_func()
if count > max_calls:
logging.error("Redis health check found key with more elements \
than max_api_calls! (trying to trim) %s %s" % (key, count))
if trim_func is not None:
client.expire(key, max_api_window(entity))
trim_func(key, max_calls)
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
if not settings.RATE_LIMITING:
print("This machine is not using redis or rate limiting, aborting")
exit(1)
# Find all keys, and make sure they're all within size constraints
wildcard_list = "ratelimit:*:*:list"
wildcard_zset = "ratelimit:*:*:zset"
trim_func = lambda key, max_calls: client.ltrim(key, 0, max_calls - 1) # type: Optional[Callable[[str, int], None]]
if not options['trim']:
trim_func = None
lists = client.keys(wildcard_list)
for list_name in lists:
self._check_within_range(list_name,
lambda: client.llen(list_name),
trim_func)
zsets = client.keys(wildcard_zset)
for zset in zsets:
now = time.time()
# We can warn on our zset being too large, but we don't know what
# elements to trim. We'd have to go through every list item and take
# the intersection. The best we can do is expire it
self._check_within_range(zset,
lambda: client.zcount(zset, 0, now),
lambda key, max_calls: None)
|
{
"content_hash": "70cd58ba40e470d01388df813220ab00",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 124,
"avg_line_length": 39.76315789473684,
"alnum_prop": 0.5625413633355394,
"repo_name": "amanharitsh123/zulip",
"id": "7b670c1b43ef922fe8689d7e65a584efc3269508",
"size": "3023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/management/commands/check_redis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "432211"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "494378"
},
{
"name": "JavaScript",
"bytes": "2167185"
},
{
"name": "Nginx",
"bytes": "1485"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86921"
},
{
"name": "Python",
"bytes": "3792729"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "61752"
}
],
"symlink_target": ""
}
|
import os
import platform
import shlex
import subprocess
import sys
import time
from core.api.Status import Status
from core.exceptions import UnsupportedPlatformException
class Executor(object):
def __init__(
self,
cwd: str
):
self.process = None
self.cwd = cwd
def logged_call(
self,
cmd,
logfile: str = 'dune.log',
stderr: bool = False
):
if 'posix' not in sys.builtin_module_names:
raise UnsupportedPlatformException
log = open(
os.path.join(self.cwd, logfile),
'ab'
)
cmd = _convert_subprocess_cmd(cmd)
try:
if not stderr:
self.process = subprocess.Popen(
args=cmd,
cwd=self.cwd,
stdout=subprocess.PIPE,
bufsize=-1,
close_fds=True
)
else:
self.process = subprocess.Popen(
args=cmd,
cwd=self.cwd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
close_fds=True
)
except subprocess.CalledProcessError as e:
_perror(e)
while True:
line = self.process.stdout.readline()
if line == b'':
# No output in stdout
if not self.is_running():
# subprocess has completed execution
                    # break (rather than return) so log.close() below runs
                    break  # TODO: Implement status codes
else:
print("Running")
# subprocess is still running
# TODO: This can be optimized by sleeping for a while
pass
else:
# Found some text in stdout
# noinspection PyTypeChecker
log.write(line)
# Flush file object to ensure real-time logging
log.flush()
log.close()
def call(self, cmd):
if 'posix' not in sys.builtin_module_names:
raise UnsupportedPlatformException
cmd = _convert_subprocess_cmd(cmd)
try:
self.process = subprocess.Popen(
args=cmd,
cwd=self.cwd,
stdout=subprocess.PIPE
)
except subprocess.CalledProcessError as e:
_perror(e)
def probe_status(self):
# FIXME: Pointless line to satisfy processor clock
time.sleep(0.5)
status = self.process.poll()
if status is None:
return Status.RUNNING
        elif status == 0:
return Status.SUCCEEDED
elif status < 0:
return Status.KILLED
else:
# FIXME: Investigate more into this case
return Status.FAILED
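    # Illustrative POSIX semantics (our note, not original commentary): a child
    # killed by SIGTERM reports poll() == -15, which probe_status() maps to
    # Status.KILLED.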
def is_running(self) -> bool:
# FIXME: Pointless line to satisfy processor clock
time.sleep(0.5)
return self.process.poll() is None
def has_completed(self) -> bool:
# FIXME: Pointless line to satisfy processor clock
time.sleep(0.5)
        return self.process.poll() == 0
def was_terminated(self) -> bool:
# FIXME: Pointless line to satisfy processor clock
time.sleep(0.5)
        status = self.process.poll()
        # poll() returns None while the process is still running; guard so the
        # comparison cannot raise a TypeError in Python 3
        return status is not None and status < 0
def terminate(self):
self.process.terminate()
self.process.stdout.close()
def close(self):
self.process.stdout.close()
@property
def pid(self) -> int:
return self.process.pid
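# Usage sketch (hypothetical command and cwd, not part of the original module):
#   ex = Executor(cwd='/tmp')
#   ex.logged_call('echo hello')  # blocks until the child process exits
#   assert ex.probe_status() == Status.SUCCEEDED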
def _convert_subprocess_cmd(cmd):
if platform.system() == 'Windows':
raise UnsupportedPlatformException
else:
return shlex.split(cmd)
def _perror(e):
print("subprocess.CalledProcessError: Command '%s' returned non-zero exit status %s" % (
' '.join(e.cmd), str(e.returncode)))
# TODO: Implement cleanup()
# Communicate return code to the calling program if any
sys.exit(e.returncode)
def call(cmd, cwd):
cmd = _convert_subprocess_cmd(cmd)
try:
subprocess.check_call(
cmd,
cwd=cwd,
shell=False,
stdin=subprocess.PIPE,
stdout=None,
stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
_perror(e)
def check_output(cmd, cwd):
cmd = _convert_subprocess_cmd(cmd)
try:
out = subprocess.check_output(
cmd,
cwd=cwd,
shell=False,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT
).decode('utf-8')
except subprocess.CalledProcessError as e:
_perror(e)
    # _perror() calls sys.exit(), so `out` is always bound here; returning
    # from a `finally` block would have swallowed that exit
    return out
def pidof(name):
cmd = _convert_subprocess_cmd('pidof ' + name)
return subprocess.check_output(
cmd
)
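# Usage sketch for the module-level helpers (commands and paths are
# hypothetical):
#   call('mkdir -p build', cwd='/tmp')
#   listing = check_output('ls -la', cwd='/tmp')
#   pids = pidof('python')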
|
{
"content_hash": "6628de359d27d6568920d0c1ef30b901",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 92,
"avg_line_length": 25.957894736842107,
"alnum_prop": 0.5277777777777778,
"repo_name": "adyasha/dune",
"id": "7732f5c126565acdf87743cc5c78a102df37d551",
"size": "4932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/utils/Executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33"
},
{
"name": "HTML",
"bytes": "17358"
},
{
"name": "JavaScript",
"bytes": "8145"
},
{
"name": "Python",
"bytes": "26098"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import sys
import tempfile
import objdump_parser
import test_format
class RdfaTestRunner(test_format.TestRunner):
SECTION_NAME = 'dis'
def CommandLineOptions(self, parser):
parser.add_option('--objdump',
help='Path to objdump')
def GetSectionContent(self, options, sections):
arch = {32: '-Mi386', 64: '-Mx86-64'}[options.bits]
data = ''.join(test_format.ParseHex(sections['hex']))
tmp = tempfile.NamedTemporaryFile(mode='wb', delete=False)
try:
tmp.write(data)
tmp.close()
objdump_proc = subprocess.Popen(
[options.objdump,
'-mi386', arch, '--target=binary',
'--disassemble-all', '--disassemble-zeroes',
'--insn-width=15',
tmp.name],
stdout=subprocess.PIPE)
result = ''.join(objdump_parser.SkipHeader(objdump_proc.stdout))
return_code = objdump_proc.wait()
assert return_code == 0, 'error running objdump'
finally:
tmp.close()
os.remove(tmp.name)
return result
def main(argv):
RdfaTestRunner().Run(argv)
if __name__ == '__main__':
main(sys.argv[1:])
|
{
"content_hash": "c682a8dc9b0d241c645e5813bc11d1e9",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 70,
"avg_line_length": 22.862745098039216,
"alnum_prop": 0.6149228130360206,
"repo_name": "Lind-Project/native_client",
"id": "120e0477ecad56733232686ba6d02f696549a4d1",
"size": "1166",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/trusted/validator_ragel/check_dis_section.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "149910"
},
{
"name": "Batchfile",
"bytes": "10418"
},
{
"name": "C",
"bytes": "10425715"
},
{
"name": "C++",
"bytes": "7409986"
},
{
"name": "HTML",
"bytes": "183711"
},
{
"name": "JavaScript",
"bytes": "5925"
},
{
"name": "Logos",
"bytes": "647"
},
{
"name": "Makefile",
"bytes": "65439"
},
{
"name": "Objective-C++",
"bytes": "2658"
},
{
"name": "Python",
"bytes": "2127774"
},
{
"name": "Ragel",
"bytes": "104506"
},
{
"name": "Shell",
"bytes": "454354"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from contentblocks.views import contentblock_view, contentblock_edit
from .views import home, events_preview, about_contact
urlpatterns = [
# Main site
url(r'^$', home, {'event_preview_days': 14}, name='home'),
# url(r'^events_preview/$', events_preview, {'event_preview_days': 14}, name='eventspreview'),
url(r'^about_contact/$', about_contact, name='about_contact'),
# URLConfs from apps
# url(r'^events/', include('events.urls')),
url(r'^member/', include('clubmembers.urls')),
# Django apps
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('regbackend.urls')),
# Third-party apps
url(r'^quiz/', include('quiz.urls')),
# Contentblock processor (must be last)
url(r'^(?P<page>\w+)/$', contentblock_view, name='contentblock_view'),
url(r'^(?P<page>\w+)/edit/$', contentblock_edit, name='contentblock_edit'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
{
"content_hash": "319498ea32e172dcbadb47847b037eb6",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 96,
"avg_line_length": 35.294117647058826,
"alnum_prop": 0.7033333333333334,
"repo_name": "Alofoxx/club-websystem",
"id": "d5d70a8b33ee6cccf95467021e1727194a85599f",
"size": "1200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/system/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83216"
},
{
"name": "HTML",
"bytes": "44471"
},
{
"name": "JavaScript",
"bytes": "494142"
},
{
"name": "Python",
"bytes": "144496"
},
{
"name": "Shell",
"bytes": "4239"
}
],
"symlink_target": ""
}
|
from factory import Factory
class DjangoModelFactory(Factory):
ABSTRACT_FACTORY = True
@classmethod
def _create(cls, target_class, *args, **kwargs):
"""Create an instance of the model, and save it to the database."""
obj = target_class(*args, **kwargs)
obj.save()
return obj
|
{
"content_hash": "354c3e783e271fee550d6ef392ac4bfa",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 24.846153846153847,
"alnum_prop": 0.6346749226006192,
"repo_name": "RedBulli/MemberRegister",
"id": "5d99ed7076ea291d0656d845f002019565ead99f",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MemberRegister/tests/factories/django_model_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7288"
}
],
"symlink_target": ""
}
|
from .basicpage import BasicPage
from .templatesettings import TemplateSettings
|
{
"content_hash": "5ab8ca3171aa357e21c2e397c3de1b3d",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 46,
"avg_line_length": 39.5,
"alnum_prop": 0.8860759493670886,
"repo_name": "tracon/dragontail",
"id": "60d9d8e681dd1cd26b67776b7bc47f2e8854bf3f",
"size": "79",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dragontail/content/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1097"
},
{
"name": "HTML",
"bytes": "8906"
},
{
"name": "Python",
"bytes": "19848"
}
],
"symlink_target": ""
}
|
class Element(dict):
fields = {}
def __init__(self, fields=None, element=None, keys=None):
if fields is None: fields={}
self.element = element
dict.__init__(self, dict.fromkeys(self.fields))
if not keys:
keys = fields.keys()
for key in keys:
if key in fields:
self[key] = fields[key]
def __getattr__(self, name):
if hasattr(self.__dict__, name):
return getattr(self.__dict__, name)
elif hasattr(self.element, name):
return getattr(self.element, name)
else:
            raise AttributeError("class Element has no attribute %s" % name)
|
{
"content_hash": "b4aed2de3ee937983ddf385952c646cd",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 30.863636363636363,
"alnum_prop": 0.5449189985272459,
"repo_name": "yippeecw/sfa",
"id": "36ad12f73b43870fea8d4ee2cd7342f5fd98b290",
"size": "679",
"binary": false,
"copies": "2",
"ref": "refs/heads/geni-v3",
"path": "sfa/rspecs/elements/element.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "424"
},
{
"name": "Makefile",
"bytes": "14208"
},
{
"name": "Python",
"bytes": "1398912"
},
{
"name": "Shell",
"bytes": "19422"
},
{
"name": "XSLT",
"bytes": "15293"
}
],
"symlink_target": ""
}
|
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_ARB_uniform_buffer_object'
def _f( function ):
return _p.createFunction( function,_p.GL,'GL_ARB_uniform_buffer_object',False)
_p.unpack_constants( """GL_UNIFORM_BUFFER 0x8A11
GL_UNIFORM_BUFFER_BINDING 0x8A28
GL_UNIFORM_BUFFER_START 0x8A29
GL_UNIFORM_BUFFER_SIZE 0x8A2A
GL_MAX_VERTEX_UNIFORM_BLOCKS 0x8A2B
GL_MAX_GEOMETRY_UNIFORM_BLOCKS 0x8A2C
GL_MAX_FRAGMENT_UNIFORM_BLOCKS 0x8A2D
GL_MAX_COMBINED_UNIFORM_BLOCKS 0x8A2E
GL_MAX_UNIFORM_BUFFER_BINDINGS 0x8A2F
GL_MAX_UNIFORM_BLOCK_SIZE 0x8A30
GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS 0x8A31
GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS 0x8A32
GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS 0x8A33
GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT 0x8A34
GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH 0x8A35
GL_ACTIVE_UNIFORM_BLOCKS 0x8A36
GL_UNIFORM_TYPE 0x8A37
GL_UNIFORM_SIZE 0x8A38
GL_UNIFORM_NAME_LENGTH 0x8A39
GL_UNIFORM_BLOCK_INDEX 0x8A3A
GL_UNIFORM_OFFSET 0x8A3B
GL_UNIFORM_ARRAY_STRIDE 0x8A3C
GL_UNIFORM_MATRIX_STRIDE 0x8A3D
GL_UNIFORM_IS_ROW_MAJOR 0x8A3E
GL_UNIFORM_BLOCK_BINDING 0x8A3F
GL_UNIFORM_BLOCK_DATA_SIZE 0x8A40
GL_UNIFORM_BLOCK_NAME_LENGTH 0x8A41
GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS 0x8A42
GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES 0x8A43
GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER 0x8A44
GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER 0x8A45
GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER 0x8A46
GL_INVALID_INDEX 0xFFFFFFFF""", globals())
glget.addGLGetConstant( GL_UNIFORM_BUFFER_BINDING, (1,) )
glget.addGLGetConstant( GL_MAX_VERTEX_UNIFORM_BLOCKS, (1,) )
glget.addGLGetConstant( GL_MAX_GEOMETRY_UNIFORM_BLOCKS, (1,) )
glget.addGLGetConstant( GL_MAX_FRAGMENT_UNIFORM_BLOCKS, (1,) )
glget.addGLGetConstant( GL_MAX_COMBINED_UNIFORM_BLOCKS, (1,) )
glget.addGLGetConstant( GL_MAX_UNIFORM_BUFFER_BINDINGS, (1,) )
glget.addGLGetConstant( GL_MAX_UNIFORM_BLOCK_SIZE, (1,) )
glget.addGLGetConstant( GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS, (1,) )
glget.addGLGetConstant( GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS, (1,) )
glget.addGLGetConstant( GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS, (1,) )
glget.addGLGetConstant( GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, (1,) )
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,ctypes.POINTER( ctypes.POINTER( _cs.GLchar )),arrays.GLuintArray)
def glGetUniformIndices( program,uniformCount,uniformNames,uniformIndices ):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray,_cs.GLenum,arrays.GLintArray)
def glGetActiveUniformsiv( program,uniformCount,uniformIndices,pname,params ):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetActiveUniformName( program,uniformIndex,bufSize,length,uniformName ):pass
@_f
@_p.types(_cs.GLuint,_cs.GLuint,arrays.GLcharArray)
def glGetUniformBlockIndex( program,uniformBlockName ):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetActiveUniformBlockiv( program,uniformBlockIndex,pname,params ):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetActiveUniformBlockName( program,uniformBlockIndex,bufSize,length,uniformBlockName ):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glUniformBlockBinding( program,uniformBlockIndex,uniformBlockBinding ):pass
def glInitUniformBufferObjectARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
|
{
"content_hash": "b295d4fb868e2050afe391a1c15cb9be",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 103,
"avg_line_length": 46.35897435897436,
"alnum_prop": 0.8017146017699115,
"repo_name": "frederica07/Dragon_Programming_Process",
"id": "9e753da46b0ed98113f65ff34a1e67332bef259b",
"size": "3616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyOpenGL-3.0.2/OpenGL/raw/GL/ARB/uniform_buffer_object.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "Python",
"bytes": "2558317"
}
],
"symlink_target": ""
}
|
from .common import *
from av.codec import Codec
from av.video.format import VideoFormat
class TestCodecs(TestCase):
def test_codec_mpeg4(self):
c = Codec('mpeg4')
self.assertEqual(c.name, 'mpeg4')
self.assertEqual(c.long_name, 'MPEG-4 part 2')
self.assertEqual(c.type, 'video')
self.assertEqual(c.id, 13)
self.assertTrue(c.is_encoder)
self.assertTrue(c.is_decoder)
formats = c.video_formats
self.assertTrue(formats)
self.assertIsInstance(formats[0], VideoFormat)
self.assertTrue(any(f.name == 'yuv420p' for f in formats))
|
{
"content_hash": "aa7d5e59bdaf4f778749add7d46babb5",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 66,
"avg_line_length": 29.523809523809526,
"alnum_prop": 0.646774193548387,
"repo_name": "danielballan/PyAV",
"id": "d9f73bcd0a34d29503f034d3199e6d5a2172f9d8",
"size": "620",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_codec.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5572"
},
{
"name": "Makefile",
"bytes": "1803"
},
{
"name": "Python",
"bytes": "266098"
},
{
"name": "Shell",
"bytes": "2263"
}
],
"symlink_target": ""
}
|
"""
Match API endpoints - list matches and get replays/error logs.
"""
import io
import flask
import sqlalchemy
import google.cloud.exceptions as gcloud_exceptions
import google.cloud.storage as gcloud_storage
from .. import model, util
from .blueprint import web_api
from . import util as api_util
def get_match_helper(match_id):
"""
Get a particular match by its ID.
:param match_id: The ID of the match.
:return: A dictionary with the game information.
"""
with model.engine.connect() as conn:
query = conn.execute(sqlalchemy.sql.select([
model.game_participants.c.user_id,
model.game_participants.c.bot_id,
model.game_participants.c.rank,
model.game_participants.c.version_number,
model.game_participants.c.player_index,
model.game_participants.c.timed_out,
model.game_participants.c.leaderboard_rank,
model.game_participants.c.mu,
model.game_participants.c.sigma,
]).where(
model.game_participants.c.game_id == match_id
))
match = conn.execute(sqlalchemy.sql.select([
model.games.c.replay_name,
model.games.c.replay_bucket,
model.games.c.map_width,
model.games.c.map_height,
model.games.c.time_played,
model.games.c.challenge_id,
]).where(
model.games.c.id == match_id
)).first()
if not match:
return None
result = {
"map_width": match["map_width"],
"map_height": match["map_height"],
"replay": match["replay_name"],
"replay_class": match["replay_bucket"],
"time_played": match["time_played"],
"challenge_id": match["challenge_id"],
"players": {}
}
for row in query.fetchall():
result["game_id"] = match_id
result["players"][row["user_id"]] = {
"bot_id": row["bot_id"],
"version_number": row["version_number"],
"player_index": row["player_index"],
"rank": row["rank"],
"timed_out": bool(row["timed_out"]),
"leaderboard_rank": row["leaderboard_rank"],
"mu": row["mu"],
"sigma": row["sigma"],
}
# Update game_view_stat table
conn.execute(model.game_view_stats.update().where(
model.game_view_stats.c.game_id == match_id
).values(
views_total=model.game_view_stats.c.views_total + 1,
))
return result
def list_matches_helper(offset, limit, participant_clause,
where_clause, order_clause):
"""
Generate a list of matches by certain criteria.
    :param int offset: How many results to skip before returning matches.
:param int limit: How many results to return.
:param participant_clause: An SQLAlchemy clause to filter the matches,
based on the participants in the match.
:param where_clause: An SQLAlchemy clause to filter the matches.
:param list order_clause: A list of SQLAlchemy conditions to sort the
results on.
:return: A list of game data dictionaries.
"""
result = []
with model.engine.connect() as conn:
query = sqlalchemy.sql.select([
model.games.c.id,
model.games.c.replay_name,
model.games.c.replay_bucket,
model.games.c.map_width,
model.games.c.map_height,
model.games.c.time_played,
model.games.c.challenge_id,
model.game_stats.c.turns_total,
model.game_stats.c.planets_destroyed,
model.game_stats.c.ships_produced,
model.game_stats.c.ships_destroyed,
]).select_from(model.games.outerjoin(
model.game_stats,
(model.games.c.id == model.game_stats.c.game_id)
)).where(
where_clause &
sqlalchemy.sql.exists(
model.game_participants.select(
participant_clause &
(model.game_participants.c.game_id == model.games.c.id)
)
)
).order_by(
*order_clause
).offset(offset).limit(limit).reduce_columns()
matches = conn.execute(query)
for match in matches.fetchall():
participants = conn.execute(
model.game_participants.join(
model.users,
model.game_participants.c.user_id == model.users.c.id
).select(
model.game_participants.c.game_id == match["id"]
)
)
match = {
"game_id": match["id"],
"map_width": match["map_width"],
"map_height": match["map_height"],
"replay": match["replay_name"],
"replay_class": match["replay_bucket"],
"time_played": match["time_played"],
"turns_total": match["turns_total"],
"planets_destroyed": match["planets_destroyed"],
"ships_produced": match["ships_produced"],
"ships_destroyed": match["ships_destroyed"],
"challenge_id": match["challenge_id"],
"players": {},
}
for participant in participants:
match["players"][participant["user_id"]] = {
"username": participant["username"],
"bot_id": participant["bot_id"],
"version_number": participant["version_number"],
"player_index": participant["player_index"],
"rank": participant["rank"],
"timed_out": bool(participant["timed_out"]),
"leaderboard_rank": participant["leaderboard_rank"],
"mu": participant["mu"],
"sigma": participant["sigma"],
}
result.append(match)
return result
@web_api.route("/match")
@util.cross_origin(methods=["GET"])
def list_matches():
offset, limit = api_util.get_offset_limit()
where_clause, order_clause, manual_sort = api_util.get_sort_filter({
"game_id": model.games.c.id,
"time_played": model.games.c.time_played,
"views_total": model.game_view_stats.c.views_total,
"turns_total": model.game_stats.c.turns_total,
"planets_destroyed": model.game_stats.c.planets_destroyed,
"ships_produced": model.game_stats.c.ships_produced,
"ships_destroyed": model.game_stats.c.ships_destroyed,
"challenge_id": model.games.c.challenge_id,
}, ["timed_out"])
participant_clause = sqlalchemy.true()
for (field, _, _) in manual_sort:
if field == "timed_out":
participant_clause &= model.game_participants.c.timed_out
result = list_matches_helper(
offset, limit, participant_clause, where_clause, order_clause)
return flask.jsonify(result)
@web_api.route("/match/<int:match_id>")
def get_match(match_id):
match = get_match_helper(match_id)
if not match:
raise util.APIError(404, message="Match not found.")
return flask.jsonify(match)
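# Illustrative requests against the two endpoints above (the match id and
# pagination values are hypothetical):
#   GET /match?offset=0&limit=10
#   GET /match/1500123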
@web_api.route("/replay/class/<int:replay_bucket>/name/<replay_name>",
methods=["GET"])
@util.cross_origin(methods=["GET"])
def get_replay(replay_bucket, replay_name):
bucket = model.get_replay_bucket(replay_bucket)
blob = gcloud_storage.Blob(replay_name, bucket, chunk_size=262144)
buffer = io.BytesIO()
try:
blob.download_to_file(buffer)
except gcloud_exceptions.NotFound:
raise util.APIError(404, message="Replay not found.")
buffer.seek(0)
response = flask.make_response(flask.send_file(
buffer,
mimetype="application/x-halite-2-replay",
as_attachment=True,
attachment_filename="{}.{}.hlt".format(replay_name, replay_bucket)))
response.headers["Content-Length"] = str(buffer.getbuffer().nbytes)
return response
|
{
"content_hash": "f1efea02304090f13542797c21f16d50",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 76,
"avg_line_length": 35.327510917030565,
"alnum_prop": 0.5644004944375772,
"repo_name": "HaliteChallenge/Halite-II",
"id": "303c9f87e84834a1133b96f8db10b2e2094dc096",
"size": "8090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apiserver/apiserver/web/match.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "8111"
},
{
"name": "C",
"bytes": "1910003"
},
{
"name": "C#",
"bytes": "31000"
},
{
"name": "C++",
"bytes": "820400"
},
{
"name": "CMake",
"bytes": "2698"
},
{
"name": "CSS",
"bytes": "412005"
},
{
"name": "Clojure",
"bytes": "15989"
},
{
"name": "Common Lisp",
"bytes": "20600"
},
{
"name": "Dart",
"bytes": "14090"
},
{
"name": "Elixir",
"bytes": "22917"
},
{
"name": "F#",
"bytes": "17888"
},
{
"name": "Go",
"bytes": "14458"
},
{
"name": "HTML",
"bytes": "62449"
},
{
"name": "Haskell",
"bytes": "15459"
},
{
"name": "Java",
"bytes": "28548"
},
{
"name": "JavaScript",
"bytes": "6941451"
},
{
"name": "Julia",
"bytes": "17710"
},
{
"name": "Kotlin",
"bytes": "18922"
},
{
"name": "Makefile",
"bytes": "32949"
},
{
"name": "Mako",
"bytes": "532"
},
{
"name": "OCaml",
"bytes": "21982"
},
{
"name": "PHP",
"bytes": "27244"
},
{
"name": "Python",
"bytes": "491423"
},
{
"name": "Ruby",
"bytes": "156808"
},
{
"name": "Rust",
"bytes": "20054"
},
{
"name": "Scala",
"bytes": "23110"
},
{
"name": "Shell",
"bytes": "30859"
},
{
"name": "Swift",
"bytes": "27281"
},
{
"name": "Vue",
"bytes": "192540"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
from healthcheck import __version__
setup(
name="tsuru-hcaas",
version=__version__,
description="Healthcheck as a service API for Tsuru PaaS",
author="Tsuru",
author_email="tsuru@corp.globo.com",
classifiers=[
"Programming Language :: Python :: 2.7",
],
packages=find_packages(exclude=["tests"]),
include_package_data=True,
install_requires=["Flask==1.0.2", "pyzabbix==0.7.4", "pymongo==3.4.0"],
)
|
{
"content_hash": "2ad4d198a4b283348e3a34b435db6b5a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 27.38888888888889,
"alnum_prop": 0.6490872210953347,
"repo_name": "tsuru/healthcheck-as-a-service",
"id": "7dadf7fb0c57407e80fbe81c8ee6df324765a3c9",
"size": "693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "97"
},
{
"name": "Makefile",
"bytes": "541"
},
{
"name": "Procfile",
"bytes": "79"
},
{
"name": "Python",
"bytes": "107061"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2010 ICRL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import division, with_statement, unicode_literals
import os, sys
import logging
import threading
import time
import base64
import xep_0096
from .. xmlstream.matcher.xpath import MatchXPath
from .. xmlstream.matcher.xmlmask import MatchXMLMask
from .. xmlstream.matcher.id import MatcherId
from .. xmlstream.handler.callback import Callback
from sleekxmpp.xmlstream import register_stanza_plugin
from .. xmlstream.stanzabase import ElementBase, ET, JID
from .. stanza.iq import Iq
STREAM_CLOSED_EVENT = 'BYTE_STREAM_CLOSED'
def sendAckIQ(xmpp, to, id):
iq = xmpp.makeIqResult(id=id)
iq['to'] = to
iq.send()
def sendCloseStream(xmpp, to, sid):
close = ET.Element('{%s}close' %xep_0047.XMLNS, sid=sid)
iq = xmpp.makeIqSet()
iq['to'] = to
iq.setPayload(close)
iq.send()
class xep_0047(xep_0096.FileTransferProtocol):
'''
In-band file transfer for xmpp.
Currently only IQ transfer is supported
Plugin will not accept a file transfer if the sender or recipient JID is the
same as the currently logged in JID.
Plugin configuration options:
acceptTransfers - Boolean - Sets the plugin to either accept or deny transfers
saveDirectory - String - The default directory that incoming file transfers will be saved in
saveNamePrefix - String - Prefix that will be prepended to the saved file name of an incoming transfer
overwriteFile - Boolean - If an incoming file transfer should overwrite a file if that file already exists
stanzaType - String - Either IQ or message, Currently only iq is supported
maxSessions - integer - The max number of send/receive sessions that may run concurrently
transferTimeout - integer - How long should a stream session wait between messages
maxBlockSize - integer - Largest block size that a stream session should accept (limited by xmpp server)
prefBlockSize - integer - The preferred block size for file transfer
acceptTransferCallback - function ptr- This should be a function pointer that will return a boolean value letting the caller know if a
file transfer should or should not be accepted.
fileNameCallback - function ptr- This should be a function pointer that will return a string with the full path and name a file should be saved as.
If the provided function pointer returns None or is not provided the default saveDirectory + saveNamePrefix_sid will be used.
'''
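    # Illustrative plugin configuration (the values are hypothetical, not
    # from the original source); assumes a SleekXMPP client instance `xmpp`:
    #   xmpp.register_plugin('xep_0047', pconfig={
    #       'saveDirectory': '/tmp/incoming/',
    #       'prefBlockSize': 4096,
    #       'acceptTransferCallback': lambda sid: True,
    #   })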
XMLNS = 'http://jabber.org/protocol/ibb'
def plugin_init(self):
self.xep = '0047'
self.description = 'in-band file transfer'
self.acceptTransfers = self.config.get('acceptTransfers', True)
self.saveDirectory = self.config.get('saveDirectory', '/tmp/')
self.saveNamePrefix = self.config.get('saveNamePrefix', 'xep_0047_')
self.overwriteFile = self.config.get('overwriteFile', True)
self.stanzaType = self.config.get('stanzaType', 'iq') #Currently only IQ is supported
self.maxSessions = self.config.get('maxSessions', 2)
self.transferTimeout = self.config.get('transferTimeout', 120) #how long we should wait between data messages until we consider the stream invalid
self.maxBlockSize = self.config.get('maxBlockSize', 8192)
self.prefBlockSize = self.config.get('prefBlockSize', 4096)
#callbacks
self.acceptTransferCallback = self.config.get('acceptTransferCallback')
self.fileNameCallback = self.config.get('fileNameCallback')
#thread setup
self.streamSessions = {} #id:thread
self.__streamSetupLock = threading.Lock()
#Register the xmpp stanzas used in this plugin
register_stanza_plugin(Iq, Open)
register_stanza_plugin(Iq, Close)
register_stanza_plugin(Iq, Data)
#add handlers to listen for incoming requests
self.xmpp.registerHandler(Callback('xep_0047_open_stream', MatchXPath('{%s}iq/{%s}open' %(self.xmpp.default_ns, xep_0047.XMLNS)), self._handleIncomingTransferRequest, thread=True))
self.xmpp.registerHandler(Callback('xep_0047_close_stream', MatchXPath('{%s}iq/{%s}close' %(self.xmpp.default_ns, xep_0047.XMLNS)), self._handleStreamClosed, thread=False))
#self.xmpp.add_handler("<iq type='set'><open xmlns='http://jabber.org/protocol/ibb' /></iq>", self._handleIncomingTransferRequest, threaded=True)
#self.xmpp.add_handler("<iq type='set'><close xmlns='http://jabber.org/protocol/ibb' /></iq>", self._handleStreamClosed, threaded=False)
#Event handler to allow session threads to call back to the main processor to remove the thread
self.xmpp.add_event_handler(STREAM_CLOSED_EVENT, self._eventCloseStream, threaded=True, disposable=False)
def post_init(self):
xep_0096.FileTransferProtocol.post_init(self)
if self.xmpp.plugin.get('xep_0030'):
self.xmpp.plugin['xep_0030'].add_feature(xep_0047.XMLNS)
def sendFile(self, fileName, to, threaded=True, sid=None, **kwargs):
'''
Sends a file to the intended receiver if the receiver is available and
willing to accept the transfer. If the send is requested to be threaded
the session sid will be returned, otherwise the method will block until
the file has been sent and the session closed.
The returned sid can be used to check on the status of the transfer or
cancel the transfer.
Error Conditions:
-IOError will be raised if the file to be sent is not found
-TooManySessionsException will be raised if there are already more than
self.maxSessions running (configurable via plugin configuration)
-Exception will be raised if the sender is not available
-NotAcceptableException will be raised if the sender denies the transfer request
or if the sender full JID is equal to the recipient
-InBandFailedException will be raised if there is an error during the
file transfer
'''
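        # Hedged usage sketch (the JID and file path are hypothetical):
        #   sid = xmpp.plugin['xep_0047'].sendFile('/tmp/report.pdf',
        #                                          'friend@example.com/home',
        #                                          threaded=True)
        #   status = xmpp.plugin['xep_0047'].getSessionStatus(sid)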
#Init the stream with the recipient
logging.debug("About to send file: %s" %fileName)
with self.__streamSetupLock:
if len(self.streamSessions) > self.maxSessions:
raise TooManySessionsException()
if not os.path.isfile(fileName):
raise IOError('file: %s not found' %fileName)
if self.xmpp.fulljid == to:
raise NotAcceptableException('Error setting up the stream, can not send file to ourselves %s', self.xmpp.fulljid)
if not self.xmpp.state.ensure('connected'):
raise Exception('Not connected to a server!')
if sid is None:
sid = xep_0096.generateSid()
iq = self.xmpp.makeIqSet()
iq['to'] = to
openElem = ET.Element('{%s}open' %xep_0047.XMLNS, sid=sid, stanza=self.stanzaType)
openElem.set('block-size', str(self.prefBlockSize))
iq.setPayload(openElem)
result = iq.send(block=True, timeout=10)
if result.get('type') == 'error':
            if result.find('*/{urn:ietf:params:xml:ns:xmpp-stanzas}service-unavailable') is not None:
                raise Exception('user not online! User: %s' %to)
            elif result.find('*/{urn:ietf:params:xml:ns:xmpp-stanzas}not-acceptable') is not None:
raise NotAcceptableException('Error setting up the stream, receiver not ready %s' %result)
else:
raise Exception('Unknown error! %s' %result)
self.streamSessions[sid] = ByteStreamSession(self.xmpp, sid, JID(to), self.transferTimeout, self.prefBlockSize, self)
self.streamSessions[sid].start()
self.streamSessions[sid].sendFile(fileName, threaded)
return sid
def getSessionStatus(self, sid):
'''
Returns the status of the transfer specified by the sid. If the session
is not found none will be returned.
'''
session = self.streamSessions.get(sid)
if session:
return session.getStatus()
else:
return None
def getSessionStatusAll(self):
dict = {}
for session in self.streamSessions.values():
dict[session.sid] = session.getStatus()
return dict
def cancelSend(self, sid):
'''
cancels an outgoing file transfer.
If the session is not found, method will pass
'''
session = self.streamSessions.get(sid)
if session:
session.cancelStream()
def setAcceptStatus(self, status):
'''
sets if xep_0047 plugin will accept in-band file transfers or not.
if switching from true to false any currently working sessions will
finish
'''
self.acceptTransfers = status
def _handleIncomingTransferRequest(self, iq):
logging.debug("incoming request to open file transfer stream")
with self.__streamSetupLock:
#Check the block size
if(self.maxBlockSize < int(iq['open']['block-size'])):
errIq = self.xmpp.makeIqError(id=iq['id'], condition='resource-constraint')
errIq['to'] = iq['from']
errIq['error']['type'] = 'modify'
errIq.send()
return
#Check to see if the file transfer should be accepted
acceptTransfer = False
if self.acceptTransferCallback:
acceptTransfer = self.acceptTransferCallback(sid=iq['open']['sid'])
else:
if self.acceptTransfers and len(self.streamSessions) < self.maxSessions:
acceptTransfer = True
#Ask where to save the file if the callback is present
            #Build the save path portably (resolves the old non-linux TODO)
            saveFileAs = os.path.join(self.saveDirectory, self.saveNamePrefix + iq['open']['sid'])
if self.fileNameCallback:
saveFileAs = self.fileNameCallback(sid=iq['open']['sid'])
#Do not accept a transfer from ourselves
if self.xmpp.fulljid == iq['from']:
acceptTransfer = False
if acceptTransfer:
logging.debug('saving file as: %s' %saveFileAs)
self.streamSessions[iq['open']['sid']] = ByteStreamSession(self.xmpp, iq['open']['sid'], iq['from'], self.transferTimeout, int(iq['open']['block-size']), self, saveFileAs)
self.streamSessions[iq['open']['sid']].start()
sendAckIQ(xmpp=self.xmpp, to=iq['from'], id=iq['id'])
else: #let the requesting party know we are not accepting file transfers
errIq = self.xmpp.makeIqError(id=iq['id'], condition='not-acceptable')
errIq['to'] = iq['from']
errIq['error']['type'] = 'cancel'
errIq.send()
def _handleStreamClosed(self, iq):
'''
Another party wishes to close a stream
'''
sid = iq['close']['sid']
from_ = iq['from'].jid
if self.streamSessions.get(sid) and self.streamSessions.get(sid).otherPartyJid.jid == iq['from'].jid:
with self.__streamSetupLock:
session = self.streamSessions.pop(sid)
session.streamClosed = True
session.process = False
session.join(5)
del session
sendAckIQ(self.xmpp, iq['from'], iq['id'])
else: #We don't know about this stream, send error
errIq = self.xmpp.makeIqError(id=iq['id'], condition='item-not-found')
errIq['to'] = iq['from']
errIq['error']['type'] = 'cancel'
errIq.send()
def _eventCloseStream(self, eventdata):
'''
Allows the session thread to
notify xep_0047 that a stream error has occurred or the stream has
finished and the session object should be disposed.
'''
with self.__streamSetupLock:
session = self.streamSessions[eventdata['sid']]
del self.streamSessions[eventdata['sid']]
session.join(60)
del session
class ByteStreamSession(threading.Thread):
def __init__(self, xmpp, sid, otherPartyJid, timeout, blockSize, plugin, recFileName = None):
threading.Thread.__init__(self, name='bytestream_session_%s' %sid)
#When we start the session the stream will already be open
#and we will want to process the I/O
self.process = True
self.streamClosed = False
self.__xmpp = xmpp
self.__plugin = plugin
self.__incSeqId = -1
self.__outSeqId = -1
self.__incSeqLock = threading.Lock()
self.__outSeqLock = threading.Lock()
self.__closeStreamLock = threading.Lock()
self.__lastMessage = time.time()
self.__incFile = None
self.__sendThread = None
self.__sendAckEvent = Event()
#block size needs to be a multiple of 4 for base 64 encoding, step
#the number down till it is divisible by 4 so we can fit in under the
#base64 encoded size
while blockSize % 4 != 0:
blockSize -= 1
self.__blockSize = blockSize
self.__fileReadSize = int(self.__blockSize / (4/3))
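        # Worked example (illustrative): with prefBlockSize=4096 the loop
        # above leaves blockSize=4096 (already divisible by 4) and
        # fileReadSize=3072; base64 turns every 3 raw bytes into 4
        # characters, so each encoded chunk is exactly 4096 characters --
        # the negotiated block size.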
self.sid = sid
self.timeout = timeout
self.recFileName = recFileName
self.otherPartyJid = otherPartyJid
#register to start receiving file packets
self.__xmpp.registerHandler(Callback('file_receiver_iq_%s' %self.sid, MatchXMLMask("<iq type='set'><data xmlns='http://jabber.org/protocol/ibb' sid='%s' /></iq>" %self.sid), self._handlePacket, thread=False))
#self.__xmpp.registerHandler(XMLCallback('file_receiver_message_%s' %self.sid, MatchXMLMask("<message><data xmlns='%s' sid='%s' /></message>" %(XMLNS, self.sid)), self._handlePacket, False, False, False))
#self.__xmpp.registerHandler(XMLCallback('file_receiver_iq_%s' %self.sid, MatchXMLMask("<iq type='set'><data xmlns='%s' sid='%s' /></iq>" %(XMLNS, self.sid)), self._handlePacket, False, False, False))
def getSavedFileName(self):
#TODO: this probably needs to be fixed up to work on OSes other than linux
if self.recFileName:
return self.recFileName
else:
return None
def run(self):
'''
The Session will timeout of a message has not been received in more than
self.timeout seconds since the last message.
This method takes care of opening the file for writing and ensuring that
the file is closed, closing the stream if the session times out, and
ensuring that if a file is being sent that the send will quiesce properly.
'''
try:
if self.getSavedFileName():
self.__incFile = open(self.getSavedFileName(), 'wb')
while self.process:
logging.debug("seconds since last message: %f" %self.__lastMessage)
if time.time() - self.__lastMessage <= self.timeout:
time.sleep(2)
else: # no file to send and the file transfer has timed out, close up the stream
logging.info('file transfer timeout')
self._closeStream()
break
        except Exception as e:
logging.error('error during file transfer. sid: %s, error: %s' %(self.sid, e))
finally:
logging.debug("end of stream. remove data handlers")
#remove the file handlers, stream has ended
self.__xmpp.removeHandler('file_receiver_iq_%s' %self.sid)
if self.__sendThread:
self.__sendThread.join()
del self.__sendThread
#close the file hander
if self.__incFile:
#self.__xmpp.event(xep_0096.FileTransferProtocol.FILE_FINISHED_RECEIVING, {'sid': self.sid, 'filename':self.getSavedFileName()})
self.__plugin.fileFinishedReceiving(sid=self.sid, filename=self.getSavedFileName())
self.__incFile.close()
logging.debug("finished processing packets")
def getNextIncSeqId(self):
with self.__incSeqLock:
self.__incSeqId += 1
return self.__incSeqId
def getNextOutSeqId(self):
with self.__outSeqLock:
self.__outSeqId += 1
return self.__outSeqId
def _handlePacket(self, iq):
#ensure the data packet is from the other party we are conversing with
#and the data is in the correct order
self.__lastMessage = time.time()
logging.debug('data: %s' %iq['data']['data'] )
logging.debug('data seq: %s' %iq['data']['seq'] )
logging.debug(iq['from'])
logging.debug('packet size: %s' %len(iq['data']['data']) )
nextSeqId = self.getNextIncSeqId()
if self.process:
if iq['from'].jid == self.otherPartyJid.jid and long(iq['data']['seq']) == nextSeqId and len(iq['data']['data']) <= self.__blockSize:
if self.__incFile: #write the file being sent if we have been giving somewhere to write it to
self.__incFile.write(base64.decodestring(iq['data']['data']))
#for IQ stanzas we must return a result
sendAckIQ(self.__xmpp, iq['from'], iq['id'])
else:
'''
packet not in correct order or bad sender
Ignore the input... Should we close the stream, something is wrong
if we get a packet from a different user on this byte stream. Could
possibly be an attack
'''
logging.warning('Bad file transfer packet received! Terminating session with %s' %self.otherPartyJid)
logging.error('seq #: %s expected seq: %i' %(iq['data']['seq'], nextSeqId) )
logging.error('packet size: %s Max Block size: %s' %(len(iq['data']['data']), self.__blockSize) )
self.process = False
self._closeStream()
def getStatus(self):
'''
Returns an dict of the following items:
sid - the sid of this session
processing - The processing state of this session
otherPartyJID - The other party we are swaping bytes with
streamClosed - If this ByteStream is closed or not
lastMessageTimestamp - The timestamp of the last received message (ack or data packet)
incFileName (optional) - if receiving a file, the full path and name of where the file is saved
incFileKBytes (optional)- The number of KBytes currently received
outFileKBytes (optional)- The number of bytes sent so far if sending a file
'''
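        # Illustrative shape of the returned dict (all values hypothetical):
        #   {'sid': 'a1b2c3', 'processing': True,
        #    'otherPartyJID': 'friend@example.com/home',
        #    'streamClosed': False, 'lastMessageTimestamp': 1285000000.0,
        #    'incFileName': '/tmp/xep_0047_a1b2c3', 'incFileKBytes': 40960}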
status = {}
status['sid'] = self.sid
status['processing'] = self.process
status['otherPartyJID'] = self.otherPartyJid.jid
status['streamClosed'] = self.streamClosed
status['lastMessageTimestamp'] = self.__lastMessage
if self.getSavedFileName():
status['incFileName'] = self.getSavedFileName()
status['incFileKBytes'] = self.__blockSize * self.__incSeqId
if self.__sendThread:
status['outFileKBytes'] = self.__fileReadSize * self.__outSeqId
return status
def cancelStream(self):
'''
Cancels the current session with the other party and closes the stream.
This should only be called when this sender wishes to cancel, and not when
the other party cancels this session.
'''
self.process = False
while self.isAlive():
time.sleep(.5)
self._closeStream()
if self.getSavedFileName():
os.remove(self.getSavedFileName())
def sendFile(self, fileName, threaded=False):
'''
Sending a file always runs in it's own thread, but if threaded = False
this method will block until the sending is completed or canceled. Only
1 file may be sent per session.
'''
if self.__sendThread:
raise TooManySessionsException('Can only send 1 file per byte stream')
self.__sendThread = threading.Thread(target=self._sendFile, name='Byte_Stream_Session_sender_%s' %self.sid, kwargs={str('fileName'): fileName})
self.__sendThread.start()
if not threaded: #Block until the send is finished
self.__sendThread.join()
def _sendFile(self, fileName):
'''
Does the actual work of sending a file, loops over the file breaking into
the requested base64 encoded chunk size and sends it over the wire.
'''
with open(fileName, 'rb') as file:
self.__sendAckEvent.set()
finished_sending = False
while self.process:
if self.__sendAckEvent.wait(1):
data = file.read(self.__fileReadSize)
                    if not data:  # EOF reached
finished_sending = True
break
iq = self.__xmpp.makeIqSet()
dataElem = ET.Element('{%s}data' %xep_0047.XMLNS, sid=self.sid, seq=str(self.getNextOutSeqId()))
dataElem.text = base64.b64encode(data)
iq['to'] = self.otherPartyJid
iq.setPayload(dataElem)
self.__sendAckEvent.clear()
self.__xmpp.registerHandler(Callback('Bytestream_send_iq_matcher', MatcherId(iq['id']), self._sendFileAckHandler, thread=False, once=True, instream=False))
iq.send(block=False)
self.__plugin.fileFinishedSending(self.sid, finished_sending)
self._closeStream()
self.process = False
def _sendFileAckHandler(self, xml):
'''
Callback for the id matcher for the last data packet sent to the other
party. Once we receive an ack for our last data packet the __sendAckEvent
is set so the sender can proceed with the next packet
'''
if xml.get('type') == 'result':
self.__lastMessage = time.time()
self.__sendAckEvent.set()
else: #some kind of error occurred
self.process = False
def _closeStream(self):
'''
This method is thread safe, and only callable once. Use it to terminate
the session with the other party
'''
with self.__closeStreamLock:
if not self.streamClosed:
self.streamClosed = True
sendCloseStream(self.__xmpp, self.otherPartyJid, self.sid)
self.__xmpp.event(STREAM_CLOSED_EVENT, {'sid': self.sid})
'''stanza objects'''
class Open(ElementBase):
namespace = xep_0047.XMLNS
name = 'open'
plugin_attrib = 'open'
interfaces = set(('block-size', 'sid', 'stanza'))
#sub_interfaces = interfaces
class Close(ElementBase):
namespace = xep_0047.XMLNS
name = 'close'
plugin_attrib = 'close'
interfaces = set(('sid',))
#sub_interfaces = interfaces
class Data(ElementBase):
namespace = xep_0047.XMLNS
name = 'data'
plugin_attrib = 'data'
interfaces = set(('data','sid', 'seq'))
def getData(self):
return self.xml.text
def setData(self, data):
self.xml.text = data
def delData(self):
if self.parent is not None:
self.parent().xml.remove(self.xml)
'''Exceptions'''
class InBandTransferException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class TooManySessionsException(InBandTransferException):
def __init__(self, *args, **kwargs):
InBandTransferException.__init__(self, *args, **kwargs)
class NotAcceptableException(InBandTransferException):
def __init__(self, *args, **kwargs):
InBandTransferException.__init__(self, *args, **kwargs)
'''
Override of the threading.Event class to make the implementation work like
python 2.7: Event.wait() must return the flag state (the sender loop relies
on ``if self.__sendAckEvent.wait(1):``), whereas the python 2.6 version
always returned None.
'''
def Event(*args, **kwargs):
if sys.version_info < (2,7):
return _Event(*args, **kwargs)
else:
return threading.Event(*args, **kwargs)
class _Event(object):
#Modification of Event class from python 2.6 because the 2.7 version is better
def __init__(self):
self.__cond = threading.Condition(threading.Lock())
self.__flag = False
def isSet(self):
return self.__flag
is_set = isSet
def set(self):
self.__cond.acquire()
try:
self.__flag = True
self.__cond.notify_all()
finally:
self.__cond.release()
def clear(self):
self.__cond.acquire()
try:
self.__flag = False
finally:
self.__cond.release()
def wait(self, timeout=None):
self.__cond.acquire()
try:
if not self.__flag:
self.__cond.wait(timeout)
return self.__flag
finally:
self.__cond.release()
|
{
"content_hash": "b2ab80244d6f2cd689af96786dd3b376",
"timestamp": "",
"source": "github",
"line_count": 602,
"max_line_length": 216,
"avg_line_length": 44.77242524916944,
"alnum_prop": 0.6030126516528772,
"repo_name": "EnerNOC/smallfoot-sleekxmpp",
"id": "6f7289e56b0b46c9b86e46174ba0106ab5e59abc",
"size": "26953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sleekxmpp/plugins/xep_0047.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "688554"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import cx_Oracle
import db_config
con = cx_Oracle.connect(db_config.user, db_config.pw, db_config.dsn)
cur = con.cursor()
cur.execute("select * from dept order by deptno")
res = cur.fetchall()
for row in res:
print(row)
cur.close()
con.close()
|
{
"content_hash": "6e883d33dd71b8abe7554b56d0032470",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 68,
"avg_line_length": 19.333333333333332,
"alnum_prop": 0.7103448275862069,
"repo_name": "cloudera/hue",
"id": "64d565d9264b186e14f3436689261bad1d77e09a",
"size": "712",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/cx_Oracle-6.4.1/samples/tutorial/solutions/query-2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
from __future__ import division, unicode_literals
"""
This module has been moved to pymatgen.io.gaussian. This sub module will
be removed in pymatgen 4.0.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/1/15'
import warnings
warnings.warn("pymatgen.io.gaussianio has been moved to pymatgen.io.gaussian. "
              "This stub will be removed in pymatgen 4.0.", DeprecationWarning)
from .gaussian import *
|
{
"content_hash": "23002cfabcaeb63b94f611032dce2bd9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 28.94736842105263,
"alnum_prop": 0.6909090909090909,
"repo_name": "Bismarrck/pymatgen",
"id": "b0c05ce4cfe934262a48b092c486b29429a8ac22",
"size": "682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/io/gaussianio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5938"
},
{
"name": "C",
"bytes": "1278109"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "4913914"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6094090"
},
{
"name": "Roff",
"bytes": "868"
}
],
"symlink_target": ""
}
|
import spotipy_twisted
import sys
import pprint
if len(sys.argv) > 1:
search_str = sys.argv[1]
else:
search_str = 'Radiohead'
sp = spotipy_twisted.Spotify()
result = sp.search(search_str)
pprint.pprint(result)
|
{
"content_hash": "3ae6de23a19f9a598048d5a84e61e9fe",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 30,
"avg_line_length": 18.333333333333332,
"alnum_prop": 0.7136363636363636,
"repo_name": "jimcortez/spotipy_twisted",
"id": "e29fdac4e2b4a950d9b771942b48c8878a73b929",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62327"
},
{
"name": "Shell",
"bytes": "245"
}
],
"symlink_target": ""
}
|
import sys, os, string, argparse
import pyfits
import numpy as np
from PySpectrograph import Spectrum
from PySpectrograph.Utilities.fit import interfit
from redshift import xcor_redshift, loadtext, loadiraf, loadsdss
import pylab as pl
def redshift_all(infile, z1=0.0001, z2=1.200, plot_result=True, show=False):
if infile.count('fits'):
hdu=pyfits.open(infile)
spec=loadiraf(hdu)
else:
spec=loadtext(infile)
dirpath = os.path.dirname(__file__)
best_cc = 0
for i in range(23, 33):
template_name = 'spDR2-0{}.fit'.format(string.zfill(i, 2))
thdu=pyfits.open(dirpath+'/template/'+template_name)
template=loadsdss(thdu)
#z_arr, cc_arr=xcor_redshift(spec, template, z1=0.0, z2=5.5, zstep=0.01)
z_arr, cc_arr=xcor_redshift(spec, template, z1=z1, z2=z2, zstep=0.0001)
z=z_arr[cc_arr.argmax()]
if show: print(i, z, cc_arr.max(), cc_arr.argmax())
if best_cc < cc_arr.max():
best_cc = cc_arr.max()
best_z = z
best_i = i
best_arr = cc_arr
#z_arr, cc_arr=xcor_redshift(spec, template, z1=z-0.05, z2=z+0.05, zstep=0.0001)
#z=z_arr[cc_arr.argmax()]
z = best_z
i = best_i
cc_arr = best_arr
template_name = 'spDR2-0{}.fit'.format(string.zfill(i, 2))
thdu=pyfits.open(dirpath+'/template/'+template_name)
template=loadsdss(thdu)
if plot_result:
pl.figure()
pl.plot(z_arr, cc_arr)
pl.figure()
cflux=np.convolve(spec.flux, np.ones(10), mode='same')
pl.plot(spec.wavelength, cflux)
nflux=np.interp(spec.wavelength, (1+z)*template.wavelength, template.flux)
#pl.plot((1+z)*template.wavelength, template.flux*spec.flux.mean()/template.flux.mean())
pl.plot(spec.wavelength, nflux*cflux.mean()/nflux.mean())
pl.savefig(infile.replace('txt', 'png'))
if show: pl.show()
return best_i, best_z, best_arr.max()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Match redshift to template')
parser.add_argument('spectra', help='Spectra to measure redshift')
parser.add_argument('-n', dest='noplot', default=True, action='store_false',
help='do not plot the data')
parser.add_argument('--z1', dest='z1', default=0.0001, help='default lower redshift', type=float)
parser.add_argument('--z2', dest='z2', default=1.2001, help='default lower redshift', type=float)
args = parser.parse_args()
infile = args.spectra
z1 = args.z1
z2 = args.z2
print(redshift_all(infile, z1=z1, z2=z2, plot_result=True, show=args.noplot))
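# Example invocation (the spectrum file name is hypothetical):
#   python redshift_all.py spectrum.txt --z1 0.0001 --z2 1.2001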
|
{
"content_hash": "5786b89210128d6917c374f540c06f3d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 100,
"avg_line_length": 33.74025974025974,
"alnum_prop": 0.6393379522709777,
"repo_name": "crawfordsm/zSALT",
"id": "10985af3c5dc7d0d9e3eac44ff84914c0f7833ff",
"size": "2598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zsalt/redshift_all.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "113327"
},
{
"name": "Shell",
"bytes": "188"
}
],
"symlink_target": ""
}
|