code
stringlengths
2k
1.04M
repo_path
stringlengths
5
517
parsed_code
stringlengths
0
1.04M
quality_prob
float64
0.02
0.95
learning_prob
float64
0.02
0.93
import math import csv import os import sys from collections import defaultdict from stemming.porter2 import stem from search import STOPWORDS SIM_THRESHOLD = .1 MIN_DOCUMENT_LENGTH = 3 class Document(): def tokenize(self): # lowercase strips = """\\.!?,(){}[]"'""" return [stem(c.strip(strips)) for c in self.document.lower().split() if self.stopwords.get(c.strip(strips)) is None] def __init__(self, corpus, obj, str=None, stopwords=STOPWORDS): if not str: str = unicode(obj) self.stopwords = stopwords self.corpus = corpus self.object = obj self.document = str self.tf = {} self._tf_idf = None words = self.tokenize() for word in set(words): self.tf[word] = words.count(word) / float(len(words)) def __repr__(self): return self.document def idf(self, cached=True): num_docs = len(self.corpus.docs) idf = {} for word in self.tf.keys(): num_occurences = self.corpus.words.get(word, 0) idf[word] = math.log(num_docs / (1.0 + num_occurences)) return idf def tf_idf(self, cached=True): if self._tf_idf and cached: return self._tf_idf self._tf_idf = {} idf = self.idf() for word in self.tf.keys(): self._tf_idf[word] = idf[word] * self.tf[word] return self._tf_idf class Corpus(): """Document corpus which calculates Term Frequency/Inverse Document Frequency.""" def __init__(self, similarity=SIM_THRESHOLD, stopwords=STOPWORDS): self.stopwords = stopwords self.similarity = similarity self.docs = {} self.words = defaultdict(int) self.index = defaultdict(dict) def add(self, document, key=None, str=None): """Adds a document to the corpus.""" if not key: try: key = document.id except AttributeError: key = document if not str: str = unicode(document) doc = Document(self, document, str=str, stopwords=self.stopwords) if len(doc.tf) < MIN_DOCUMENT_LENGTH: return for k in doc.tf.keys(): if k in self.words: self.words[k] += 1 self.docs[key] = doc def create_index(self): index = {} for id, doc in self.docs.iteritems(): for word, weight in doc.tf_idf().iteritems(): self.index[word][id] = weight def 
cluster(self): seen = {} scores = {} self.create_index() for key, doc in self.docs.iteritems(): if seen.get(key): continue seen[key] = 1 scores[key] = defaultdict(int) for word, o_weight in doc.tf_idf().iteritems(): if word in self.index: matches = self.index[word] for c_key, c_weight in matches.iteritems(): if c_key in seen: continue scores[key][c_key] += o_weight * c_weight scores[key] = dict(((k, v) for k, v in scores[key].iteritems() if v >= self.similarity)) seen.update(scores[key]) scores = sorted(scores.iteritems(), cmp=lambda x, y: cmp(len(x[1]), len(y[1])), reverse=True) groups = [] for key, similars in scores: if not similars: continue g = Group(self.docs[key].object) for id, similarity in similars.iteritems(): g.add_similar(self.docs[id].object, similarity) mycmp = lambda x, y: cmp(x['similarity'], y['similarity']) g.similars.sort(cmp=mycmp) groups.append(g) return groups class Group: def __init__(self, primary=None): self.primary = primary self.similars = [] def add_similar(self, obj, similarity): self.similars.append(dict(object=obj, similarity=similarity))
textcluster/cluster.py
import math import csv import os import sys from collections import defaultdict from stemming.porter2 import stem from search import STOPWORDS SIM_THRESHOLD = .1 MIN_DOCUMENT_LENGTH = 3 class Document(): def tokenize(self): # lowercase strips = """\\.!?,(){}[]"'""" return [stem(c.strip(strips)) for c in self.document.lower().split() if self.stopwords.get(c.strip(strips)) is None] def __init__(self, corpus, obj, str=None, stopwords=STOPWORDS): if not str: str = unicode(obj) self.stopwords = stopwords self.corpus = corpus self.object = obj self.document = str self.tf = {} self._tf_idf = None words = self.tokenize() for word in set(words): self.tf[word] = words.count(word) / float(len(words)) def __repr__(self): return self.document def idf(self, cached=True): num_docs = len(self.corpus.docs) idf = {} for word in self.tf.keys(): num_occurences = self.corpus.words.get(word, 0) idf[word] = math.log(num_docs / (1.0 + num_occurences)) return idf def tf_idf(self, cached=True): if self._tf_idf and cached: return self._tf_idf self._tf_idf = {} idf = self.idf() for word in self.tf.keys(): self._tf_idf[word] = idf[word] * self.tf[word] return self._tf_idf class Corpus(): """Document corpus which calculates Term Frequency/Inverse Document Frequency.""" def __init__(self, similarity=SIM_THRESHOLD, stopwords=STOPWORDS): self.stopwords = stopwords self.similarity = similarity self.docs = {} self.words = defaultdict(int) self.index = defaultdict(dict) def add(self, document, key=None, str=None): """Adds a document to the corpus.""" if not key: try: key = document.id except AttributeError: key = document if not str: str = unicode(document) doc = Document(self, document, str=str, stopwords=self.stopwords) if len(doc.tf) < MIN_DOCUMENT_LENGTH: return for k in doc.tf.keys(): if k in self.words: self.words[k] += 1 self.docs[key] = doc def create_index(self): index = {} for id, doc in self.docs.iteritems(): for word, weight in doc.tf_idf().iteritems(): self.index[word][id] = weight def 
cluster(self): seen = {} scores = {} self.create_index() for key, doc in self.docs.iteritems(): if seen.get(key): continue seen[key] = 1 scores[key] = defaultdict(int) for word, o_weight in doc.tf_idf().iteritems(): if word in self.index: matches = self.index[word] for c_key, c_weight in matches.iteritems(): if c_key in seen: continue scores[key][c_key] += o_weight * c_weight scores[key] = dict(((k, v) for k, v in scores[key].iteritems() if v >= self.similarity)) seen.update(scores[key]) scores = sorted(scores.iteritems(), cmp=lambda x, y: cmp(len(x[1]), len(y[1])), reverse=True) groups = [] for key, similars in scores: if not similars: continue g = Group(self.docs[key].object) for id, similarity in similars.iteritems(): g.add_similar(self.docs[id].object, similarity) mycmp = lambda x, y: cmp(x['similarity'], y['similarity']) g.similars.sort(cmp=mycmp) groups.append(g) return groups class Group: def __init__(self, primary=None): self.primary = primary self.similars = [] def add_similar(self, obj, similarity): self.similars.append(dict(object=obj, similarity=similarity))
0.363195
0.128689
import numpy as np from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt def make_DWF_globe(observatories, outdir, max_angle=10, step_size=1): """ Creates images (pngs) of the globe with the locations of DWF observatories. These can be combined into a fancy movie. Parameters ---------- observatories : dictionary The dictionary of observatories with coords, colours and markers. e.g {"MeerKAT" : [(21.444819, -30.712176), "red", "D"]} outdir : string The output directory for the images max_angle : integer The total angle of rotation in degrees for the final plot step_size : float or integer The angular size in degrees between each frame """ # Coordinate, Colour and Marker indexes in the dictionary cood_idx = 0 colr_idx = 1 mark_idx = 2 # The x, y offsets for the Observatory labels labx_offset = 2 laby_offset = 2 # Number of frames to make num_frames = int(max_angle / step_size) for i in range(num_frames): plt.figure(figsize=(8, 6), dpi=200) # m is the map Basemap oject m = Basemap(projection="ortho", lat_0=0, lon_0=step_size*i) # Use bluemarble background # Can also use m.shadedrelief() for a more atlas feel m.bluemarble() for label in observatories.keys(): # Get x, y corrds from dictionary and convert them to map coords x, y = m(observatories[label][cood_idx][0], observatories[label][cood_idx][1]) # Plot Observatories using markers and colours from dictionary m.scatter(x, y, marker=observatories[label][mark_idx], color=observatories[label][colr_idx]) # Create Observatory label positions labx, laby = m(observatories[label][cood_idx][0] + labx_offset, observatories[label][cood_idx][1] + laby_offset) # Plot labels plt.annotate(label, xy=(labx, laby), color='white') plt.savefig("{0}/DWF_globe_plot_{1:03}".format(outdir, i), transparent=True) plt.clf() print("COMPLETE") if __name__ == "__main__": outdir = "/Users/abatten/saraplots" observatories = {"MeerKAT" : [(21.444819, -30.712176), "red", "D"], "MWA" : [(116.670866, -26.702681), "green", "o"], 
"VIRGO" : [(10.505021, 43.631456), "magenta", "x"], "Etelman" : [(-64.956400, 18.352395), "cyan", "o"], "LIGO-LA" : [(-90.772039, 30.564180), "cyan", "x"] } make_DWF_globe(observatories, outdir, max_angle=360, step_size=10)
DWF_world_plot.py
import numpy as np from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt def make_DWF_globe(observatories, outdir, max_angle=10, step_size=1): """ Creates images (pngs) of the globe with the locations of DWF observatories. These can be combined into a fancy movie. Parameters ---------- observatories : dictionary The dictionary of observatories with coords, colours and markers. e.g {"MeerKAT" : [(21.444819, -30.712176), "red", "D"]} outdir : string The output directory for the images max_angle : integer The total angle of rotation in degrees for the final plot step_size : float or integer The angular size in degrees between each frame """ # Coordinate, Colour and Marker indexes in the dictionary cood_idx = 0 colr_idx = 1 mark_idx = 2 # The x, y offsets for the Observatory labels labx_offset = 2 laby_offset = 2 # Number of frames to make num_frames = int(max_angle / step_size) for i in range(num_frames): plt.figure(figsize=(8, 6), dpi=200) # m is the map Basemap oject m = Basemap(projection="ortho", lat_0=0, lon_0=step_size*i) # Use bluemarble background # Can also use m.shadedrelief() for a more atlas feel m.bluemarble() for label in observatories.keys(): # Get x, y corrds from dictionary and convert them to map coords x, y = m(observatories[label][cood_idx][0], observatories[label][cood_idx][1]) # Plot Observatories using markers and colours from dictionary m.scatter(x, y, marker=observatories[label][mark_idx], color=observatories[label][colr_idx]) # Create Observatory label positions labx, laby = m(observatories[label][cood_idx][0] + labx_offset, observatories[label][cood_idx][1] + laby_offset) # Plot labels plt.annotate(label, xy=(labx, laby), color='white') plt.savefig("{0}/DWF_globe_plot_{1:03}".format(outdir, i), transparent=True) plt.clf() print("COMPLETE") if __name__ == "__main__": outdir = "/Users/abatten/saraplots" observatories = {"MeerKAT" : [(21.444819, -30.712176), "red", "D"], "MWA" : [(116.670866, -26.702681), "green", "o"], 
"VIRGO" : [(10.505021, 43.631456), "magenta", "x"], "Etelman" : [(-64.956400, 18.352395), "cyan", "o"], "LIGO-LA" : [(-90.772039, 30.564180), "cyan", "x"] } make_DWF_globe(observatories, outdir, max_angle=360, step_size=10)
0.758868
0.655846
import pprint import re # noqa: F401 import six from thola_client.configuration import Configuration class CheckIdentifyRequest(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'device_data': 'DeviceData', 'expectations': 'Device', 'json_metrics': 'bool', 'model_diff_warning': 'bool', 'model_series_diff_warning': 'bool', 'os_diff_warning': 'bool', 'os_version_diff_warning': 'bool', 'print_performance_data': 'bool', 'serial_number_diff_warning': 'bool', 'timeout': 'int', 'vendor_diff_warning': 'bool' } attribute_map = { 'device_data': 'device_data', 'expectations': 'expectations', 'json_metrics': 'json_metrics', 'model_diff_warning': 'model_diff_warning', 'model_series_diff_warning': 'model_series_diff_warning', 'os_diff_warning': 'os_diff_warning', 'os_version_diff_warning': 'os_version_diff_warning', 'print_performance_data': 'print_performance_data', 'serial_number_diff_warning': 'serial_number_diff_warning', 'timeout': 'timeout', 'vendor_diff_warning': 'vendor_diff_warning' } def __init__(self, device_data=None, expectations=None, json_metrics=None, model_diff_warning=None, model_series_diff_warning=None, os_diff_warning=None, os_version_diff_warning=None, print_performance_data=None, serial_number_diff_warning=None, timeout=None, vendor_diff_warning=None, _configuration=None): # noqa: E501 """CheckIdentifyRequest - a model defined in Swagger""" # noqa: E501 if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._device_data = None self._expectations = None self._json_metrics = None self._model_diff_warning = None self._model_series_diff_warning = None self._os_diff_warning = None self._os_version_diff_warning = None 
self._print_performance_data = None self._serial_number_diff_warning = None self._timeout = None self._vendor_diff_warning = None self.discriminator = None if device_data is not None: self.device_data = device_data if expectations is not None: self.expectations = expectations if json_metrics is not None: self.json_metrics = json_metrics if model_diff_warning is not None: self.model_diff_warning = model_diff_warning if model_series_diff_warning is not None: self.model_series_diff_warning = model_series_diff_warning if os_diff_warning is not None: self.os_diff_warning = os_diff_warning if os_version_diff_warning is not None: self.os_version_diff_warning = os_version_diff_warning if print_performance_data is not None: self.print_performance_data = print_performance_data if serial_number_diff_warning is not None: self.serial_number_diff_warning = serial_number_diff_warning if timeout is not None: self.timeout = timeout if vendor_diff_warning is not None: self.vendor_diff_warning = vendor_diff_warning @property def device_data(self): """Gets the device_data of this CheckIdentifyRequest. # noqa: E501 :return: The device_data of this CheckIdentifyRequest. # noqa: E501 :rtype: DeviceData """ return self._device_data @device_data.setter def device_data(self, device_data): """Sets the device_data of this CheckIdentifyRequest. :param device_data: The device_data of this CheckIdentifyRequest. # noqa: E501 :type: DeviceData """ self._device_data = device_data @property def expectations(self): """Gets the expectations of this CheckIdentifyRequest. # noqa: E501 :return: The expectations of this CheckIdentifyRequest. # noqa: E501 :rtype: Device """ return self._expectations @expectations.setter def expectations(self, expectations): """Sets the expectations of this CheckIdentifyRequest. :param expectations: The expectations of this CheckIdentifyRequest. 
# noqa: E501 :type: Device """ self._expectations = expectations @property def json_metrics(self): """Gets the json_metrics of this CheckIdentifyRequest. # noqa: E501 :return: The json_metrics of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._json_metrics @json_metrics.setter def json_metrics(self, json_metrics): """Sets the json_metrics of this CheckIdentifyRequest. :param json_metrics: The json_metrics of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._json_metrics = json_metrics @property def model_diff_warning(self): """Gets the model_diff_warning of this CheckIdentifyRequest. # noqa: E501 :return: The model_diff_warning of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._model_diff_warning @model_diff_warning.setter def model_diff_warning(self, model_diff_warning): """Sets the model_diff_warning of this CheckIdentifyRequest. :param model_diff_warning: The model_diff_warning of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._model_diff_warning = model_diff_warning @property def model_series_diff_warning(self): """Gets the model_series_diff_warning of this CheckIdentifyRequest. # noqa: E501 :return: The model_series_diff_warning of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._model_series_diff_warning @model_series_diff_warning.setter def model_series_diff_warning(self, model_series_diff_warning): """Sets the model_series_diff_warning of this CheckIdentifyRequest. :param model_series_diff_warning: The model_series_diff_warning of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._model_series_diff_warning = model_series_diff_warning @property def os_diff_warning(self): """Gets the os_diff_warning of this CheckIdentifyRequest. # noqa: E501 :return: The os_diff_warning of this CheckIdentifyRequest. 
# noqa: E501 :rtype: bool """ return self._os_diff_warning @os_diff_warning.setter def os_diff_warning(self, os_diff_warning): """Sets the os_diff_warning of this CheckIdentifyRequest. :param os_diff_warning: The os_diff_warning of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._os_diff_warning = os_diff_warning @property def os_version_diff_warning(self): """Gets the os_version_diff_warning of this CheckIdentifyRequest. # noqa: E501 :return: The os_version_diff_warning of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._os_version_diff_warning @os_version_diff_warning.setter def os_version_diff_warning(self, os_version_diff_warning): """Sets the os_version_diff_warning of this CheckIdentifyRequest. :param os_version_diff_warning: The os_version_diff_warning of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._os_version_diff_warning = os_version_diff_warning @property def print_performance_data(self): """Gets the print_performance_data of this CheckIdentifyRequest. # noqa: E501 :return: The print_performance_data of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._print_performance_data @print_performance_data.setter def print_performance_data(self, print_performance_data): """Sets the print_performance_data of this CheckIdentifyRequest. :param print_performance_data: The print_performance_data of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._print_performance_data = print_performance_data @property def serial_number_diff_warning(self): """Gets the serial_number_diff_warning of this CheckIdentifyRequest. # noqa: E501 :return: The serial_number_diff_warning of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._serial_number_diff_warning @serial_number_diff_warning.setter def serial_number_diff_warning(self, serial_number_diff_warning): """Sets the serial_number_diff_warning of this CheckIdentifyRequest. 
:param serial_number_diff_warning: The serial_number_diff_warning of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._serial_number_diff_warning = serial_number_diff_warning @property def timeout(self): """Gets the timeout of this CheckIdentifyRequest. # noqa: E501 Timeout for the request (0 => no timeout) # noqa: E501 :return: The timeout of this CheckIdentifyRequest. # noqa: E501 :rtype: int """ return self._timeout @timeout.setter def timeout(self, timeout): """Sets the timeout of this CheckIdentifyRequest. Timeout for the request (0 => no timeout) # noqa: E501 :param timeout: The timeout of this CheckIdentifyRequest. # noqa: E501 :type: int """ self._timeout = timeout @property def vendor_diff_warning(self): """Gets the vendor_diff_warning of this CheckIdentifyRequest. # noqa: E501 :return: The vendor_diff_warning of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._vendor_diff_warning @vendor_diff_warning.setter def vendor_diff_warning(self, vendor_diff_warning): """Sets the vendor_diff_warning of this CheckIdentifyRequest. :param vendor_diff_warning: The vendor_diff_warning of this CheckIdentifyRequest. 
# noqa: E501 :type: bool """ self._vendor_diff_warning = vendor_diff_warning def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(CheckIdentifyRequest, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, CheckIdentifyRequest): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, CheckIdentifyRequest): return True return self.to_dict() != other.to_dict()
thola_client/models/check_identify_request.py
import pprint import re # noqa: F401 import six from thola_client.configuration import Configuration class CheckIdentifyRequest(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'device_data': 'DeviceData', 'expectations': 'Device', 'json_metrics': 'bool', 'model_diff_warning': 'bool', 'model_series_diff_warning': 'bool', 'os_diff_warning': 'bool', 'os_version_diff_warning': 'bool', 'print_performance_data': 'bool', 'serial_number_diff_warning': 'bool', 'timeout': 'int', 'vendor_diff_warning': 'bool' } attribute_map = { 'device_data': 'device_data', 'expectations': 'expectations', 'json_metrics': 'json_metrics', 'model_diff_warning': 'model_diff_warning', 'model_series_diff_warning': 'model_series_diff_warning', 'os_diff_warning': 'os_diff_warning', 'os_version_diff_warning': 'os_version_diff_warning', 'print_performance_data': 'print_performance_data', 'serial_number_diff_warning': 'serial_number_diff_warning', 'timeout': 'timeout', 'vendor_diff_warning': 'vendor_diff_warning' } def __init__(self, device_data=None, expectations=None, json_metrics=None, model_diff_warning=None, model_series_diff_warning=None, os_diff_warning=None, os_version_diff_warning=None, print_performance_data=None, serial_number_diff_warning=None, timeout=None, vendor_diff_warning=None, _configuration=None): # noqa: E501 """CheckIdentifyRequest - a model defined in Swagger""" # noqa: E501 if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._device_data = None self._expectations = None self._json_metrics = None self._model_diff_warning = None self._model_series_diff_warning = None self._os_diff_warning = None self._os_version_diff_warning = None 
self._print_performance_data = None self._serial_number_diff_warning = None self._timeout = None self._vendor_diff_warning = None self.discriminator = None if device_data is not None: self.device_data = device_data if expectations is not None: self.expectations = expectations if json_metrics is not None: self.json_metrics = json_metrics if model_diff_warning is not None: self.model_diff_warning = model_diff_warning if model_series_diff_warning is not None: self.model_series_diff_warning = model_series_diff_warning if os_diff_warning is not None: self.os_diff_warning = os_diff_warning if os_version_diff_warning is not None: self.os_version_diff_warning = os_version_diff_warning if print_performance_data is not None: self.print_performance_data = print_performance_data if serial_number_diff_warning is not None: self.serial_number_diff_warning = serial_number_diff_warning if timeout is not None: self.timeout = timeout if vendor_diff_warning is not None: self.vendor_diff_warning = vendor_diff_warning @property def device_data(self): """Gets the device_data of this CheckIdentifyRequest. # noqa: E501 :return: The device_data of this CheckIdentifyRequest. # noqa: E501 :rtype: DeviceData """ return self._device_data @device_data.setter def device_data(self, device_data): """Sets the device_data of this CheckIdentifyRequest. :param device_data: The device_data of this CheckIdentifyRequest. # noqa: E501 :type: DeviceData """ self._device_data = device_data @property def expectations(self): """Gets the expectations of this CheckIdentifyRequest. # noqa: E501 :return: The expectations of this CheckIdentifyRequest. # noqa: E501 :rtype: Device """ return self._expectations @expectations.setter def expectations(self, expectations): """Sets the expectations of this CheckIdentifyRequest. :param expectations: The expectations of this CheckIdentifyRequest. 
# noqa: E501 :type: Device """ self._expectations = expectations @property def json_metrics(self): """Gets the json_metrics of this CheckIdentifyRequest. # noqa: E501 :return: The json_metrics of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._json_metrics @json_metrics.setter def json_metrics(self, json_metrics): """Sets the json_metrics of this CheckIdentifyRequest. :param json_metrics: The json_metrics of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._json_metrics = json_metrics @property def model_diff_warning(self): """Gets the model_diff_warning of this CheckIdentifyRequest. # noqa: E501 :return: The model_diff_warning of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._model_diff_warning @model_diff_warning.setter def model_diff_warning(self, model_diff_warning): """Sets the model_diff_warning of this CheckIdentifyRequest. :param model_diff_warning: The model_diff_warning of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._model_diff_warning = model_diff_warning @property def model_series_diff_warning(self): """Gets the model_series_diff_warning of this CheckIdentifyRequest. # noqa: E501 :return: The model_series_diff_warning of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._model_series_diff_warning @model_series_diff_warning.setter def model_series_diff_warning(self, model_series_diff_warning): """Sets the model_series_diff_warning of this CheckIdentifyRequest. :param model_series_diff_warning: The model_series_diff_warning of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._model_series_diff_warning = model_series_diff_warning @property def os_diff_warning(self): """Gets the os_diff_warning of this CheckIdentifyRequest. # noqa: E501 :return: The os_diff_warning of this CheckIdentifyRequest. 
# noqa: E501 :rtype: bool """ return self._os_diff_warning @os_diff_warning.setter def os_diff_warning(self, os_diff_warning): """Sets the os_diff_warning of this CheckIdentifyRequest. :param os_diff_warning: The os_diff_warning of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._os_diff_warning = os_diff_warning @property def os_version_diff_warning(self): """Gets the os_version_diff_warning of this CheckIdentifyRequest. # noqa: E501 :return: The os_version_diff_warning of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._os_version_diff_warning @os_version_diff_warning.setter def os_version_diff_warning(self, os_version_diff_warning): """Sets the os_version_diff_warning of this CheckIdentifyRequest. :param os_version_diff_warning: The os_version_diff_warning of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._os_version_diff_warning = os_version_diff_warning @property def print_performance_data(self): """Gets the print_performance_data of this CheckIdentifyRequest. # noqa: E501 :return: The print_performance_data of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._print_performance_data @print_performance_data.setter def print_performance_data(self, print_performance_data): """Sets the print_performance_data of this CheckIdentifyRequest. :param print_performance_data: The print_performance_data of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._print_performance_data = print_performance_data @property def serial_number_diff_warning(self): """Gets the serial_number_diff_warning of this CheckIdentifyRequest. # noqa: E501 :return: The serial_number_diff_warning of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._serial_number_diff_warning @serial_number_diff_warning.setter def serial_number_diff_warning(self, serial_number_diff_warning): """Sets the serial_number_diff_warning of this CheckIdentifyRequest. 
:param serial_number_diff_warning: The serial_number_diff_warning of this CheckIdentifyRequest. # noqa: E501 :type: bool """ self._serial_number_diff_warning = serial_number_diff_warning @property def timeout(self): """Gets the timeout of this CheckIdentifyRequest. # noqa: E501 Timeout for the request (0 => no timeout) # noqa: E501 :return: The timeout of this CheckIdentifyRequest. # noqa: E501 :rtype: int """ return self._timeout @timeout.setter def timeout(self, timeout): """Sets the timeout of this CheckIdentifyRequest. Timeout for the request (0 => no timeout) # noqa: E501 :param timeout: The timeout of this CheckIdentifyRequest. # noqa: E501 :type: int """ self._timeout = timeout @property def vendor_diff_warning(self): """Gets the vendor_diff_warning of this CheckIdentifyRequest. # noqa: E501 :return: The vendor_diff_warning of this CheckIdentifyRequest. # noqa: E501 :rtype: bool """ return self._vendor_diff_warning @vendor_diff_warning.setter def vendor_diff_warning(self, vendor_diff_warning): """Sets the vendor_diff_warning of this CheckIdentifyRequest. :param vendor_diff_warning: The vendor_diff_warning of this CheckIdentifyRequest. 
# noqa: E501 :type: bool """ self._vendor_diff_warning = vendor_diff_warning def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(CheckIdentifyRequest, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, CheckIdentifyRequest): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, CheckIdentifyRequest): return True return self.to_dict() != other.to_dict()
0.577734
0.203193
import torch
from torch import nn, optim
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader
import sys

if __name__ == '__main__':
    # Data preprocessing / augmentation pipeline.
    transform = transforms.Compose([
        transforms.RandomResizedCrop(224),       # random crop, resized to 224x224 (VGG16 input size)
        transforms.RandomRotation(20),           # random rotation within +/-20 degrees
        transforms.RandomHorizontalFlip(p=0.5),  # 50% chance of a horizontal flip
        transforms.ToTensor()
    ])

    # Load datasets from the folder layout (one sub-folder per class).
    train_dataset = datasets.ImageFolder('image/train', transform)
    test_dataset = datasets.ImageFolder('image/test', transform)

    # Batch the data and shuffle each epoch.
    train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=8, shuffle=True)

    # print(train_dataset.classes)
    model = models.vgg16(pretrained=False)
    print(model)

    # Freeze the convolutional backbone so only the new classifier head
    # (defined below) receives gradient updates.
    for p in model.parameters():
        p.requires_grad = False

    # Replace the classifier head: 25088 matches VGG16's flattened feature
    # size; the final layer maps to the 2 custom classes (cat / dog).
    model.classifier = nn.Sequential(
        nn.Linear(25088, 100),
        nn.ReLU(),
        nn.Dropout(p=0.5),
        nn.Linear(100, 2)
    )

    # Loss function.
    entropy_loss = nn.CrossEntropyLoss()
    # Optimizer (only unfrozen parameters actually get updated).
    opt = optim.Adam(model.parameters(), lr=0.0003)

    def train():
        """Run one training epoch over train_loader."""
        model.train()
        for i, data in enumerate(train_loader):
            # Unpack inputs and their labels.
            inputs, labels = data
            # Forward pass.
            out = model(inputs)
            # Compute the loss.
            loss = entropy_loss(out, labels)
            # BUG FIX: gradients must be cleared before backward(); the
            # original never called zero_grad(), so gradients accumulated
            # across batches and corrupted every update after the first.
            opt.zero_grad()
            loss.backward()
            # Apply the parameter update.
            opt.step()

    def test():
        """Print accuracy on the test set and on the training set."""
        model.eval()
        # Evaluation needs no gradients; no_grad() avoids building the
        # autograd graph and saves memory/time.
        with torch.no_grad():
            correct = 0
            for i, data in enumerate(test_loader):
                inputs, labels = data
                out = model(inputs)
                # Index of the max logit is the predicted class.
                _, predicted = torch.max(out, 1)
                correct += (predicted == labels).sum()
            print("Test acc: {0}".format(correct.item() / len(test_dataset)))

            correct = 0
            for i, data in enumerate(train_loader):
                inputs, labels = data
                out = model(inputs)
                _, predicted = torch.max(out, 1)
                correct += (predicted == labels).sum()
            print("Train acc: {0}".format(correct.item() / len(train_dataset)))

    for epoch in range(10):
        print('周期:', epoch)
        train()
        test()

    # Persist the trained weights.
    torch.save(model.state_dict(), 'cat_dog.pth')
PytorchLearning/project/cat_dog_classify.py
import torch from torch import nn, optim from torchvision import datasets, transforms, models from torch.utils.data import DataLoader import sys if __name__ == '__main__': # 数据预处理,transforms用来处理数据,用来数据增强 transform = transforms.Compose([ transforms.RandomResizedCrop(224), # 对图像进行随机裁剪,然后重新调整大小为224*224 transforms.RandomRotation(20), # 随机选择角度,顺或者逆时针20读 transforms.RandomHorizontalFlip(p=0.5), # 50%的概率进行随机水平翻转 transforms.ToTensor() ]) # 读取数据 train_dataset = datasets.ImageFolder('image/train', transform) test_dataset = datasets.ImageFolder('image/test', transform) # 导入数据,设置批次和打乱 train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True) test_loader = DataLoader(test_dataset, batch_size=8, shuffle=True) # print(train_dataset.classes) model = models.vgg16(pretrained=False) print(model) # 加这句话,不训练卷积层,只训练模型的全连接层,不更新权值 # 因为他已经把卷积层训练得非常好了,我们可以不用去训练它 # 如果没加这句话,那准确率会更高,但是训练时间更长 for p in model.parameters(): p.requires_grad = False # 构建新的全连接层,25088和上面model的输入一致,输出是自定义的2个类 model.classifier = nn.Sequential( nn.Linear(25088, 100), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(100, 2) ) # 定义代价函数 entropy_loss = nn.CrossEntropyLoss() # 定义优化器 opt = optim.Adam(model.parameters(), lr=0.0003) def train(): model.train() for i, data in enumerate(train_loader): # 获取数据和对应标签 inputs, labels = data # 获得预测结果 out = model(inputs) # 代价函数 loss = entropy_loss(out, labels) # 梯度清零 loss.backward() # 修改权值 opt.step() def test(): model.eval() correct = 0 for i, data in enumerate(test_loader): # 获得数据和对应的标签 inputs, labels = data # 获得模型预测结果 out = model(inputs) # 获得最大值,以及最大值所在的位置 _, predicted = torch.max(out, 1) # 预测正确的数量 correct += (predicted == labels).sum() print("Test acc: {0}".format(correct.item() / len(test_dataset))) correct = 0 for i, data in enumerate(train_loader): # 获得数据和对应的标签 inputs, labels = data # 获得模型预测结果 out = model(inputs) # 获得最大值,以及最大值所在的位置 _, predicted = torch.max(out, 1) # 预测正确的数量 correct += (predicted == labels).sum() print("Train acc: {0}".format(correct.item() / len(train_dataset))) for 
epoch in range(10): print('周期:', epoch) train() test() # 保存该模型 torch.save(model.state_dict(), 'cat_dog.pth')
0.418816
0.540985
from django.db.models.fields import CharField, DateTimeField
from django.db.models.fields.related import ForeignKey
from django.test import TestCase
from test_plus.test import TestCase as TestPlusTestCase

from octopus.users.models import User

from ..models import TimeStampedModel, Thing


class TestTimeStampedModel(TestCase):
    """Checks the timestamp fields inherited by Thing from TimeStampedModel."""

    def test_model_has_fields(self):
        self.assertTrue(Thing.created)
        self.assertTrue(Thing.modified)

    def test_fields_classes(self):
        # Both timestamp columns must be DateTimeField.
        field = Thing._meta.get_field('created')
        class_expected = DateTimeField
        class_given = field.__class__
        self.assertEqual(class_expected, class_given)

        field = Thing._meta.get_field('modified')
        class_expected = DateTimeField
        class_given = field.__class__
        self.assertEqual(class_expected, class_given)

    def test_proper_values_of_boolean_fields(self):
        # 'created' is stamped once on insert; 'modified' on every save.
        field = Thing._meta.get_field('created')
        self.assertTrue(field.auto_now_add)

        field = Thing._meta.get_field('modified')
        self.assertTrue(field.auto_now)


class TestThing(TestCase):
    """Checks Thing's own fields, string form, ordering and URL helper."""

    def test_model_inheritance(self):
        inheritance_expected = TimeStampedModel
        inheritance_given = Thing.__base__
        self.assertEqual(inheritance_expected, inheritance_given)

    def test_model_has_fields(self):
        self.assertTrue(Thing.text)
        self.assertTrue(Thing.created_by)

    def test_field_class(self):
        field = Thing._meta.get_field('text')
        class_expected = CharField
        class_given = field.__class__
        self.assertEqual(class_expected, class_given)

    def test_field_max_length(self):
        max_length_expected = 500
        field = Thing._meta.get_field('text')
        max_length_given = field.max_length
        self.assertEqual(max_length_expected, max_length_given)

    def test_model_has_field_created_by(self):
        self.assertTrue(Thing._meta.get_field('created_by'))

    def test_created_by_class(self):
        field = Thing._meta.get_field('created_by')
        class_expected = ForeignKey
        class_given = field.__class__
        self.assertEqual(class_expected, class_given)

    def test_created_by_field_releted_to_user(self):
        field = Thing._meta.get_field('created_by')
        class_expected = User
        class_given = field.related_model
        self.assertEqual(class_expected, class_given)

    def test_str_method(self):
        user = User.objects.create(name='joe')
        thing = Thing.objects.create(text='test1', created_by=user)
        self.assertEqual(str(thing), 'test1')

    def test_ordering(self):
        self.assertEqual(Thing._meta.ordering, ['-created'])

    def test_get_absolut_url(self):
        user = User.objects.create(name='joe')
        thing = Thing.objects.create(text='test1', created_by=user)
        # BUG FIX: the original fetched Thing.objects.get(id=1) and compared
        # against the literal '/things/detail/1/'.  Primary-key sequences are
        # not guaranteed to start at 1 (e.g. they persist across tests on
        # some backends), so the test could fail spuriously.  Use the pk of
        # the object we just created instead.
        fetched = Thing.objects.get(pk=thing.pk)
        self.assertEqual(fetched.get_absolut_url(),
                         '/things/detail/{}/'.format(thing.pk))
octopus/things/tests/test_models.py
from django.db.models.fields import CharField, DateTimeField from django.db.models.fields.related import ForeignKey from django.test import TestCase from test_plus.test import TestCase as TestPlusTestCase from octopus.users.models import User from ..models import TimeStampedModel, Thing class TestTimeStampedModel(TestCase): def test_model_has_fields(self): self.assertTrue(Thing.created) self.assertTrue(Thing.modified) def test_fields_classes(self): field = Thing._meta.get_field('created') class_expected = DateTimeField class_given = field.__class__ self.assertEqual(class_expected, class_given) field = Thing._meta.get_field('modified') class_expected = DateTimeField class_given = field.__class__ self.assertEqual(class_expected, class_given) def test_proper_values_of_boolean_fields(self): field = Thing._meta.get_field('created') self.assertTrue(field.auto_now_add) field = Thing._meta.get_field('modified') self.assertTrue(field.auto_now) class TestThing(TestCase): def test_model_inheritance(self): inheritance_expected = TimeStampedModel inheritance_given = Thing.__base__ self.assertEqual(inheritance_expected, inheritance_given) def test_model_has_fields(self): self.assertTrue(Thing.text) self.assertTrue(Thing.created_by) def test_field_class(self): field = Thing._meta.get_field('text') class_expected = CharField class_given = field.__class__ self.assertEqual(class_expected, class_given) def test_field_max_length(self): max_length_expected = 500 field = Thing._meta.get_field('text') max_length_given = field.max_length self.assertEqual(max_length_expected, max_length_given) def test_model_has_field_created_by(self): self.assertTrue(Thing._meta.get_field('created_by')) def test_created_by_class(self): field = Thing._meta.get_field('created_by') class_expected = ForeignKey class_given = field.__class__ self.assertEqual(class_expected, class_given) def test_created_by_field_releted_to_user(self): field = Thing._meta.get_field('created_by') class_expected = User class_given 
= field.related_model self.assertEqual(class_expected, class_given) def test_str_method(self): user = User.objects.create(name='joe') thing = Thing.objects.create(text='test1', created_by=user) self.assertEqual(str(thing), 'test1') def test_ordering(self): self.assertEqual(Thing._meta.ordering, ['-created']) def test_get_absolut_url(self): user = User.objects.create(name='joe') thing = Thing.objects.create(text='test1', created_by=user) thing = Thing.objects.get(id=1) self.assertEqual(thing.get_absolut_url(), '/things/detail/1/')
0.628863
0.397938
import cv2
import numpy as np
import tensorflow as tf

from feature_extractor.feature_extractor import YouTube8MFeatureExtractor

# Numeric value of cv2.CAP_PROP_POS_MSEC, hard-coded so the lookup works
# across OpenCV versions where the constant lived in different namespaces.
CAP_PROP_POS_MSEC = 0


def _int64_list_feature(int64_list):
    """Wrap a list of ints as a tf.train.Feature holding an Int64List."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=int64_list))


def _bytes_feature(value):
    """Wrap a single bytes value as a tf.train.Feature holding a BytesList."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def _make_bytes(int_array):
    """Convert an iterable of integer code points to a byte string.

    Works on both Python 2 (where ``bytes`` is ``str``) and Python 3.
    """
    if bytes == str:  # Python2
        return ''.join(map(chr, int_array))
    else:
        return bytes(int_array)


class FeatureExtractor(object):
    """Extracts per-frame YouTube-8M RGB features from a video/GIF file."""

    def __init__(self, model_dir):
        # Delegate the actual Inception-based frame featurization to the
        # YouTube-8M reference extractor.
        self.yt8m_extractor = YouTube8MFeatureExtractor(model_dir)

    def extract_feature(self, vid_path, fps):
        """Build a serialized tf.train.SequenceExample of frame features.

        Args:
          vid_path: Path to the video file to process.
          fps: Sampling rate — frames per second to extract.

        Returns:
          np.array containing one serialized SequenceExample string.
        """
        rgb_features = []
        for frame_bgr in self.frame_iterator(vid_path, every_ms=1000.0 / fps):
            # OpenCV decodes frames as BGR; reverse the channel axis to feed
            # the extractor RGB as it expects.
            features = self.yt8m_extractor.extract_rgb_frame_features(frame_bgr[:, :, ::-1])
            rgb_features.append(tf.train.Feature(float_list=tf.train.FloatList(value=features)))

        feature_list = {
            "rgb": tf.train.FeatureList(feature=rgb_features),
        }
        context_features = {
            # NOTE(review): labels are hard-coded to [0, 1] here — presumably
            # placeholders for inference-time use; confirm against consumers.
            "labels": _int64_list_feature(sorted([0, 1])),
            "data_path": _bytes_feature(_make_bytes(map(ord, vid_path))),
        }
        example = tf.train.SequenceExample(
            context=tf.train.Features(feature=context_features),
            feature_lists=tf.train.FeatureLists(feature_list=feature_list))
        return np.array([example.SerializeToString()])

    def frame_iterator(self, filename, every_ms=1000, max_num_frames=300):
        """Uses OpenCV to iterate over all frames of filename at a given frequency.

        Args:
          filename: Path to video file (e.g. mp4)
          every_ms: The duration (in milliseconds) to skip between frames.
          max_num_frames: Maximum number of frames to process, taken from the
            beginning of the video.

        Yields:
          BGR frame (OpenCV's native channel order) with shape
          (image height, image width, channels).  Callers that need RGB must
          reverse the last axis themselves — see extract_feature above.
          (DOC FIX: the original docstring claimed RGB, but cv2 decodes BGR.)
        """
        video_capture = cv2.VideoCapture()
        if not video_capture.open(filename):
            return
        last_ts = -99999  # The timestamp of last retrieved frame.
        num_retrieved = 0

        while num_retrieved < max_num_frames:
            # Skip/decode frames until we are at least every_ms past the
            # previously yielded timestamp.
            while video_capture.get(CAP_PROP_POS_MSEC) < every_ms + last_ts:
                if not video_capture.read()[0]:
                    return  # video ended while skipping

            last_ts = video_capture.get(CAP_PROP_POS_MSEC)
            has_frames, frame = video_capture.read()
            if not has_frames:
                break
            yield frame
            num_retrieved += 1
feature_extractor/gif_feature_extractor.py
import cv2 import numpy as np import tensorflow as tf from feature_extractor.feature_extractor import YouTube8MFeatureExtractor CAP_PROP_POS_MSEC = 0 def _int64_list_feature(int64_list): return tf.train.Feature(int64_list=tf.train.Int64List(value=int64_list)) def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _make_bytes(int_array): if bytes == str: # Python2 return ''.join(map(chr, int_array)) else: return bytes(int_array) class FeatureExtractor(object): def __init__(self, model_dir): self.yt8m_extractor = YouTube8MFeatureExtractor(model_dir) def extract_feature(self, vid_path, fps): rgb_features = [] for rgb in self.frame_iterator(vid_path, every_ms=1000.0/fps): features = self.yt8m_extractor.extract_rgb_frame_features(rgb[:, :, ::-1]) rgb_features.append(tf.train.Feature(float_list=tf.train.FloatList(value=features))) feature_list = { "rgb": tf.train.FeatureList(feature=rgb_features), } context_features = { "labels": _int64_list_feature( sorted([0, 1])), "data_path": _bytes_feature(_make_bytes( map(ord, vid_path))), } example = tf.train.SequenceExample( context=tf.train.Features(feature=context_features), feature_lists=tf.train.FeatureLists(feature_list=feature_list)) return np.array([example.SerializeToString()]) def frame_iterator(self, filename, every_ms=1000, max_num_frames=300): """Uses OpenCV to iterate over all frames of filename at a given frequency. Args: filename: Path to video file (e.g. mp4) every_ms: The duration (in milliseconds) to skip between frames. max_num_frames: Maximum number of frames to process, taken from the beginning of the video. Yields: RGB frame with shape (image height, image width, channels) """ video_capture = cv2.VideoCapture() if not video_capture.open(filename): return last_ts = -99999 # The timestamp of last retrieved frame. 
num_retrieved = 0 while num_retrieved < max_num_frames: while video_capture.get(CAP_PROP_POS_MSEC) < every_ms + last_ts: if not video_capture.read()[0]: return last_ts = video_capture.get(CAP_PROP_POS_MSEC) has_frames, frame = video_capture.read() if not has_frames: break yield frame num_retrieved += 1
0.732209
0.209834
import tensorflow as tf


def get_matched_features(features_a, features_b, sinkhorn_lambda, nr_sinkhorn_iter):
    """Soft-match features between two half-batches of domains A and B.

    Splits each 2-D feature tensor (batch, n) into two half-batches along
    axis 0, computes pairwise distances between selected half-batch pairs,
    runs Sinkhorn normalization to obtain doubly-stochastic soft assignment
    matrices, and returns the assignment-weighted ("matched") features for
    the pairs (a,a), (b,b), (a,b), (b,a) plus the mean assignment entropy.
    NOTE(review): tf.split(..., 2, axis=0) assumes an even batch size —
    confirm with callers.  keep_dims is the TF1 spelling (keepdims in TF2).
    """
    n = features_a.get_shape().as_list()[1]
    fa_batch1, fa_batch2 = tf.split(features_a, 2, axis=0)
    fb_batch1, fb_batch2 = tf.split(features_b, 2, axis=0)

    # calculate all distances
    # Each entry uses the expansion ||x-y||^2/2 = ||x||^2/2 + ||y||^2/2 - x.y
    # with means over the feature axis (hence the division of the dot product
    # by n), giving a (half_batch, half_batch) distance matrix per pair.
    dist_a1_a2 = []
    dist_b2_b1 = []
    dist_a1_b1 = []
    dist_a1_b2 = []
    dist_a2_b1 = []
    dist_a2_b2 = []

    asq = 0.5 * tf.reduce_mean(tf.square(fa_batch1), axis=1, keep_dims=True)
    dist_a1_a2.append(
        asq + 0.5 * tf.reshape(tf.reduce_mean(tf.square(fa_batch2), axis=1), [1, -1])
        - tf.matmul(fa_batch1, fa_batch2, transpose_b=True) / n)
    dist_a1_b1.append(
        asq + 0.5 * tf.reshape(tf.reduce_mean(tf.square(fb_batch1), axis=1), [1, -1])
        - tf.matmul(fa_batch1, fb_batch1, transpose_b=True) / n)
    dist_a1_b2.append(
        asq + 0.5 * tf.reshape(tf.reduce_mean(tf.square(fb_batch2), axis=1), [1, -1])
        - tf.matmul(fa_batch1, fb_batch2, transpose_b=True) / n)

    # asq is rebound to the second half-batch of A for the remaining pairs.
    asq = 0.5 * tf.reduce_mean(tf.square(fa_batch2), axis=1, keep_dims=True)
    bsq = 0.5 * tf.reduce_mean(tf.square(fb_batch2), axis=1, keep_dims=True)
    dist_a2_b1.append(
        asq + 0.5 * tf.reshape(tf.reduce_mean(tf.square(fb_batch1), axis=1), [1, -1])
        - tf.matmul(fa_batch2, fb_batch1, transpose_b=True) / n)
    dist_a2_b2.append(
        asq + 0.5 * tf.reshape(tf.reduce_mean(tf.square(fb_batch2), axis=1), [1, -1])
        - tf.matmul(fa_batch2, fb_batch2, transpose_b=True) / n)
    dist_b2_b1.append(
        bsq + 0.5 * tf.reshape(tf.reduce_mean(tf.square(fb_batch1), axis=1), [1, -1])
        - tf.matmul(fb_batch2, fb_batch1, transpose_b=True) / n)

    distances = [tf.concat(dist_a1_a2, 0), tf.concat(dist_b2_b1, 0),
                 tf.concat(dist_a1_b1, 0), tf.concat(dist_a1_b2, 0),
                 tf.concat(dist_a2_b1, 0), tf.concat(dist_a2_b2, 0)]

    # use Sinkhorn algorithm to do soft assignment
    # Work in log space: alternately normalize rows (axis=1) and columns
    # (axis=0) of exp(-lambda * D) for nr_sinkhorn_iter iterations.
    assignments = []
    entropy = []
    for i in range(len(distances)):
        log_a = -sinkhorn_lambda * distances[i]
        for it in range(nr_sinkhorn_iter):
            log_a -= tf.reduce_logsumexp(log_a, axis=1, keep_dims=True)
            log_a -= tf.reduce_logsumexp(log_a, axis=0, keep_dims=True)
        assignments.append(tf.nn.softmax(log_a))
        # Cross-entropy of the assignment against its own logits = entropy
        # of the (row-softmaxed) assignment distribution.
        entropy.append(
            tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=assignments[-1], logits=log_a)))

    assignment_a1_a2, assignment_b2_b1, assignment_a1_b1, assignment_a1_b2, \
        assignment_a2_b1, assignment_a2_b2 = assignments
    entropy = sum(entropy) / len(entropy)

    # get matched features
    # Multiplying by an assignment (or its transpose) mixes the target
    # half-batch's rows according to the soft matching.
    features_a1_a2_matched = tf.matmul(assignment_a1_a2, fa_batch2)
    features_b1_b2_matched = tf.matmul(assignment_b2_b1, fb_batch2, transpose_a=True)
    features_a1_b1_matched = tf.matmul(assignment_a1_b1, fb_batch1)
    features_a1_b2_matched = tf.matmul(assignment_a1_b2, fb_batch2)
    features_a2_b1_matched = tf.matmul(assignment_a2_b1, fb_batch1)
    features_a2_b2_matched = tf.matmul(assignment_a2_b2, fb_batch2)

    features_a2_a1_matched = tf.matmul(assignment_a1_a2, fa_batch1, transpose_a=True)
    features_b2_b1_matched = tf.matmul(assignment_b2_b1, fb_batch1)
    features_b1_a1_matched = tf.matmul(assignment_a1_b1, fa_batch1, transpose_a=True)
    features_b2_a1_matched = tf.matmul(assignment_a1_b2, fa_batch1, transpose_a=True)
    features_b1_a2_matched = tf.matmul(assignment_a2_b1, fa_batch2, transpose_a=True)
    features_b2_a2_matched = tf.matmul(assignment_a2_b2, fa_batch2, transpose_a=True)

    # Reassemble full-batch tensors; cross-domain matches are averaged over
    # the two half-batch pairings (hence the * 0.5).
    features_a_a = tf.concat([features_a1_a2_matched, features_a2_a1_matched], axis=0)
    features_b_b = tf.concat([features_b1_b2_matched, features_b2_b1_matched], axis=0)
    features_a_b = tf.concat([features_a1_b1_matched, features_a2_b1_matched], axis=0) + \
        tf.concat([features_a1_b2_matched, features_a2_b2_matched], axis=0)
    features_a_b = features_a_b * 0.5
    features_b_a = tf.concat([features_b1_a1_matched, features_b2_a1_matched], axis=0) + \
        tf.concat([features_b1_a2_matched, features_b2_a2_matched], axis=0)
    features_b_a = features_b_a * 0.5

    return features_a_a, features_b_b, features_a_b, features_b_a, entropy


def get_matched_features_single_batch(features_a, features_b, sinkhorn_lambda, nr_sinkhorn_iter, batch_size):
    """ simplified, more efficient, but slightly wrong, version of the
    original (two-batch) matching code

    Inputs are per-GPU lists of (per_gpu_batch, n) feature tensors; the
    distance computation is sharded across GPUs and the results gathered.
    `batch_size` is the TOTAL batch size (sum over GPUs) — see the diagonal
    penalty below, which needs a square (batch_size, batch_size) eye.
    """
    ngpu = len(features_a)
    # batch_size = features_a[0].get_shape().as_list()[0]
    n = features_a[0].get_shape().as_list()[1]

    # gather all features
    fa_all = tf.concat(features_a, axis=0)
    fa_all_sq = 0.5 * tf.reshape(tf.reduce_mean(tf.square(fa_all), axis=1), [1, -1])
    fb_all = tf.concat(features_b, axis=0)
    fb_all_sq = 0.5 * tf.reshape(tf.reduce_mean(tf.square(fb_all), axis=1), [1, -1])

    # calculate all distances
    # Each GPU computes its shard's rows of the three distance matrices.
    dist_a_a = []
    dist_b_b = []
    dist_a_b = []
    for i in range(ngpu):
        with tf.device('/gpu:%d' % i):
            asq = 0.5 * tf.reduce_mean(tf.square(features_a[i]), axis=1, keep_dims=True)
            bsq = 0.5 * tf.reduce_mean(tf.square(features_b[i]), axis=1, keep_dims=True)
            dist_a_a.append(asq + fa_all_sq - tf.matmul(features_a[i], fa_all, transpose_b=True) / n)
            dist_b_b.append(bsq + fb_all_sq - tf.matmul(features_b[i], fb_all, transpose_b=True) / n)
            dist_a_b.append(asq + fb_all_sq - tf.matmul(features_a[i], fb_all, transpose_b=True) / n)

    # combine results + add a bit to the diagonal to prevent self-matches
    distances = [tf.concat(dist_a_a, 0) + 999. * tf.eye(batch_size),
                 tf.concat(dist_b_b, 0) + 999. * tf.eye(batch_size),
                 tf.concat(dist_a_b, 0)]

    # use Sinkhorn algorithm to do soft assignment
    # Same log-space row/column normalization as in get_matched_features,
    # round-robined across GPUs.
    assignments = []
    entropy = []
    for i in range(len(distances)):
        with tf.device('/gpu:%d' % (i % ngpu)):
            log_a = -sinkhorn_lambda * distances[i]
            for it in range(nr_sinkhorn_iter):
                log_a -= tf.reduce_logsumexp(log_a, axis=1, keep_dims=True)
                log_a -= tf.reduce_logsumexp(log_a, axis=0, keep_dims=True)
            assignments.append(tf.nn.softmax(log_a))
            entropy.append(
                tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=assignments[-1], logits=log_a)))

    assignment_a_a, assignment_b_b, assignment_a_b = assignments
    entropy = sum(entropy) / len(entropy)

    # get matched features
    # Results are split back into per-GPU shards (lists of ngpu tensors).
    features_a_a = tf.split(tf.matmul(assignment_a_a, fa_all), ngpu, 0)
    features_b_b = tf.split(tf.matmul(assignment_b_b, fb_all), ngpu, 0)
    features_a_b = tf.split(tf.matmul(assignment_a_b, fb_all), ngpu, 0)
    features_b_a = tf.split(tf.matmul(assignment_a_b, fa_all, transpose_a=True), ngpu, 0)

    return features_a_a, features_b_b, features_a_b, features_b_a, entropy


def calc_distance(features_a, features_b, matched_features):
    """Combine matched-feature inner products into a scalar distance.

    Implements (<b,b_matched> + <a,a_matched> - 2<a, a->b matched>) / 2 using
    element-wise means.  NOTE(review): features_b_a is unpacked but unused
    here — presumably intentional symmetry in the tuple; confirm.
    """
    features_a_a, features_b_b, features_a_b, features_b_a, _ = matched_features
    nd_a_a = tf.reduce_mean(features_a * features_a_a)
    nd_b_b = tf.reduce_mean(features_b * features_b_b)
    nd_a_b = tf.reduce_mean(features_a * features_a_b)

    total_dist = nd_b_b + nd_a_a - 2. * nd_a_b
    total_dist = total_dist / (2.)
    return total_dist
toy_example/matching_cpu.py
import tensorflow as tf def get_matched_features(features_a, features_b, sinkhorn_lambda, nr_sinkhorn_iter): n = features_a.get_shape().as_list()[1] fa_batch1, fa_batch2 = tf.split(features_a, 2, axis=0) fb_batch1, fb_batch2 = tf.split(features_b, 2, axis=0) # calculate all distances dist_a1_a2 = [] dist_b2_b1 = [] dist_a1_b1 = [] dist_a1_b2 = [] dist_a2_b1 = [] dist_a2_b2 = [] asq = 0.5 * tf.reduce_mean(tf.square(fa_batch1), axis=1, keep_dims=True) dist_a1_a2.append( asq + 0.5 * tf.reshape(tf.reduce_mean(tf.square(fa_batch2), axis=1), [1, -1]) - tf.matmul(fa_batch1, fa_batch2, transpose_b=True) / n) dist_a1_b1.append( asq + 0.5 * tf.reshape(tf.reduce_mean(tf.square(fb_batch1), axis=1), [1, -1]) - tf.matmul(fa_batch1, fb_batch1, transpose_b=True) / n) dist_a1_b2.append( asq + 0.5 * tf.reshape(tf.reduce_mean(tf.square(fb_batch2), axis=1), [1, -1]) - tf.matmul(fa_batch1, fb_batch2, transpose_b=True) / n) asq = 0.5 * tf.reduce_mean(tf.square(fa_batch2), axis=1, keep_dims=True) bsq = 0.5 * tf.reduce_mean(tf.square(fb_batch2), axis=1, keep_dims=True) dist_a2_b1.append( asq + 0.5 * tf.reshape(tf.reduce_mean(tf.square(fb_batch1), axis=1), [1, -1]) - tf.matmul(fa_batch2, fb_batch1, transpose_b=True) / n) dist_a2_b2.append( asq + 0.5 * tf.reshape(tf.reduce_mean(tf.square(fb_batch2), axis=1), [1, -1]) - tf.matmul(fa_batch2, fb_batch2, transpose_b=True) / n) dist_b2_b1.append( bsq + 0.5 * tf.reshape(tf.reduce_mean(tf.square(fb_batch1), axis=1), [1, -1]) - tf.matmul(fb_batch2, fb_batch1, transpose_b=True) / n) distances = [tf.concat(dist_a1_a2, 0), tf.concat(dist_b2_b1, 0), tf.concat(dist_a1_b1, 0), tf.concat(dist_a1_b2, 0), tf.concat(dist_a2_b1, 0), tf.concat(dist_a2_b2, 0)] # use Sinkhorn algorithm to do soft assignment assignments = [] entropy = [] for i in range(len(distances)): log_a = -sinkhorn_lambda * distances[i] for it in range(nr_sinkhorn_iter): log_a -= tf.reduce_logsumexp(log_a, axis=1, keep_dims=True) log_a -= tf.reduce_logsumexp(log_a, axis=0, keep_dims=True) 
assignments.append(tf.nn.softmax(log_a)) entropy.append( tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=assignments[-1], logits=log_a))) assignment_a1_a2, assignment_b2_b1, assignment_a1_b1, assignment_a1_b2, \ assignment_a2_b1, assignment_a2_b2 = assignments entropy = sum(entropy) / len(entropy) # get matched features features_a1_a2_matched = tf.matmul(assignment_a1_a2, fa_batch2) features_b1_b2_matched = tf.matmul(assignment_b2_b1, fb_batch2, transpose_a=True) features_a1_b1_matched = tf.matmul(assignment_a1_b1, fb_batch1) features_a1_b2_matched = tf.matmul(assignment_a1_b2, fb_batch2) features_a2_b1_matched = tf.matmul(assignment_a2_b1, fb_batch1) features_a2_b2_matched = tf.matmul(assignment_a2_b2, fb_batch2) features_a2_a1_matched = tf.matmul(assignment_a1_a2, fa_batch1, transpose_a=True) features_b2_b1_matched = tf.matmul(assignment_b2_b1, fb_batch1) features_b1_a1_matched = tf.matmul(assignment_a1_b1, fa_batch1, transpose_a=True) features_b2_a1_matched = tf.matmul(assignment_a1_b2, fa_batch1, transpose_a=True) features_b1_a2_matched = tf.matmul(assignment_a2_b1, fa_batch2, transpose_a=True) features_b2_a2_matched = tf.matmul(assignment_a2_b2, fa_batch2, transpose_a=True) features_a_a = tf.concat([features_a1_a2_matched, features_a2_a1_matched], axis=0) features_b_b = tf.concat([features_b1_b2_matched, features_b2_b1_matched], axis=0) features_a_b = tf.concat([features_a1_b1_matched, features_a2_b1_matched], axis=0) + \ tf.concat([features_a1_b2_matched, features_a2_b2_matched], axis=0) features_a_b = features_a_b * 0.5 features_b_a = tf.concat([features_b1_a1_matched, features_b2_a1_matched], axis=0) + \ tf.concat([features_b1_a2_matched, features_b2_a2_matched], axis=0) features_b_a = features_b_a * 0.5 return features_a_a, features_b_b, features_a_b, features_b_a, entropy def get_matched_features_single_batch(features_a, features_b, sinkhorn_lambda, nr_sinkhorn_iter, batch_size): """ simplified, more efficient, but slightly wrong, version of 
the original (two-batch) matching code """ ngpu = len(features_a) # batch_size = features_a[0].get_shape().as_list()[0] n = features_a[0].get_shape().as_list()[1] # gather all features fa_all = tf.concat(features_a, axis=0) fa_all_sq = 0.5 * tf.reshape(tf.reduce_mean(tf.square(fa_all), axis=1), [1, -1]) fb_all = tf.concat(features_b, axis=0) fb_all_sq = 0.5 * tf.reshape(tf.reduce_mean(tf.square(fb_all), axis=1), [1, -1]) # calculate all distances dist_a_a = [] dist_b_b = [] dist_a_b = [] for i in range(ngpu): with tf.device('/gpu:%d' % i): asq = 0.5 * tf.reduce_mean(tf.square(features_a[i]), axis=1, keep_dims=True) bsq = 0.5 * tf.reduce_mean(tf.square(features_b[i]), axis=1, keep_dims=True) dist_a_a.append(asq + fa_all_sq - tf.matmul(features_a[i], fa_all, transpose_b=True) / n) dist_b_b.append(bsq + fb_all_sq - tf.matmul(features_b[i], fb_all, transpose_b=True) / n) dist_a_b.append(asq + fb_all_sq - tf.matmul(features_a[i], fb_all, transpose_b=True) / n) # combine results + add a bit to the diagonal to prevent self-matches distances = [tf.concat(dist_a_a, 0) + 999. * tf.eye(batch_size), tf.concat(dist_b_b, 0) + 999. 
* tf.eye(batch_size), tf.concat(dist_a_b, 0)] # use Sinkhorn algorithm to do soft assignment assignments = [] entropy = [] for i in range(len(distances)): with tf.device('/gpu:%d' % (i % ngpu)): log_a = -sinkhorn_lambda * distances[i] for it in range(nr_sinkhorn_iter): log_a -= tf.reduce_logsumexp(log_a, axis=1, keep_dims=True) log_a -= tf.reduce_logsumexp(log_a, axis=0, keep_dims=True) assignments.append(tf.nn.softmax(log_a)) entropy.append( tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=assignments[-1], logits=log_a))) assignment_a_a, assignment_b_b, assignment_a_b = assignments entropy = sum(entropy) / len(entropy) # get matched features features_a_a = tf.split(tf.matmul(assignment_a_a, fa_all), ngpu, 0) features_b_b = tf.split(tf.matmul(assignment_b_b, fb_all), ngpu, 0) features_a_b = tf.split(tf.matmul(assignment_a_b, fb_all), ngpu, 0) features_b_a = tf.split(tf.matmul(assignment_a_b, fa_all, transpose_a=True), ngpu, 0) return features_a_a, features_b_b, features_a_b, features_b_a, entropy def calc_distance(features_a, features_b, matched_features): features_a_a, features_b_b, features_a_b, features_b_a, _ = matched_features nd_a_a = tf.reduce_mean(features_a * features_a_a) nd_b_b = tf.reduce_mean(features_b * features_b_b) nd_a_b = tf.reduce_mean(features_a * features_a_b) total_dist = nd_b_b + nd_a_a - 2. * nd_a_b total_dist = total_dist / (2.) return total_dist
0.557123
0.452536
import param


class HoloViewsFramePlotMethods(HoloViewsSeriesPlotMethods):
    """DataFrame-level plotting accessor mirroring the pandas ``.plot`` API,
    delegating rendering to a HoloViewsFrameConverter.

    NOTE(review): HoloViewsSeriesPlotMethods and HoloViewsFrameConverter are
    not imported in this module as shown — presumably defined/imported
    elsewhere in the package; confirm.
    """

    # Optional column name used for grouping (param.String with a None
    # default — NOTE(review): None on a String param relies on
    # allow_None being implied by the default; confirm param version).
    by = param.String(default=None)

    def __call__(self, x=None, y=None, kind='line', ax=None, subplots=False,
                 sharex=None, sharey=False, layout=None, figsize=None,
                 use_index=True, title=None, grid=False, legend=True,
                 style=None, logx=False, logy=False, loglog=False,
                 xticks=None, yticks=None, xlim=None, ylim=None, rot=None,
                 fontsize=None, colormap=None, table=False, yerr=None,
                 xerr=None, secondary_y=False, sort_columns=False, **kwds):
        """Dispatch a plot of the wrapped data via HoloViewsFrameConverter.

        Accepts the full pandas ``DataFrame.plot`` signature.
        NOTE(review): subplots, sharex, sharey, layout and sort_columns are
        accepted for API compatibility but are NOT forwarded to the
        converter below — confirm whether this is intentional.
        """
        converter = HoloViewsFrameConverter(
            self._data, ax=ax, figsize=figsize, use_index=use_index,
            title=title, grid=grid, legend=legend, style=style, logx=logx,
            logy=logy, loglog=loglog, xticks=xticks, yticks=yticks,
            xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize,
            colormap=colormap, table=table, yerr=yerr, xerr=xerr,
            secondary_y=secondary_y, **kwds)
        return converter(kind, x, y)

    def line(self, x=None, y=None, **kwds):
        """Line plot

        Parameters
        ----------
        x, y : label or position, optional
            Coordinates for each point.
        **kwds : optional
            Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.

        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='line', x=x, y=y, **kwds)

    def bar(self, x=None, y=None, **kwds):
        """Vertical bar plot

        Parameters
        ----------
        x, y : label or position, optional
            Coordinates for each point.
        **kwds : optional
            Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.

        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='bar', x=x, y=y, **kwds)

    def barh(self, x=None, y=None, **kwds):
        """Horizontal bar plot

        Parameters
        ----------
        x, y : label or position, optional
            Coordinates for each point.
        **kwds : optional
            Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.

        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='barh', x=x, y=y, **kwds)

    def box(self, by=None, **kwds):
        """Boxplot

        Parameters
        ----------
        by : string or sequence
            Column in the DataFrame to group by.
        **kwds : optional
            Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.

        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='box', by=by, **kwds)

    def hist(self, by=None, bins=10, **kwds):
        """Histogram

        Parameters
        ----------
        by : string or sequence
            Column in the DataFrame to group by.
        bins: integer, default 10
            Number of histogram bins to be used
        **kwds : optional
            Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.

        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='hist', by=by, bins=bins, **kwds)

    def kde(self, **kwds):
        """Kernel Density Estimate plot

        Parameters
        ----------
        **kwds : optional
            Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.

        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='kde', **kwds)

    # pandas exposes both names for the same plot kind.
    density = kde

    def area(self, x=None, y=None, stacked=True, **kwds):
        """Area plot

        Parameters
        ----------
        x, y : label or position, optional
            Coordinates for each point.
        **kwds : optional
            Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.

        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='area', x=x, y=y, stacked=stacked, **kwds)

    def scatter(self, x, y, s=None, c=None, **kwds):
        """Scatter plot

        Parameters
        ----------
        x, y : label or position, optional
            Coordinates for each point.
        s : scalar or array_like, optional
            Size of each point.
        c : label or position, optional
            Color of each point.
        **kwds : optional
            Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.

        Returns
        -------
        axes : matplotlib.AxesSubplot or np.array of them
        """
        return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds)
pdhv/frame.py
import param class HoloViewsFramePlotMethods(HoloViewsSeriesPlotMethods): by = param.String(default=None) def __call__(self, x=None, y=None, kind='line', ax=None, subplots=False, sharex=None, sharey=False, layout=None, figsize=None, use_index=True, title=None, grid=False, legend=True, style=None, logx=False, logy=False, loglog=False, xticks=None, yticks=None, xlim=None, ylim=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, secondary_y=False, sort_columns=False, **kwds): converter = HoloViewsFrameConverter(self._data, ax=ax, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, style=style, logx=logx, logy=logy, loglog=loglog, xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize, colormap=colormap, table=table, yerr=yerr, xerr=xerr, secondary_y=secondary_y, **kwds) return converter(kind, x, y) def line(self, x=None, y=None, **kwds): """Line plot Parameters ---------- x, y : label or position, optional Coordinates for each point. **kwds : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns ------- axes : matplotlib.AxesSubplot or np.array of them """ return self(kind='line', x=x, y=y, **kwds) def bar(self, x=None, y=None, **kwds): """Vertical bar plot Parameters ---------- x, y : label or position, optional Coordinates for each point. **kwds : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns ------- axes : matplotlib.AxesSubplot or np.array of them """ return self(kind='bar', x=x, y=y, **kwds) def barh(self, x=None, y=None, **kwds): """Horizontal bar plot Parameters ---------- x, y : label or position, optional Coordinates for each point. **kwds : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. 
Returns ------- axes : matplotlib.AxesSubplot or np.array of them """ return self(kind='barh', x=x, y=y, **kwds) def box(self, by=None, **kwds): """Boxplot Parameters ---------- by : string or sequence Column in the DataFrame to group by. **kwds : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns ------- axes : matplotlib.AxesSubplot or np.array of them """ return self(kind='box', by=by, **kwds) def hist(self, by=None, bins=10, **kwds): """Histogram Parameters ---------- by : string or sequence Column in the DataFrame to group by. bins: integer, default 10 Number of histogram bins to be used **kwds : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns ------- axes : matplotlib.AxesSubplot or np.array of them """ return self(kind='hist', by=by, bins=bins, **kwds) def kde(self, **kwds): """Kernel Density Estimate plot Parameters ---------- **kwds : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns ------- axes : matplotlib.AxesSubplot or np.array of them """ return self(kind='kde', **kwds) density = kde def area(self, x=None, y=None, stacked=True, **kwds): """Area plot Parameters ---------- x, y : label or position, optional Coordinates for each point. **kwds : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns ------- axes : matplotlib.AxesSubplot or np.array of them """ return self(kind='area', x=x, y=y, stacked=stacked, **kwds) def scatter(self, x, y, s=None, c=None, **kwds): """Scatter plot Parameters ---------- x, y : label or position, optional Coordinates for each point. s : scalar or array_like, optional Size of each point. c : label or position, optional Color of each point. **kwds : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns ------- axes : matplotlib.AxesSubplot or np.array of them """ return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds)
0.907045
0.424352
from django.core.urlresolvers import resolve from django.http import HttpRequest from django.http import QueryDict from django.test import TestCase from django.test import Client from django.contrib.auth.models import User from django.contrib.auth import authenticate, login, logout from django.contrib.auth.decorators import login_required from django.conf.urls.static import static, settings import json from registrar.models import Course from registrar.models import Teacher from registrar.models import Student from registrar.models import Quiz from registrar.models import QuizSubmission from registrar.models import TrueFalseQuestion from registrar.models import TrueFalseSubmission from student.views import quiz TEST_USER_EMAIL = "<EMAIL>" TEST_USER_USERNAME = "Ledo" TEST_USER_PASSWORD = "<PASSWORD>" TEST_USER_EMAIL2 = "<EMAIL>" TEST_USER_USERNAME2 = "whalesquid" TEST_USER_PASSWORD2 = "<PASSWORD>" class QuizTestCase(TestCase): def tearDown(self): courses = Course.objects.all() for course in courses: course.delete() User.objects.all().delete() def setUp(self): # Create our Trudy student User.objects.create_user( email=TEST_USER_EMAIL2, username=TEST_USER_USERNAME2, password=<PASSWORD> ) user = User.objects.get(email=TEST_USER_EMAIL2) teacher = Teacher.objects.create(user=user) Student.objects.create(user=user).save() # Create our Student. User.objects.create_user( email=TEST_USER_EMAIL, username=TEST_USER_USERNAME, password=<PASSWORD> ) user = User.objects.get(email=TEST_USER_EMAIL) teacher = Teacher.objects.create(user=user) Student.objects.create(user=user).save() # Create a test course. 
Course.objects.create( id=1, title="Comics Book Course", sub_title="The definitive course on comics!", category="", teacher=teacher, ).save() course = Course.objects.get(id=1) if course is None: self.assertTrue(False) # Create our quiz Quiz.objects.create( quiz_id=1, quiz_num=1, title="Hideauze", description="Anime related assignment.", worth=25, course=course, ) quiz = Quiz.objects.get(quiz_id=1) # Create questions TrueFalseQuestion.objects.create( question_id=1, quiz=quiz, title="Hideauze", description="Where the Hideauze human?", true_choice="Yes, former humans", false_choice="No, aliens", answer=True, ) def get_logged_in_client(self): client = Client() client.login( username=TEST_USER_USERNAME, password=<PASSWORD> ) return client def test_url_resolves_to_quizzes_page_view(self): found = resolve('/course/1/quizzes') self.assertEqual(found.func, quiz.quizzes_page) def test_quizzes_page_with_no_submissions(self): client = self.get_logged_in_client() response = client.post('/course/1/quizzes') self.assertEqual(response.status_code, 200) self.assertIn(b'Comics Book Course',response.content) self.assertIn(b'view_quiz(1);',response.content) def test_url_resolves_to_quizzes_table_view(self): found = resolve('/course/1/quizzes_table') self.assertEqual(found.func, quiz.quizzes_table) def test_quizzes_table_returns_with_no_submissions(self): client = self.get_logged_in_client() response = client.post('/course/1/quizzes_table') self.assertEqual(response.status_code, 200) self.assertIn(b'view_quiz(1);',response.content) def test_url_resolves_to_delete_quiz(self): found = resolve('/course/1/quiz_delete') self.assertEqual(found.func, quiz.delete_quiz) def test_delete_quiz_with_no_submissions(self): kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'} client = self.get_logged_in_client() response = client.post('/course/1/quiz_delete',{ 'quiz_id': 1, }, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = 
json.loads(json_string) self.assertEqual(array['status'], 'failed') self.assertEqual(array['message'], 'record does not exist') def test_delete_quiz_with_submissions_and_correct_user(self): kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'} client = self.get_logged_in_client() response = client.post('/course/1/quiz/1/submit_quiz',{}, **kwargs) self.assertEqual(response.status_code, 200) response = client.post('/course/1/quiz_delete',{ 'quiz_id': 1, }, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['status'], 'success') self.assertEqual(array['message'], 'deleted') def test_delete_quiz_with_submissions_and_incorrect_user(self): kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'} client = self.get_logged_in_client() response = client.post('/course/1/quiz/1/submit_quiz',{}, **kwargs) self.assertEqual(response.status_code, 200) client.logout() client.login( username=TEST_USER_USERNAME2, password=<PASSWORD> ) response = client.post('/course/1/quiz_delete',{ 'quiz_id': 1, }, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['status'], 'failed') self.assertEqual(array['message'], 'record does not exist') def test_url_resolves_to_quiz_page_view(self): found = resolve('/course/1/quiz/1') self.assertEqual(found.func, quiz.quiz_page) def test_quiz_page(self): client = self.get_logged_in_client() response = client.post('/course/1/quiz/1') self.assertEqual(response.status_code, 200) self.assertIn(b'Quiz #1',response.content) def test_submit_tf_assignment_answer_with_submission(self): kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'} client = self.get_logged_in_client() response = client.post('/course/1/quiz/1/submit_tf_quiz_answer',{ 'question_id': 1, 'answer': 'true', }, **kwargs) self.assertEqual(response.status_code, 200) json_string = 
response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['message'], 'submitted') self.assertEqual(array['status'], 'success') def test_submit_quiz_without_answering_questions(self): kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'} client = self.get_logged_in_client() response = client.post('/course/1/quiz/1/submit_quiz',{}, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['message'], 'submitted') self.assertEqual(array['status'], 'success') def test_submit_quiz_with_answering_questions(self): kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'} client = self.get_logged_in_client() client.post('/course/1/quiz/1/submit_tf_quiz_answer',{ 'question_id': 1, 'answer': 'true', }, **kwargs) response = client.post('/course/1/quiz/1/submit_quiz',{}, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['message'], 'submitted') self.assertEqual(array['status'], 'success')
academicstoday_project/student/tests/test_quiz.py
from django.core.urlresolvers import resolve from django.http import HttpRequest from django.http import QueryDict from django.test import TestCase from django.test import Client from django.contrib.auth.models import User from django.contrib.auth import authenticate, login, logout from django.contrib.auth.decorators import login_required from django.conf.urls.static import static, settings import json from registrar.models import Course from registrar.models import Teacher from registrar.models import Student from registrar.models import Quiz from registrar.models import QuizSubmission from registrar.models import TrueFalseQuestion from registrar.models import TrueFalseSubmission from student.views import quiz TEST_USER_EMAIL = "<EMAIL>" TEST_USER_USERNAME = "Ledo" TEST_USER_PASSWORD = "<PASSWORD>" TEST_USER_EMAIL2 = "<EMAIL>" TEST_USER_USERNAME2 = "whalesquid" TEST_USER_PASSWORD2 = "<PASSWORD>" class QuizTestCase(TestCase): def tearDown(self): courses = Course.objects.all() for course in courses: course.delete() User.objects.all().delete() def setUp(self): # Create our Trudy student User.objects.create_user( email=TEST_USER_EMAIL2, username=TEST_USER_USERNAME2, password=<PASSWORD> ) user = User.objects.get(email=TEST_USER_EMAIL2) teacher = Teacher.objects.create(user=user) Student.objects.create(user=user).save() # Create our Student. User.objects.create_user( email=TEST_USER_EMAIL, username=TEST_USER_USERNAME, password=<PASSWORD> ) user = User.objects.get(email=TEST_USER_EMAIL) teacher = Teacher.objects.create(user=user) Student.objects.create(user=user).save() # Create a test course. 
Course.objects.create( id=1, title="Comics Book Course", sub_title="The definitive course on comics!", category="", teacher=teacher, ).save() course = Course.objects.get(id=1) if course is None: self.assertTrue(False) # Create our quiz Quiz.objects.create( quiz_id=1, quiz_num=1, title="Hideauze", description="Anime related assignment.", worth=25, course=course, ) quiz = Quiz.objects.get(quiz_id=1) # Create questions TrueFalseQuestion.objects.create( question_id=1, quiz=quiz, title="Hideauze", description="Where the Hideauze human?", true_choice="Yes, former humans", false_choice="No, aliens", answer=True, ) def get_logged_in_client(self): client = Client() client.login( username=TEST_USER_USERNAME, password=<PASSWORD> ) return client def test_url_resolves_to_quizzes_page_view(self): found = resolve('/course/1/quizzes') self.assertEqual(found.func, quiz.quizzes_page) def test_quizzes_page_with_no_submissions(self): client = self.get_logged_in_client() response = client.post('/course/1/quizzes') self.assertEqual(response.status_code, 200) self.assertIn(b'Comics Book Course',response.content) self.assertIn(b'view_quiz(1);',response.content) def test_url_resolves_to_quizzes_table_view(self): found = resolve('/course/1/quizzes_table') self.assertEqual(found.func, quiz.quizzes_table) def test_quizzes_table_returns_with_no_submissions(self): client = self.get_logged_in_client() response = client.post('/course/1/quizzes_table') self.assertEqual(response.status_code, 200) self.assertIn(b'view_quiz(1);',response.content) def test_url_resolves_to_delete_quiz(self): found = resolve('/course/1/quiz_delete') self.assertEqual(found.func, quiz.delete_quiz) def test_delete_quiz_with_no_submissions(self): kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'} client = self.get_logged_in_client() response = client.post('/course/1/quiz_delete',{ 'quiz_id': 1, }, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = 
json.loads(json_string) self.assertEqual(array['status'], 'failed') self.assertEqual(array['message'], 'record does not exist') def test_delete_quiz_with_submissions_and_correct_user(self): kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'} client = self.get_logged_in_client() response = client.post('/course/1/quiz/1/submit_quiz',{}, **kwargs) self.assertEqual(response.status_code, 200) response = client.post('/course/1/quiz_delete',{ 'quiz_id': 1, }, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['status'], 'success') self.assertEqual(array['message'], 'deleted') def test_delete_quiz_with_submissions_and_incorrect_user(self): kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'} client = self.get_logged_in_client() response = client.post('/course/1/quiz/1/submit_quiz',{}, **kwargs) self.assertEqual(response.status_code, 200) client.logout() client.login( username=TEST_USER_USERNAME2, password=<PASSWORD> ) response = client.post('/course/1/quiz_delete',{ 'quiz_id': 1, }, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['status'], 'failed') self.assertEqual(array['message'], 'record does not exist') def test_url_resolves_to_quiz_page_view(self): found = resolve('/course/1/quiz/1') self.assertEqual(found.func, quiz.quiz_page) def test_quiz_page(self): client = self.get_logged_in_client() response = client.post('/course/1/quiz/1') self.assertEqual(response.status_code, 200) self.assertIn(b'Quiz #1',response.content) def test_submit_tf_assignment_answer_with_submission(self): kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'} client = self.get_logged_in_client() response = client.post('/course/1/quiz/1/submit_tf_quiz_answer',{ 'question_id': 1, 'answer': 'true', }, **kwargs) self.assertEqual(response.status_code, 200) json_string = 
response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['message'], 'submitted') self.assertEqual(array['status'], 'success') def test_submit_quiz_without_answering_questions(self): kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'} client = self.get_logged_in_client() response = client.post('/course/1/quiz/1/submit_quiz',{}, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['message'], 'submitted') self.assertEqual(array['status'], 'success') def test_submit_quiz_with_answering_questions(self): kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'} client = self.get_logged_in_client() client.post('/course/1/quiz/1/submit_tf_quiz_answer',{ 'question_id': 1, 'answer': 'true', }, **kwargs) response = client.post('/course/1/quiz/1/submit_quiz',{}, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['message'], 'submitted') self.assertEqual(array['status'], 'success')
0.405213
0.19477
import getpass import os import requests import shutil import sys import tempfile from besspin_testing import * from besspin_pexpect import * from besspin_sudo import call_with_sudo NIXOS_SUBSTITUTER = 'https://cache.nixos.org/' NIXOS_PUBLIC_KEY = '<KEY> BESSPIN_SUBSTITUTER = 'https://artifactory.galois.com/besspin_generic-nix/' BESSPIN_PUBLIC_KEY = '<KEY> def get_netrc_username(): from_env = os.environ.get('BESSPIN_TEST_NETRC_USERNAME') if from_env is not None: return from_env return input('Artifactory username: ') def get_netrc_password(): from_env = os.environ.get('BESSPIN_TEST_NETRC_PASSWORD') if from_env is not None: return from_env return getpass.getpass('Artifactory API key: ') def sudo_command(cmd, reason=None): '''Pass environment variables through to sudo when running the command''' p = expect_program(('sudo', '-E') + cmd) add_sudo_password_handler(p, reason=reason) p.expect(pexpect.EOF) p.check_wait() def install_nix(): if shutil.which('nix-shell'): return print('Installing nix-shell') # Download the script to a temporary file, then run it f = tempfile.NamedTemporaryFile() r = requests.get('https://nixos.org/nix/install') f.write(r.content) f.flush() def answer_prompt(msg, resp): p.expect_exact(msg) p.expect_exact('[y/n] ') p.sendline(resp) def handle_error(p): p.expect_exact('\r\n') p.expect(pexpect.EOF) status = p.wait() report_error(status) raise OSError('nix installation failed') def report_error(status): lprint(' >>> Nix setup returned an error (code %d)' % status) print('An error occurred during Nix installation (exit code %d)' % status) print('See %s for details' % LOG_FILE_NAME) p = expect_program(('/bin/sh', f.name, '--daemon')) add_sudo_password_handler(p, 'to install Nix system-wide') p.add_handler('oh no!', handle_error) answer_prompt('see a more detailed list', 'n') answer_prompt('Can we use sudo?', 'y') answer_prompt('Ready to continue?', 'y') p.expect_exact('Alright! 
We\'re done!') p.expect(pexpect.EOF) status = p.wait() if status != 0: report_error(status) f.close() def edit_nix_conf(path): if os.path.exists(path): with open(path) as f: lines = f.readlines() else: lines = [] new_lines = [] need_substituter = True need_public_key = True for l in lines: k, sep, v = l.partition('=') if sep != '=': new_lines.append(l) continue ks = k.strip() if not v.startswith(' '): v = ' ' + v if ks == 'substituters': if BESSPIN_SUBSTITUTER not in v: l = k + sep + ' ' + BESSPIN_SUBSTITUTER + v # Either the substituter was already present, or we just added it. need_substituter = False if ks == 'trusted-public-keys': if BESSPIN_PUBLIC_KEY not in v: l = k + sep + ' ' + BESSPIN_PUBLIC_KEY + v need_public_key = False new_lines.append(l) if need_substituter: new_lines.append('substituters = %s %s\n' % (BESSPIN_SUBSTITUTER, NIXOS_SUBSTITUTER)) if need_public_key: new_lines.append('trusted-public-keys = %s %s\n' % (BESSPIN_PUBLIC_KEY, NIXOS_PUBLIC_KEY)) with open(path, 'w') as f: f.write(''.join(new_lines)) def edit_netrc(path): if os.path.exists(path): with open(path) as f: lines = f.readlines() else: lines = [] for l in lines: words = l.strip().split() if words == ['machine', 'artifactory.galois.com']: # Entry is already present return username = get_netrc_username() password = <PASSWORD>() if len(lines) > 0: lines.append('\n') lines.extend([ 'machine artifactory.galois.com\n', 'login %s\n' % username, 'password %<PASSWORD>' % password, ]) prev_umask = os.umask(0o077) try: with open(path, 'w') as f: f.write(''.join(lines)) finally: os.umask(prev_umask) def configure_nix(): print('Setting up Nix configuration in /etc/nix...') if not os.path.isdir('/etc/nix'): sudo_command(('mkdir', '-p', '/etc/nix'), reason='to create /etc/nix directory') print('- Edit /etc/nix/nix.conf') call_with_sudo(edit_nix_conf, '/etc/nix/nix.conf') print('- Edit /etc/nix/netrc') call_with_sudo(edit_netrc, '/etc/nix/netrc') sudo_command(('systemctl', 'restart', 
'nix-daemon.service'), reason='to restart the Nix daemon') if __name__ == '__main__': init_log_file_for_script(__file__) install_nix() configure_nix()
scripts/testing/install_nix.py
import getpass import os import requests import shutil import sys import tempfile from besspin_testing import * from besspin_pexpect import * from besspin_sudo import call_with_sudo NIXOS_SUBSTITUTER = 'https://cache.nixos.org/' NIXOS_PUBLIC_KEY = '<KEY> BESSPIN_SUBSTITUTER = 'https://artifactory.galois.com/besspin_generic-nix/' BESSPIN_PUBLIC_KEY = '<KEY> def get_netrc_username(): from_env = os.environ.get('BESSPIN_TEST_NETRC_USERNAME') if from_env is not None: return from_env return input('Artifactory username: ') def get_netrc_password(): from_env = os.environ.get('BESSPIN_TEST_NETRC_PASSWORD') if from_env is not None: return from_env return getpass.getpass('Artifactory API key: ') def sudo_command(cmd, reason=None): '''Pass environment variables through to sudo when running the command''' p = expect_program(('sudo', '-E') + cmd) add_sudo_password_handler(p, reason=reason) p.expect(pexpect.EOF) p.check_wait() def install_nix(): if shutil.which('nix-shell'): return print('Installing nix-shell') # Download the script to a temporary file, then run it f = tempfile.NamedTemporaryFile() r = requests.get('https://nixos.org/nix/install') f.write(r.content) f.flush() def answer_prompt(msg, resp): p.expect_exact(msg) p.expect_exact('[y/n] ') p.sendline(resp) def handle_error(p): p.expect_exact('\r\n') p.expect(pexpect.EOF) status = p.wait() report_error(status) raise OSError('nix installation failed') def report_error(status): lprint(' >>> Nix setup returned an error (code %d)' % status) print('An error occurred during Nix installation (exit code %d)' % status) print('See %s for details' % LOG_FILE_NAME) p = expect_program(('/bin/sh', f.name, '--daemon')) add_sudo_password_handler(p, 'to install Nix system-wide') p.add_handler('oh no!', handle_error) answer_prompt('see a more detailed list', 'n') answer_prompt('Can we use sudo?', 'y') answer_prompt('Ready to continue?', 'y') p.expect_exact('Alright! 
We\'re done!') p.expect(pexpect.EOF) status = p.wait() if status != 0: report_error(status) f.close() def edit_nix_conf(path): if os.path.exists(path): with open(path) as f: lines = f.readlines() else: lines = [] new_lines = [] need_substituter = True need_public_key = True for l in lines: k, sep, v = l.partition('=') if sep != '=': new_lines.append(l) continue ks = k.strip() if not v.startswith(' '): v = ' ' + v if ks == 'substituters': if BESSPIN_SUBSTITUTER not in v: l = k + sep + ' ' + BESSPIN_SUBSTITUTER + v # Either the substituter was already present, or we just added it. need_substituter = False if ks == 'trusted-public-keys': if BESSPIN_PUBLIC_KEY not in v: l = k + sep + ' ' + BESSPIN_PUBLIC_KEY + v need_public_key = False new_lines.append(l) if need_substituter: new_lines.append('substituters = %s %s\n' % (BESSPIN_SUBSTITUTER, NIXOS_SUBSTITUTER)) if need_public_key: new_lines.append('trusted-public-keys = %s %s\n' % (BESSPIN_PUBLIC_KEY, NIXOS_PUBLIC_KEY)) with open(path, 'w') as f: f.write(''.join(new_lines)) def edit_netrc(path): if os.path.exists(path): with open(path) as f: lines = f.readlines() else: lines = [] for l in lines: words = l.strip().split() if words == ['machine', 'artifactory.galois.com']: # Entry is already present return username = get_netrc_username() password = <PASSWORD>() if len(lines) > 0: lines.append('\n') lines.extend([ 'machine artifactory.galois.com\n', 'login %s\n' % username, 'password %<PASSWORD>' % password, ]) prev_umask = os.umask(0o077) try: with open(path, 'w') as f: f.write(''.join(lines)) finally: os.umask(prev_umask) def configure_nix(): print('Setting up Nix configuration in /etc/nix...') if not os.path.isdir('/etc/nix'): sudo_command(('mkdir', '-p', '/etc/nix'), reason='to create /etc/nix directory') print('- Edit /etc/nix/nix.conf') call_with_sudo(edit_nix_conf, '/etc/nix/nix.conf') print('- Edit /etc/nix/netrc') call_with_sudo(edit_netrc, '/etc/nix/netrc') sudo_command(('systemctl', 'restart', 
'nix-daemon.service'), reason='to restart the Nix daemon') if __name__ == '__main__': init_log_file_for_script(__file__) install_nix() configure_nix()
0.183813
0.101189
import uuid from django.db import models from django.utils.text import slugify from django.utils.translation import ugettext_lazy as _ class PsvReview(models.Model): id = models.UUIDField(_("Review Id"), primary_key=True, default=uuid.uuid4, editable=False) author = models.ForeignKey("users.Review", verbose_name=_("user_reviews"), on_delete=models.CASCADE) psv = models.ForeignKey("psv.Psv", verbose_name=_("reviewed P.S.V"), on_delete=models.CASCADE) body = models.CharField(_("PSV Review Body"), blank=True, max_length=255) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) class Meta: verbose_name = _("Review") verbose_name_plural = _("Reviews") def __str__(self): return slugify(f'{self.author.get_full_name_slug} {self.psv.plate_registration_no} review') def get_absolute_url(self): return reverse("review:detail", kwargs={"pk": self.pk}) def get_image_filename(instance, filename): psv_id = instance.review.psv author_id = instance.review.author return f"user/{author_id}/psv_images/{psv_id}" class ReviewImage(models.Model): id = models.UUIDField(_("Review Image Id"), primary_key=True, default=uuid.uuid4, editable=False) review = models.ForeignKey("reviews.psvReview", verbose_name=_("Review whose image is attached"), id=instance.post.id on_delete=models.CASCADE) psv_image = models.ImageField(_("Review Image"), upload_to=get_image_filename, height_field=None, width_field=None, max_length=100) class Meta: verbose_name = _("ReviewImage") verbose_name_plural = _("ReviewImages") def __str__(self): return self.id def get_absolute_url(self): return reverse("review_image:detail", kwargs={"pk": self.pk})
lipo/reviews/models.py
import uuid from django.db import models from django.utils.text import slugify from django.utils.translation import ugettext_lazy as _ class PsvReview(models.Model): id = models.UUIDField(_("Review Id"), primary_key=True, default=uuid.uuid4, editable=False) author = models.ForeignKey("users.Review", verbose_name=_("user_reviews"), on_delete=models.CASCADE) psv = models.ForeignKey("psv.Psv", verbose_name=_("reviewed P.S.V"), on_delete=models.CASCADE) body = models.CharField(_("PSV Review Body"), blank=True, max_length=255) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) class Meta: verbose_name = _("Review") verbose_name_plural = _("Reviews") def __str__(self): return slugify(f'{self.author.get_full_name_slug} {self.psv.plate_registration_no} review') def get_absolute_url(self): return reverse("review:detail", kwargs={"pk": self.pk}) def get_image_filename(instance, filename): psv_id = instance.review.psv author_id = instance.review.author return f"user/{author_id}/psv_images/{psv_id}" class ReviewImage(models.Model): id = models.UUIDField(_("Review Image Id"), primary_key=True, default=uuid.uuid4, editable=False) review = models.ForeignKey("reviews.psvReview", verbose_name=_("Review whose image is attached"), id=instance.post.id on_delete=models.CASCADE) psv_image = models.ImageField(_("Review Image"), upload_to=get_image_filename, height_field=None, width_field=None, max_length=100) class Meta: verbose_name = _("ReviewImage") verbose_name_plural = _("ReviewImages") def __str__(self): return self.id def get_absolute_url(self): return reverse("review_image:detail", kwargs={"pk": self.pk})
0.432303
0.085518
from __future__ import annotations import dataclasses import enum from typing import Generator, Optional from ._utils import filter_none class PressureLevel(enum.Enum): """Memory pressure level.""" MODERATE = "moderate" CRITICAL = "critical" @dataclasses.dataclass class SamplingProfileNode: """Heap profile sample. Attributes ---------- size: float Size of the sampled allocation. total: float Total bytes attributed to this sample. stack: list[str] Execution stack at the point of allocation. """ size: float total: float stack: list[str] @classmethod def from_json(cls, json: dict) -> SamplingProfileNode: return cls(json["size"], json["total"], json["stack"]) def to_json(self) -> dict: return {"size": self.size, "total": self.total, "stack": self.stack} @dataclasses.dataclass class SamplingProfile: """Array of heap profile samples. Attributes ---------- samples: list[SamplingProfileNode] modules: list[Module] """ samples: list[SamplingProfileNode] modules: list[Module] @classmethod def from_json(cls, json: dict) -> SamplingProfile: return cls( [SamplingProfileNode.from_json(s) for s in json["samples"]], [Module.from_json(m) for m in json["modules"]], ) def to_json(self) -> dict: return { "samples": [s.to_json() for s in self.samples], "modules": [m.to_json() for m in self.modules], } @dataclasses.dataclass class Module: """Executable module information Attributes ---------- name: str Name of the module. uuid: str UUID of the module. baseAddress: str Base address where the module is loaded into memory. Encoded as a decimal or hexadecimal (0x prefixed) string. size: float Size of the module in bytes. 
""" name: str uuid: str baseAddress: str size: float @classmethod def from_json(cls, json: dict) -> Module: return cls(json["name"], json["uuid"], json["baseAddress"], json["size"]) def to_json(self) -> dict: return { "name": self.name, "uuid": self.uuid, "baseAddress": self.baseAddress, "size": self.size, } def get_dom_counters() -> Generator[dict, dict, dict]: """ Returns ------- documents: int nodes: int jsEventListeners: int """ response = yield {"method": "Memory.getDOMCounters", "params": {}} return { "documents": response["documents"], "nodes": response["nodes"], "jsEventListeners": response["jsEventListeners"], } def prepare_for_leak_detection() -> dict: """""" return {"method": "Memory.prepareForLeakDetection", "params": {}} def forcibly_purge_java_script_memory() -> dict: """Simulate OomIntervention by purging V8 memory.""" return {"method": "Memory.forciblyPurgeJavaScriptMemory", "params": {}} def set_pressure_notifications_suppressed(suppressed: bool) -> dict: """Enable/disable suppressing memory pressure notifications in all processes. Parameters ---------- suppressed: bool If true, memory pressure notifications will be suppressed. """ return { "method": "Memory.setPressureNotificationsSuppressed", "params": {"suppressed": suppressed}, } def simulate_pressure_notification(level: PressureLevel) -> dict: """Simulate a memory pressure notification in all processes. Parameters ---------- level: PressureLevel Memory pressure level of the notification. """ return { "method": "Memory.simulatePressureNotification", "params": {"level": level.value}, } def start_sampling( samplingInterval: Optional[int] = None, suppressRandomness: Optional[bool] = None ) -> dict: """Start collecting native memory profile. Parameters ---------- samplingInterval: Optional[int] Average number of bytes between samples. suppressRandomness: Optional[bool] Do not randomize intervals between samples. 
""" return { "method": "Memory.startSampling", "params": filter_none( { "samplingInterval": samplingInterval, "suppressRandomness": suppressRandomness, } ), } def stop_sampling() -> dict: """Stop collecting native memory profile.""" return {"method": "Memory.stopSampling", "params": {}} def get_all_time_sampling_profile() -> Generator[dict, dict, SamplingProfile]: """Retrieve native memory allocations profile collected since renderer process startup. Returns ------- profile: SamplingProfile """ response = yield {"method": "Memory.getAllTimeSamplingProfile", "params": {}} return SamplingProfile.from_json(response["profile"]) def get_browser_sampling_profile() -> Generator[dict, dict, SamplingProfile]: """Retrieve native memory allocations profile collected since browser process startup. Returns ------- profile: SamplingProfile """ response = yield {"method": "Memory.getBrowserSamplingProfile", "params": {}} return SamplingProfile.from_json(response["profile"]) def get_sampling_profile() -> Generator[dict, dict, SamplingProfile]: """Retrieve native memory allocations profile collected since last `startSampling` call. Returns ------- profile: SamplingProfile """ response = yield {"method": "Memory.getSamplingProfile", "params": {}} return SamplingProfile.from_json(response["profile"])
cdpy/cdp/memory.py
from __future__ import annotations import dataclasses import enum from typing import Generator, Optional from ._utils import filter_none class PressureLevel(enum.Enum): """Memory pressure level.""" MODERATE = "moderate" CRITICAL = "critical" @dataclasses.dataclass class SamplingProfileNode: """Heap profile sample. Attributes ---------- size: float Size of the sampled allocation. total: float Total bytes attributed to this sample. stack: list[str] Execution stack at the point of allocation. """ size: float total: float stack: list[str] @classmethod def from_json(cls, json: dict) -> SamplingProfileNode: return cls(json["size"], json["total"], json["stack"]) def to_json(self) -> dict: return {"size": self.size, "total": self.total, "stack": self.stack} @dataclasses.dataclass class SamplingProfile: """Array of heap profile samples. Attributes ---------- samples: list[SamplingProfileNode] modules: list[Module] """ samples: list[SamplingProfileNode] modules: list[Module] @classmethod def from_json(cls, json: dict) -> SamplingProfile: return cls( [SamplingProfileNode.from_json(s) for s in json["samples"]], [Module.from_json(m) for m in json["modules"]], ) def to_json(self) -> dict: return { "samples": [s.to_json() for s in self.samples], "modules": [m.to_json() for m in self.modules], } @dataclasses.dataclass class Module: """Executable module information Attributes ---------- name: str Name of the module. uuid: str UUID of the module. baseAddress: str Base address where the module is loaded into memory. Encoded as a decimal or hexadecimal (0x prefixed) string. size: float Size of the module in bytes. 
""" name: str uuid: str baseAddress: str size: float @classmethod def from_json(cls, json: dict) -> Module: return cls(json["name"], json["uuid"], json["baseAddress"], json["size"]) def to_json(self) -> dict: return { "name": self.name, "uuid": self.uuid, "baseAddress": self.baseAddress, "size": self.size, } def get_dom_counters() -> Generator[dict, dict, dict]: """ Returns ------- documents: int nodes: int jsEventListeners: int """ response = yield {"method": "Memory.getDOMCounters", "params": {}} return { "documents": response["documents"], "nodes": response["nodes"], "jsEventListeners": response["jsEventListeners"], } def prepare_for_leak_detection() -> dict: """""" return {"method": "Memory.prepareForLeakDetection", "params": {}} def forcibly_purge_java_script_memory() -> dict: """Simulate OomIntervention by purging V8 memory.""" return {"method": "Memory.forciblyPurgeJavaScriptMemory", "params": {}} def set_pressure_notifications_suppressed(suppressed: bool) -> dict: """Enable/disable suppressing memory pressure notifications in all processes. Parameters ---------- suppressed: bool If true, memory pressure notifications will be suppressed. """ return { "method": "Memory.setPressureNotificationsSuppressed", "params": {"suppressed": suppressed}, } def simulate_pressure_notification(level: PressureLevel) -> dict: """Simulate a memory pressure notification in all processes. Parameters ---------- level: PressureLevel Memory pressure level of the notification. """ return { "method": "Memory.simulatePressureNotification", "params": {"level": level.value}, } def start_sampling( samplingInterval: Optional[int] = None, suppressRandomness: Optional[bool] = None ) -> dict: """Start collecting native memory profile. Parameters ---------- samplingInterval: Optional[int] Average number of bytes between samples. suppressRandomness: Optional[bool] Do not randomize intervals between samples. 
""" return { "method": "Memory.startSampling", "params": filter_none( { "samplingInterval": samplingInterval, "suppressRandomness": suppressRandomness, } ), } def stop_sampling() -> dict: """Stop collecting native memory profile.""" return {"method": "Memory.stopSampling", "params": {}} def get_all_time_sampling_profile() -> Generator[dict, dict, SamplingProfile]: """Retrieve native memory allocations profile collected since renderer process startup. Returns ------- profile: SamplingProfile """ response = yield {"method": "Memory.getAllTimeSamplingProfile", "params": {}} return SamplingProfile.from_json(response["profile"]) def get_browser_sampling_profile() -> Generator[dict, dict, SamplingProfile]: """Retrieve native memory allocations profile collected since browser process startup. Returns ------- profile: SamplingProfile """ response = yield {"method": "Memory.getBrowserSamplingProfile", "params": {}} return SamplingProfile.from_json(response["profile"]) def get_sampling_profile() -> Generator[dict, dict, SamplingProfile]: """Retrieve native memory allocations profile collected since last `startSampling` call. Returns ------- profile: SamplingProfile """ response = yield {"method": "Memory.getSamplingProfile", "params": {}} return SamplingProfile.from_json(response["profile"])
0.965763
0.465205
from SLIX.toolbox import * class TestToolbox: def test_all_peaks(self): # Create an absolute simple peak array arr = numpy.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]) real_peaks = numpy.argwhere(arr == 1).flatten() toolbox_peaks = all_peaks(arr, cut_edges=False) assert numpy.all(toolbox_peaks == real_peaks) # cut_edges should remove the peak position 1 toolbox_peaks = all_peaks(arr) assert numpy.all(toolbox_peaks == real_peaks[1:]) def test_peak_positions(self): # Create an absolute simple peak array arr = numpy.array([0, 1, 0, 0.07, 0, 1, 0, 0.07, 0, 1, 0]) # Test if high and low prominence separation is working as intended high_peaks = numpy.argwhere(arr == 1).flatten() low_peaks = numpy.argwhere(arr == 0.07).flatten() toolbox_peaks = all_peaks(arr, cut_edges=False) toolbox_high_peaks = accurate_peak_positions(toolbox_peaks, arr, centroid_calculation=False) toolbox_low_peaks = accurate_peak_positions(toolbox_peaks, arr, low_prominence=0, high_prominence=TARGET_PROMINENCE, centroid_calculation=False) assert numpy.all(high_peaks == toolbox_high_peaks) assert numpy.all(low_peaks == toolbox_low_peaks) def test_peakdistance(self): test_arr = numpy.array([0, 0, 1, 0, 0, 0, 0, 1, 0] + [0] * 15) expected_distance = 75 toolbox_peaks = all_peaks(test_arr, cut_edges=False) toolbox_distance = peakdistance(toolbox_peaks, 24) assert toolbox_distance == expected_distance def test_prominence(self): # Create an absolute simple peak array test_arr = numpy.array([0, 1, 0, 0.1, 0, 1, 0, 0.1, 0, 1, 0]) comparison = normalize(test_arr, kind_of_normalization=1) toolbox_peaks = all_peaks(test_arr, cut_edges=False) toolbox_prominence = prominence(toolbox_peaks, test_arr,) assert numpy.isclose(toolbox_prominence, numpy.mean(comparison[comparison > 0])) def test_peakwidth(self): test_arr = numpy.array([0, 0.5, 1, 0.5, 0] + [0] * 19) expected_width = 30 toolbox_peaks = all_peaks(test_arr, cut_edges=False) toolbox_width = peakwidth(toolbox_peaks, test_arr, 24) assert toolbox_width == 
expected_width def test_crossing_direction(self): # Test for one direction with 180°+-35° distance two_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) expected_direction = numpy.array([135, BACKGROUND_COLOR, BACKGROUND_COLOR]) peaks = all_peaks(two_peak_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, two_peak_arr, centroid_calculation=False) toolbox_direction = crossing_direction(high_peaks, len(two_peak_arr)) assert numpy.all(expected_direction == toolbox_direction) # Test for two directions with 180°+-35° distance four_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0]) expected_direction = numpy.array([135, 60, BACKGROUND_COLOR]) peaks = all_peaks(four_peak_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, four_peak_arr, centroid_calculation=False) toolbox_direction = crossing_direction(high_peaks, len(two_peak_arr)) assert numpy.all(expected_direction == toolbox_direction) # Test for three directions with 180°+-35° distance six_peak_arr = numpy.array([0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0]) expected_direction = numpy.array([135, 105, 60]) peaks = all_peaks(six_peak_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, six_peak_arr, centroid_calculation=False) toolbox_direction = crossing_direction(high_peaks, len(two_peak_arr)) assert numpy.all(expected_direction == toolbox_direction) # Test for angle outside of 180°+-35° distance error_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]) expected_direction = numpy.array([82.5, BACKGROUND_COLOR, BACKGROUND_COLOR]) peaks = all_peaks(error_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, error_arr, centroid_calculation=False) toolbox_direction = crossing_direction(high_peaks, len(error_arr)) assert numpy.all(expected_direction == toolbox_direction) error_arr = numpy.array([0, 0, 0, 1, 0, 
1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]) expected_direction = numpy.array([BACKGROUND_COLOR, BACKGROUND_COLOR, 60]) peaks = all_peaks(error_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, error_arr, centroid_calculation=False) toolbox_direction = crossing_direction(high_peaks, len(error_arr)) assert numpy.all(expected_direction == toolbox_direction) def test_non_crossing_direction(self): # Test for one peak one_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) expected_direction = 45 peaks = all_peaks(one_peak_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, one_peak_arr, centroid_calculation=False) toolbox_direction = non_crossing_direction(high_peaks, len(one_peak_arr)) assert expected_direction == toolbox_direction # Test for two peaks two_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) expected_direction = 135 peaks = all_peaks(two_peak_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, two_peak_arr, centroid_calculation=False) toolbox_direction = non_crossing_direction(high_peaks, len(two_peak_arr)) assert expected_direction == toolbox_direction # Test for four peaks four_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0]) expected_direction = BACKGROUND_COLOR peaks = all_peaks(four_peak_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, four_peak_arr, centroid_calculation=False) toolbox_direction = non_crossing_direction(high_peaks, len(two_peak_arr)) assert expected_direction == toolbox_direction def test_centroid_correction(self): # simple test case: one distinct peak test_array = numpy.array([0] * 9 + [1] + [0] * 14) test_high_peaks = numpy.array([9]) expected_centroid = numpy.array([9]) toolbox_centroid = centroid_correction(test_array, test_high_peaks) assert expected_centroid == toolbox_centroid # simple test case: one distinct peak 
test_array = numpy.array([0] * 8 + [0.5, 1, 0.5] + [0] * 13) test_high_peaks = numpy.array([9]) expected_centroid = numpy.array([9]) toolbox_centroid = centroid_correction(test_array, test_high_peaks) assert expected_centroid == toolbox_centroid # simple test case: centroid is between two measurements test_array = numpy.array([0] * 8 + [1, 1] + [0] * 14) test_high_peaks = numpy.array([8]) expected_centroid = numpy.array([8.5]) toolbox_centroid = centroid_correction(test_array, test_high_peaks) assert expected_centroid == toolbox_centroid # more complicated test case: wide peak plateau test_array = numpy.array([0] * 8 + [1, 1, 1] + [0] * 13) test_high_peaks = numpy.array([8]) expected_centroid = numpy.array([9]) toolbox_centroid = centroid_correction(test_array, test_high_peaks) assert numpy.isclose(expected_centroid, toolbox_centroid, 1e-2, 1e-2) def test_create_background_mask(self): test_array = (numpy.random.random(10000) * 256).astype('int') expected_results = test_array < 10 toolbox_mask = create_background_mask(test_array[..., numpy.newaxis]) assert numpy.all(expected_results == toolbox_mask) def test_normalize(self): test_array = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=numpy.float) # Normalization kind == 0 -> Scale to 0..1 expected_array = test_array / test_array.max() normalized_array = normalize(test_array) assert numpy.all(numpy.isclose(expected_array, normalized_array)) # Normalization kind == 1 -> Divide by mean value of array expected_array = test_array / test_array.mean() normalized_array = normalize(test_array, kind_of_normalization=1) assert numpy.all(numpy.isclose(expected_array, normalized_array)) def test_reshape_array_to_image(self): test_array = numpy.array([i for i in range(0, 100)]) # Test reshape for no roi size toolbox_image = reshape_array_to_image(test_array, 10, 1) assert toolbox_image.shape == (10, 10) # test if content of array is as expected for i in range(0, 10): for j in range(0, 10): assert toolbox_image[i, j] == 
test_array[i * 10 + j] # Test reshape for roi size of two toolbox_image = reshape_array_to_image(test_array, 10, 2) assert toolbox_image.shape == (5, 20) for i in range(0, 5): for j in range(0, 20): assert toolbox_image[i, j] == test_array[i * 20 + j]
tests/test_toolbox.py
from SLIX.toolbox import * class TestToolbox: def test_all_peaks(self): # Create an absolute simple peak array arr = numpy.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]) real_peaks = numpy.argwhere(arr == 1).flatten() toolbox_peaks = all_peaks(arr, cut_edges=False) assert numpy.all(toolbox_peaks == real_peaks) # cut_edges should remove the peak position 1 toolbox_peaks = all_peaks(arr) assert numpy.all(toolbox_peaks == real_peaks[1:]) def test_peak_positions(self): # Create an absolute simple peak array arr = numpy.array([0, 1, 0, 0.07, 0, 1, 0, 0.07, 0, 1, 0]) # Test if high and low prominence separation is working as intended high_peaks = numpy.argwhere(arr == 1).flatten() low_peaks = numpy.argwhere(arr == 0.07).flatten() toolbox_peaks = all_peaks(arr, cut_edges=False) toolbox_high_peaks = accurate_peak_positions(toolbox_peaks, arr, centroid_calculation=False) toolbox_low_peaks = accurate_peak_positions(toolbox_peaks, arr, low_prominence=0, high_prominence=TARGET_PROMINENCE, centroid_calculation=False) assert numpy.all(high_peaks == toolbox_high_peaks) assert numpy.all(low_peaks == toolbox_low_peaks) def test_peakdistance(self): test_arr = numpy.array([0, 0, 1, 0, 0, 0, 0, 1, 0] + [0] * 15) expected_distance = 75 toolbox_peaks = all_peaks(test_arr, cut_edges=False) toolbox_distance = peakdistance(toolbox_peaks, 24) assert toolbox_distance == expected_distance def test_prominence(self): # Create an absolute simple peak array test_arr = numpy.array([0, 1, 0, 0.1, 0, 1, 0, 0.1, 0, 1, 0]) comparison = normalize(test_arr, kind_of_normalization=1) toolbox_peaks = all_peaks(test_arr, cut_edges=False) toolbox_prominence = prominence(toolbox_peaks, test_arr,) assert numpy.isclose(toolbox_prominence, numpy.mean(comparison[comparison > 0])) def test_peakwidth(self): test_arr = numpy.array([0, 0.5, 1, 0.5, 0] + [0] * 19) expected_width = 30 toolbox_peaks = all_peaks(test_arr, cut_edges=False) toolbox_width = peakwidth(toolbox_peaks, test_arr, 24) assert toolbox_width == 
expected_width def test_crossing_direction(self): # Test for one direction with 180°+-35° distance two_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) expected_direction = numpy.array([135, BACKGROUND_COLOR, BACKGROUND_COLOR]) peaks = all_peaks(two_peak_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, two_peak_arr, centroid_calculation=False) toolbox_direction = crossing_direction(high_peaks, len(two_peak_arr)) assert numpy.all(expected_direction == toolbox_direction) # Test for two directions with 180°+-35° distance four_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0]) expected_direction = numpy.array([135, 60, BACKGROUND_COLOR]) peaks = all_peaks(four_peak_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, four_peak_arr, centroid_calculation=False) toolbox_direction = crossing_direction(high_peaks, len(two_peak_arr)) assert numpy.all(expected_direction == toolbox_direction) # Test for three directions with 180°+-35° distance six_peak_arr = numpy.array([0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0]) expected_direction = numpy.array([135, 105, 60]) peaks = all_peaks(six_peak_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, six_peak_arr, centroid_calculation=False) toolbox_direction = crossing_direction(high_peaks, len(two_peak_arr)) assert numpy.all(expected_direction == toolbox_direction) # Test for angle outside of 180°+-35° distance error_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]) expected_direction = numpy.array([82.5, BACKGROUND_COLOR, BACKGROUND_COLOR]) peaks = all_peaks(error_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, error_arr, centroid_calculation=False) toolbox_direction = crossing_direction(high_peaks, len(error_arr)) assert numpy.all(expected_direction == toolbox_direction) error_arr = numpy.array([0, 0, 0, 1, 0, 
1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]) expected_direction = numpy.array([BACKGROUND_COLOR, BACKGROUND_COLOR, 60]) peaks = all_peaks(error_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, error_arr, centroid_calculation=False) toolbox_direction = crossing_direction(high_peaks, len(error_arr)) assert numpy.all(expected_direction == toolbox_direction) def test_non_crossing_direction(self): # Test for one peak one_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) expected_direction = 45 peaks = all_peaks(one_peak_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, one_peak_arr, centroid_calculation=False) toolbox_direction = non_crossing_direction(high_peaks, len(one_peak_arr)) assert expected_direction == toolbox_direction # Test for two peaks two_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) expected_direction = 135 peaks = all_peaks(two_peak_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, two_peak_arr, centroid_calculation=False) toolbox_direction = non_crossing_direction(high_peaks, len(two_peak_arr)) assert expected_direction == toolbox_direction # Test for four peaks four_peak_arr = numpy.array([0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0]) expected_direction = BACKGROUND_COLOR peaks = all_peaks(four_peak_arr, cut_edges=False) high_peaks = accurate_peak_positions(peaks, four_peak_arr, centroid_calculation=False) toolbox_direction = non_crossing_direction(high_peaks, len(two_peak_arr)) assert expected_direction == toolbox_direction def test_centroid_correction(self): # simple test case: one distinct peak test_array = numpy.array([0] * 9 + [1] + [0] * 14) test_high_peaks = numpy.array([9]) expected_centroid = numpy.array([9]) toolbox_centroid = centroid_correction(test_array, test_high_peaks) assert expected_centroid == toolbox_centroid # simple test case: one distinct peak 
test_array = numpy.array([0] * 8 + [0.5, 1, 0.5] + [0] * 13) test_high_peaks = numpy.array([9]) expected_centroid = numpy.array([9]) toolbox_centroid = centroid_correction(test_array, test_high_peaks) assert expected_centroid == toolbox_centroid # simple test case: centroid is between two measurements test_array = numpy.array([0] * 8 + [1, 1] + [0] * 14) test_high_peaks = numpy.array([8]) expected_centroid = numpy.array([8.5]) toolbox_centroid = centroid_correction(test_array, test_high_peaks) assert expected_centroid == toolbox_centroid # more complicated test case: wide peak plateau test_array = numpy.array([0] * 8 + [1, 1, 1] + [0] * 13) test_high_peaks = numpy.array([8]) expected_centroid = numpy.array([9]) toolbox_centroid = centroid_correction(test_array, test_high_peaks) assert numpy.isclose(expected_centroid, toolbox_centroid, 1e-2, 1e-2) def test_create_background_mask(self): test_array = (numpy.random.random(10000) * 256).astype('int') expected_results = test_array < 10 toolbox_mask = create_background_mask(test_array[..., numpy.newaxis]) assert numpy.all(expected_results == toolbox_mask) def test_normalize(self): test_array = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=numpy.float) # Normalization kind == 0 -> Scale to 0..1 expected_array = test_array / test_array.max() normalized_array = normalize(test_array) assert numpy.all(numpy.isclose(expected_array, normalized_array)) # Normalization kind == 1 -> Divide by mean value of array expected_array = test_array / test_array.mean() normalized_array = normalize(test_array, kind_of_normalization=1) assert numpy.all(numpy.isclose(expected_array, normalized_array)) def test_reshape_array_to_image(self): test_array = numpy.array([i for i in range(0, 100)]) # Test reshape for no roi size toolbox_image = reshape_array_to_image(test_array, 10, 1) assert toolbox_image.shape == (10, 10) # test if content of array is as expected for i in range(0, 10): for j in range(0, 10): assert toolbox_image[i, j] == 
test_array[i * 10 + j] # Test reshape for roi size of two toolbox_image = reshape_array_to_image(test_array, 10, 2) assert toolbox_image.shape == (5, 20) for i in range(0, 5): for j in range(0, 20): assert toolbox_image[i, j] == test_array[i * 20 + j]
0.695855
0.771198
import re from .game import Game class Commands: def __init__(self, data): self.game = Game() self.data = data self.size = len(data) def process(self): command = self.__command() if command: self.__run_command(command) else: self.__help() def __command(self): command = None if self.size > 1: command = self.data[1] return command def __run_command(self, command): if command in ["solve", "s"]: self.__solve_command() elif command in ["help", "h"]: self.__help() else: self.__help() def __solve_command(self): name = self.__param("name=", "([a-zA-Z]+)") if name: self.game.process(name) else: print("Error!: A name is required to run the project") self.__help() def __param(self, param, pattern): """ Returns the queried param using regex Parameters ---------- param : str variable name to search pattern : str variable pattern equivalent Return ------ value : str the param value if a match occurs None otherwise """ value = None if self.size > 2: data_str = ';'.join(self.data[2:]) value = re.search(param + pattern, data_str) if value: value = value.group(1).replace(param, '') if value.group(1) else None return value def __help(self): """ Shows help for the app Return ------ None """ print("Senku solver - A simple Senku solver") print() print("Usage:") print(" python main.py [command]") print("or if you want to take times") print(" time python main.py [command]") print() print("Available Commands:") print(" [solve | s] <name=> solve the senku game using the given algorithm") print(" [help | h] help about commands") print() print("") print("Examples:") print(" BFS Algorithm: Breadth First Search") print(" python main.py solve name=bfs") print() print(" DFS Algorithm: Depth First Search") print(" python main.py solve name=dfs") print() print(" Greedy Algorithm") print(" python main.py solve name=greedy") print() print(" A*: A Star") print(" python main.py solve name=astar")
src/commands.py
import re from .game import Game class Commands: def __init__(self, data): self.game = Game() self.data = data self.size = len(data) def process(self): command = self.__command() if command: self.__run_command(command) else: self.__help() def __command(self): command = None if self.size > 1: command = self.data[1] return command def __run_command(self, command): if command in ["solve", "s"]: self.__solve_command() elif command in ["help", "h"]: self.__help() else: self.__help() def __solve_command(self): name = self.__param("name=", "([a-zA-Z]+)") if name: self.game.process(name) else: print("Error!: A name is required to run the project") self.__help() def __param(self, param, pattern): """ Returns the queried param using regex Parameters ---------- param : str variable name to search pattern : str variable pattern equivalent Return ------ value : str the param value if a match occurs None otherwise """ value = None if self.size > 2: data_str = ';'.join(self.data[2:]) value = re.search(param + pattern, data_str) if value: value = value.group(1).replace(param, '') if value.group(1) else None return value def __help(self): """ Shows help for the app Return ------ None """ print("Senku solver - A simple Senku solver") print() print("Usage:") print(" python main.py [command]") print("or if you want to take times") print(" time python main.py [command]") print() print("Available Commands:") print(" [solve | s] <name=> solve the senku game using the given algorithm") print(" [help | h] help about commands") print() print("") print("Examples:") print(" BFS Algorithm: Breadth First Search") print(" python main.py solve name=bfs") print() print(" DFS Algorithm: Depth First Search") print(" python main.py solve name=dfs") print() print(" Greedy Algorithm") print(" python main.py solve name=greedy") print() print(" A*: A Star") print(" python main.py solve name=astar")
0.489259
0.154408
from typing import Any, Iterable, Tuple, TypeVar, Generic, Callable, Union import numpy as np class SetGet: """ Provides a useful set and get interface """ def set(self, **kwargs): """ Set attributes:: obj.set(attr1='value', attr2=35, attr3=True) :param kwargs: ``name=value`` pairs of attributes to set :return: self """ for key in kwargs: setattr(self, key, kwargs[key]) return self def get(self, *args) -> Iterable[Any]: """ Get a number of attributes:: obj.get('attr1', 'attr2', 'attr3') :param args: a number of attribute names to return :return: An iterable containing the attributes """ return (getattr(self, key) for key in args) def construct_component_inds(axis: int, n_dims: int, n_components: int, min_ndims: int=2) -> Tuple[Tuple]: """ Construct a tuple of tuples, where each element extracts the correct component values. :param axis: :param n_dims: :param n_components: :param min_ndims: :return: """ # noinspection PyTypeChecker return tuple( tuple(slice(i, i+1) if dim == axis else (slice(None) if dim < n_dims else np.newaxis) for dim in range(max(n_dims, min_ndims))) for i in range(n_components)) def get_matching_axis(shape: Tuple, length: int) -> int: """ Infers the correct axis to use :param shape: the shape of the input :param length: the desired length of the axis :return: the correct axis. If multiple axes match, then it returns the last one. """ # noinspection PyUnresolvedReferences axis_candidates = np.nonzero(np.array(shape) == length)[0] if len(axis_candidates) == 0: raise ValueError('Unable to infer axis tue to shape mismatch: ' '{} =/= {}.'.format(shape, length)) return axis_candidates[-1] A = TypeVar('A') T = TypeVar('T') GetMethod = Callable[..., A] SetMethod = Callable[[T, A], None] DelMethod = Callable[[T], None] # noinspection PyPep8Naming class lazy_property(Generic[A]): """ A property-like descriptor that does not bind to a function, but to the name of the function. 
That way subclasses can easily override the getter/setter/ delete """ def __init__(self, getter_method: GetMethod=None, setter_method: SetMethod=None, deleter_method: DelMethod=None, doc=None): self.getter_method = getter_method self.setter_method = setter_method self.deleter_method = deleter_method if doc is None: if getter_method.__doc__ is not None: doc = getter_method.__doc__ elif setter_method is not None: doc = setter_method.__doc__ self.__doc__ = doc def __get__(self, obj, cls=None) -> Union['lazy_property', A]: if obj is None: return self if self.getter_method is None: raise AttributeError('unreadable attribute') try: fget = getattr(obj, self.getter_method.__name__) except AttributeError: raise TypeError(f'{type(obj).__name__} object does not have ' f'a {self.getter_method.__name__} method') return fget() def __set__(self, obj, value: A): if self.setter_method is None: raise AttributeError("can't set attribute") try: fset = getattr(obj, self.setter_method.__name__) except AttributeError: raise TypeError(f'{type(obj).__name__} object does not have ' f'a {self.setter_method.__name__} method.') fset(value) def __delete__(self, obj): if self.deleter_method is None: raise AttributeError("can't delete attribute") try: fdel = getattr(obj, self.deleter_method.__name__) except AttributeError: raise TypeError(f'{type(obj).__name__} object does not have ' f'a {self.deleter_method.__name__} method') fdel()
chromathicity/util.py
from typing import Any, Iterable, Tuple, TypeVar, Generic, Callable, Union import numpy as np class SetGet: """ Provides a useful set and get interface """ def set(self, **kwargs): """ Set attributes:: obj.set(attr1='value', attr2=35, attr3=True) :param kwargs: ``name=value`` pairs of attributes to set :return: self """ for key in kwargs: setattr(self, key, kwargs[key]) return self def get(self, *args) -> Iterable[Any]: """ Get a number of attributes:: obj.get('attr1', 'attr2', 'attr3') :param args: a number of attribute names to return :return: An iterable containing the attributes """ return (getattr(self, key) for key in args) def construct_component_inds(axis: int, n_dims: int, n_components: int, min_ndims: int=2) -> Tuple[Tuple]: """ Construct a tuple of tuples, where each element extracts the correct component values. :param axis: :param n_dims: :param n_components: :param min_ndims: :return: """ # noinspection PyTypeChecker return tuple( tuple(slice(i, i+1) if dim == axis else (slice(None) if dim < n_dims else np.newaxis) for dim in range(max(n_dims, min_ndims))) for i in range(n_components)) def get_matching_axis(shape: Tuple, length: int) -> int: """ Infers the correct axis to use :param shape: the shape of the input :param length: the desired length of the axis :return: the correct axis. If multiple axes match, then it returns the last one. """ # noinspection PyUnresolvedReferences axis_candidates = np.nonzero(np.array(shape) == length)[0] if len(axis_candidates) == 0: raise ValueError('Unable to infer axis tue to shape mismatch: ' '{} =/= {}.'.format(shape, length)) return axis_candidates[-1] A = TypeVar('A') T = TypeVar('T') GetMethod = Callable[..., A] SetMethod = Callable[[T, A], None] DelMethod = Callable[[T], None] # noinspection PyPep8Naming class lazy_property(Generic[A]): """ A property-like descriptor that does not bind to a function, but to the name of the function. 
That way subclasses can easily override the getter/setter/ delete """ def __init__(self, getter_method: GetMethod=None, setter_method: SetMethod=None, deleter_method: DelMethod=None, doc=None): self.getter_method = getter_method self.setter_method = setter_method self.deleter_method = deleter_method if doc is None: if getter_method.__doc__ is not None: doc = getter_method.__doc__ elif setter_method is not None: doc = setter_method.__doc__ self.__doc__ = doc def __get__(self, obj, cls=None) -> Union['lazy_property', A]: if obj is None: return self if self.getter_method is None: raise AttributeError('unreadable attribute') try: fget = getattr(obj, self.getter_method.__name__) except AttributeError: raise TypeError(f'{type(obj).__name__} object does not have ' f'a {self.getter_method.__name__} method') return fget() def __set__(self, obj, value: A): if self.setter_method is None: raise AttributeError("can't set attribute") try: fset = getattr(obj, self.setter_method.__name__) except AttributeError: raise TypeError(f'{type(obj).__name__} object does not have ' f'a {self.setter_method.__name__} method.') fset(value) def __delete__(self, obj): if self.deleter_method is None: raise AttributeError("can't delete attribute") try: fdel = getattr(obj, self.deleter_method.__name__) except AttributeError: raise TypeError(f'{type(obj).__name__} object does not have ' f'a {self.deleter_method.__name__} method') fdel()
0.889472
0.33353
import copy import yaml import bibtexparser from bibtexparser.bwriter import BibTexWriter from bibtexparser.bibdatabase import BibDatabase from . import normalizetex def bibtex_dump(data): r""" Turns dict into BibTex string Args: data (List[dict]): data to be transformed Returns: str: BibTex representation of dict data Example: >>> data = [{"ENTRYTYPE": "article", ... "ID": "MR3395349", ... "author": ... ("<NAME>. and " ... "<NAME>. and " ... "<NAME>"), ... "journal": "J. Symb. Log.", ... "number": "3", ... "pages": "763--784", ... "title": r"Almost {G}alois {$\omega$}-stable classes", ... "volume": "80", ... "year": "2015", ... }] >>> print(bibtex_dump(data)) @article{MR3395349, author = {<NAME>. and <NAME>. and <NAME>}, journal = {J. Symb. Log.}, number = {3}, pages = {763--784}, title = {Almost {G}alois {$\omega$}-stable classes}, volume = {80}, year = {2015} } """ db = BibDatabase() db.entries = data writer = BibTexWriter() return writer.write(db) def bibtex_load_list(handle): """ Loads bibtex data from handle Args: handle (handle): file handle of bibliography Returns: List[dict]: entry list of bibliography """ entry_list = bibtexparser.load(handle).get_entry_list() for entry in entry_list: if Bibliography.MERGEKEY.lower() in entry: key = entry[Bibliography.MERGEKEY.lower()] del entry[Bibliography.MERGEKEY.lower()] entry[Bibliography.MERGEKEY] = key return entry_list class Bibliography(object): """ Class for handling bibliographic data """ READERS = {'bib': bibtex_load_list, 'yaml': yaml.load } """ Supported readers """ WRITERS = {'bib': bibtex_dump, 'yaml': yaml.dump } """ Supported writers """ MERGEKEY = 'KEY' """ Name of the field used for merging in :func:`merge` and created in :func:`make_key`. 
""" def __init__(self, data=None): if not data: data = [] self._data = None self.data = data @property def data(self): """ Property containing the bibliographic data ``data`` must be a list of entries, where each entry is a ``dict`` containing the keys "ENTRYTYPE" and "ID". These ID-s must be unique. Raises: RuntimeError: if fields are missing or the ID-s are not unique TypeError: if ``data`` is of incorrect type Example: The first example raises an error since the argument supplied to ``data`` is not of correct type. The second example succeeds. >>> bib = Bibliography() >>> bib.data [] >>> bib.data = 'Katze' Traceback (most recent call last): ... TypeError: Expected data as list of bibliographic entries got <class 'str'> >>> bib.data = [{'ENTRYTYPE': 'article', 'ID': 'test'}] """ return self._data @data.setter def data(self, data): if not isinstance(data, list): raise TypeError('Expected data as list of bibliographic entries ' 'got %s' % type(data)) if not all(map(self._test_entry, data)): raise RuntimeError('There is something wrong with your data. ' 'Either one of your entries is not a ' 'dictionary or does not contain both ' 'keys "ENTRYTYPE" and "ID".') ids = [e['ID'] for e in data] if len(ids) > len(set(ids)): raise RuntimeError('Your bibliography contains duplicate ' 'ID-s.') self._data = data @data.deleter def data(self): del self._data def load(self, handle, reader='yaml'): """ Loads bibliography from handle Args: handle (handle): file handle of biblography reader (Optional[str]): name of reader (see :const:`READERS`) Example: Assuming that the file 'bib.yaml' exists, one can load its data into a bibliography as follows. >>> bib = Bibliography() # doctest: +SKIP >>> with open('bib.yaml', 'r') as handle: # doctest: +SKIP ... 
bib.load(handle, reader='yaml') """ self.data = self.READERS[reader](handle) def dump(self, writer='yaml'): """ Serializes :attr:`data` using one of the predefinded writers in :const:`WRITERS` Args: writer (Optional[str]): name of one of the predefined writers Returns: str: representation of :attr:`data` as a string. Example: >>> data = [{"ENTRYTYPE": "article", ... "ID": "MR3395349", ... "author": ... ("<NAME>. and " ... "<NAME>. and " ... "<NAME>"), ... "journal": "J. Symb. Log.", ... "number": "3", ... "pages": "763--784", ... "title": r"Almost {G}alois {$\omega$}-stable classes", ... "volume": "80", ... "year": "2015", ... }] >>> bib = Bibliography(data) >>> print(bib.dump(writer='bib')) @article{MR3395349, author = {<NAME>. and <NAME>. and Shelah, Saharon}, journal = {J. Symb. Log.}, number = {3}, pages = {763--784}, title = {Almost {G}alois {$\omega$}-stable classes}, volume = {80}, year = {2015} } """ return self.WRITERS[writer](self.data) def __iter__(self): return self.data.__iter__() def __next__(self): return self.data.__next__() next = __next__ # python 2 compatibility def union(self, other): """ Returns the union of two bibliographies. This is a special case of :func:`merge` were the merge key is just the field 'ID' Args: other (Bibliography): bibliography to be joined Returns: Bibliography: union of the bibliographies entries Note: ``union`` is *not* commutative. See example below. Example: >>> data1 = [{'ENTRYTYPE': 'article', 'ID': 'test1'}, ... {'ENTRYTYPE': 'article', 'ID': 'test2'}] >>> data2 = [{'ENTRYTYPE': 'book', 'ID': 'test2'}, ... 
{'ENTRYTYPE': 'article', 'ID': 'test3'}] >>> bib1 = Bibliography(data1) >>> bib2 = Bibliography(data2) >>> uni = bib1.union(bib2) >>> uni.data [{'ENTRYTYPE': 'article', 'ID': 'test1'}, {'ENTRYTYPE': 'article', 'ID': 'test2'}, {'ENTRYTYPE': 'article', 'ID': 'test3'}] >>> uni.data == bib2.union(bib1).data False """ self.make_key('ID') other.make_key('ID') return self.merge(other, union=True) def merge(self, other, union=True, keep_key=False): """ Merges two bibliographies using the merge key in field :attr:`MERGEKEY` Args: other (Bibliography): The bibliography to be merged union (Optional[bool]): Do you want the new database to contain the union of the keys? Otherwise only the entries of the left bibliography will be updated and entries not contained in it will be ignored. Defaults to ``True`` keep_key (Optional[bool]): Do you want to keep the merge key? Defaults to ``False`` Returns: Bibliography: Bibliography containing the merged dataset """ self_by_key = {e[self.MERGEKEY]: e for e in self} other_by_key = {e[self.MERGEKEY]: e for e in other} joined = copy.deepcopy(self_by_key) if union: joined.update(other_by_key) # Creates the union of both keys for key, entry in joined.items(): if key in other_by_key: entry.update(other_by_key[key]) if key in self_by_key: entry.update(self_by_key[key]) bib = Bibliography(list(joined.values())) if not keep_key: bib.del_fields(self.MERGEKEY) return bib def add_fields(self, **kargs): """ Adds fields to bibliography For each entry of ``kargs`` a field corresponding to the key of the entry is added. The value of the entry must be a unary function accepting an entry of the bibliography as its argument. Args: kargs (Dict[str, function]): Dictionary of field names and construction functions Example: In this example the author field is concatenated with itself and stored in the field 'doubleauthor'. >>> data = [{'year': '1981', ... 'title': 'Weak compactness and the structure', ... 'author': '<NAME>. and <NAME>.', ... 
'ENTRYTYPE': 'incollection', ... 'ID': 'MR645920' ... }, ... {'year': '1981', ... 'title': 'Iterated forcing and changing cofinalities', ... 'author': '<NAME>', ... 'ENTRYTYPE': 'article', ... 'ID': 'MR636904' ... } ... ] >>> bib = Bibliography(data) >>> f = lambda entry: entry['author'] * 2 >>> bib.add_fields(doubleauthor=f) >>> [e['doubleauthor'] for e in bib] ['<NAME>. and <NAME>. and <NAME>.', '<NAME>'] """ for key, func in kargs.items(): for entry in self: entry.update({key: func(entry)}) def del_fields(self, *fields): """ Deletes the specified fields from the database Args: fields (List[str]): names of fields to be deleted. If an entry does not contain a field with the specified name, nothing happens. Example: >>> data = [{'year': '1981', ... 'title': 'Weak compactness and the structure', ... 'author': '<NAME>. and <NAME>.', ... 'ENTRYTYPE': 'incollection', ... 'ID': 'MR645920' ... }, ... {'year': '1981', ... 'title': 'Iterated forcing and changing cofinalities', ... 'author': '<NAME>', ... 'ENTRYTYPE': 'article', ... 'ID': 'MR636904' ... } ... ] >>> bib = Bibliography(data) >>> bib.del_fields('title', 'year') >>> bib.data [{'author': '<NAME>. and <NAME>.', 'ENTRYTYPE': 'incollection', 'ID': 'MR645920'}, {'author': '<NAME>', 'ENTRYTYPE': 'article', 'ID': 'MR636904'}] """ for e in self: for k in fields: if k in e.keys(): del e[k] def make_key(self, *keys): """ Creates a merge key formed out of the fields specified in ``keys`` Args: keys (List[str]): List of field names Raises: RuntimeError: if the merge keys are not unique Example: Note how the first example produces a ``RuntimeError`` since the years coincide. Using a combination of author and year fixes this. >>> data = [{'year': '1981', ... 'title': 'Weak compactness and the structure', ... 'author': '<NAME>. and <NAME>.', ... 'ENTRYTYPE': 'incollection', ... 'ID': 'MR645920' ... }, ... {'year': '1981', ... 'title': 'Iterated forcing and changing cofinalities', ... 'author': '<NAME>', ... 
'ENTRYTYPE': 'article', ... 'ID': 'MR636904' ... } ... ] >>> bib = Bibliography(data) >>> bib.make_key('year') Traceback (most recent call last): ... RuntimeError: The following merge keys (key, ID)are duplicates: [('1981', 'MR645920'), ('1981', 'MR636904')] >>> bib.make_key('author', 'year') >>> [e['KEY'] for e in bib] ['<NAME>. and Shelah, S.-1981', 'Shelah, Saharon-1981'] """ func = lambda r: normalizetex.make_key(r, *keys) self.add_fields(**{self.MERGEKEY: func}) keys = [e[self.MERGEKEY] for e in self] ids = [e['ID'] for e in self] duplicates = [(k, i) for k, i in zip(keys, ids) if keys.count(k) > 1] if duplicates: raise RuntimeError('The following merge keys (key, ID)' 'are duplicates: %s' % duplicates) @staticmethod def _test_entry(entry): if not isinstance(entry, dict): return False return 'ENTRYTYPE' in entry and 'ID' in entry if __name__ == '__main__': data1 = [{'ENTRYTYPE': 'article', 'ID': 'test1'}, {'ENTRYTYPE': 'article', 'ID': 'test2'}] data2 = [{'ENTRYTYPE': 'book', 'ID': 'test2'}, {'ENTRYTYPE': 'article', 'ID': 'test3'}] bib1 = Bibliography(data1) print(bib1.data) bib2 = Bibliography(data2) print(bib1.union(bib2).data) print(bib1.data) """ bib1 = Bibliography() bib2 = Bibliography() with open('files/test1.bib', 'r') as bibtex: bib1.load(bibtex, reader='bibtex') with open('files/test2.bib', 'r') as bibtex: bib2.load(bibtex, reader='bibtex') bib1.add_fields(normauthor=normalizetex.norm_author, normtitle=normalizetex.norm_title) bib2.add_fields(normauthor=normalizetex.norm_author, normtitle=normalizetex.norm_title) bib1.merge(bib2, 'normauthor', 'normtitle', 'year', union=False) bib1.del_fields('normauthor', 'normtitle') with open('files/merge.bib', 'w') as bibtex: bibtex.write(bibtex_dump(bib1.data)) """
listb/pybibtools.py
import copy import yaml import bibtexparser from bibtexparser.bwriter import BibTexWriter from bibtexparser.bibdatabase import BibDatabase from . import normalizetex def bibtex_dump(data): r""" Turns dict into BibTex string Args: data (List[dict]): data to be transformed Returns: str: BibTex representation of dict data Example: >>> data = [{"ENTRYTYPE": "article", ... "ID": "MR3395349", ... "author": ... ("<NAME>. and " ... "<NAME>. and " ... "<NAME>"), ... "journal": "J. Symb. Log.", ... "number": "3", ... "pages": "763--784", ... "title": r"Almost {G}alois {$\omega$}-stable classes", ... "volume": "80", ... "year": "2015", ... }] >>> print(bibtex_dump(data)) @article{MR3395349, author = {<NAME>. and <NAME>. and <NAME>}, journal = {J. Symb. Log.}, number = {3}, pages = {763--784}, title = {Almost {G}alois {$\omega$}-stable classes}, volume = {80}, year = {2015} } """ db = BibDatabase() db.entries = data writer = BibTexWriter() return writer.write(db) def bibtex_load_list(handle): """ Loads bibtex data from handle Args: handle (handle): file handle of bibliography Returns: List[dict]: entry list of bibliography """ entry_list = bibtexparser.load(handle).get_entry_list() for entry in entry_list: if Bibliography.MERGEKEY.lower() in entry: key = entry[Bibliography.MERGEKEY.lower()] del entry[Bibliography.MERGEKEY.lower()] entry[Bibliography.MERGEKEY] = key return entry_list class Bibliography(object): """ Class for handling bibliographic data """ READERS = {'bib': bibtex_load_list, 'yaml': yaml.load } """ Supported readers """ WRITERS = {'bib': bibtex_dump, 'yaml': yaml.dump } """ Supported writers """ MERGEKEY = 'KEY' """ Name of the field used for merging in :func:`merge` and created in :func:`make_key`. 
""" def __init__(self, data=None): if not data: data = [] self._data = None self.data = data @property def data(self): """ Property containing the bibliographic data ``data`` must be a list of entries, where each entry is a ``dict`` containing the keys "ENTRYTYPE" and "ID". These ID-s must be unique. Raises: RuntimeError: if fields are missing or the ID-s are not unique TypeError: if ``data`` is of incorrect type Example: The first example raises an error since the argument supplied to ``data`` is not of correct type. The second example succeeds. >>> bib = Bibliography() >>> bib.data [] >>> bib.data = 'Katze' Traceback (most recent call last): ... TypeError: Expected data as list of bibliographic entries got <class 'str'> >>> bib.data = [{'ENTRYTYPE': 'article', 'ID': 'test'}] """ return self._data @data.setter def data(self, data): if not isinstance(data, list): raise TypeError('Expected data as list of bibliographic entries ' 'got %s' % type(data)) if not all(map(self._test_entry, data)): raise RuntimeError('There is something wrong with your data. ' 'Either one of your entries is not a ' 'dictionary or does not contain both ' 'keys "ENTRYTYPE" and "ID".') ids = [e['ID'] for e in data] if len(ids) > len(set(ids)): raise RuntimeError('Your bibliography contains duplicate ' 'ID-s.') self._data = data @data.deleter def data(self): del self._data def load(self, handle, reader='yaml'): """ Loads bibliography from handle Args: handle (handle): file handle of biblography reader (Optional[str]): name of reader (see :const:`READERS`) Example: Assuming that the file 'bib.yaml' exists, one can load its data into a bibliography as follows. >>> bib = Bibliography() # doctest: +SKIP >>> with open('bib.yaml', 'r') as handle: # doctest: +SKIP ... 
bib.load(handle, reader='yaml') """ self.data = self.READERS[reader](handle) def dump(self, writer='yaml'): """ Serializes :attr:`data` using one of the predefinded writers in :const:`WRITERS` Args: writer (Optional[str]): name of one of the predefined writers Returns: str: representation of :attr:`data` as a string. Example: >>> data = [{"ENTRYTYPE": "article", ... "ID": "MR3395349", ... "author": ... ("<NAME>. and " ... "<NAME>. and " ... "<NAME>"), ... "journal": "J. Symb. Log.", ... "number": "3", ... "pages": "763--784", ... "title": r"Almost {G}alois {$\omega$}-stable classes", ... "volume": "80", ... "year": "2015", ... }] >>> bib = Bibliography(data) >>> print(bib.dump(writer='bib')) @article{MR3395349, author = {<NAME>. and <NAME>. and Shelah, Saharon}, journal = {J. Symb. Log.}, number = {3}, pages = {763--784}, title = {Almost {G}alois {$\omega$}-stable classes}, volume = {80}, year = {2015} } """ return self.WRITERS[writer](self.data) def __iter__(self): return self.data.__iter__() def __next__(self): return self.data.__next__() next = __next__ # python 2 compatibility def union(self, other): """ Returns the union of two bibliographies. This is a special case of :func:`merge` were the merge key is just the field 'ID' Args: other (Bibliography): bibliography to be joined Returns: Bibliography: union of the bibliographies entries Note: ``union`` is *not* commutative. See example below. Example: >>> data1 = [{'ENTRYTYPE': 'article', 'ID': 'test1'}, ... {'ENTRYTYPE': 'article', 'ID': 'test2'}] >>> data2 = [{'ENTRYTYPE': 'book', 'ID': 'test2'}, ... 
{'ENTRYTYPE': 'article', 'ID': 'test3'}] >>> bib1 = Bibliography(data1) >>> bib2 = Bibliography(data2) >>> uni = bib1.union(bib2) >>> uni.data [{'ENTRYTYPE': 'article', 'ID': 'test1'}, {'ENTRYTYPE': 'article', 'ID': 'test2'}, {'ENTRYTYPE': 'article', 'ID': 'test3'}] >>> uni.data == bib2.union(bib1).data False """ self.make_key('ID') other.make_key('ID') return self.merge(other, union=True) def merge(self, other, union=True, keep_key=False): """ Merges two bibliographies using the merge key in field :attr:`MERGEKEY` Args: other (Bibliography): The bibliography to be merged union (Optional[bool]): Do you want the new database to contain the union of the keys? Otherwise only the entries of the left bibliography will be updated and entries not contained in it will be ignored. Defaults to ``True`` keep_key (Optional[bool]): Do you want to keep the merge key? Defaults to ``False`` Returns: Bibliography: Bibliography containing the merged dataset """ self_by_key = {e[self.MERGEKEY]: e for e in self} other_by_key = {e[self.MERGEKEY]: e for e in other} joined = copy.deepcopy(self_by_key) if union: joined.update(other_by_key) # Creates the union of both keys for key, entry in joined.items(): if key in other_by_key: entry.update(other_by_key[key]) if key in self_by_key: entry.update(self_by_key[key]) bib = Bibliography(list(joined.values())) if not keep_key: bib.del_fields(self.MERGEKEY) return bib def add_fields(self, **kargs): """ Adds fields to bibliography For each entry of ``kargs`` a field corresponding to the key of the entry is added. The value of the entry must be a unary function accepting an entry of the bibliography as its argument. Args: kargs (Dict[str, function]): Dictionary of field names and construction functions Example: In this example the author field is concatenated with itself and stored in the field 'doubleauthor'. >>> data = [{'year': '1981', ... 'title': 'Weak compactness and the structure', ... 'author': '<NAME>. and <NAME>.', ... 
'ENTRYTYPE': 'incollection', ... 'ID': 'MR645920' ... }, ... {'year': '1981', ... 'title': 'Iterated forcing and changing cofinalities', ... 'author': '<NAME>', ... 'ENTRYTYPE': 'article', ... 'ID': 'MR636904' ... } ... ] >>> bib = Bibliography(data) >>> f = lambda entry: entry['author'] * 2 >>> bib.add_fields(doubleauthor=f) >>> [e['doubleauthor'] for e in bib] ['<NAME>. and <NAME>. and <NAME>.', '<NAME>'] """ for key, func in kargs.items(): for entry in self: entry.update({key: func(entry)}) def del_fields(self, *fields): """ Deletes the specified fields from the database Args: fields (List[str]): names of fields to be deleted. If an entry does not contain a field with the specified name, nothing happens. Example: >>> data = [{'year': '1981', ... 'title': 'Weak compactness and the structure', ... 'author': '<NAME>. and <NAME>.', ... 'ENTRYTYPE': 'incollection', ... 'ID': 'MR645920' ... }, ... {'year': '1981', ... 'title': 'Iterated forcing and changing cofinalities', ... 'author': '<NAME>', ... 'ENTRYTYPE': 'article', ... 'ID': 'MR636904' ... } ... ] >>> bib = Bibliography(data) >>> bib.del_fields('title', 'year') >>> bib.data [{'author': '<NAME>. and <NAME>.', 'ENTRYTYPE': 'incollection', 'ID': 'MR645920'}, {'author': '<NAME>', 'ENTRYTYPE': 'article', 'ID': 'MR636904'}] """ for e in self: for k in fields: if k in e.keys(): del e[k] def make_key(self, *keys): """ Creates a merge key formed out of the fields specified in ``keys`` Args: keys (List[str]): List of field names Raises: RuntimeError: if the merge keys are not unique Example: Note how the first example produces a ``RuntimeError`` since the years coincide. Using a combination of author and year fixes this. >>> data = [{'year': '1981', ... 'title': 'Weak compactness and the structure', ... 'author': '<NAME>. and <NAME>.', ... 'ENTRYTYPE': 'incollection', ... 'ID': 'MR645920' ... }, ... {'year': '1981', ... 'title': 'Iterated forcing and changing cofinalities', ... 'author': '<NAME>', ... 
'ENTRYTYPE': 'article', ... 'ID': 'MR636904' ... } ... ] >>> bib = Bibliography(data) >>> bib.make_key('year') Traceback (most recent call last): ... RuntimeError: The following merge keys (key, ID)are duplicates: [('1981', 'MR645920'), ('1981', 'MR636904')] >>> bib.make_key('author', 'year') >>> [e['KEY'] for e in bib] ['<NAME>. and Shelah, S.-1981', 'Shelah, Saharon-1981'] """ func = lambda r: normalizetex.make_key(r, *keys) self.add_fields(**{self.MERGEKEY: func}) keys = [e[self.MERGEKEY] for e in self] ids = [e['ID'] for e in self] duplicates = [(k, i) for k, i in zip(keys, ids) if keys.count(k) > 1] if duplicates: raise RuntimeError('The following merge keys (key, ID)' 'are duplicates: %s' % duplicates) @staticmethod def _test_entry(entry): if not isinstance(entry, dict): return False return 'ENTRYTYPE' in entry and 'ID' in entry if __name__ == '__main__': data1 = [{'ENTRYTYPE': 'article', 'ID': 'test1'}, {'ENTRYTYPE': 'article', 'ID': 'test2'}] data2 = [{'ENTRYTYPE': 'book', 'ID': 'test2'}, {'ENTRYTYPE': 'article', 'ID': 'test3'}] bib1 = Bibliography(data1) print(bib1.data) bib2 = Bibliography(data2) print(bib1.union(bib2).data) print(bib1.data) """ bib1 = Bibliography() bib2 = Bibliography() with open('files/test1.bib', 'r') as bibtex: bib1.load(bibtex, reader='bibtex') with open('files/test2.bib', 'r') as bibtex: bib2.load(bibtex, reader='bibtex') bib1.add_fields(normauthor=normalizetex.norm_author, normtitle=normalizetex.norm_title) bib2.add_fields(normauthor=normalizetex.norm_author, normtitle=normalizetex.norm_title) bib1.merge(bib2, 'normauthor', 'normtitle', 'year', union=False) bib1.del_fields('normauthor', 'normtitle') with open('files/merge.bib', 'w') as bibtex: bibtex.write(bibtex_dump(bib1.data)) """
0.824533
0.361559
import random from abc import ABC, abstractmethod from typing import List from serving_dataclasses import SessionRequest from serving_system import ServingSystem class RequestSorter(ABC): """Defines the interface for request sorting algorithms.""" @abstractmethod def sort(self, serving_system: ServingSystem) -> List[SessionRequest]: """Return the session requests for a given serving system in sorted order. Args: serving_system (ServingSystem): model of the inference serving problem instance Returns: List[SessionRequest]: sorted list of session requests """ pass class RandomRequestSorter(RequestSorter): """Randomly shuffles lists of requests, mimicking online serving of requests.""" def sort(self, serving_system: ServingSystem) -> List[SessionRequest]: """Return the session requests for a given serving system in random order. Args: serving_system (ServingSystem): model of the inference serving problem instance Returns: List[SessionRequest]: shuffled list of session requests """ requests = list(serving_system.requests.values()) random.shuffle(requests) return requests class ASRequestSorter(RequestSorter): """Request sorter that sorts first by minimum accuracy (A) and then by transmission speed (S).""" def sort(self, serving_system: ServingSystem) -> List[SessionRequest]: """Return the session requests sorted first by minimum accuracy and then by transmission speed. 
Args: serving_system (ServingSystem): model of the inference serving problem instance Returns: List[SessionRequest]: sorted list of session requests """ sorted_requests = sorted( serving_system.requests.values(), key=lambda request: (request.min_accuracy, -request.transmission_speed), reverse=True, ) return sorted_requests class ARRequestSorter(RequestSorter): """Request sorter that sorts first by minimum accuracy (A) and then by arrival rate (R).""" def sort(self, serving_system: ServingSystem) -> List[SessionRequest]: """Return the session requests sorted first by minimum accuracy and then by arrival rate. Args: serving_system (ServingSystem): model of the inference serving problem instance Returns: List[SessionRequest]: sorted list of session requests """ sorted_requests = sorted( serving_system.requests.values(), key=lambda request: (request.min_accuracy, request.arrival_rate), reverse=True, ) return sorted_requests class RRequestSorter(RequestSorter): """Request sorter that sorts by arrival rate (R).""" def sort(self, serving_system: ServingSystem) -> List[SessionRequest]: """Return the session requests sorted by arrival rate. Args: serving_system (ServingSystem): model of the inference serving problem instance Returns: List[SessionRequest]: sorted list of session requests """ sorted_requests = sorted( serving_system.requests.values(), key=lambda request: (request.arrival_rate), reverse=True, ) return sorted_requests
solver/solver/request_sorter.py
import random from abc import ABC, abstractmethod from typing import List from serving_dataclasses import SessionRequest from serving_system import ServingSystem class RequestSorter(ABC): """Defines the interface for request sorting algorithms.""" @abstractmethod def sort(self, serving_system: ServingSystem) -> List[SessionRequest]: """Return the session requests for a given serving system in sorted order. Args: serving_system (ServingSystem): model of the inference serving problem instance Returns: List[SessionRequest]: sorted list of session requests """ pass class RandomRequestSorter(RequestSorter): """Randomly shuffles lists of requests, mimicking online serving of requests.""" def sort(self, serving_system: ServingSystem) -> List[SessionRequest]: """Return the session requests for a given serving system in random order. Args: serving_system (ServingSystem): model of the inference serving problem instance Returns: List[SessionRequest]: shuffled list of session requests """ requests = list(serving_system.requests.values()) random.shuffle(requests) return requests class ASRequestSorter(RequestSorter): """Request sorter that sorts first by minimum accuracy (A) and then by transmission speed (S).""" def sort(self, serving_system: ServingSystem) -> List[SessionRequest]: """Return the session requests sorted first by minimum accuracy and then by transmission speed. 
Args: serving_system (ServingSystem): model of the inference serving problem instance Returns: List[SessionRequest]: sorted list of session requests """ sorted_requests = sorted( serving_system.requests.values(), key=lambda request: (request.min_accuracy, -request.transmission_speed), reverse=True, ) return sorted_requests class ARRequestSorter(RequestSorter): """Request sorter that sorts first by minimum accuracy (A) and then by arrival rate (R).""" def sort(self, serving_system: ServingSystem) -> List[SessionRequest]: """Return the session requests sorted first by minimum accuracy and then by arrival rate. Args: serving_system (ServingSystem): model of the inference serving problem instance Returns: List[SessionRequest]: sorted list of session requests """ sorted_requests = sorted( serving_system.requests.values(), key=lambda request: (request.min_accuracy, request.arrival_rate), reverse=True, ) return sorted_requests class RRequestSorter(RequestSorter): """Request sorter that sorts by arrival rate (R).""" def sort(self, serving_system: ServingSystem) -> List[SessionRequest]: """Return the session requests sorted by arrival rate. Args: serving_system (ServingSystem): model of the inference serving problem instance Returns: List[SessionRequest]: sorted list of session requests """ sorted_requests = sorted( serving_system.requests.values(), key=lambda request: (request.arrival_rate), reverse=True, ) return sorted_requests
0.937505
0.292482
from django.urls import include, path, re_path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions
from rest_framework.authtoken.views import obtain_auth_token
from rest_framework.routers import DefaultRouter

from . import views

app_name = 'reservation'

# drf-yasg schema view backing the interactive API documentation endpoints.
schema_view = get_schema_view(
    openapi.Info(
        title='Reserv API',
        default_version='v1',
        description='Для работы с системой требуется авторизация (токен). '
                    'Получите его перед выполнением запросов',
        terms_of_service='https://www.google.com/policies/terms/',
        contact=openapi.Contact(email='<EMAIL>'),
        license=openapi.License(name='BSD License'),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)

# API documentation routes.
# FIX: django.conf.urls.url() has been deprecated since Django 2.0 and was
# removed in Django 4.0; re_path() (available since 2.0, which this file
# already requires via django.urls.path) is its drop-in replacement.
urlpatterns = [
    re_path(r'^swagger(?P<format>\.json|\.yaml)$',
            schema_view.without_ui(cache_timeout=0),
            name='schema-json'),
    re_path(r'^swagger/$',
            schema_view.with_ui('swagger', cache_timeout=0),
            name='schema-swagger-ui'),
    re_path(r'^redoc/$',
            schema_view.with_ui('redoc', cache_timeout=0),
            name='schema-redoc'),
]

# REST API router: /api/v1/reservations/, /api/v1/rooms/, /api/v1/users/.
router = DefaultRouter()
router.register('reservations', views.ReservationViewSet, basename='ReservationView')
router.register('rooms', views.RoomViewSet, basename='RoomsView')
router.register('users', views.UserViewSet, basename='UserView')

# Site routes.  Order matters: the '<str:username>/' catch-all must stay
# after the more specific 'room/...' and 'new/' patterns.
urlpatterns += [
    path('', views.index, name='index'),
    path('api/v1/', include(router.urls)),
    path('room/<slug:slug>/', views.room_reservations, name='room'),
    path('new/', views.new_reservation, name="new_reservation"),
    path('<str:username>/', views.profile, name='profile'),
    path('<str:username>/<int:reservation_id>/',
         views.reservation_view, name='reservation'),
    path(
        '<str:username>/<int:reservation_id>/edit/',
        views.reservation_edit,
        name='reservation_edit'
    ),
    path('api/v1/api-token-auth/', obtain_auth_token),
]
reservation/urls.py
from django.urls import include, path, re_path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions
from rest_framework.authtoken.views import obtain_auth_token
from rest_framework.routers import DefaultRouter

from . import views

app_name = 'reservation'

# drf-yasg schema view backing the interactive API documentation endpoints.
schema_view = get_schema_view(
    openapi.Info(
        title='Reserv API',
        default_version='v1',
        description='Для работы с системой требуется авторизация (токен). '
                    'Получите его перед выполнением запросов',
        terms_of_service='https://www.google.com/policies/terms/',
        contact=openapi.Contact(email='<EMAIL>'),
        license=openapi.License(name='BSD License'),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)

# API documentation routes.
# FIX: django.conf.urls.url() has been deprecated since Django 2.0 and was
# removed in Django 4.0; re_path() (available since 2.0, which this file
# already requires via django.urls.path) is its drop-in replacement.
urlpatterns = [
    re_path(r'^swagger(?P<format>\.json|\.yaml)$',
            schema_view.without_ui(cache_timeout=0),
            name='schema-json'),
    re_path(r'^swagger/$',
            schema_view.with_ui('swagger', cache_timeout=0),
            name='schema-swagger-ui'),
    re_path(r'^redoc/$',
            schema_view.with_ui('redoc', cache_timeout=0),
            name='schema-redoc'),
]

# REST API router: /api/v1/reservations/, /api/v1/rooms/, /api/v1/users/.
router = DefaultRouter()
router.register('reservations', views.ReservationViewSet, basename='ReservationView')
router.register('rooms', views.RoomViewSet, basename='RoomsView')
router.register('users', views.UserViewSet, basename='UserView')

# Site routes.  Order matters: the '<str:username>/' catch-all must stay
# after the more specific 'room/...' and 'new/' patterns.
urlpatterns += [
    path('', views.index, name='index'),
    path('api/v1/', include(router.urls)),
    path('room/<slug:slug>/', views.room_reservations, name='room'),
    path('new/', views.new_reservation, name="new_reservation"),
    path('<str:username>/', views.profile, name='profile'),
    path('<str:username>/<int:reservation_id>/',
         views.reservation_view, name='reservation'),
    path(
        '<str:username>/<int:reservation_id>/edit/',
        views.reservation_edit,
        name='reservation_edit'
    ),
    path('api/v1/api-token-auth/', obtain_auth_token),
]
0.314682
0.106319
# 1 simple # 2 multiple agent # plot reward of simple / multi-agent / rsu """ # 1***Simple agent ***** import matplotlib.pyplot as plt import pickle var = "rc" pdf_plot = var # R_c, C_o, C_u, k lstt = [var]#, "C_o", "C_u", "k"] for pdf_plot in lstt: with open('z_20ep_resources_'+var+'_ppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data' # read the data as binary data stream ppo = pickle.load(filehandle) zipped_lists = zip(ppo[0], ppo[1]) # zip of unused shared and own resources ppo_unused = [x + y for (x, y) in zipped_lists] # sum list zipped_lists = zip(ppo[2], ppo[3]) ppo_unsatisfied = [x + y for (x, y) in zipped_lists] with open('z_20ep_resources_'+var+'_ddpg.data', 'rb') as filehandle: # 02_five_fifty_R_c.data # read the data as binary data stream ddpg =pickle.load(filehandle) zipped_lists = zip(ddpg[0], ddpg[1]) # zip of unused shared and own resources ddpg_unused = [x + y for (x, y) in zipped_lists] # sum list zipped_lists = zip(ddpg[2], ddpg[3]) # zip of unused shared and own resources ddpg_unsatisfied = [x + y for (x, y) in zipped_lists] # sum list with open('z_20ep_resources_'+var+'_td3.data', 'rb') as filehandle: # 1_ddpg4442C_o # read the data as binary data stream td3 = pickle.load(filehandle) zipped_lists = zip(td3[0], td3[1]) # zip of unused shared and own resources td3_unused = [x + y for (x, y) in zipped_lists] # sum list zipped_lists = zip(td3[2], td3[3]) # zip of unused shared and own resources td3_unsatisfied = [x + y for (x, y) in zipped_lists] # sum list times = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20] plt.plot(times , ppo_unused, color='orange', linestyle='dotted', marker='x' ,label='PPO_unused') # unused shared 'ppo_$Unused$' plt.plot(times , ddpg_unused, color='red', linestyle='dashed', marker='D' ,label='DDPG_unused') # unused shared plt.plot(times , td3_unused, color='blue', linestyle='--', marker='2' ,label='TD3_unused') # unused shared plt.plot(times , ppo_unsatisfied, color='green', 
linestyle='dotted', marker='s' ,label='PPO_unsatisfied') # unused shared 'ppo_$Unused$' plt.plot(times , ddpg_unsatisfied, color='pink', linestyle='solid', marker='<' ,label='DDPG_unsatisfied') # unused shared plt.plot(times , td3_unsatisfied, color='brown', linestyle='--', marker='2' ,label='TD3_unsatisfied') # unused shared plt.ylabel('Caching Resources', size= 8 ) #resource plt.xlabel('$'+var+'$', size= 10) #'$'+pdf_plot[para]+'$ ' $'+var+'$ Communication range plt.xticks((1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),size = 7) plt.yticks(size = 7) plt.grid() plt.legend()#ncol=1, bbox_to_anchor=(1, 0.5))#c_u plt.grid() plt.savefig('zz_caching_'+var+'_g+o_z.pdf') #abbbs_ b_test_five_'+var+'_plot.pdf #plt.show() print("EEND") print("End") """ """ # 2***multi agent ***** import matplotlib.pyplot as plt import pickle var = "rc" pdf_plot = var # R_c, C_o, C_u, k lstt = [var]#, "C_o", "C_u", "k"] for pdf_plot in lstt: with open('z_20ep_multi_agent_'+var+'_ppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data' # read the data as binary data stream ppo = pickle.load(filehandle) zipped_lists = zip(ppo[0], ppo[1]) # zip of unused shared and own resources ppo_unused = [x + y for (x, y) in zipped_lists] # sum list zipped_lists = zip(ppo[2], ppo[3]) ppo_unsatisfied = [x + y for (x, y) in zipped_lists] with open('z_20ep_multi_agent_'+var+'_ddpg.data', 'rb') as filehandle: # 02_five_fifty_R_c.data # read the data as binary data stream ddpg =pickle.load(filehandle) zipped_lists = zip(ddpg[0], ddpg[1]) # zip of unused shared and own resources ddpg_unused = [x + y for (x, y) in zipped_lists] # sum list zipped_lists = zip(ddpg[2], ddpg[3]) # zip of unused shared and own resources ddpg_unsatisfied = [x + y for (x, y) in zipped_lists] # sum list with open('z_20ep_multi_agent_'+var+'_td3.data', 'rb') as filehandle: # 1_ddpg4442C_o # read the data as binary data stream td3 = pickle.load(filehandle) zipped_lists = zip(td3[0], td3[1]) # zip of unused shared 
and own resources td3_unused = [x + y for (x, y) in zipped_lists] # sum list zipped_lists = zip(td3[2], td3[3]) # zip of unused shared and own resources td3_unsatisfied = [x + y for (x, y) in zipped_lists] # sum list times = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20] plt.plot(times , ppo_unused, color='orange', linestyle='dotted', marker='x' ,label='PPO_unused') # unused shared 'ppo_$Unused$' plt.plot(times , ddpg_unused, color='red', linestyle='dashed', marker='D' ,label='DDPG_unused') # unused shared plt.plot(times , td3_unused, color='blue', linestyle='--', marker='2' ,label='TD3_unused') # unused shared plt.plot(times , ppo_unsatisfied, color='green', linestyle='dotted', marker='s' ,label='PPO_unsatisfied') # unused shared 'ppo_$Unused$' plt.plot(times , ddpg_unsatisfied, color='pink', linestyle='solid', marker='<' ,label='DDPG_unsatisfied') # unused shared plt.plot(times , td3_unsatisfied, color='brown', linestyle='--', marker='2' ,label='TD3_unsatisfied') # unused shared plt.ylabel('Caching Resources', size= 8 ) #resource plt.xlabel('$'+var+'$', size= 10) #'$'+pdf_plot[para]+'$ ' $'+var+'$ Communication range plt.xticks((1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),size = 7) plt.yticks(size = 7) plt.grid() plt.legend()#ncol=1, bbox_to_anchor=(1, 0.5))#c_u plt.grid() plt.savefig('zz_multi_caching_'+var+'_g+o_z.pdf') #abbbs_ b_test_five_'+var+'_plot.pdf #plt.show() print("End") """ """ # 3***reward simple / multi-agent / rsu ***** import matplotlib.pyplot as plt import pickle import numpy as np var = "k" pdf_plot = var # R_c, C_o, C_u, k lstt = [var]#, "C_o", "C_u", "k"] print("okokokok") for pdf_plot in lstt: with open('Reward_ppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data' # read the data as binary data stream single = pickle.load(filehandle) single = single[0][:219999] window_width= 100 cumsum_vec = np.cumsum(np.insert(single, 0, 0)) single = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / 
window_width with open('Reward_multi_agentppo.data', 'rb') as filehandle: # 02_five_fifty_R_c.data # read the data as binary data stream multi =pickle.load(filehandle) multi = multi[0][:219999] window_width= 100 cumsum_vec = np.cumsum(np.insert(multi, 0, 0)) multi = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width with open('Reward_multi_agent_RSUppo.data', 'rb') as filehandle: # 1_ddpg4442C_o # read the data as binary data stream rsu = pickle.load(filehandle) rsu = rsu[0][:219999] window_width= 100 cumsum_vec = np.cumsum(np.insert(multi, 0, 0)) multi = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width plt.plot(single, color='orange', linestyle='dotted', marker='x' ,label='Single_agent') # unused shared 'ppo_$Unused$' plt.plot(multi, color='red', linestyle='dashed', marker='D' ,label='Multi_agent') # unused shared plt.plot(rsu, color='blue', linestyle='--', marker='2' ,label='Multi_agent_RSU') # unused shared plt.ylabel('Reward', size= 8 ) #resource plt.xlabel('Epochs', size= 10) #'$'+pdf_plot[para]+'$ ' $'+var+'$ Communication range #plt.xticks((1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),size = 7) plt.yticks(size = 7) plt.grid() plt.legend()#ncol=1, bbox_to_anchor=(1, 0.5))#c_u plt.grid() plt.savefig('zz_reward_all.pdf') #abbbs_ b_test_five_'+var+'_plot.pdf #plt.show() print("End") """ import matplotlib.pyplot as plt import numpy as np import pickle with open('Reward_ppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data' # read the data as binary data stream single = pickle.load(filehandle) print("LEN SINGLE = ", len(single[0])) single = single[0][:20000] single = [ single[xx] for xx in range(len(single)) if xx%20==0 ] window_width= 100 cumsum_vec = np.cumsum(np.insert(single, 0, 0)) single = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width with open('Reward_multi_agentppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data' # read the data as 
binary data stream multi = pickle.load(filehandle) print("LEN multi = ", len(multi[0])) multi = multi[0][:20000] multi = [ multi[xx] for xx in range(len(multi)) if xx%20==0 ] window_width= 100 cumsum_vec = np.cumsum(np.insert(multi, 0, 0)) multi = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width with open('Reward_multi_agent_RSUppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data' # read the data as binary data stream rsu = pickle.load(filehandle) print("LEN rsu = ", len(rsu[0])) rsu = rsu[0][:20000] rsu = [ rsu[xx] for xx in range(len(rsu)) if xx%20==0 ] window_width= 100 cumsum_vec = np.cumsum(np.insert(rsu, 0, 0)) rsu = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width print("LEN SINGLE = ", len(single)) print("LEN multi = ", len(multi)) print("LEN rsu = ", len(rsu)) x = np.arange(len(single)) times = range(len(single)) # plot our data along a line fig,ax = plt.subplots() ax.plot(times, single, '-', color='tab:blue', linestyle='dotted', marker='x' ,label='Single_agent') ax.plot(times, multi, '-', color='tab:orange', linestyle='dashed', marker='D' ,label='Multi_agent') ax.plot(times, rsu, '-', color='tab:red', linestyle='--', marker='2' ,label='Multi_agent_RSU') ax.set_title('') plt.xticks(np.arange(min(x), max(x)+1, 200)) # [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2] plt.xlabel('Epochs', size= 10) ax.set_ylabel('Reward') # create a confidence band of +/- 10% error y_lower = [i - 0.1 * i for i in single] y_upper = [i + 0.1 * i for i in single] y_lower_multi = [i - 0.1 * i for i in multi] y_upper_multi = [i + 0.1 * i for i in multi] y_lower_rsu = [i - 0.1 * i for i in rsu] y_upper_rsu= [i + 0.1 * i for i in rsu] # plot our confidence band ax.fill_between(times, y_lower, y_upper, alpha=0.2, color='tab:blue') ax.fill_between(times, y_lower_multi, y_upper_multi, alpha=0.2, color='tab:orange') ax.fill_between(times, y_lower_rsu, y_upper_rsu, alpha=0.2, color='tab:red') print("min = ", min(x)) print("max = ", 
max(x)) print("len x = ", len(x)) plt.legend() plt.grid() plt.savefig('zz_reward_all.pdf') plt.show() #"""
rllib/examples/model/zzz_plot_simple_multi_all.py
# 1 simple # 2 multiple agent # plot reward of simple / multi-agent / rsu """ # 1***Simple agent ***** import matplotlib.pyplot as plt import pickle var = "rc" pdf_plot = var # R_c, C_o, C_u, k lstt = [var]#, "C_o", "C_u", "k"] for pdf_plot in lstt: with open('z_20ep_resources_'+var+'_ppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data' # read the data as binary data stream ppo = pickle.load(filehandle) zipped_lists = zip(ppo[0], ppo[1]) # zip of unused shared and own resources ppo_unused = [x + y for (x, y) in zipped_lists] # sum list zipped_lists = zip(ppo[2], ppo[3]) ppo_unsatisfied = [x + y for (x, y) in zipped_lists] with open('z_20ep_resources_'+var+'_ddpg.data', 'rb') as filehandle: # 02_five_fifty_R_c.data # read the data as binary data stream ddpg =pickle.load(filehandle) zipped_lists = zip(ddpg[0], ddpg[1]) # zip of unused shared and own resources ddpg_unused = [x + y for (x, y) in zipped_lists] # sum list zipped_lists = zip(ddpg[2], ddpg[3]) # zip of unused shared and own resources ddpg_unsatisfied = [x + y for (x, y) in zipped_lists] # sum list with open('z_20ep_resources_'+var+'_td3.data', 'rb') as filehandle: # 1_ddpg4442C_o # read the data as binary data stream td3 = pickle.load(filehandle) zipped_lists = zip(td3[0], td3[1]) # zip of unused shared and own resources td3_unused = [x + y for (x, y) in zipped_lists] # sum list zipped_lists = zip(td3[2], td3[3]) # zip of unused shared and own resources td3_unsatisfied = [x + y for (x, y) in zipped_lists] # sum list times = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20] plt.plot(times , ppo_unused, color='orange', linestyle='dotted', marker='x' ,label='PPO_unused') # unused shared 'ppo_$Unused$' plt.plot(times , ddpg_unused, color='red', linestyle='dashed', marker='D' ,label='DDPG_unused') # unused shared plt.plot(times , td3_unused, color='blue', linestyle='--', marker='2' ,label='TD3_unused') # unused shared plt.plot(times , ppo_unsatisfied, color='green', 
linestyle='dotted', marker='s' ,label='PPO_unsatisfied') # unused shared 'ppo_$Unused$' plt.plot(times , ddpg_unsatisfied, color='pink', linestyle='solid', marker='<' ,label='DDPG_unsatisfied') # unused shared plt.plot(times , td3_unsatisfied, color='brown', linestyle='--', marker='2' ,label='TD3_unsatisfied') # unused shared plt.ylabel('Caching Resources', size= 8 ) #resource plt.xlabel('$'+var+'$', size= 10) #'$'+pdf_plot[para]+'$ ' $'+var+'$ Communication range plt.xticks((1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),size = 7) plt.yticks(size = 7) plt.grid() plt.legend()#ncol=1, bbox_to_anchor=(1, 0.5))#c_u plt.grid() plt.savefig('zz_caching_'+var+'_g+o_z.pdf') #abbbs_ b_test_five_'+var+'_plot.pdf #plt.show() print("EEND") print("End") """ """ # 2***multi agent ***** import matplotlib.pyplot as plt import pickle var = "rc" pdf_plot = var # R_c, C_o, C_u, k lstt = [var]#, "C_o", "C_u", "k"] for pdf_plot in lstt: with open('z_20ep_multi_agent_'+var+'_ppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data' # read the data as binary data stream ppo = pickle.load(filehandle) zipped_lists = zip(ppo[0], ppo[1]) # zip of unused shared and own resources ppo_unused = [x + y for (x, y) in zipped_lists] # sum list zipped_lists = zip(ppo[2], ppo[3]) ppo_unsatisfied = [x + y for (x, y) in zipped_lists] with open('z_20ep_multi_agent_'+var+'_ddpg.data', 'rb') as filehandle: # 02_five_fifty_R_c.data # read the data as binary data stream ddpg =pickle.load(filehandle) zipped_lists = zip(ddpg[0], ddpg[1]) # zip of unused shared and own resources ddpg_unused = [x + y for (x, y) in zipped_lists] # sum list zipped_lists = zip(ddpg[2], ddpg[3]) # zip of unused shared and own resources ddpg_unsatisfied = [x + y for (x, y) in zipped_lists] # sum list with open('z_20ep_multi_agent_'+var+'_td3.data', 'rb') as filehandle: # 1_ddpg4442C_o # read the data as binary data stream td3 = pickle.load(filehandle) zipped_lists = zip(td3[0], td3[1]) # zip of unused shared 
and own resources td3_unused = [x + y for (x, y) in zipped_lists] # sum list zipped_lists = zip(td3[2], td3[3]) # zip of unused shared and own resources td3_unsatisfied = [x + y for (x, y) in zipped_lists] # sum list times = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20] plt.plot(times , ppo_unused, color='orange', linestyle='dotted', marker='x' ,label='PPO_unused') # unused shared 'ppo_$Unused$' plt.plot(times , ddpg_unused, color='red', linestyle='dashed', marker='D' ,label='DDPG_unused') # unused shared plt.plot(times , td3_unused, color='blue', linestyle='--', marker='2' ,label='TD3_unused') # unused shared plt.plot(times , ppo_unsatisfied, color='green', linestyle='dotted', marker='s' ,label='PPO_unsatisfied') # unused shared 'ppo_$Unused$' plt.plot(times , ddpg_unsatisfied, color='pink', linestyle='solid', marker='<' ,label='DDPG_unsatisfied') # unused shared plt.plot(times , td3_unsatisfied, color='brown', linestyle='--', marker='2' ,label='TD3_unsatisfied') # unused shared plt.ylabel('Caching Resources', size= 8 ) #resource plt.xlabel('$'+var+'$', size= 10) #'$'+pdf_plot[para]+'$ ' $'+var+'$ Communication range plt.xticks((1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),size = 7) plt.yticks(size = 7) plt.grid() plt.legend()#ncol=1, bbox_to_anchor=(1, 0.5))#c_u plt.grid() plt.savefig('zz_multi_caching_'+var+'_g+o_z.pdf') #abbbs_ b_test_five_'+var+'_plot.pdf #plt.show() print("End") """ """ # 3***reward simple / multi-agent / rsu ***** import matplotlib.pyplot as plt import pickle import numpy as np var = "k" pdf_plot = var # R_c, C_o, C_u, k lstt = [var]#, "C_o", "C_u", "k"] print("okokokok") for pdf_plot in lstt: with open('Reward_ppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data' # read the data as binary data stream single = pickle.load(filehandle) single = single[0][:219999] window_width= 100 cumsum_vec = np.cumsum(np.insert(single, 0, 0)) single = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / 
window_width with open('Reward_multi_agentppo.data', 'rb') as filehandle: # 02_five_fifty_R_c.data # read the data as binary data stream multi =pickle.load(filehandle) multi = multi[0][:219999] window_width= 100 cumsum_vec = np.cumsum(np.insert(multi, 0, 0)) multi = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width with open('Reward_multi_agent_RSUppo.data', 'rb') as filehandle: # 1_ddpg4442C_o # read the data as binary data stream rsu = pickle.load(filehandle) rsu = rsu[0][:219999] window_width= 100 cumsum_vec = np.cumsum(np.insert(multi, 0, 0)) multi = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width plt.plot(single, color='orange', linestyle='dotted', marker='x' ,label='Single_agent') # unused shared 'ppo_$Unused$' plt.plot(multi, color='red', linestyle='dashed', marker='D' ,label='Multi_agent') # unused shared plt.plot(rsu, color='blue', linestyle='--', marker='2' ,label='Multi_agent_RSU') # unused shared plt.ylabel('Reward', size= 8 ) #resource plt.xlabel('Epochs', size= 10) #'$'+pdf_plot[para]+'$ ' $'+var+'$ Communication range #plt.xticks((1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),size = 7) plt.yticks(size = 7) plt.grid() plt.legend()#ncol=1, bbox_to_anchor=(1, 0.5))#c_u plt.grid() plt.savefig('zz_reward_all.pdf') #abbbs_ b_test_five_'+var+'_plot.pdf #plt.show() print("End") """ import matplotlib.pyplot as plt import numpy as np import pickle with open('Reward_ppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data' # read the data as binary data stream single = pickle.load(filehandle) print("LEN SINGLE = ", len(single[0])) single = single[0][:20000] single = [ single[xx] for xx in range(len(single)) if xx%20==0 ] window_width= 100 cumsum_vec = np.cumsum(np.insert(single, 0, 0)) single = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width with open('Reward_multi_agentppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data' # read the data as 
binary data stream multi = pickle.load(filehandle) print("LEN multi = ", len(multi[0])) multi = multi[0][:20000] multi = [ multi[xx] for xx in range(len(multi)) if xx%20==0 ] window_width= 100 cumsum_vec = np.cumsum(np.insert(multi, 0, 0)) multi = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width with open('Reward_multi_agent_RSUppo.data', 'rb') as filehandle: # 1_ddpg4442C_o #07_five_rc_all_'+var+'.data' # read the data as binary data stream rsu = pickle.load(filehandle) print("LEN rsu = ", len(rsu[0])) rsu = rsu[0][:20000] rsu = [ rsu[xx] for xx in range(len(rsu)) if xx%20==0 ] window_width= 100 cumsum_vec = np.cumsum(np.insert(rsu, 0, 0)) rsu = (cumsum_vec[window_width:] - cumsum_vec[:-window_width]) / window_width print("LEN SINGLE = ", len(single)) print("LEN multi = ", len(multi)) print("LEN rsu = ", len(rsu)) x = np.arange(len(single)) times = range(len(single)) # plot our data along a line fig,ax = plt.subplots() ax.plot(times, single, '-', color='tab:blue', linestyle='dotted', marker='x' ,label='Single_agent') ax.plot(times, multi, '-', color='tab:orange', linestyle='dashed', marker='D' ,label='Multi_agent') ax.plot(times, rsu, '-', color='tab:red', linestyle='--', marker='2' ,label='Multi_agent_RSU') ax.set_title('') plt.xticks(np.arange(min(x), max(x)+1, 200)) # [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2] plt.xlabel('Epochs', size= 10) ax.set_ylabel('Reward') # create a confidence band of +/- 10% error y_lower = [i - 0.1 * i for i in single] y_upper = [i + 0.1 * i for i in single] y_lower_multi = [i - 0.1 * i for i in multi] y_upper_multi = [i + 0.1 * i for i in multi] y_lower_rsu = [i - 0.1 * i for i in rsu] y_upper_rsu= [i + 0.1 * i for i in rsu] # plot our confidence band ax.fill_between(times, y_lower, y_upper, alpha=0.2, color='tab:blue') ax.fill_between(times, y_lower_multi, y_upper_multi, alpha=0.2, color='tab:orange') ax.fill_between(times, y_lower_rsu, y_upper_rsu, alpha=0.2, color='tab:red') print("min = ", min(x)) print("max = ", 
max(x)) print("len x = ", len(x)) plt.legend() plt.grid() plt.savefig('zz_reward_all.pdf') plt.show() #"""
0.287368
0.479991
import os import shutil SCRIPTDIR = os.path.dirname(os.path.realpath(__file__)) TEMPLATEFILE = os.path.join(SCRIPTDIR, 'Template', 'template.c') TESTSDIR = os.path.join(SCRIPTDIR, 'Tests') function_pointer_list = [] STACK_UNIT_SIZE = 1024 STACK_UNIT_NUM = 3 def generate_functions(filename, includes, mainfunction, variables, targetcall): generated = open(os.path.join(TESTSDIR, filename+'.c'), "a") generated_header = open(os.path.join(TESTSDIR, filename+'.h'), "a") template = open(TEMPLATEFILE, "r") main_added = False vars_added = False # insert includes if exist if len(includes) != 0: for inc in includes.split(';'): generated.write('#include \"' + inc + '\"\n') # insert test main function for line in template: if main_added == True and vars_added == False: if line.strip() == "{": continue # insert variables if exist if len(variables) != 0: for var in variables.split(';'): if len(var.strip(' ')) == 0: continue generated.write("\t"+var.strip(' ')+";\n") generated.write(line) vars_added = True continue if line.strip() == "int stackUsage_Template()": generated_header.write("int "+mainfunction + "();\n") generated.write("int "+mainfunction + "()\n{\n") main_added = True if mainfunction in function_pointer_list: print "Error! 
This function already exist in the functions vector\n" raise Exception('This function: "'+ mainfunction + '" is already exist in the functions vector') function_pointer_list.append(mainfunction) elif line.strip() == "Template_Func();": generated.write("\t"+targetcall+"\n") else: generated.write(line) generated.write('/***********************************************************************/\n') generated.write("\n\n") generated.write('/***********************************************************************/\n') generated.close() generated_header.close() template.close() def generate_new_source_files(filename): if os.path.exists(TESTSDIR): shutil.rmtree(TESTSDIR) os.mkdir(TESTSDIR) generated_c = open(os.path.join(TESTSDIR, filename+'.c'), "w") generated_h = open(os.path.join(TESTSDIR, filename+'.h'), "w") generated_c.write('/* This file was generated automatically */\n') generated_c.write("#include \""+filename+".h\"\n") generated_c.write("\n\n") generated_c.close() generated_h.write('/**********************************************************************\n') generated_h.write('* This file was generated automatically and it describes the generated\n') generated_h.write('* Source file "'+filename+'.c" which contains stack usage test functions\n') generated_h.write('* for the listed functions in the "TargetFunction.txt" file.\n') generated_h.write('* Please do NOT edit these files (header and source) for requests please\n') generated_h.write('* send an email to PAL team leader: <EMAIL>in<EMAIL> \n') generated_h.write('**********************************************************************/\n\n') generated_h.write("#ifndef _FUNCTIONS_VECTOR_H_\n") generated_h.write("#define _FUNCTIONS_VECTOR_H_\n") generated_h.write("#include <stdlib.h>\n") generated_h.write("#include <stdio.h>\n") generated_h.write("#include <stdbool.h>\n") generated_h.write("#define STACK_UNIT_SIZE "+ str(STACK_UNIT_SIZE) +"\n") generated_h.write("#define STACK_UNIT_NUM "+ str(STACK_UNIT_NUM) +"\n") 
generated_h.write("char* paintStack();\n\n") generated_h.close() def create_functions_struct(filename): add_comma = False generated_h = open(os.path.join(TESTSDIR, filename+'.h'), "a") generated_h.write("\n\n#define BORDER_ARRAY_SIZE 32\n") generated_h.write("#define FUNC_NUM "+ str(len(function_pointer_list)) +"\n") generated_h.write("#define PATTERN_SIZE 4\n") generated_h.write("#define INIT_VALUE 190\n") generated_h.write("#define MAX_CHAR_VALUE 256\n") generated_h.write("\ntypedef struct{\n\tint (*fp)();\n\tchar name[64];\n}functionNode;\n") generated_h.write("\nstatic char memPattern[PATTERN_SIZE] = {0xDE, 0xAD, 0xFA, 0xCE};\n") generated_h.write("\nstatic functionNode funcArray[FUNC_NUM] = {\n") for func in function_pointer_list: if add_comma: generated_h.write(",\n") generated_h.write('{'+func+",\""+func[len("StackUsage_"):]+"()\"}") add_comma = True generated_h.write("};\n\n") generated_h.write("#endif //_FUNCTIONS_VECTOR_H_\n") generated_h.close() def create_runner_source(vector_filename): runner_c = open(os.path.join(TESTSDIR, 'TestSuiteRunner.c'), "w") runner_c.write('/* This file was generated automatically */\n') runner_c.write("#include \""+vector_filename+".h\"\n\n") runner_c.write("void main()\n{\n") runner_c.write("\tint i = 0;\n") runner_c.write("\tfor (; i < FUNC_NUM ; ++i)\n\t{\n") runner_c.write("\t\tprintf(\"%s function used: %d bytes in the stack\\n\", funcArray[i].name ,funcArray[i].fp());\n\t}\n") runner_c.write("\tprintf(\"Test Finished!\\n\");\n") runner_c.write("}\n") def create_runner_header(): generated_h = open(os.path.join(TESTSDIR, 'TestSuiteRunner.h'), "w") generated_h.write('/**********************************************************************\n') generated_h.write('* This file was generated automatically and it describes the generated\n') generated_h.write('* Source file "TestSuiteRunner.c" which contains the test runner function\n') generated_h.write('* for the listed functions in the "TargetFunction.txt" file.\n') 
generated_h.write('* Please do NOT edit these files (header and source) for requests please\n') generated_h.write('* send an email to PAL team \n') generated_h.write('**********************************************************************/\n\n') generated_h.write("#ifndef _TES_SUITE_RUNNER_H_\n") generated_h.write("#define _TES_SUITE_RUNNER_H_\n") generated_h.write("\nint TestSuiteRunner();\n\n") generated_h.write("#endif //_TES_SUITE_RUNNER_H_\n") def generate_paint_stack(): runner_c = open(os.path.join(TESTSDIR, 'functionsVector.c'), "a") runner_c.write("#pragma GCC diagnostic push\n") runner_c.write("#pragma GCC diagnostic ignored \"-Wreturn-local-addr\"\n") runner_c.write("/* We can not return the address of the (stackArr) directly, it will be NULL in run-time\n") runner_c.write("* Therefore new pointer (arrayPtr) required to hold the address of the (stackArr)*/\n") runner_c.write("char* paintStack()\n{\n") runner_c.write("\tchar* arrayPtr = NULL;\n") runner_c.write("\tchar stackArr[STACK_UNIT_NUM*STACK_UNIT_SIZE] = {0};\n") runner_c.write("\tint i = 0;\n") runner_c.write("\tfor(i=0; i < STACK_UNIT_NUM*STACK_UNIT_SIZE; ++i)\n\t{\n") runner_c.write("\t\t// Painting the stack with memory pattern (DEADFACE) XORed running index to make the stack more unique\n") runner_c.write("\t\tstackArr[i] = memPattern[i%PATTERN_SIZE] ^ (i%MAX_CHAR_VALUE);\n\t}\n") runner_c.write("\tarrayPtr = stackArr;\n") runner_c.write("\treturn arrayPtr;\n}\n") runner_c.write("#pragma GCC diagnostic pop\n\n") def main(): generate_new_source_files('functionsVector') f = open('TargetFunctions.txt', 'r') for line in f: if line.strip() == 'List Finished!': print "Code generation finished successfully" break if line[0] == "#" or line[0] == "\n": continue print "Reading new line: "+ line list = line.strip().split('$') generate_functions('functionsVector', list[0].strip(' '), list[1].strip(' '), list[2].strip(' '), list[3].strip(' ')) create_functions_struct('functionsVector') 
create_runner_source('functionsVector') generate_paint_stack() create_runner_header() if __name__ == '__main__': main()
mbed-client-pal/Utils/stackUsage/TestSuite/TestsGenarator.py
import os import shutil SCRIPTDIR = os.path.dirname(os.path.realpath(__file__)) TEMPLATEFILE = os.path.join(SCRIPTDIR, 'Template', 'template.c') TESTSDIR = os.path.join(SCRIPTDIR, 'Tests') function_pointer_list = [] STACK_UNIT_SIZE = 1024 STACK_UNIT_NUM = 3 def generate_functions(filename, includes, mainfunction, variables, targetcall): generated = open(os.path.join(TESTSDIR, filename+'.c'), "a") generated_header = open(os.path.join(TESTSDIR, filename+'.h'), "a") template = open(TEMPLATEFILE, "r") main_added = False vars_added = False # insert includes if exist if len(includes) != 0: for inc in includes.split(';'): generated.write('#include \"' + inc + '\"\n') # insert test main function for line in template: if main_added == True and vars_added == False: if line.strip() == "{": continue # insert variables if exist if len(variables) != 0: for var in variables.split(';'): if len(var.strip(' ')) == 0: continue generated.write("\t"+var.strip(' ')+";\n") generated.write(line) vars_added = True continue if line.strip() == "int stackUsage_Template()": generated_header.write("int "+mainfunction + "();\n") generated.write("int "+mainfunction + "()\n{\n") main_added = True if mainfunction in function_pointer_list: print "Error! 
This function already exist in the functions vector\n" raise Exception('This function: "'+ mainfunction + '" is already exist in the functions vector') function_pointer_list.append(mainfunction) elif line.strip() == "Template_Func();": generated.write("\t"+targetcall+"\n") else: generated.write(line) generated.write('/***********************************************************************/\n') generated.write("\n\n") generated.write('/***********************************************************************/\n') generated.close() generated_header.close() template.close() def generate_new_source_files(filename): if os.path.exists(TESTSDIR): shutil.rmtree(TESTSDIR) os.mkdir(TESTSDIR) generated_c = open(os.path.join(TESTSDIR, filename+'.c'), "w") generated_h = open(os.path.join(TESTSDIR, filename+'.h'), "w") generated_c.write('/* This file was generated automatically */\n') generated_c.write("#include \""+filename+".h\"\n") generated_c.write("\n\n") generated_c.close() generated_h.write('/**********************************************************************\n') generated_h.write('* This file was generated automatically and it describes the generated\n') generated_h.write('* Source file "'+filename+'.c" which contains stack usage test functions\n') generated_h.write('* for the listed functions in the "TargetFunction.txt" file.\n') generated_h.write('* Please do NOT edit these files (header and source) for requests please\n') generated_h.write('* send an email to PAL team leader: <EMAIL>in<EMAIL> \n') generated_h.write('**********************************************************************/\n\n') generated_h.write("#ifndef _FUNCTIONS_VECTOR_H_\n") generated_h.write("#define _FUNCTIONS_VECTOR_H_\n") generated_h.write("#include <stdlib.h>\n") generated_h.write("#include <stdio.h>\n") generated_h.write("#include <stdbool.h>\n") generated_h.write("#define STACK_UNIT_SIZE "+ str(STACK_UNIT_SIZE) +"\n") generated_h.write("#define STACK_UNIT_NUM "+ str(STACK_UNIT_NUM) +"\n") 
generated_h.write("char* paintStack();\n\n") generated_h.close() def create_functions_struct(filename): add_comma = False generated_h = open(os.path.join(TESTSDIR, filename+'.h'), "a") generated_h.write("\n\n#define BORDER_ARRAY_SIZE 32\n") generated_h.write("#define FUNC_NUM "+ str(len(function_pointer_list)) +"\n") generated_h.write("#define PATTERN_SIZE 4\n") generated_h.write("#define INIT_VALUE 190\n") generated_h.write("#define MAX_CHAR_VALUE 256\n") generated_h.write("\ntypedef struct{\n\tint (*fp)();\n\tchar name[64];\n}functionNode;\n") generated_h.write("\nstatic char memPattern[PATTERN_SIZE] = {0xDE, 0xAD, 0xFA, 0xCE};\n") generated_h.write("\nstatic functionNode funcArray[FUNC_NUM] = {\n") for func in function_pointer_list: if add_comma: generated_h.write(",\n") generated_h.write('{'+func+",\""+func[len("StackUsage_"):]+"()\"}") add_comma = True generated_h.write("};\n\n") generated_h.write("#endif //_FUNCTIONS_VECTOR_H_\n") generated_h.close() def create_runner_source(vector_filename): runner_c = open(os.path.join(TESTSDIR, 'TestSuiteRunner.c'), "w") runner_c.write('/* This file was generated automatically */\n') runner_c.write("#include \""+vector_filename+".h\"\n\n") runner_c.write("void main()\n{\n") runner_c.write("\tint i = 0;\n") runner_c.write("\tfor (; i < FUNC_NUM ; ++i)\n\t{\n") runner_c.write("\t\tprintf(\"%s function used: %d bytes in the stack\\n\", funcArray[i].name ,funcArray[i].fp());\n\t}\n") runner_c.write("\tprintf(\"Test Finished!\\n\");\n") runner_c.write("}\n") def create_runner_header(): generated_h = open(os.path.join(TESTSDIR, 'TestSuiteRunner.h'), "w") generated_h.write('/**********************************************************************\n') generated_h.write('* This file was generated automatically and it describes the generated\n') generated_h.write('* Source file "TestSuiteRunner.c" which contains the test runner function\n') generated_h.write('* for the listed functions in the "TargetFunction.txt" file.\n') 
generated_h.write('* Please do NOT edit these files (header and source) for requests please\n') generated_h.write('* send an email to PAL team \n') generated_h.write('**********************************************************************/\n\n') generated_h.write("#ifndef _TES_SUITE_RUNNER_H_\n") generated_h.write("#define _TES_SUITE_RUNNER_H_\n") generated_h.write("\nint TestSuiteRunner();\n\n") generated_h.write("#endif //_TES_SUITE_RUNNER_H_\n") def generate_paint_stack(): runner_c = open(os.path.join(TESTSDIR, 'functionsVector.c'), "a") runner_c.write("#pragma GCC diagnostic push\n") runner_c.write("#pragma GCC diagnostic ignored \"-Wreturn-local-addr\"\n") runner_c.write("/* We can not return the address of the (stackArr) directly, it will be NULL in run-time\n") runner_c.write("* Therefore new pointer (arrayPtr) required to hold the address of the (stackArr)*/\n") runner_c.write("char* paintStack()\n{\n") runner_c.write("\tchar* arrayPtr = NULL;\n") runner_c.write("\tchar stackArr[STACK_UNIT_NUM*STACK_UNIT_SIZE] = {0};\n") runner_c.write("\tint i = 0;\n") runner_c.write("\tfor(i=0; i < STACK_UNIT_NUM*STACK_UNIT_SIZE; ++i)\n\t{\n") runner_c.write("\t\t// Painting the stack with memory pattern (DEADFACE) XORed running index to make the stack more unique\n") runner_c.write("\t\tstackArr[i] = memPattern[i%PATTERN_SIZE] ^ (i%MAX_CHAR_VALUE);\n\t}\n") runner_c.write("\tarrayPtr = stackArr;\n") runner_c.write("\treturn arrayPtr;\n}\n") runner_c.write("#pragma GCC diagnostic pop\n\n") def main(): generate_new_source_files('functionsVector') f = open('TargetFunctions.txt', 'r') for line in f: if line.strip() == 'List Finished!': print "Code generation finished successfully" break if line[0] == "#" or line[0] == "\n": continue print "Reading new line: "+ line list = line.strip().split('$') generate_functions('functionsVector', list[0].strip(' '), list[1].strip(' '), list[2].strip(' '), list[3].strip(' ')) create_functions_struct('functionsVector') 
create_runner_source('functionsVector') generate_paint_stack() create_runner_header() if __name__ == '__main__': main()
0.121829
0.05902
import time from decimal import Decimal to_year = time.strftime("%Y", time.localtime()) # 教师总表转dict(管理员查看) def teacher_workload_to_dict(item, year=to_year, confirm_status=''): lic = [] total_money = 0 temp = {} scientific_money = 0 for i in item: if confirm_status == '': if i.teaching_workload_user: for a in i.teaching_workload_user: if a.year == year: total_money += a.total_money temp.update({ 'id': i.id, 'username': i.name, 'workNumber': i.work_number, 'jobCatecory': i.job_catecory, 'teacherTitle': i.teacher_title, 'teachingWorkloadTotalMoney': a.total_money, 'confirmStatus': a.confirm_status }) if i.scientific_workload_user: for a in i.scientific_workload_user: if a.year == year: total_money += a.scientific_money scientific_money += a.scientific_money temp.update({'scientificWorkloadTotalMoney': scientific_money}) if i.others_workload_user: for a in i.others_workload_user: if a.year == year: total_money += a.total_money temp.update({'othersWorkloadTotalMoney': a.total_money}) if i.counselors_workload_user: for a in i.counselors_workload_user: if a.year == year: total_money += a.total_money temp.update({'counselorsWorkloadTotalMoney': a.total_money}) temp.update({'totalMoney': total_money}) lic.append(temp) total_money = 0 temp = {} scientific_money = 0 else: if i.teaching_workload_user: for a in i.teaching_workload_user: if a.year == year and a.confirm_status == confirm_status: total_money += a.total_money temp.update({ 'id': i.id, 'username': i.name, 'workNumber': i.work_number, 'jobCatecory': i.job_catecory, 'teacherTitle': i.teacher_title, 'teachingWorkloadTotalMoney': a.total_money, 'confirmStatus': a.confirm_status }) if i.scientific_workload_user: for a in i.scientific_workload_user: if a.year == year: total_money += a.scientific_money scientific_money += a.scientific_money temp.update({'scientificWorkloadTotalMoney': scientific_money}) if i.others_workload_user: for a in i.others_workload_user: if a.year == year: total_money += a.total_money 
temp.update({'othersWorkloadTotalMoney': a.total_money}) temp.update({'totalMoney': total_money}) lic.append(temp) total_money = 0 temp = {} scientific_money = 0 return lic # 工作量参数转dict def workload_options_to_dict(item): lic = [] for i in item: lic.append( { 'id': i.id, 'key': i.key_name, 'score': i.score, 'display_name': i.workload_name } ) return lic # 教学工作量转dict def teaching_workload_to_dict(item): lic = [] for i in item: lic.append( { 'id': i.id, 'teachingWorkload': i.teaching_workload, 'teachingQualifiedWorkload': i.teaching_qualified_workload, 'teachingExcellentWorkload': i.teaching_excellent_workload, 'teachingBeyondWorkload': i.teaching_beyond_workload, 'teachingBeyondWorkloadNum': i.teaching_beyond_workload_num, 'teachingBeyondWorkloadMoney': i.teaching_beyond_workload_money, 'userWorkload': i.user_workload, 'totalMoney': i.total_money, 'year': i.year, 'confirmStatus': i.confirm_status } ) return lic # 系数表转dict def coefficient_to_dict(item): lic = [] for i in item: lic.append( { 'id': i.id, 'name': i.name, 'coefficient': i.coefficient } ) return lic # float转decimal def float_to_decimal(data): return Decimal.from_float(data).quantize(Decimal('0.00')) # 用户信息转dict def user_to_dict(item): lic = [] for i in item: lic.append( { 'id': i.id, 'name': i.name, 'work_number': i.work_number, 'job_catecory': i.job_catecory, 'teacher_title': i.teacher_title, 'teacher_title_num': i.teacher_title_num, 'teacher_postion': i.teacher_postion, 'teacher_postion_num': i.teacher_postion_num, 'notes': i.notes, } )
app/utils/utils.py
import time from decimal import Decimal to_year = time.strftime("%Y", time.localtime()) # 教师总表转dict(管理员查看) def teacher_workload_to_dict(item, year=to_year, confirm_status=''): lic = [] total_money = 0 temp = {} scientific_money = 0 for i in item: if confirm_status == '': if i.teaching_workload_user: for a in i.teaching_workload_user: if a.year == year: total_money += a.total_money temp.update({ 'id': i.id, 'username': i.name, 'workNumber': i.work_number, 'jobCatecory': i.job_catecory, 'teacherTitle': i.teacher_title, 'teachingWorkloadTotalMoney': a.total_money, 'confirmStatus': a.confirm_status }) if i.scientific_workload_user: for a in i.scientific_workload_user: if a.year == year: total_money += a.scientific_money scientific_money += a.scientific_money temp.update({'scientificWorkloadTotalMoney': scientific_money}) if i.others_workload_user: for a in i.others_workload_user: if a.year == year: total_money += a.total_money temp.update({'othersWorkloadTotalMoney': a.total_money}) if i.counselors_workload_user: for a in i.counselors_workload_user: if a.year == year: total_money += a.total_money temp.update({'counselorsWorkloadTotalMoney': a.total_money}) temp.update({'totalMoney': total_money}) lic.append(temp) total_money = 0 temp = {} scientific_money = 0 else: if i.teaching_workload_user: for a in i.teaching_workload_user: if a.year == year and a.confirm_status == confirm_status: total_money += a.total_money temp.update({ 'id': i.id, 'username': i.name, 'workNumber': i.work_number, 'jobCatecory': i.job_catecory, 'teacherTitle': i.teacher_title, 'teachingWorkloadTotalMoney': a.total_money, 'confirmStatus': a.confirm_status }) if i.scientific_workload_user: for a in i.scientific_workload_user: if a.year == year: total_money += a.scientific_money scientific_money += a.scientific_money temp.update({'scientificWorkloadTotalMoney': scientific_money}) if i.others_workload_user: for a in i.others_workload_user: if a.year == year: total_money += a.total_money 
temp.update({'othersWorkloadTotalMoney': a.total_money}) temp.update({'totalMoney': total_money}) lic.append(temp) total_money = 0 temp = {} scientific_money = 0 return lic # 工作量参数转dict def workload_options_to_dict(item): lic = [] for i in item: lic.append( { 'id': i.id, 'key': i.key_name, 'score': i.score, 'display_name': i.workload_name } ) return lic # 教学工作量转dict def teaching_workload_to_dict(item): lic = [] for i in item: lic.append( { 'id': i.id, 'teachingWorkload': i.teaching_workload, 'teachingQualifiedWorkload': i.teaching_qualified_workload, 'teachingExcellentWorkload': i.teaching_excellent_workload, 'teachingBeyondWorkload': i.teaching_beyond_workload, 'teachingBeyondWorkloadNum': i.teaching_beyond_workload_num, 'teachingBeyondWorkloadMoney': i.teaching_beyond_workload_money, 'userWorkload': i.user_workload, 'totalMoney': i.total_money, 'year': i.year, 'confirmStatus': i.confirm_status } ) return lic # 系数表转dict def coefficient_to_dict(item): lic = [] for i in item: lic.append( { 'id': i.id, 'name': i.name, 'coefficient': i.coefficient } ) return lic # float转decimal def float_to_decimal(data): return Decimal.from_float(data).quantize(Decimal('0.00')) # 用户信息转dict def user_to_dict(item): lic = [] for i in item: lic.append( { 'id': i.id, 'name': i.name, 'work_number': i.work_number, 'job_catecory': i.job_catecory, 'teacher_title': i.teacher_title, 'teacher_title_num': i.teacher_title_num, 'teacher_postion': i.teacher_postion, 'teacher_postion_num': i.teacher_postion_num, 'notes': i.notes, } )
0.180359
0.203411
class ThoughtSpotTable(object): """ A class that represents a ThoughtSpot table. ... Attributes ---------- logger: an instance of the Python logging class The logging class used by the application. There may be more pythonic ways to handle. TBD database: python list Used to consolidate all the stdout messages schema: python list Used to consolidate all the stderr messages table_name: str Used to pass the Thoughtspot table name columns: python dictionary A python dictionary of the column names and types for the table primary_keys: str A comma delimited string of primary keys for the table foreign_keys: python Series A series of foreign keys and the appropriate metadata for the alter table commands application Methods ------- __init__table() Instantiates the creation of the ThoughtSpot specific table metadata get_schema(name, column_df) Creates the DDL for the create table statement that is specific to ThoughtSpot add_constraints() Creates the DDL for the alter table statements that are specific to ThoughtSpot """ def __init__(self, logger, database, schema, table_name, columns, primary_keys, foreign_keys, partition_keys, hash_number): """ :param logger: An instance of the python Logger class :param database: A string that represents the name of the ThoughtSpot database :param schema: A string that represents the schema used for the ThoughtSpot database :param table_name: A string that represents the table to be created :param columns: A python dictionary with the Column Name and ThoughtSpot column type for the DDL :param primary_keys: A comma separated string that represents the primary keys. 
:param foreign_keys: A series of foreign keys and the appropriate metadata for the alter table commands :param partition_keys: A series of columns and the appropriate metadata for the partition statement :param hash_number: The number of shards for the table """ self.logger = logger self.database = database self.schema = schema self.table_name = table_name self.columns = columns self.primary_key = primary_keys self.foreign_keys = foreign_keys self.partition_keys = partition_keys self.hash_number = hash_number self.ddl_string = "" self.alter_statements = "" self.__init__table() def __repr__(self): return self.ddl_string def __init__table(self): """ controller method to create DDL for the drop and create statement that meets the tql specification :return: Instantiated ThoughtSpot table Object """ self.ddl_string = "" self.alter_statements = "" self.ddl_string += ("\nUSE \"%s\";\n" % self.database) if self.schema != "falcon_default_schema": self.ddl_string += ("CREATE SCHEMA \"%s\";\n" % self.schema) self.table_name = ('\"%s\".\"%s\"' % (self.schema, self.table_name)) self.ddl_string += "\nDROP TABLE %s;\n" % self.table_name self.ddl_string += ("\n%s" % self.get_schema(self.table_name, self.columns)) if self.primary_key is not None: self.ddl_string += ("CONSTRAINT PRIMARY KEY (\"%s\")\n" % self.primary_key) else: self.ddl_string = self.ddl_string[:-4] self.ddl_string = self.ddl_string + ")\n" self.logger.info(self.ddl_string) if self.hash_number > 1: self.ddl_string += ("PARTITION BY HASH (%d)\n " % self.hash_number) if self.partition_keys is not None: self.ddl_string += ("KEY (\"%s\")\n" % self.partition_keys) # else: # self.ddl_string = self.ddl_string[:-4] self.ddl_string = self.ddl_string + ";\n" self.logger.info(self.ddl_string) self.add_constraints() @staticmethod def get_schema(name, columns_series): """ Specifically creates the create table section of the DDL :param name: The name of the table :param columns_series: A series of columns and corresponding 
ThoughtSpot column types :return: Create table portion of the ddl """ columns = "" for index, row in columns_series.iterrows(): columns += '"{}" {},\n '.format(row['Column'], row['Column_Type']) template_create = """CREATE TABLE %(name)s ( %(columns)s""" create_ddl = template_create % {'name': name, 'columns': columns} return create_ddl def add_constraints(self): """ Formats the Alter Table DDL statements that meet tql specification :return: NA """ if len(self.foreign_keys) > 0: self.alter_statements += ("\nUSE \"%s\";\n" % self.database) foreign_keys_grouped = self.foreign_keys.groupby('FK_NAME') for FK_NAME, foreign_keys in foreign_keys_grouped: table_column_str = '", "'.join(str(i) for i in foreign_keys.COLUMN_NAME) foreign_column_str = '", "'.join(str(i) for i in foreign_keys.FOREIGN_KEY) foreign_table = foreign_keys['FOREIGN_TABLE'].iloc[0] self.alter_statements += ("\nALTER TABLE \"%s\".\"%s\"\n" % (self.schema, self.table_name)) self.alter_statements += (" ADD CONSTRAINT \"%s\" FOREIGN KEY (\"%s\")\n" % (FK_NAME, table_column_str)) self.alter_statements += (" REFERENCES \"%s\".\"%s\" (\"%s\");\n" % (self.schema, foreign_table, foreign_column_str)) self.logger.debug(self.alter_statements)
Alteryx_Bulk_Loader/Code/tsLoad/ts_table.py
class ThoughtSpotTable(object): """ A class that represents a ThoughtSpot table. ... Attributes ---------- logger: an instance of the Python logging class The logging class used by the application. There may be more pythonic ways to handle. TBD database: python list Used to consolidate all the stdout messages schema: python list Used to consolidate all the stderr messages table_name: str Used to pass the Thoughtspot table name columns: python dictionary A python dictionary of the column names and types for the table primary_keys: str A comma delimited string of primary keys for the table foreign_keys: python Series A series of foreign keys and the appropriate metadata for the alter table commands application Methods ------- __init__table() Instantiates the creation of the ThoughtSpot specific table metadata get_schema(name, column_df) Creates the DDL for the create table statement that is specific to ThoughtSpot add_constraints() Creates the DDL for the alter table statements that are specific to ThoughtSpot """ def __init__(self, logger, database, schema, table_name, columns, primary_keys, foreign_keys, partition_keys, hash_number): """ :param logger: An instance of the python Logger class :param database: A string that represents the name of the ThoughtSpot database :param schema: A string that represents the schema used for the ThoughtSpot database :param table_name: A string that represents the table to be created :param columns: A python dictionary with the Column Name and ThoughtSpot column type for the DDL :param primary_keys: A comma separated string that represents the primary keys. 
:param foreign_keys: A series of foreign keys and the appropriate metadata for the alter table commands :param partition_keys: A series of columns and the appropriate metadata for the partition statement :param hash_number: The number of shards for the table """ self.logger = logger self.database = database self.schema = schema self.table_name = table_name self.columns = columns self.primary_key = primary_keys self.foreign_keys = foreign_keys self.partition_keys = partition_keys self.hash_number = hash_number self.ddl_string = "" self.alter_statements = "" self.__init__table() def __repr__(self): return self.ddl_string def __init__table(self): """ controller method to create DDL for the drop and create statement that meets the tql specification :return: Instantiated ThoughtSpot table Object """ self.ddl_string = "" self.alter_statements = "" self.ddl_string += ("\nUSE \"%s\";\n" % self.database) if self.schema != "falcon_default_schema": self.ddl_string += ("CREATE SCHEMA \"%s\";\n" % self.schema) self.table_name = ('\"%s\".\"%s\"' % (self.schema, self.table_name)) self.ddl_string += "\nDROP TABLE %s;\n" % self.table_name self.ddl_string += ("\n%s" % self.get_schema(self.table_name, self.columns)) if self.primary_key is not None: self.ddl_string += ("CONSTRAINT PRIMARY KEY (\"%s\")\n" % self.primary_key) else: self.ddl_string = self.ddl_string[:-4] self.ddl_string = self.ddl_string + ")\n" self.logger.info(self.ddl_string) if self.hash_number > 1: self.ddl_string += ("PARTITION BY HASH (%d)\n " % self.hash_number) if self.partition_keys is not None: self.ddl_string += ("KEY (\"%s\")\n" % self.partition_keys) # else: # self.ddl_string = self.ddl_string[:-4] self.ddl_string = self.ddl_string + ";\n" self.logger.info(self.ddl_string) self.add_constraints() @staticmethod def get_schema(name, columns_series): """ Specifically creates the create table section of the DDL :param name: The name of the table :param columns_series: A series of columns and corresponding 
ThoughtSpot column types :return: Create table portion of the ddl """ columns = "" for index, row in columns_series.iterrows(): columns += '"{}" {},\n '.format(row['Column'], row['Column_Type']) template_create = """CREATE TABLE %(name)s ( %(columns)s""" create_ddl = template_create % {'name': name, 'columns': columns} return create_ddl def add_constraints(self): """ Formats the Alter Table DDL statements that meet tql specification :return: NA """ if len(self.foreign_keys) > 0: self.alter_statements += ("\nUSE \"%s\";\n" % self.database) foreign_keys_grouped = self.foreign_keys.groupby('FK_NAME') for FK_NAME, foreign_keys in foreign_keys_grouped: table_column_str = '", "'.join(str(i) for i in foreign_keys.COLUMN_NAME) foreign_column_str = '", "'.join(str(i) for i in foreign_keys.FOREIGN_KEY) foreign_table = foreign_keys['FOREIGN_TABLE'].iloc[0] self.alter_statements += ("\nALTER TABLE \"%s\".\"%s\"\n" % (self.schema, self.table_name)) self.alter_statements += (" ADD CONSTRAINT \"%s\" FOREIGN KEY (\"%s\")\n" % (FK_NAME, table_column_str)) self.alter_statements += (" REFERENCES \"%s\".\"%s\" (\"%s\");\n" % (self.schema, foreign_table, foreign_column_str)) self.logger.debug(self.alter_statements)
0.75037
0.307631
import openpyxl import pprint import itertools from Emotions import * from Xlsx_operation import * from Parsing import * # xlsxファイル読み取り wb = openpyxl.load_workbook('origin_dic_emo.xlsx') sheet = wb['感情分類'] sheet1 = wb['作業者A'] sheet2 = wb['作業者B'] sheet3 = wb['作業者C'] def get_value_list(t_2d): return([[cell.value for cell in row] for row in t_2d]) def get_list_2d(sheet, start_row, end_row, start_col, end_col): return get_value_list(sheet.iter_rows(min_row=start_row, max_row=end_row, min_col=start_col, max_col=end_col)) l_2d1 = get_list_2d(sheet, 2, 50, 1, 2) # モデル読み込み model = gensim.models.KeyedVectors.load_word2vec_format('model.vec', binary=False) #「非常にポジティブな単語」と「非常にネガティブな単語」を任意で指定 '''happy = ['喜び'], angle = ['怒り'], sad = ['悲しい'], joy = ['楽しい']''' happy = [] angle = [] sad = [] joy = [] for i in l_2d1: if i[1] == '喜': happy.append(i[0]) elif i[1] == '怒': angle.append(i[0]) elif i[1] == '哀': sad.append(i[0]) elif i[1] == '楽': joy.append(i[0]) x = '' setup = Emotion(happy, angle, sad, joy, model) ### ___名詞用xlsx読み込み処理 xe = XlsxExist() # elsxファイルがあるか確認 xe.xlsxCheck() xwr = XlsxWR() # 名詞用elsxファイルの読み書き loaddata = xwr.xlsxRead() # 過去の名詞感情データ読み取り print(loaddata) worddic = {} # 辞書作成 loaddataを入れる # worddic辞書の形で既存の情報を読みとり for l in loaddata: # key = 名詞 value = [NOUN, ポジネガの値] if None not in l: worddic[l[0]] = l[1:] print('worddicの過去名詞感情データ') print(worddic) ### _GiNZAを使って構文解析する ps = Parsing(spacy.load('ja_ginza'), setup) # メイン処理 print('終了する際は end と入力してください\n') while True: x = input() text = ps.word_pars(x,worddic) if x == 'end': break # xlsxfileのリストに代入処理 ------------------------------- # worddicに入っているxlsxの内容と入力された単語の感情情報を付加したものを比較して更新 text = list(itertools.chain.from_iterable(text)) # textの返り値 # [[None, None, None], ['店', 'NOUN', -1], ['ごはん', 'NOUN', -1]] や [['Linux', 'NOUN', -1], ['Windows', 'NOUN', 0], ['ゴミ', 'NOUN', 0]] # 同じ言葉が二重に入っていた際に最後の値を残す for x,j in zip(text[:-1], text[1:]): if x[0] == j[0]: text.remove(x) # worddicに入力されたtextを代入、値が変わっていれば更新 for w in text: if w[0] in 
worddic: pos_neg = worddic[w[0]][-1] + w[-1] if pos_neg < 0: pos_neg = -1 elif pos_neg > 0: pos_neg = 1 else: pos_neg = 0 worddic[w[0]] = [w[1],pos_neg] else: worddic[w[0]] = w[1:] print('最新辞書情報') print(worddic) # xlsxファイルに書き込むためリストに変更 wordall= [] for k, v in worddic.items(): wordall.append([k,v[0],v[1]]) print(wordall) xwr.xlsxWrite(wordall) # 名詞感情データ保存 xwr.xlsxWrite(worddic) print('名詞感情データに保存しました。')
index.py
import openpyxl import pprint import itertools from Emotions import * from Xlsx_operation import * from Parsing import * # xlsxファイル読み取り wb = openpyxl.load_workbook('origin_dic_emo.xlsx') sheet = wb['感情分類'] sheet1 = wb['作業者A'] sheet2 = wb['作業者B'] sheet3 = wb['作業者C'] def get_value_list(t_2d): return([[cell.value for cell in row] for row in t_2d]) def get_list_2d(sheet, start_row, end_row, start_col, end_col): return get_value_list(sheet.iter_rows(min_row=start_row, max_row=end_row, min_col=start_col, max_col=end_col)) l_2d1 = get_list_2d(sheet, 2, 50, 1, 2) # モデル読み込み model = gensim.models.KeyedVectors.load_word2vec_format('model.vec', binary=False) #「非常にポジティブな単語」と「非常にネガティブな単語」を任意で指定 '''happy = ['喜び'], angle = ['怒り'], sad = ['悲しい'], joy = ['楽しい']''' happy = [] angle = [] sad = [] joy = [] for i in l_2d1: if i[1] == '喜': happy.append(i[0]) elif i[1] == '怒': angle.append(i[0]) elif i[1] == '哀': sad.append(i[0]) elif i[1] == '楽': joy.append(i[0]) x = '' setup = Emotion(happy, angle, sad, joy, model) ### ___名詞用xlsx読み込み処理 xe = XlsxExist() # elsxファイルがあるか確認 xe.xlsxCheck() xwr = XlsxWR() # 名詞用elsxファイルの読み書き loaddata = xwr.xlsxRead() # 過去の名詞感情データ読み取り print(loaddata) worddic = {} # 辞書作成 loaddataを入れる # worddic辞書の形で既存の情報を読みとり for l in loaddata: # key = 名詞 value = [NOUN, ポジネガの値] if None not in l: worddic[l[0]] = l[1:] print('worddicの過去名詞感情データ') print(worddic) ### _GiNZAを使って構文解析する ps = Parsing(spacy.load('ja_ginza'), setup) # メイン処理 print('終了する際は end と入力してください\n') while True: x = input() text = ps.word_pars(x,worddic) if x == 'end': break # xlsxfileのリストに代入処理 ------------------------------- # worddicに入っているxlsxの内容と入力された単語の感情情報を付加したものを比較して更新 text = list(itertools.chain.from_iterable(text)) # textの返り値 # [[None, None, None], ['店', 'NOUN', -1], ['ごはん', 'NOUN', -1]] や [['Linux', 'NOUN', -1], ['Windows', 'NOUN', 0], ['ゴミ', 'NOUN', 0]] # 同じ言葉が二重に入っていた際に最後の値を残す for x,j in zip(text[:-1], text[1:]): if x[0] == j[0]: text.remove(x) # worddicに入力されたtextを代入、値が変わっていれば更新 for w in text: if w[0] in 
worddic: pos_neg = worddic[w[0]][-1] + w[-1] if pos_neg < 0: pos_neg = -1 elif pos_neg > 0: pos_neg = 1 else: pos_neg = 0 worddic[w[0]] = [w[1],pos_neg] else: worddic[w[0]] = w[1:] print('最新辞書情報') print(worddic) # xlsxファイルに書き込むためリストに変更 wordall= [] for k, v in worddic.items(): wordall.append([k,v[0],v[1]]) print(wordall) xwr.xlsxWrite(wordall) # 名詞感情データ保存 xwr.xlsxWrite(worddic) print('名詞感情データに保存しました。')
0.130175
0.322366
from itertools import combinations rows = 'ABCDEFGHI' cols = '123456789' digits = '123456789' assignments = [] def assign_value(values, box, value): """ Please use this function to update your values dictionary! Assigns a value to a given box. If it updates the board record it. """ values[box] = value if len(value) == 1: assignments.append(values.copy()) return values def cross(A, B): "Cross product of elements in A and elements in B." return [s + t for s in A for t in B] boxes = cross(rows, cols) row_units = [cross(r, cols) for r in rows] column_units = [cross(rows, c) for c in cols] square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')] # Two additional diagonal units diagonal_units = [list(map(''.join, zip(rows, cols)))] + [list(map(''.join, zip(rows, cols[::-1])))] unitlist = row_units + column_units + square_units + diagonal_units units = dict((s, [u for u in unitlist if s in u]) for s in boxes) peers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes) def grid_values(grid): """ Convert grid into a dict of {square: char} with '123456789' for empties. Args: grid(string) - A grid in string form. Returns: A grid in dictionary form Keys: The boxes, e.g., 'A1' Values: The value in each box, e.g., '8'. If the box has no value, then the value will be '123456789'. """ assert(len(grid) == len(rows) * len(cols)) return dict(zip(boxes, [c if c != '.' else '123456789' for c in grid])) def display(values): """ Display the values as a 2-D grid. Args: values(dict): The sudoku in dictionary form """ width = 1 + max(len(values[s]) for s in boxes) line = '+'.join(['-' * (width * 3)] * 3) for r in rows: print(''.join(values[r + c].center(width) + ('|' if c in '36' else '') for c in cols)) if r in 'CF': print(line) print def eliminate(values): """Eliminate values from peers of each box with a single value. 
Go through all the boxes, and whenever there is a box with a single value, eliminate this value from the set of values of all its peers. Args: values: Sudoku in dictionary form. Returns: Resulting Sudoku in dictionary form after eliminating values. """ solved_values = [box for box in values.keys() if len(values[box]) == 1] for box in solved_values: digit = values[box] for peer in peers[box]: assign_value(values, peer, values[peer].replace(digit, '')) return values def only_choice(values): """Finalize all values that are the only choice for a unit. Go through all the units, and whenever there is a unit with a value that only fits in one box, assign the value to this box. Input: Sudoku in dictionary form. Output: Resulting Sudoku in dictionary form after filling in only choices. """ for unit in unitlist: for digit in '123456789': dplaces = [box for box in unit if digit in values[box]] if len(dplaces) == 1: assign_value(values, dplaces[0], digit) return values def naked_twins(values): """Eliminate values using the naked twins strategy. Args: values(dict): a dictionary of the form {'box_name': '123456789', ...} Returns: the values dictionary with the naked twins eliminated from peers. """ # Get all pairs of 2 digits. Note, combinations are always sorted allpairs = [d[0] + d[1] for d in combinations(digits, 2)] for unit in unitlist: # Create a dictionary of (digits_pair, count) d = {pair : 0 for pair in allpairs} for box in unit: if len(values[box]) == 2: d[values[box]] += 1 # Combine found twins in a single string whatreplace = '' for s in allpairs: if d[s] > 1: whatreplace += s if whatreplace: # Create a translation table transtable = str.maketrans(whatreplace, ' ' * len(whatreplace)) # Eliminate twins digits using the translation table for box in unit: if len(values[box]) > 2: assign_value(values, box, values[box].translate(transtable).replace(' ', '')) return values def reduce_puzzle(values): """ Iterate eliminate() and only_choice(). 
If at some point, there is a box with no available values, return False. If the sudoku is solved, return the sudoku. If after an iteration of both functions, the sudoku remains the same, return the sudoku. Input: A sudoku in dictionary form. Output: The resulting sudoku in dictionary form. """ solved_values = [box for box in values.keys() if len(values[box]) == 1] stalled = False while not stalled: solved_values_before = len([box for box in values.keys() if len(values[box]) == 1]) values = eliminate(values) values = only_choice(values) values = naked_twins(values) solved_values_after = len([box for box in values.keys() if len(values[box]) == 1]) stalled = solved_values_before == solved_values_after if len([box for box in values.keys() if len(values[box]) == 0]): return False return values def search(values): values = reduce_puzzle(values) if not values: return False # Choose one of the unfilled squares with the fewest possibilities # The Key function returns 10 for single digit boxes so they cannot be # returned if there are any unfilled squares min_key = min(values, key=lambda k: len(values[k]) if len(values[k]) > 1 else 10) if len(values[min_key]) == 1: # We found a solution! return values else: digits = list(values[min_key]) for d in digits: assign_value(values, min_key, d) # Recursively call search and with a new copy of the values solved_values = search(values.copy()) if solved_values: return solved_values return False def solve(grid): """ Find the solution to a Sudoku grid. Args: grid(string): a string representing a sudoku grid. Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3' Returns: The dictionary representation of the final sudoku grid. False if no solution exists. 
""" return search(grid_values(grid)) if __name__ == '__main__': diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3' display(solve(diag_sudoku_grid)) try: from visualize import visualize_assignments visualize_assignments(assignments) except SystemExit: pass except: print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
AIND-Sudoku/solution.py
from itertools import combinations rows = 'ABCDEFGHI' cols = '123456789' digits = '123456789' assignments = [] def assign_value(values, box, value): """ Please use this function to update your values dictionary! Assigns a value to a given box. If it updates the board record it. """ values[box] = value if len(value) == 1: assignments.append(values.copy()) return values def cross(A, B): "Cross product of elements in A and elements in B." return [s + t for s in A for t in B] boxes = cross(rows, cols) row_units = [cross(r, cols) for r in rows] column_units = [cross(rows, c) for c in cols] square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')] # Two additional diagonal units diagonal_units = [list(map(''.join, zip(rows, cols)))] + [list(map(''.join, zip(rows, cols[::-1])))] unitlist = row_units + column_units + square_units + diagonal_units units = dict((s, [u for u in unitlist if s in u]) for s in boxes) peers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes) def grid_values(grid): """ Convert grid into a dict of {square: char} with '123456789' for empties. Args: grid(string) - A grid in string form. Returns: A grid in dictionary form Keys: The boxes, e.g., 'A1' Values: The value in each box, e.g., '8'. If the box has no value, then the value will be '123456789'. """ assert(len(grid) == len(rows) * len(cols)) return dict(zip(boxes, [c if c != '.' else '123456789' for c in grid])) def display(values): """ Display the values as a 2-D grid. Args: values(dict): The sudoku in dictionary form """ width = 1 + max(len(values[s]) for s in boxes) line = '+'.join(['-' * (width * 3)] * 3) for r in rows: print(''.join(values[r + c].center(width) + ('|' if c in '36' else '') for c in cols)) if r in 'CF': print(line) print def eliminate(values): """Eliminate values from peers of each box with a single value. 
Go through all the boxes, and whenever there is a box with a single value, eliminate this value from the set of values of all its peers. Args: values: Sudoku in dictionary form. Returns: Resulting Sudoku in dictionary form after eliminating values. """ solved_values = [box for box in values.keys() if len(values[box]) == 1] for box in solved_values: digit = values[box] for peer in peers[box]: assign_value(values, peer, values[peer].replace(digit, '')) return values def only_choice(values): """Finalize all values that are the only choice for a unit. Go through all the units, and whenever there is a unit with a value that only fits in one box, assign the value to this box. Input: Sudoku in dictionary form. Output: Resulting Sudoku in dictionary form after filling in only choices. """ for unit in unitlist: for digit in '123456789': dplaces = [box for box in unit if digit in values[box]] if len(dplaces) == 1: assign_value(values, dplaces[0], digit) return values def naked_twins(values): """Eliminate values using the naked twins strategy. Args: values(dict): a dictionary of the form {'box_name': '123456789', ...} Returns: the values dictionary with the naked twins eliminated from peers. """ # Get all pairs of 2 digits. Note, combinations are always sorted allpairs = [d[0] + d[1] for d in combinations(digits, 2)] for unit in unitlist: # Create a dictionary of (digits_pair, count) d = {pair : 0 for pair in allpairs} for box in unit: if len(values[box]) == 2: d[values[box]] += 1 # Combine found twins in a single string whatreplace = '' for s in allpairs: if d[s] > 1: whatreplace += s if whatreplace: # Create a translation table transtable = str.maketrans(whatreplace, ' ' * len(whatreplace)) # Eliminate twins digits using the translation table for box in unit: if len(values[box]) > 2: assign_value(values, box, values[box].translate(transtable).replace(' ', '')) return values def reduce_puzzle(values): """ Iterate eliminate() and only_choice(). 
If at some point, there is a box with no available values, return False. If the sudoku is solved, return the sudoku. If after an iteration of both functions, the sudoku remains the same, return the sudoku. Input: A sudoku in dictionary form. Output: The resulting sudoku in dictionary form. """ solved_values = [box for box in values.keys() if len(values[box]) == 1] stalled = False while not stalled: solved_values_before = len([box for box in values.keys() if len(values[box]) == 1]) values = eliminate(values) values = only_choice(values) values = naked_twins(values) solved_values_after = len([box for box in values.keys() if len(values[box]) == 1]) stalled = solved_values_before == solved_values_after if len([box for box in values.keys() if len(values[box]) == 0]): return False return values def search(values): values = reduce_puzzle(values) if not values: return False # Choose one of the unfilled squares with the fewest possibilities # The Key function returns 10 for single digit boxes so they cannot be # returned if there are any unfilled squares min_key = min(values, key=lambda k: len(values[k]) if len(values[k]) > 1 else 10) if len(values[min_key]) == 1: # We found a solution! return values else: digits = list(values[min_key]) for d in digits: assign_value(values, min_key, d) # Recursively call search and with a new copy of the values solved_values = search(values.copy()) if solved_values: return solved_values return False def solve(grid): """ Find the solution to a Sudoku grid. Args: grid(string): a string representing a sudoku grid. Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3' Returns: The dictionary representation of the final sudoku grid. False if no solution exists. 
""" return search(grid_values(grid)) if __name__ == '__main__': diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3' display(solve(diag_sudoku_grid)) try: from visualize import visualize_assignments visualize_assignments(assignments) except SystemExit: pass except: print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
0.650023
0.583975
from ..util import InsteonError import logbook logger = logbook.Logger(__name__) class Linker: def __init__(self, dev): self._dev = dev def start_linking_responder(self, group=0x01, port=None): pass def start_linking_controller(self, group=0x01, port=None): pass def stop_linking(self, port=None): pass class ModemLinker(Linker): def __init__(self, modem): super().__init__(modem) def start_linking_responder(self, group=0x01, port=None): port = port if port else self._dev.port if not port: raise InsteonError('No port specified') msg = port.defs['StartALLLinking'].create() msg['LinkCode'] = 0x00 msg['ALLLinkGroup'] = group ack_reply = Channel() port.write(msg, ack_reply_channel=ack_reply) if not ack_reply.wait(1): raise InsteonError('Received no reply') def start_linking_controller(self, group=0x01, port=None): port = port if port else self._dev.port if not port: raise InsteonError('No port specified') msg = port.defs['StartALLLinking'].create() msg['LinkCode'] = 0x01 msg['ALLLinkGroup'] = group ack_reply = Channel() port.write(msg, ack_reply_channel=ack_reply) if not ack_reply.wait(1): raise InsteonError('Received no reply') def stop_linking(self, port=None): port = port if port else self._dev.port if not port: raise InsteonError('No port specified') msg = port.defs['CancelALLLinking'].create() ack_reply = Channel() port.write(msg, ack_reply_channel=ack_reply) if not ack_reply.wait(1): raise InsteonError('Received no reply') class GenericLinker(Linker): def __init__(self, dev): super().__init__(dev) def start_linking_controller(self, group=0x01, port=None): port = port if port else self._dev.port if not port: raise InsteonError('No port specified') self._dev.querier.query_ext(0x09, 0x01, []) def start_linking_responder(self, group=0x01, port=None): self.start_linking_controller(group, port) def stop_linking(self, port=None): raise InsteonError('Not implemented')
insteon/dev/linker.py
from ..util import InsteonError import logbook logger = logbook.Logger(__name__) class Linker: def __init__(self, dev): self._dev = dev def start_linking_responder(self, group=0x01, port=None): pass def start_linking_controller(self, group=0x01, port=None): pass def stop_linking(self, port=None): pass class ModemLinker(Linker): def __init__(self, modem): super().__init__(modem) def start_linking_responder(self, group=0x01, port=None): port = port if port else self._dev.port if not port: raise InsteonError('No port specified') msg = port.defs['StartALLLinking'].create() msg['LinkCode'] = 0x00 msg['ALLLinkGroup'] = group ack_reply = Channel() port.write(msg, ack_reply_channel=ack_reply) if not ack_reply.wait(1): raise InsteonError('Received no reply') def start_linking_controller(self, group=0x01, port=None): port = port if port else self._dev.port if not port: raise InsteonError('No port specified') msg = port.defs['StartALLLinking'].create() msg['LinkCode'] = 0x01 msg['ALLLinkGroup'] = group ack_reply = Channel() port.write(msg, ack_reply_channel=ack_reply) if not ack_reply.wait(1): raise InsteonError('Received no reply') def stop_linking(self, port=None): port = port if port else self._dev.port if not port: raise InsteonError('No port specified') msg = port.defs['CancelALLLinking'].create() ack_reply = Channel() port.write(msg, ack_reply_channel=ack_reply) if not ack_reply.wait(1): raise InsteonError('Received no reply') class GenericLinker(Linker): def __init__(self, dev): super().__init__(dev) def start_linking_controller(self, group=0x01, port=None): port = port if port else self._dev.port if not port: raise InsteonError('No port specified') self._dev.querier.query_ext(0x09, 0x01, []) def start_linking_responder(self, group=0x01, port=None): self.start_linking_controller(group, port) def stop_linking(self, port=None): raise InsteonError('Not implemented')
0.42179
0.073132
from Tkinter import * from idlelib import TreeWidget import os from mabot.utils import get_status_color TreeWidget.ICONDIR = os.path.join(os.path.dirname(__file__), 'icons') class Node(TreeWidget.TreeNode): def __init__(self, canvas, parent, item, root=None): self.root = root TreeWidget.TreeNode.__init__(self, canvas, parent, item) self.label = ForeGroundLabel(self.canvas, get_status_color(self.item.model_item), text=self.item.GetText(), bd=0, padx=2, pady=2) def select(self, event=None): TreeWidget.TreeNode.select(self, event) if self.root is not None: self.root.notify_select(self) else: parent = self.parent while parent is not None: if parent.root is not None: parent.root.notify_select(self) break parent = parent.parent def drawtext(self): self.label.update_foreground(get_status_color(self.item.model_item)) TreeWidget.TreeNode.drawtext(self) class ForeGroundLabel(Label): def __init__(self, master, foreground, **cnf): self.foreground = foreground Label.__init__(self, master, cnf) def update_foreground(self, foreground): self.foreground = foreground Label.configure(self, {'foreground':self.foreground}) def configure(self, cnf): Label.configure(self, cnf) Label.configure(self, {'foreground':self.foreground}) class _RobotTreeItem(TreeWidget.TreeItem): def __init__(self, item): self.model_item = item self.children = self._get_children() self.label = self._get_label() def GetText(self): return self.label def GetSubList(self): return self.children def IsExpandable(self): return self.model_item.has_visible_children() class SuiteTreeItem(_RobotTreeItem): def _get_children(self): if self._only_one_visible_folder_suite_child(self.model_item): visible_suite = [s for s in self.model_item.suites if s.visible ][0] return SuiteTreeItem(visible_suite)._get_children() children = [] for suite in self.model_item.suites: if suite.visible: children.append(SuiteTreeItem(suite)) for test in self.model_item.tests: if test.visible: children.append(TestTreeItem(test)) return children def 
GetIconName(self): if self.model_item.tests: return 'file_suite' return 'dir_suite' def _only_one_visible_folder_suite_child(self, item): return len([s for s in item.suites if s.visible ]) == 1 \ and not item.suites[0].tests def _get_label(self): names = [] self._get_suite_names_with_one_suite_child(self.model_item, names) return '/'.join(names) def _get_suite_names_with_one_suite_child(self, item, names): names.append(item.name) if self._only_one_visible_folder_suite_child(item): return self._get_suite_names_with_one_suite_child(item.suites[0], names) return names class _AbstractTestAndKWItem(_RobotTreeItem): def _get_label(self): return self.model_item.name def _get_children(self): return [ KeywordTreeItem(kw) for kw in self.model_item.keywords ] class TestTreeItem(_AbstractTestAndKWItem): def GetIconName(self): return 'test' class KeywordTreeItem(_AbstractTestAndKWItem): def GetIconName(self): return 'keyword'
src/mabot/ui/tree.py
from Tkinter import * from idlelib import TreeWidget import os from mabot.utils import get_status_color TreeWidget.ICONDIR = os.path.join(os.path.dirname(__file__), 'icons') class Node(TreeWidget.TreeNode): def __init__(self, canvas, parent, item, root=None): self.root = root TreeWidget.TreeNode.__init__(self, canvas, parent, item) self.label = ForeGroundLabel(self.canvas, get_status_color(self.item.model_item), text=self.item.GetText(), bd=0, padx=2, pady=2) def select(self, event=None): TreeWidget.TreeNode.select(self, event) if self.root is not None: self.root.notify_select(self) else: parent = self.parent while parent is not None: if parent.root is not None: parent.root.notify_select(self) break parent = parent.parent def drawtext(self): self.label.update_foreground(get_status_color(self.item.model_item)) TreeWidget.TreeNode.drawtext(self) class ForeGroundLabel(Label): def __init__(self, master, foreground, **cnf): self.foreground = foreground Label.__init__(self, master, cnf) def update_foreground(self, foreground): self.foreground = foreground Label.configure(self, {'foreground':self.foreground}) def configure(self, cnf): Label.configure(self, cnf) Label.configure(self, {'foreground':self.foreground}) class _RobotTreeItem(TreeWidget.TreeItem): def __init__(self, item): self.model_item = item self.children = self._get_children() self.label = self._get_label() def GetText(self): return self.label def GetSubList(self): return self.children def IsExpandable(self): return self.model_item.has_visible_children() class SuiteTreeItem(_RobotTreeItem): def _get_children(self): if self._only_one_visible_folder_suite_child(self.model_item): visible_suite = [s for s in self.model_item.suites if s.visible ][0] return SuiteTreeItem(visible_suite)._get_children() children = [] for suite in self.model_item.suites: if suite.visible: children.append(SuiteTreeItem(suite)) for test in self.model_item.tests: if test.visible: children.append(TestTreeItem(test)) return children def 
GetIconName(self): if self.model_item.tests: return 'file_suite' return 'dir_suite' def _only_one_visible_folder_suite_child(self, item): return len([s for s in item.suites if s.visible ]) == 1 \ and not item.suites[0].tests def _get_label(self): names = [] self._get_suite_names_with_one_suite_child(self.model_item, names) return '/'.join(names) def _get_suite_names_with_one_suite_child(self, item, names): names.append(item.name) if self._only_one_visible_folder_suite_child(item): return self._get_suite_names_with_one_suite_child(item.suites[0], names) return names class _AbstractTestAndKWItem(_RobotTreeItem): def _get_label(self): return self.model_item.name def _get_children(self): return [ KeywordTreeItem(kw) for kw in self.model_item.keywords ] class TestTreeItem(_AbstractTestAndKWItem): def GetIconName(self): return 'test' class KeywordTreeItem(_AbstractTestAndKWItem): def GetIconName(self): return 'keyword'
0.454472
0.092688
import time import datetime import json from django.http import response, StreamingHttpResponse from rest_framework import request from rest_framework.views import APIView from rest_framework.renderers import JSONRenderer from rest_framework.response import Response from rest_framework import status class IVAD(APIView): renderer_classes = [JSONRenderer, ] def get(self, request, *args, **kwargs): return Response({'msg': 'hi'}) def post(self, request, *args, **kwargs): caseDataDict = request.data # 生成测试案例编号 uid = 'ALPHA_{}'.format(str(time.time()).replace('.', '')) fileName = 'api\\data\\caches\\case_{}.json'.format(uid) cleanCaseDataDict = { 'uid': uid, 'SVN': { "versionRangeForVersionNumber": None, "versionRangeForDate": None, }, 'GamePlay': caseDataDict.get('gamePlay'), 'DefectBehavior': caseDataDict.get('defectBehavior'), 'Path': { "Jx3BVTNeedCheck": caseDataDict.get('needCheckPath'), "Jx3BVTWorkPath": caseDataDict.get('binPath'), "Jx3Remake": "JX3ClientX64.exe", } } versionRangeList = caseDataDict.get('versionRange') dateRangeList = caseDataDict.get('dateRange') # versionRange校验 for version in versionRangeList: if not version or not version.isdigit(): # version缺失 -> version范围不能用 cleanCaseDataDict['SVN']['versionRangeForVersionNumber'] = 'E599: Abnormal data information.' # 版本范围不能用 -> 检查时间范围 for date in dateRangeList: if not date: # 缺少时间范围 cleanCaseDataDict['SVN']['versionRangeForDate'] = 'E599: Abnormal data information.' return Response({ 'status': 599, 'msg': 'E599: 不符合 OPSVN CASE 运行标准。', 'dataOfResult': cleanCaseDataDict, }) else: cleanCaseDataDict['SVN']['versionRangeForDate'] = dateRangeList break else: cleanCaseDataDict['SVN']['versionRangeForVersionNumber'] = versionRangeList cleanCaseDataDict['SVN']['versionRangeForDate'] = 'E598: When using the version range, the time range will not be saved.' 
# 写入文件 with open(fileName, 'w', encoding='utf-8') as f: json.dump(cleanCaseDataDict, f, indent=4) downloadUrl = '/api/download/{}'.format(uid) return Response({ 'status': 200, 'msg': '即刻执行你的测试需求!', 'dataOfResult': cleanCaseDataDict, 'downloadUrl': downloadUrl, }) class DownloadCase(APIView): renderer_classes = [JSONRenderer, ] def get(self, request, uid, *args, **kwargs): if not uid: return Response({ 'status': 598, 'msg': '缺失ID,请在 CASE 生成页面重新提交' }) fileName = 'case_{}.json'.format(uid) with open('api\\data\\caches\\{}'.format(fileName)) as f: data = f.read() responseData = StreamingHttpResponse(data) responseData['Content-Type'] = 'application/octet-stream' responseData['Content-Disposition'] = 'attachment;filename={}'.format(fileName) return responseData
console/api/views/IVAD.py
import time import datetime import json from django.http import response, StreamingHttpResponse from rest_framework import request from rest_framework.views import APIView from rest_framework.renderers import JSONRenderer from rest_framework.response import Response from rest_framework import status class IVAD(APIView): renderer_classes = [JSONRenderer, ] def get(self, request, *args, **kwargs): return Response({'msg': 'hi'}) def post(self, request, *args, **kwargs): caseDataDict = request.data # 生成测试案例编号 uid = 'ALPHA_{}'.format(str(time.time()).replace('.', '')) fileName = 'api\\data\\caches\\case_{}.json'.format(uid) cleanCaseDataDict = { 'uid': uid, 'SVN': { "versionRangeForVersionNumber": None, "versionRangeForDate": None, }, 'GamePlay': caseDataDict.get('gamePlay'), 'DefectBehavior': caseDataDict.get('defectBehavior'), 'Path': { "Jx3BVTNeedCheck": caseDataDict.get('needCheckPath'), "Jx3BVTWorkPath": caseDataDict.get('binPath'), "Jx3Remake": "JX3ClientX64.exe", } } versionRangeList = caseDataDict.get('versionRange') dateRangeList = caseDataDict.get('dateRange') # versionRange校验 for version in versionRangeList: if not version or not version.isdigit(): # version缺失 -> version范围不能用 cleanCaseDataDict['SVN']['versionRangeForVersionNumber'] = 'E599: Abnormal data information.' # 版本范围不能用 -> 检查时间范围 for date in dateRangeList: if not date: # 缺少时间范围 cleanCaseDataDict['SVN']['versionRangeForDate'] = 'E599: Abnormal data information.' return Response({ 'status': 599, 'msg': 'E599: 不符合 OPSVN CASE 运行标准。', 'dataOfResult': cleanCaseDataDict, }) else: cleanCaseDataDict['SVN']['versionRangeForDate'] = dateRangeList break else: cleanCaseDataDict['SVN']['versionRangeForVersionNumber'] = versionRangeList cleanCaseDataDict['SVN']['versionRangeForDate'] = 'E598: When using the version range, the time range will not be saved.' 
# 写入文件 with open(fileName, 'w', encoding='utf-8') as f: json.dump(cleanCaseDataDict, f, indent=4) downloadUrl = '/api/download/{}'.format(uid) return Response({ 'status': 200, 'msg': '即刻执行你的测试需求!', 'dataOfResult': cleanCaseDataDict, 'downloadUrl': downloadUrl, }) class DownloadCase(APIView): renderer_classes = [JSONRenderer, ] def get(self, request, uid, *args, **kwargs): if not uid: return Response({ 'status': 598, 'msg': '缺失ID,请在 CASE 生成页面重新提交' }) fileName = 'case_{}.json'.format(uid) with open('api\\data\\caches\\{}'.format(fileName)) as f: data = f.read() responseData = StreamingHttpResponse(data) responseData['Content-Type'] = 'application/octet-stream' responseData['Content-Disposition'] = 'attachment;filename={}'.format(fileName) return responseData
0.337204
0.091423
import os import time import json import requests import warnings # Leave here to suppress https missing certificate validation warning warnings.filterwarnings("ignore") HEADER = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'SEC': "SEC_TOKEN_HERE" } OK = 1 KO = -1 class QRadarAPI(): """Class exposing functions to interact with QRadar APIs. The class does not check for errors, you must handle them on your own. """ def __init__(self, SEC, API_BASE, verify=False): self.api_base = API_BASE self.verify = verify self.header = HEADER self.header['SEC'] = SEC def get_help(self) -> requests.Response: """Retrieve the available APIs. Returns: res (requests.Response): Response object """ url = self.api_base + "help/capabilities" print(f"[+] GET {url}") res = requests.get(url, verify=self.verify, headers=self.header) return res def get_offenses(self) -> requests.Response: """Retrieve the offenses present into QRadar. Returns: res (requests.Response): Response object. """ url = self.api_base + "/siem/offenses" print(f"[+] GET {url}") s = time.time() res = requests.get(url, headers=HEADER, verify=self.verify) e = time.time() print(f"Served in {e - s} s") return res def get_offense_closing_reasons(self) -> requests.Response: """Retrieve the offenses closing reason types. Returns: res (requests.Response): Response object """ url = self.api_base + "/siem/offense_closing_reasons" print(f"[+] GET {url}") s = time.time() res = requests.get(url, headers=HEADER, verify=self.verify) e = time.time() print(f"[+] Served in {e - s} s") return res def post_ariel_search(self, query_expression: str = "") -> requests.Response: """Create a new ariel search with the query_expression passed in input. Args: query_expression (str, optional): Query expression written in AQL. Defaults to "". 
Returns: res (requests.Response): Response object """ url = f"{self.api_base}/ariel/searches?query_expression={query_expression}" return requests.post(url, headers=self.header, verify=self.verify) def get_ariel_search_status(self, sid: str) -> requests.Response: """Look up a search's current status. Args: sid (str): The element cursor_id present in the response received by post_ariel_search() Returns: status (requests.Response): A string describing the status of the search. It is the status parameter of the response received from the server. """ url = f"{self.api_base}/ariel/searches/{sid}" res = requests.get(url, headers=self.header, verify=self.verify) status = res.json()["status"] return status def get_ariel_search_results(self, sid: str) -> requests.Response: """Retrieve the results of a search. If the search is not completed, the response will contain a particular message describing why it is not done. For more details check out QRadar docs. Args: sid (str): The element cursor_id present in the response received by post_ariel_search() Returns: res (requests.Response): Response object """ url = f"{self.api_base}/ariel/searches/{sid}/results" return requests.get(url, headers=self.header, verify=self.verify) def do_ariel_search(self, query_expression: str = "", debug: bool = False) -> dict: """This function runs a full ariel search, taking care of the async architecture of ariel. Basically it posts a search, then waits for it to complete and the retrieves the results using publicly available functions. Args: query_expression (str, optional): Query expression written in AQL. Defaults to "". Returns: dict: A dictionary containing the search results, or the error message """ res = self.post_ariel_search(query_expression).json() try: sid = res['cursor_id'] except: print("No cursor_id found. 
Here is the response received:\n") print(json.dumps(res, indent=4)) return json.loads("{\"error\": \"no cursor_id\"}") s = time.time() print(f"[+] cursor_id = {sid}") status = self.get_ariel_search_status(sid) if (debug): print("[+] " + status) while status != "COMPLETED" and status != "ERROR": status = self.get_ariel_search_status(sid) time.sleep(5) if (debug): print("[+] " + status) if status == "ERROR": return json.loads("{\"error\": \"search id not found\"}") res = self.get_ariel_search_results(sid) e = time.time() if (debug): print(f"[+] Served in {e - s} s") return res.json() if __name__ == "__main__": print("Pyradar is a module. Not a standalone.")
src/pyradar.py
import os import time import json import requests import warnings # Leave here to suppress https missing certificate validation warning warnings.filterwarnings("ignore") HEADER = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'SEC': "SEC_TOKEN_HERE" } OK = 1 KO = -1 class QRadarAPI(): """Class exposing functions to interact with QRadar APIs. The class does not check for errors, you must handle them on your own. """ def __init__(self, SEC, API_BASE, verify=False): self.api_base = API_BASE self.verify = verify self.header = HEADER self.header['SEC'] = SEC def get_help(self) -> requests.Response: """Retrieve the available APIs. Returns: res (requests.Response): Response object """ url = self.api_base + "help/capabilities" print(f"[+] GET {url}") res = requests.get(url, verify=self.verify, headers=self.header) return res def get_offenses(self) -> requests.Response: """Retrieve the offenses present into QRadar. Returns: res (requests.Response): Response object. """ url = self.api_base + "/siem/offenses" print(f"[+] GET {url}") s = time.time() res = requests.get(url, headers=HEADER, verify=self.verify) e = time.time() print(f"Served in {e - s} s") return res def get_offense_closing_reasons(self) -> requests.Response: """Retrieve the offenses closing reason types. Returns: res (requests.Response): Response object """ url = self.api_base + "/siem/offense_closing_reasons" print(f"[+] GET {url}") s = time.time() res = requests.get(url, headers=HEADER, verify=self.verify) e = time.time() print(f"[+] Served in {e - s} s") return res def post_ariel_search(self, query_expression: str = "") -> requests.Response: """Create a new ariel search with the query_expression passed in input. Args: query_expression (str, optional): Query expression written in AQL. Defaults to "". 
Returns: res (requests.Response): Response object """ url = f"{self.api_base}/ariel/searches?query_expression={query_expression}" return requests.post(url, headers=self.header, verify=self.verify) def get_ariel_search_status(self, sid: str) -> requests.Response: """Look up a search's current status. Args: sid (str): The element cursor_id present in the response received by post_ariel_search() Returns: status (requests.Response): A string describing the status of the search. It is the status parameter of the response received from the server. """ url = f"{self.api_base}/ariel/searches/{sid}" res = requests.get(url, headers=self.header, verify=self.verify) status = res.json()["status"] return status def get_ariel_search_results(self, sid: str) -> requests.Response: """Retrieve the results of a search. If the search is not completed, the response will contain a particular message describing why it is not done. For more details check out QRadar docs. Args: sid (str): The element cursor_id present in the response received by post_ariel_search() Returns: res (requests.Response): Response object """ url = f"{self.api_base}/ariel/searches/{sid}/results" return requests.get(url, headers=self.header, verify=self.verify) def do_ariel_search(self, query_expression: str = "", debug: bool = False) -> dict: """This function runs a full ariel search, taking care of the async architecture of ariel. Basically it posts a search, then waits for it to complete and the retrieves the results using publicly available functions. Args: query_expression (str, optional): Query expression written in AQL. Defaults to "". Returns: dict: A dictionary containing the search results, or the error message """ res = self.post_ariel_search(query_expression).json() try: sid = res['cursor_id'] except: print("No cursor_id found. 
Here is the response received:\n") print(json.dumps(res, indent=4)) return json.loads("{\"error\": \"no cursor_id\"}") s = time.time() print(f"[+] cursor_id = {sid}") status = self.get_ariel_search_status(sid) if (debug): print("[+] " + status) while status != "COMPLETED" and status != "ERROR": status = self.get_ariel_search_status(sid) time.sleep(5) if (debug): print("[+] " + status) if status == "ERROR": return json.loads("{\"error\": \"search id not found\"}") res = self.get_ariel_search_results(sid) e = time.time() if (debug): print(f"[+] Served in {e - s} s") return res.json() if __name__ == "__main__": print("Pyradar is a module. Not a standalone.")
0.676406
0.183447
from util.typedispatch import * from language.python import ast from dataflow.reverse import * from analysis import tools def liveMeet(values): if values: return top else: return undefined # Mark a locals in an AST subtree as used. class MarkLocals(TypeDispatcher): @dispatch(ast.leafTypes) def visitLeaf(self, node): pass @dispatch(ast.Local) def visitLocal(self, node): self.flow.define(node, top) @dispatch(ast.GetGlobal, ast.SetGlobal) def visitGlobalOp(self, node): self.flow.define(self.selfparam, top) node.visitChildren(self) @defaultdispatch def default(self, node): node.visitChildren(self) nodesWithNoSideEffects = (ast.GetGlobal, ast.Existing, ast.Local, ast.Is, ast.Load, ast.Allocate, ast.BuildTuple, ast.BuildList, ast.BuildMap) class MarkLive(TypeDispatcher): def __init__(self, code): self.code = code self.marker = MarkLocals() def hasNoSideEffects(self, node): if self.descriptive(): return isinstance(node, (ast.Local, ast.Existing)) else: return isinstance(node, nodesWithNoSideEffects) or not tools.mightHaveSideEffect(node) def descriptive(self): return self.code.annotation.descriptive @dispatch(ast.Condition) def visitCondition(self, node): self.marker(node.conditional) return node @dispatch(ast.Discard) def visitDiscard(self, node): if self.hasNoSideEffects(node.expr): return [] else: self.marker(node) return node @dispatch(ast.Assign) def visitAssign(self, node): used = any([self.flow.lookup(lcl) is not undefined for lcl in node.lcls]) if used: for lcl in node.lcls: self.flow.undefine(lcl) self.marker(node.expr) return node elif self.hasNoSideEffects(node.expr): return [] else: node = ast.Discard(node.expr) node = self(node) return node @dispatch(ast.Delete) def visitDelete(self, node): self.flow.undefine(node.lcl) @defaultdispatch def default(self, node): if isinstance(node, ast.SimpleStatement): self.marker(node) return node @dispatch(ast.InputBlock) def visitInputBlock(self, node): inputs = [] for input in node.inputs: if self.flow.lookup(input.lcl) is 
not undefined: inputs.append(input) return ast.InputBlock(inputs) @dispatch(ast.OutputBlock) def visitOutputBlock(self, node): for output in node.outputs: self.flow.define(output.expr, top) return node @dispatch(ast.Return) def visitReturn(self, node): for lcl in self.initialLive: self.flow.define(lcl, top) self.marker(node) return node def filterParam(self, p): if p is None: return None elif self.flow.lookup(p) is undefined: return ast.DoNotCare() else: return p @dispatch(ast.CodeParameters) def visitCodeParameters(self, node): # Insert don't care for unused parameters. # selfparam is a special case, it's OK if it disappears in descriptive stubs. selfparam = self.filterParam(node.selfparam) if self.descriptive(): params = node.params vparam = node.vparam kparam = node.kparam else: params = [self.filterParam(p) for p in node.params] vparam = self.filterParam(node.vparam) kparam = self.filterParam(node.kparam) return ast.CodeParameters(selfparam, params, node.paramnames, node.defaults, vparam, kparam, node.returnparams) def evaluateCode(compiler, node, initialLive=None): rewrite = MarkLive(node) traverse = ReverseFlowTraverse(liveMeet, rewrite) # HACK rewrite.flow = traverse.flow rewrite.marker.flow = traverse.flow rewrite.marker.selfparam = node.codeparameters.selfparam t = MutateCodeReversed(traverse) # For shader translation, locals may be used as outputs. # We need to retain these locals. rewrite.initialLive = initialLive if initialLive != None else () result = t(node) return result
bin/optimization/dce.py
from util.typedispatch import * from language.python import ast from dataflow.reverse import * from analysis import tools def liveMeet(values): if values: return top else: return undefined # Mark a locals in an AST subtree as used. class MarkLocals(TypeDispatcher): @dispatch(ast.leafTypes) def visitLeaf(self, node): pass @dispatch(ast.Local) def visitLocal(self, node): self.flow.define(node, top) @dispatch(ast.GetGlobal, ast.SetGlobal) def visitGlobalOp(self, node): self.flow.define(self.selfparam, top) node.visitChildren(self) @defaultdispatch def default(self, node): node.visitChildren(self) nodesWithNoSideEffects = (ast.GetGlobal, ast.Existing, ast.Local, ast.Is, ast.Load, ast.Allocate, ast.BuildTuple, ast.BuildList, ast.BuildMap) class MarkLive(TypeDispatcher): def __init__(self, code): self.code = code self.marker = MarkLocals() def hasNoSideEffects(self, node): if self.descriptive(): return isinstance(node, (ast.Local, ast.Existing)) else: return isinstance(node, nodesWithNoSideEffects) or not tools.mightHaveSideEffect(node) def descriptive(self): return self.code.annotation.descriptive @dispatch(ast.Condition) def visitCondition(self, node): self.marker(node.conditional) return node @dispatch(ast.Discard) def visitDiscard(self, node): if self.hasNoSideEffects(node.expr): return [] else: self.marker(node) return node @dispatch(ast.Assign) def visitAssign(self, node): used = any([self.flow.lookup(lcl) is not undefined for lcl in node.lcls]) if used: for lcl in node.lcls: self.flow.undefine(lcl) self.marker(node.expr) return node elif self.hasNoSideEffects(node.expr): return [] else: node = ast.Discard(node.expr) node = self(node) return node @dispatch(ast.Delete) def visitDelete(self, node): self.flow.undefine(node.lcl) @defaultdispatch def default(self, node): if isinstance(node, ast.SimpleStatement): self.marker(node) return node @dispatch(ast.InputBlock) def visitInputBlock(self, node): inputs = [] for input in node.inputs: if self.flow.lookup(input.lcl) is 
not undefined: inputs.append(input) return ast.InputBlock(inputs) @dispatch(ast.OutputBlock) def visitOutputBlock(self, node): for output in node.outputs: self.flow.define(output.expr, top) return node @dispatch(ast.Return) def visitReturn(self, node): for lcl in self.initialLive: self.flow.define(lcl, top) self.marker(node) return node def filterParam(self, p): if p is None: return None elif self.flow.lookup(p) is undefined: return ast.DoNotCare() else: return p @dispatch(ast.CodeParameters) def visitCodeParameters(self, node): # Insert don't care for unused parameters. # selfparam is a special case, it's OK if it disappears in descriptive stubs. selfparam = self.filterParam(node.selfparam) if self.descriptive(): params = node.params vparam = node.vparam kparam = node.kparam else: params = [self.filterParam(p) for p in node.params] vparam = self.filterParam(node.vparam) kparam = self.filterParam(node.kparam) return ast.CodeParameters(selfparam, params, node.paramnames, node.defaults, vparam, kparam, node.returnparams) def evaluateCode(compiler, node, initialLive=None): rewrite = MarkLive(node) traverse = ReverseFlowTraverse(liveMeet, rewrite) # HACK rewrite.flow = traverse.flow rewrite.marker.flow = traverse.flow rewrite.marker.selfparam = node.codeparameters.selfparam t = MutateCodeReversed(traverse) # For shader translation, locals may be used as outputs. # We need to retain these locals. rewrite.initialLive = initialLive if initialLive != None else () result = t(node) return result
0.532911
0.319473
import io import tqdm import requests from PIL import Image BASE_URL = 'https://api.nosconecta.com.ar/' PATH = 'eform/thumbnail/{}' BASE_PARAMS = { 'resize': 'full', 'page': '0', } FOLDER_URL = 'https://ar.turecibo.com/bandeja.php?apiendpoint=/folders/{}/documents/available' MAX_FAILED_REQUESTS = 3 class DocumentDownloader: def __init__(self, doc_hash, filename=None): self.doc_hash = doc_hash self.filename = filename if filename is not None else '{}.pdf'.format(doc_hash) self.url = BASE_URL + PATH.format(self.doc_hash) def download(self): pages = self.get_pages() self.save_as_pdf(pages) def get_pages(self): """ Downloads all the pages and returns them as an ordered list of images loaded in memory """ session = requests.session() params = BASE_PARAMS.copy() pages = [] page = 0 req_failed = 0 bar = tqdm.tqdm(desc=self.filename, unit='pages') while req_failed < MAX_FAILED_REQUESTS: page += 1 params.update({'page': '{}'.format(page)}) req = session.get(self.url, params=params) if req.headers.get('Content-Type').startswith('application/json'): # This is probably an error message, we are most likely # out of bounds. Continue trying to get pages though req_failed += 1 continue bar.update(1) img = Image.open(io.BytesIO(req.content)) pages.append(img) bar.close() return pages def save_as_pdf(self, pages): first_page, pages = pages[0], pages[1:] first_page.save( self.filename, 'PDF', resolution=100.0, save_all=True, append_images=pages ) class FolderDownloader: def __init__(self, cookie, folder): self.cookie = cookie self.folder = folder def download(self): url = FOLDER_URL.format(self.folder) data = dict(reload='1') headers = dict(cookie=self.cookie) req = requests.post(url, data=data, headers=headers) try: response = req.json() except: print('Error, invalid cookie?') return -1 categories = response.get('categorias', {}) for document in categories.get('documentos', []): doc_hash = document.get('archivo') DocumentDownloader(doc_hash=doc_hash).download()
turecibo_downloader/turecibo_downloader.py
import io import tqdm import requests from PIL import Image BASE_URL = 'https://api.nosconecta.com.ar/' PATH = 'eform/thumbnail/{}' BASE_PARAMS = { 'resize': 'full', 'page': '0', } FOLDER_URL = 'https://ar.turecibo.com/bandeja.php?apiendpoint=/folders/{}/documents/available' MAX_FAILED_REQUESTS = 3 class DocumentDownloader: def __init__(self, doc_hash, filename=None): self.doc_hash = doc_hash self.filename = filename if filename is not None else '{}.pdf'.format(doc_hash) self.url = BASE_URL + PATH.format(self.doc_hash) def download(self): pages = self.get_pages() self.save_as_pdf(pages) def get_pages(self): """ Downloads all the pages and returns them as an ordered list of images loaded in memory """ session = requests.session() params = BASE_PARAMS.copy() pages = [] page = 0 req_failed = 0 bar = tqdm.tqdm(desc=self.filename, unit='pages') while req_failed < MAX_FAILED_REQUESTS: page += 1 params.update({'page': '{}'.format(page)}) req = session.get(self.url, params=params) if req.headers.get('Content-Type').startswith('application/json'): # This is probably an error message, we are most likely # out of bounds. Continue trying to get pages though req_failed += 1 continue bar.update(1) img = Image.open(io.BytesIO(req.content)) pages.append(img) bar.close() return pages def save_as_pdf(self, pages): first_page, pages = pages[0], pages[1:] first_page.save( self.filename, 'PDF', resolution=100.0, save_all=True, append_images=pages ) class FolderDownloader: def __init__(self, cookie, folder): self.cookie = cookie self.folder = folder def download(self): url = FOLDER_URL.format(self.folder) data = dict(reload='1') headers = dict(cookie=self.cookie) req = requests.post(url, data=data, headers=headers) try: response = req.json() except: print('Error, invalid cookie?') return -1 categories = response.get('categorias', {}) for document in categories.get('documentos', []): doc_hash = document.get('archivo') DocumentDownloader(doc_hash=doc_hash).download()
0.434461
0.101857
import aiosqlite import asyncio import json import logging import signal import argparse import collections import traceback from enum import Enum ReadMode = Enum('ReadMod', ('EXACT', 'LINE', 'MAX', 'UNTIL')) # 对应四种读模式 class MyError(Exception): # 自定义一个异常类,raise抛出错误实例,便于追踪 pass async def aioClose(w, *, logHint=None): # 关闭对应服务器,输出log信息 if not w: await asyncio.sleep(0.001) return host, port, *_ = w.get_extra_info('peername') log.info(f'{logHint} close {host} {port}') try: w.close() await w.wait_closed() except Exception as exc: pass async def aioRead(r, mode, *, logHint=None, exactData=None, exactLen=None, maxLen=-1, untilSep=b'\r\n'): # 读报文,有四种模式 data = None try: if ReadMode.EXACT == mode: # 读精确的几字节 exactLen = len(exactData) if exactData else exactLen data = await r.readexactly(exactLen) if exactData and data != exactData: raise MyError(f'recvERR={data} {logHint}') elif ReadMode.LINE == mode: # 读一行 data = await r.readline() elif ReadMode.MAX == mode: # 读大量字节,长度为maxLen data = await r.read(maxLen) elif ReadMode.UNTIL == mode: # 读到对应分隔符 data = await r.readuntil(untilSep) else: log.error(f'INVALID mode={mode}') exit(1) except asyncio.IncompleteReadError as exc: raise MyError(f'recvEXC={exc} {logHint}') except ConnectionAbortedError as exc: raise MyError(f'recvEXC={exc} {logHint}') except ConnectionResetError as exc: raise MyError(f'recvEXC={exc} {logHint}') if not data: raise MyError(f'EOF {logHint}') return data async def aioWrite(w, data, *, logHint=''): # 写报文 try: w.write(data) await w.drain() # 与write配套,用于立即清空缓冲区 except ConnectionAbortedError as exc: raise MyError(f'sendEXC={exc} {logHint}') except ConnectionResetError as exc: raise MyError(f'recvEXC={exc} {logHint}') User = collections.namedtuple('User', ['name', 'password', 'dataRate']) # namedtuple可直接用属性名表示item gUserDict = dict() # 存从数据库中取出的用户信息 gUserDictLock = asyncio.Lock() # 对数据库访问加锁,避免冲突 gLinkCount = 0 # 同时连接remoteproxy的数量 gLeakyBucketDict = dict() class LeakyBucket: # 令牌桶类,用于流量控制 def __init__(self, 
tokenLimit): # tokenlimit为用户数据库中的流量限制 self.tokenCount = tokenLimit # 桶中剩余令牌数 self.tokenLimit = tokenLimit self.tokenSemaphore = asyncio.BoundedSemaphore(1) # 创建信号量确保互斥访问 def __del__(self): # 删除该桶,信号量置空 self.tokenLock = None self.tokenSemaphore = None async def acquireToken(self, count): # 获取令牌,数量为count await self.tokenSemaphore.acquire() # 信号量的P操作 tokenCount = 0 # 此次消耗的令牌数 tokenCount = min(self.tokenCount, count) # 桶中令牌数可能小于所需 self.tokenCount -= tokenCount if 0 < self.tokenCount: # 若桶中令牌足够 try: self.tokenSemaphore.release() # 信号量V操作 except ValueError: pass return tokenCount def releaseToken(self, count): # 增加count数量的令牌 self.tokenCount = min(self.tokenCount + count, self.tokenLimit) # 数量不超过limit try: self.tokenSemaphore.release() except ValueError: pass async def doLocal(localR, localW): # 处理与localProxy的通信,两个参数分别是stream读写类的实例 global gLinkCount gLinkCount += 1 serverR, serverW = None, None try: localHost, localPort, *_ = localW.get_extra_info('peername') logHint = f'{localHost} {localPort}' # 读取local发来的目的地址、用户名密码 firstLine = await aioRead(localR, ReadMode.LINE, logHint=f'1stLine') firstDict = json.loads(firstLine.strip().decode()) # 转为dict类型 dstHost = firstDict.get('dst') dstPort = firstDict.get('dport') username = firstDict.get('user') password = <PASSWORD>('password') if not dstHost or not dstPort or not username or not password: raise MyError(f'ErrorFirst') user = gUserDict.get(username) # 得到数据库中该user的行 if not user or user.password != password: # 密码不符 raise MyError(f'authFail {username} {password}') tokenLimit = user.dataRate if user.dataRate else args.tokenLimit # 若用户限制为空,tokenlimit从命令行取得 logHint = f'{logHint} {dstHost} {dstPort}' log.info(f'{logHint} connStart...') # 与目标服务器建立TCP连接 serverR, serverW = await asyncio.open_connection(dstHost, dstPort) bindHost, bindPort, *_ = serverW.get_extra_info('sockname') log.info(f'{logHint} connSucc bind {bindHost} {bindPort}') gLinkCount += 1 await aioWrite(localW, f'{bindHost} {bindPort}\r\n'.encode(), logHint='1stLine') # 
向local回复bind成功的消息 if username not in gLeakyBucketDict: # 为用户分配其对应的令牌桶 gLeakyBucketDict[username] = LeakyBucket(tokenLimit) bucket = gLeakyBucketDict.get(username) # 返回当前用户的令牌桶 await asyncio.wait({ # 创建task以并发地传输信息,全双工方式 asyncio.create_task(xferData(bucket, localR, serverW, logHint=f'{logHint} fromLocal', upDirect=True)), asyncio.create_task(xferData(bucket, serverR, localW, logHint=f'{logHint} fromServer', upDirect=False)) }) except MyError as exc: log.info(f'{logHint} {exc}') except json.JSONDecodeError as exc: log.info(f'{logHint} {exc}') except OSError: log.info(f'{logHint} connFail') except ValueError as exc: log.info(f'{logHint} {exc}') except Exception as exc: log.error(f'{traceback.format_exc()}') exit(1) await aioClose(localW, logHint=logHint) await aioClose(serverW, logHint=logHint) gLinkCount -= 1 if serverR: gLinkCount -= 1 async def remoteTask(): # remoteProxy异步任务主函数 asyncio.create_task(dbSyncTask()) # 创建task,异步运行 asyncio.create_task(tokenLeakTask()) srv = await asyncio.start_server(doLocal, host=args.listenHost, port=args.listenPort) # 启动与local的TCP通信服务 addrList = list([s.getsockname() for s in srv.sockets]) log.info(f'LISTEN {addrList}') async with srv: await srv.serve_forever() # 持续异步运行 async def dbSyncTask(): # 数据库,同步gUserDict与 gLeakyBucketDict async with aiosqlite.connect(args.sqliteFile) as db: while True: await asyncio.sleep(1) # 每秒1次同步 userDict = dict() async with db.execute("SELECT name,password,dataRate FROM user;") as cursor: # 执行查询 async for row in cursor: userDict[row[0]] = User(row[0], row[1], row[2]) # 以username作为key global gUserDict global gLeakyBucketDict gUserDict = userDict for name, user in gUserDict.items(): # name, user对应key,value if name in gLeakyBucketDict: # 用户已连接,则返回其对应带宽限制 gLeakyBucketDict[name].tokenLimit = user.dataRate if user.dataRate else args.tokenLimit async def tokenLeakTask(): # 异步task,生成令牌 while True: await asyncio.sleep(1) for username, bucket in gLeakyBucketDict.items(): bucket.releaseToken(bucket.tokenLimit) # 
每秒生成limit数量的令牌 async def xferData(bucket, srcR, dstW, *, logHint=None, upDirect): # 单向数据流传输,upDirect判断是否为上行流量 try: while True: tokenCount = 65535 if bucket: # remote端有bucket对流量进行限制 tokenCount = await bucket.acquireToken(65535) # 一次读写的maxLen为65535,所以获取该数量令牌 data = await aioRead(srcR, ReadMode.MAX, maxLen=tokenCount, logHint='') # 得到多少令牌,传输多少字节 if bucket: leftToken = tokenCount - len(data) # 没读到足够数据,因此有剩余令牌 if leftToken: bucket.releaseToken(leftToken) # 剩余令牌加入令牌桶 await aioWrite(dstW, data, logHint='') except MyError as exc: log.info(f'{logHint} {exc}') await aioClose(dstW, logHint=logHint) if __name__ == '__main__': signal.signal(signal.SIGINT, signal.SIG_DFL) _logFmt = logging.Formatter('%(asctime)s %(levelname).1s %(lineno)-3d %(funcName)-20s %(message)s', datefmt='%H:%M:%S') # 调试信息设置 _consoleHandler = logging.StreamHandler() _consoleHandler.setLevel(logging.DEBUG) _consoleHandler.setFormatter(_logFmt) log = logging.getLogger(__file__) log.addHandler(_consoleHandler) log.setLevel(logging.DEBUG) _parser = argparse.ArgumentParser(description='remote Proxy') # 命令行解析设置 _parser.add_argument('-d', dest='sqliteFile', default='user.db', help='user database sqlite file') # 数据库文件名 _parser.add_argument('-l', dest='listenHost', default='192.168.43.227', help='proxy listen host default listen all interfaces') # 监听的主机地址 _parser.add_argument('-p', dest='listenPort', type=int, default=8889, help='proxy listen port') _parser.add_argument('-t', dest='tokenLimit', type=int, default=999999, help='bytes/second per user') # 默认的令牌桶流量限制 args = _parser.parse_args() asyncio.run(remoteTask())
remoteProxy.py
import aiosqlite import asyncio import json import logging import signal import argparse import collections import traceback from enum import Enum ReadMode = Enum('ReadMod', ('EXACT', 'LINE', 'MAX', 'UNTIL')) # 对应四种读模式 class MyError(Exception): # 自定义一个异常类,raise抛出错误实例,便于追踪 pass async def aioClose(w, *, logHint=None): # 关闭对应服务器,输出log信息 if not w: await asyncio.sleep(0.001) return host, port, *_ = w.get_extra_info('peername') log.info(f'{logHint} close {host} {port}') try: w.close() await w.wait_closed() except Exception as exc: pass async def aioRead(r, mode, *, logHint=None, exactData=None, exactLen=None, maxLen=-1, untilSep=b'\r\n'): # 读报文,有四种模式 data = None try: if ReadMode.EXACT == mode: # 读精确的几字节 exactLen = len(exactData) if exactData else exactLen data = await r.readexactly(exactLen) if exactData and data != exactData: raise MyError(f'recvERR={data} {logHint}') elif ReadMode.LINE == mode: # 读一行 data = await r.readline() elif ReadMode.MAX == mode: # 读大量字节,长度为maxLen data = await r.read(maxLen) elif ReadMode.UNTIL == mode: # 读到对应分隔符 data = await r.readuntil(untilSep) else: log.error(f'INVALID mode={mode}') exit(1) except asyncio.IncompleteReadError as exc: raise MyError(f'recvEXC={exc} {logHint}') except ConnectionAbortedError as exc: raise MyError(f'recvEXC={exc} {logHint}') except ConnectionResetError as exc: raise MyError(f'recvEXC={exc} {logHint}') if not data: raise MyError(f'EOF {logHint}') return data async def aioWrite(w, data, *, logHint=''): # 写报文 try: w.write(data) await w.drain() # 与write配套,用于立即清空缓冲区 except ConnectionAbortedError as exc: raise MyError(f'sendEXC={exc} {logHint}') except ConnectionResetError as exc: raise MyError(f'recvEXC={exc} {logHint}') User = collections.namedtuple('User', ['name', 'password', 'dataRate']) # namedtuple可直接用属性名表示item gUserDict = dict() # 存从数据库中取出的用户信息 gUserDictLock = asyncio.Lock() # 对数据库访问加锁,避免冲突 gLinkCount = 0 # 同时连接remoteproxy的数量 gLeakyBucketDict = dict() class LeakyBucket: # 令牌桶类,用于流量控制 def __init__(self, 
tokenLimit): # tokenlimit为用户数据库中的流量限制 self.tokenCount = tokenLimit # 桶中剩余令牌数 self.tokenLimit = tokenLimit self.tokenSemaphore = asyncio.BoundedSemaphore(1) # 创建信号量确保互斥访问 def __del__(self): # 删除该桶,信号量置空 self.tokenLock = None self.tokenSemaphore = None async def acquireToken(self, count): # 获取令牌,数量为count await self.tokenSemaphore.acquire() # 信号量的P操作 tokenCount = 0 # 此次消耗的令牌数 tokenCount = min(self.tokenCount, count) # 桶中令牌数可能小于所需 self.tokenCount -= tokenCount if 0 < self.tokenCount: # 若桶中令牌足够 try: self.tokenSemaphore.release() # 信号量V操作 except ValueError: pass return tokenCount def releaseToken(self, count): # 增加count数量的令牌 self.tokenCount = min(self.tokenCount + count, self.tokenLimit) # 数量不超过limit try: self.tokenSemaphore.release() except ValueError: pass async def doLocal(localR, localW): # 处理与localProxy的通信,两个参数分别是stream读写类的实例 global gLinkCount gLinkCount += 1 serverR, serverW = None, None try: localHost, localPort, *_ = localW.get_extra_info('peername') logHint = f'{localHost} {localPort}' # 读取local发来的目的地址、用户名密码 firstLine = await aioRead(localR, ReadMode.LINE, logHint=f'1stLine') firstDict = json.loads(firstLine.strip().decode()) # 转为dict类型 dstHost = firstDict.get('dst') dstPort = firstDict.get('dport') username = firstDict.get('user') password = <PASSWORD>('password') if not dstHost or not dstPort or not username or not password: raise MyError(f'ErrorFirst') user = gUserDict.get(username) # 得到数据库中该user的行 if not user or user.password != password: # 密码不符 raise MyError(f'authFail {username} {password}') tokenLimit = user.dataRate if user.dataRate else args.tokenLimit # 若用户限制为空,tokenlimit从命令行取得 logHint = f'{logHint} {dstHost} {dstPort}' log.info(f'{logHint} connStart...') # 与目标服务器建立TCP连接 serverR, serverW = await asyncio.open_connection(dstHost, dstPort) bindHost, bindPort, *_ = serverW.get_extra_info('sockname') log.info(f'{logHint} connSucc bind {bindHost} {bindPort}') gLinkCount += 1 await aioWrite(localW, f'{bindHost} {bindPort}\r\n'.encode(), logHint='1stLine') # 
向local回复bind成功的消息 if username not in gLeakyBucketDict: # 为用户分配其对应的令牌桶 gLeakyBucketDict[username] = LeakyBucket(tokenLimit) bucket = gLeakyBucketDict.get(username) # 返回当前用户的令牌桶 await asyncio.wait({ # 创建task以并发地传输信息,全双工方式 asyncio.create_task(xferData(bucket, localR, serverW, logHint=f'{logHint} fromLocal', upDirect=True)), asyncio.create_task(xferData(bucket, serverR, localW, logHint=f'{logHint} fromServer', upDirect=False)) }) except MyError as exc: log.info(f'{logHint} {exc}') except json.JSONDecodeError as exc: log.info(f'{logHint} {exc}') except OSError: log.info(f'{logHint} connFail') except ValueError as exc: log.info(f'{logHint} {exc}') except Exception as exc: log.error(f'{traceback.format_exc()}') exit(1) await aioClose(localW, logHint=logHint) await aioClose(serverW, logHint=logHint) gLinkCount -= 1 if serverR: gLinkCount -= 1 async def remoteTask(): # remoteProxy异步任务主函数 asyncio.create_task(dbSyncTask()) # 创建task,异步运行 asyncio.create_task(tokenLeakTask()) srv = await asyncio.start_server(doLocal, host=args.listenHost, port=args.listenPort) # 启动与local的TCP通信服务 addrList = list([s.getsockname() for s in srv.sockets]) log.info(f'LISTEN {addrList}') async with srv: await srv.serve_forever() # 持续异步运行 async def dbSyncTask(): # 数据库,同步gUserDict与 gLeakyBucketDict async with aiosqlite.connect(args.sqliteFile) as db: while True: await asyncio.sleep(1) # 每秒1次同步 userDict = dict() async with db.execute("SELECT name,password,dataRate FROM user;") as cursor: # 执行查询 async for row in cursor: userDict[row[0]] = User(row[0], row[1], row[2]) # 以username作为key global gUserDict global gLeakyBucketDict gUserDict = userDict for name, user in gUserDict.items(): # name, user对应key,value if name in gLeakyBucketDict: # 用户已连接,则返回其对应带宽限制 gLeakyBucketDict[name].tokenLimit = user.dataRate if user.dataRate else args.tokenLimit async def tokenLeakTask(): # 异步task,生成令牌 while True: await asyncio.sleep(1) for username, bucket in gLeakyBucketDict.items(): bucket.releaseToken(bucket.tokenLimit) # 
每秒生成limit数量的令牌 async def xferData(bucket, srcR, dstW, *, logHint=None, upDirect): # 单向数据流传输,upDirect判断是否为上行流量 try: while True: tokenCount = 65535 if bucket: # remote端有bucket对流量进行限制 tokenCount = await bucket.acquireToken(65535) # 一次读写的maxLen为65535,所以获取该数量令牌 data = await aioRead(srcR, ReadMode.MAX, maxLen=tokenCount, logHint='') # 得到多少令牌,传输多少字节 if bucket: leftToken = tokenCount - len(data) # 没读到足够数据,因此有剩余令牌 if leftToken: bucket.releaseToken(leftToken) # 剩余令牌加入令牌桶 await aioWrite(dstW, data, logHint='') except MyError as exc: log.info(f'{logHint} {exc}') await aioClose(dstW, logHint=logHint) if __name__ == '__main__': signal.signal(signal.SIGINT, signal.SIG_DFL) _logFmt = logging.Formatter('%(asctime)s %(levelname).1s %(lineno)-3d %(funcName)-20s %(message)s', datefmt='%H:%M:%S') # 调试信息设置 _consoleHandler = logging.StreamHandler() _consoleHandler.setLevel(logging.DEBUG) _consoleHandler.setFormatter(_logFmt) log = logging.getLogger(__file__) log.addHandler(_consoleHandler) log.setLevel(logging.DEBUG) _parser = argparse.ArgumentParser(description='remote Proxy') # 命令行解析设置 _parser.add_argument('-d', dest='sqliteFile', default='user.db', help='user database sqlite file') # 数据库文件名 _parser.add_argument('-l', dest='listenHost', default='192.168.43.227', help='proxy listen host default listen all interfaces') # 监听的主机地址 _parser.add_argument('-p', dest='listenPort', type=int, default=8889, help='proxy listen port') _parser.add_argument('-t', dest='tokenLimit', type=int, default=999999, help='bytes/second per user') # 默认的令牌桶流量限制 args = _parser.parse_args() asyncio.run(remoteTask())
0.215103
0.107157
import unittest import pandas as pd from scipy.stats import randint, uniform from sklearn import __version__ as sk_version from sklearn.base import clone from sklearn.datasets import load_boston, load_linnerud from sklearn.decomposition import PCA, TruncatedSVD from sklearn.model_selection import train_test_split from sklearn.pipeline import FeatureUnion from physlearn import Regressor from physlearn.datasets import load_benchmark from physlearn.supervised import ShapInterpret from physlearn.supervised.utils._estimator_checks import (_check_estimator_choice, _check_stacking_layer) class TestBasic(unittest.TestCase): def test_regressor_gridsearchcv(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler') search_params = dict(reg__alpha=[0.1, 0.2, 0.5], reg__fit_intercept=[True, False], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params) self.assertLess(reg.best_score_.values, 3.6) self.assertIn(reg.best_params_['reg__alpha'], [0.1, 0.2, 0.5]) self.assertIn(reg.best_params_['reg__fit_intercept'], [True, False]) # sklearn < 0.22 does not have a stacking regressor @unittest.skipIf(sk_version < '0.22.0', 'scikit-learn version is less than 0.22') def test_stacking_regressor_gridsearchcv(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) stack = dict(regressors=['kneighborsregressor', 'bayesianridge'], final_regressor='lasso') reg = Regressor(regressor_choice='stackingregressor', pipeline_transform='standardscaler', stacking_options=dict(layers=stack)) search_params = dict(reg__0__n_neighbors=[2, 4, 5], reg__1__alpha_1=[1e-7, 1e-6], reg__final_estimator__alpha=[1.0], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params) 
self.assertLess(reg.best_score_.values, 2.8) self.assertIn(reg.best_params_['reg__0__n_neighbors'], [2, 4, 5]) self.assertIn(reg.best_params_['reg__1__alpha_1'], [1e-7, 1e-6]) self.assertIn(reg.best_params_['reg__final_estimator__alpha'], [1.0]) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressor_gridsearchcv(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler') search_params = dict(reg__alpha=[0.1, 0.2, 0.5], reg__fit_intercept=[True, False], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params) self.assertLess(reg.best_score_.values, 10.0) self.assertIn(reg.best_params_['reg__estimator__alpha'], [0.1, 0.2, 0.5]) self.assertIn(reg.best_params_['reg__estimator__fit_intercept'], [True, False]) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressorchain_gridsearchcv(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler', chain_order=[2, 0, 1]) search_params = dict(reg__alpha=[0.1, 0.2, 0.5], reg__fit_intercept=[True, False], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params) self.assertLess(reg.best_score_.values, 10.0) self.assertIn(reg.best_params_['reg__base_estimator__alpha'], [0.1, 0.2, 0.5]) self.assertIn(reg.best_params_['reg__base_estimator__fit_intercept'], [True, False]) def test_regressor_randomizedsearchcv(self): X, y = load_boston(return_X_y=True) X, y = 
pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler', randomizedcv_n_iter=6) search_params = dict(reg__alpha=uniform(loc=0.01, scale=1.5), reg__fit_intercept=[True, False], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params, search_method='randomizedsearchcv') self.assertLess(reg.best_score_.values, 3.6) self.assertLessEqual(reg.best_params_['reg__alpha'], 1.51) self.assertGreaterEqual(reg.best_params_['reg__alpha'], 0.01) self.assertIn(reg.best_params_['reg__fit_intercept'], [True, False]) # sklearn < 0.22 does not have a stacking regressor @unittest.skipIf(sk_version < '0.22.0', 'scikit-learn version is less than 0.22') def test_stacking_regressor_randomizedsearchcv(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) stack = dict(regressors=['kneighborsregressor', 'bayesianridge'], final_regressor='lasso') reg = Regressor(regressor_choice='stackingregressor', pipeline_transform='standardscaler', stacking_options=dict(layers=stack), randomizedcv_n_iter=6) search_params = dict(reg__0__n_neighbors=randint(low=2, high=5), reg__1__alpha_1=[1e-7, 1e-6], reg__final_estimator__alpha=[1.0], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params, search_method='randomizedsearchcv') self.assertLess(reg.best_score_.values, 3.5) self.assertLessEqual(reg.best_params_['reg__0__n_neighbors'], 5) self.assertGreaterEqual(reg.best_params_['reg__0__n_neighbors'], 2) self.assertIn(reg.best_params_['reg__1__alpha_1'], [1e-7, 1e-6]) self.assertIn(reg.best_params_['reg__final_estimator__alpha'], [1.0]) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressor_randomizedsearchcv(self): bunch = 
load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler', randomizedcv_n_iter=6) search_params = dict(reg__alpha=uniform(loc=0.01, scale=1.5), reg__fit_intercept=[True, False], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params, search_method='randomizedsearchcv') self.assertLess(reg.best_score_.values, 12.0) self.assertLessEqual(reg.best_params_['reg__estimator__alpha'], 1.51) self.assertGreaterEqual(reg.best_params_['reg__estimator__alpha'], 0.01) self.assertIn(reg.best_params_['reg__estimator__fit_intercept'], [True, False]) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressorchain_randomizedsearchcv(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler', randomizedcv_n_iter=6, chain_order=[2, 0, 1]) search_params = dict(reg__alpha=uniform(loc=0.01, scale=1.5), reg__fit_intercept=[True, False], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params, search_method='randomizedsearchcv') self.assertLess(reg.best_score_.values, 12.0) self.assertLessEqual(reg.best_params_['reg__base_estimator__alpha'], 1.51) self.assertGreaterEqual(reg.best_params_['reg__base_estimator__alpha'], 0.01) self.assertIn(reg.best_params_['reg__base_estimator__fit_intercept'], [True, False]) def test_regressor_bayesoptcv(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='svr', 
pipeline_transform='standardscaler') search_pbounds = dict(reg__gamma=(0.1, 2.0), reg__epsilon=(0.1, 0.4)) reg.search(X_train, y_train, search_params=search_pbounds, search_method='bayesoptcv') self.assertLess(reg.best_score_.values, 3.7) self.assertLessEqual(reg.best_params_['reg__gamma'], 2.0) self.assertGreaterEqual(reg.best_params_['reg__gamma'], 0.1) self.assertLessEqual(reg.best_params_['reg__epsilon'], 0.4) self.assertGreaterEqual(reg.best_params_['reg__epsilon'], 0.1) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressor_bayesoptcv(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='svr', pipeline_transform='standardscaler') search_pbounds = dict(reg__gamma=(0.1, 2.0), reg__epsilon=(0.1, 0.4)) reg.search(X_train, y_train, search_params=search_pbounds, search_method='bayesoptcv') self.assertLess(reg.best_score_.values, 10.0) self.assertLessEqual(reg.best_params_['reg__estimator__gamma'], 2.0) self.assertGreaterEqual(reg.best_params_['reg__estimator__gamma'], 0.1) self.assertLessEqual(reg.best_params_['reg__estimator__epsilon'], 0.4) self.assertGreaterEqual(reg.best_params_['reg__estimator__epsilon'], 0.1) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressorchain_bayesoptcv(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='svr', pipeline_transform='standardscaler', chain_order=[2, 0, 1]) search_pbounds = dict(reg__gamma=(0.1, 2.0), reg__epsilon=(0.1, 0.4)) reg.search(X_train, y_train, search_params=search_pbounds, 
search_method='bayesoptcv') self.assertLess(reg.best_score_.values, 10.0) self.assertLessEqual(reg.best_params_['reg__base_estimator__gamma'], 2.0) self.assertGreaterEqual(reg.best_params_['reg__base_estimator__gamma'], 0.1) self.assertLessEqual(reg.best_params_['reg__base_estimator__epsilon'], 0.4) self.assertGreaterEqual(reg.best_params_['reg__base_estimator__epsilon'], 0.1) def test_regressor_fit_score(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler') reg.fit(X_train, y_train) y_pred = reg.fit(X_train, y_train).predict(X_test) score = reg.score(y_test, y_pred) self.assertCountEqual(y_pred.index, y_test.index) self.assertGreaterEqual(score['mae'].values, 0.0) self.assertGreaterEqual(score['mse'].values, 0.0) self.assertLess(score['mae'].values, 3.1) self.assertLess(score['mse'].values, 23.0) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressor_fit_score(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler') y_pred = reg.fit(X_train, y_train).predict(X_test) score = reg.score(y_test, y_pred).mean() self.assertCountEqual(y_pred.index, y_test.index) self.assertGreaterEqual(score['mae'], 0.0) self.assertGreaterEqual(score['mse'], 0.0) self.assertLess(score['mae'], 11.0) self.assertLess(score['mse'], 232.0) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressorchain_fit_score(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], 
bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler', chain_order=[0, 2, 1]) y_pred = reg.fit(X_train, y_train).predict(X_test) score = reg.score(y_test, y_pred).mean() self.assertCountEqual(y_pred.index, y_test.index) self.assertGreaterEqual(score['mae'], 0.0) self.assertGreaterEqual(score['mse'], 0.0) self.assertLess(score['mae'], 11.0) self.assertLess(score['mse'], 237.0) # sklearn < 0.22 does not have a stacking regressor @unittest.skipIf(sk_version < '0.22.0', 'scikit-learn version is less than 0.22') def test_stacking_regressor_fit_score(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) stack = dict(regressors=['kneighborsregressor', 'bayesianridge'], final_regressor='lasso') reg = Regressor(regressor_choice='stackingregressor', pipeline_transform='standardscaler', stacking_options=dict(layers=stack)) reg.fit(X_train, y_train) y_pred = reg.fit(X_train, y_train).predict(X_test) score = reg.score(y_test, y_pred) self.assertCountEqual(y_pred.index, y_test.index) self.assertGreaterEqual(score['mae'].values, 0.0) self.assertGreaterEqual(score['mse'].values, 0.0) self.assertLess(score['mae'].values, 2.8) self.assertLess(score['mse'].values, 19.0) def test_pipeline_clone_fit_score(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) transformer_list = [('pca', PCA(n_components=1)), ('svd', TruncatedSVD(n_components=2))] union = FeatureUnion(transformer_list=transformer_list, n_jobs=-1) params = dict(alpha=1.2, positive=True) reg = Regressor(regressor_choice='lasso', pipeline_transform=('tr', union), params=params) reg.get_pipeline(y=y_train) _class_before_clone = reg.pipe.__class__ reg.pipe = clone(reg.pipe) y_pred = reg.fit(X_train, 
y_train).predict(X_test) score = reg.score(y_test, y_pred) self.assertEqual(_class_before_clone, reg.pipe.__class__) self.assertCountEqual(y_pred.index, y_test.index) self.assertGreaterEqual(score['mae'].values, 0.0) self.assertGreaterEqual(score['mse'].values, 0.0) self.assertLess(score['mae'].values, 11.0) self.assertLess(score['mse'].values, 232.0) def test_shap_explainer(self): X_train, _, y_train, _ = load_benchmark(return_split=True) index = 3 interpret = ShapInterpret(regressor_choice='ridgecv', target_index=index) interpret.fit(X=X_train, y=y_train, index=index) explainer, shap_values = interpret.explainer(X=X_train) self.assertEqual(X_train.shape, shap_values.shape) def test_estimator_choice(self): choices = ['ridge', 'Ridge', 'RIDGE', 'rIdGe'] estimator_choices = [_check_estimator_choice(estimator_choice=choice, estimator_type='regression') for choice in choices] self.assertListEqual(estimator_choices, ['ridge'] * 4) def test_stacking_choice(self): reg_test = dict(regressors=['kneighborsregressor', 'bayesianridge'], final_regressor='lasso') est_test = dict(estimators=['kneighborsregressor', 'bayesianridge'], final_estimator='lasso') reg = dict(regressors=['KnEiGhBoRsReGrEsSoR', 'BAYESIANRIDGE'], final_regressor='Lasso') est = dict(estimators=['KnEiGhBoRsReGrEsSoR', 'BAYESIANRIDGE'], final_estimator='Lasso') check_reg_stack = _check_stacking_layer(stacking_layer=reg, estimator_type='regression') check_est_stack = _check_stacking_layer(stacking_layer=est, estimator_type='regression') self.assertDictEqual(reg_test, check_reg_stack) self.assertDictEqual(est_test, check_est_stack) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_score_uniform_average(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = 
Regressor(regressor_choice='ridge', pipeline_transform='standardscaler', score_multioutput='uniform_average') y_pred = reg.fit(X_train, y_train).predict(X_test) score = reg.score(y_test, y_pred) self.assertCountEqual(y_pred.index, y_test.index) self.assertGreaterEqual(score['mae'], 0.0) self.assertGreaterEqual(score['mse'], 0.0) self.assertLess(score['mae'], 11.0) self.assertLess(score['mse'], 237.0) if __name__ == '__main__': unittest.main()
tests/test_basic.py
import unittest import pandas as pd from scipy.stats import randint, uniform from sklearn import __version__ as sk_version from sklearn.base import clone from sklearn.datasets import load_boston, load_linnerud from sklearn.decomposition import PCA, TruncatedSVD from sklearn.model_selection import train_test_split from sklearn.pipeline import FeatureUnion from physlearn import Regressor from physlearn.datasets import load_benchmark from physlearn.supervised import ShapInterpret from physlearn.supervised.utils._estimator_checks import (_check_estimator_choice, _check_stacking_layer) class TestBasic(unittest.TestCase): def test_regressor_gridsearchcv(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler') search_params = dict(reg__alpha=[0.1, 0.2, 0.5], reg__fit_intercept=[True, False], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params) self.assertLess(reg.best_score_.values, 3.6) self.assertIn(reg.best_params_['reg__alpha'], [0.1, 0.2, 0.5]) self.assertIn(reg.best_params_['reg__fit_intercept'], [True, False]) # sklearn < 0.22 does not have a stacking regressor @unittest.skipIf(sk_version < '0.22.0', 'scikit-learn version is less than 0.22') def test_stacking_regressor_gridsearchcv(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) stack = dict(regressors=['kneighborsregressor', 'bayesianridge'], final_regressor='lasso') reg = Regressor(regressor_choice='stackingregressor', pipeline_transform='standardscaler', stacking_options=dict(layers=stack)) search_params = dict(reg__0__n_neighbors=[2, 4, 5], reg__1__alpha_1=[1e-7, 1e-6], reg__final_estimator__alpha=[1.0], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params) 
self.assertLess(reg.best_score_.values, 2.8) self.assertIn(reg.best_params_['reg__0__n_neighbors'], [2, 4, 5]) self.assertIn(reg.best_params_['reg__1__alpha_1'], [1e-7, 1e-6]) self.assertIn(reg.best_params_['reg__final_estimator__alpha'], [1.0]) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressor_gridsearchcv(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler') search_params = dict(reg__alpha=[0.1, 0.2, 0.5], reg__fit_intercept=[True, False], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params) self.assertLess(reg.best_score_.values, 10.0) self.assertIn(reg.best_params_['reg__estimator__alpha'], [0.1, 0.2, 0.5]) self.assertIn(reg.best_params_['reg__estimator__fit_intercept'], [True, False]) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressorchain_gridsearchcv(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler', chain_order=[2, 0, 1]) search_params = dict(reg__alpha=[0.1, 0.2, 0.5], reg__fit_intercept=[True, False], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params) self.assertLess(reg.best_score_.values, 10.0) self.assertIn(reg.best_params_['reg__base_estimator__alpha'], [0.1, 0.2, 0.5]) self.assertIn(reg.best_params_['reg__base_estimator__fit_intercept'], [True, False]) def test_regressor_randomizedsearchcv(self): X, y = load_boston(return_X_y=True) X, y = 
pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler', randomizedcv_n_iter=6) search_params = dict(reg__alpha=uniform(loc=0.01, scale=1.5), reg__fit_intercept=[True, False], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params, search_method='randomizedsearchcv') self.assertLess(reg.best_score_.values, 3.6) self.assertLessEqual(reg.best_params_['reg__alpha'], 1.51) self.assertGreaterEqual(reg.best_params_['reg__alpha'], 0.01) self.assertIn(reg.best_params_['reg__fit_intercept'], [True, False]) # sklearn < 0.22 does not have a stacking regressor @unittest.skipIf(sk_version < '0.22.0', 'scikit-learn version is less than 0.22') def test_stacking_regressor_randomizedsearchcv(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) stack = dict(regressors=['kneighborsregressor', 'bayesianridge'], final_regressor='lasso') reg = Regressor(regressor_choice='stackingregressor', pipeline_transform='standardscaler', stacking_options=dict(layers=stack), randomizedcv_n_iter=6) search_params = dict(reg__0__n_neighbors=randint(low=2, high=5), reg__1__alpha_1=[1e-7, 1e-6], reg__final_estimator__alpha=[1.0], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params, search_method='randomizedsearchcv') self.assertLess(reg.best_score_.values, 3.5) self.assertLessEqual(reg.best_params_['reg__0__n_neighbors'], 5) self.assertGreaterEqual(reg.best_params_['reg__0__n_neighbors'], 2) self.assertIn(reg.best_params_['reg__1__alpha_1'], [1e-7, 1e-6]) self.assertIn(reg.best_params_['reg__final_estimator__alpha'], [1.0]) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressor_randomizedsearchcv(self): bunch = 
load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler', randomizedcv_n_iter=6) search_params = dict(reg__alpha=uniform(loc=0.01, scale=1.5), reg__fit_intercept=[True, False], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params, search_method='randomizedsearchcv') self.assertLess(reg.best_score_.values, 12.0) self.assertLessEqual(reg.best_params_['reg__estimator__alpha'], 1.51) self.assertGreaterEqual(reg.best_params_['reg__estimator__alpha'], 0.01) self.assertIn(reg.best_params_['reg__estimator__fit_intercept'], [True, False]) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressorchain_randomizedsearchcv(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler', randomizedcv_n_iter=6, chain_order=[2, 0, 1]) search_params = dict(reg__alpha=uniform(loc=0.01, scale=1.5), reg__fit_intercept=[True, False], tr__with_std=[True, False]) reg.search(X_train, y_train, search_params=search_params, search_method='randomizedsearchcv') self.assertLess(reg.best_score_.values, 12.0) self.assertLessEqual(reg.best_params_['reg__base_estimator__alpha'], 1.51) self.assertGreaterEqual(reg.best_params_['reg__base_estimator__alpha'], 0.01) self.assertIn(reg.best_params_['reg__base_estimator__fit_intercept'], [True, False]) def test_regressor_bayesoptcv(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='svr', 
pipeline_transform='standardscaler') search_pbounds = dict(reg__gamma=(0.1, 2.0), reg__epsilon=(0.1, 0.4)) reg.search(X_train, y_train, search_params=search_pbounds, search_method='bayesoptcv') self.assertLess(reg.best_score_.values, 3.7) self.assertLessEqual(reg.best_params_['reg__gamma'], 2.0) self.assertGreaterEqual(reg.best_params_['reg__gamma'], 0.1) self.assertLessEqual(reg.best_params_['reg__epsilon'], 0.4) self.assertGreaterEqual(reg.best_params_['reg__epsilon'], 0.1) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressor_bayesoptcv(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='svr', pipeline_transform='standardscaler') search_pbounds = dict(reg__gamma=(0.1, 2.0), reg__epsilon=(0.1, 0.4)) reg.search(X_train, y_train, search_params=search_pbounds, search_method='bayesoptcv') self.assertLess(reg.best_score_.values, 10.0) self.assertLessEqual(reg.best_params_['reg__estimator__gamma'], 2.0) self.assertGreaterEqual(reg.best_params_['reg__estimator__gamma'], 0.1) self.assertLessEqual(reg.best_params_['reg__estimator__epsilon'], 0.4) self.assertGreaterEqual(reg.best_params_['reg__estimator__epsilon'], 0.1) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressorchain_bayesoptcv(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='svr', pipeline_transform='standardscaler', chain_order=[2, 0, 1]) search_pbounds = dict(reg__gamma=(0.1, 2.0), reg__epsilon=(0.1, 0.4)) reg.search(X_train, y_train, search_params=search_pbounds, 
search_method='bayesoptcv') self.assertLess(reg.best_score_.values, 10.0) self.assertLessEqual(reg.best_params_['reg__base_estimator__gamma'], 2.0) self.assertGreaterEqual(reg.best_params_['reg__base_estimator__gamma'], 0.1) self.assertLessEqual(reg.best_params_['reg__base_estimator__epsilon'], 0.4) self.assertGreaterEqual(reg.best_params_['reg__base_estimator__epsilon'], 0.1) def test_regressor_fit_score(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler') reg.fit(X_train, y_train) y_pred = reg.fit(X_train, y_train).predict(X_test) score = reg.score(y_test, y_pred) self.assertCountEqual(y_pred.index, y_test.index) self.assertGreaterEqual(score['mae'].values, 0.0) self.assertGreaterEqual(score['mse'].values, 0.0) self.assertLess(score['mae'].values, 3.1) self.assertLess(score['mse'].values, 23.0) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressor_fit_score(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler') y_pred = reg.fit(X_train, y_train).predict(X_test) score = reg.score(y_test, y_pred).mean() self.assertCountEqual(y_pred.index, y_test.index) self.assertGreaterEqual(score['mae'], 0.0) self.assertGreaterEqual(score['mse'], 0.0) self.assertLess(score['mae'], 11.0) self.assertLess(score['mse'], 232.0) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_multioutput_regressorchain_fit_score(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], 
bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = Regressor(regressor_choice='ridge', pipeline_transform='standardscaler', chain_order=[0, 2, 1]) y_pred = reg.fit(X_train, y_train).predict(X_test) score = reg.score(y_test, y_pred).mean() self.assertCountEqual(y_pred.index, y_test.index) self.assertGreaterEqual(score['mae'], 0.0) self.assertGreaterEqual(score['mse'], 0.0) self.assertLess(score['mae'], 11.0) self.assertLess(score['mse'], 237.0) # sklearn < 0.22 does not have a stacking regressor @unittest.skipIf(sk_version < '0.22.0', 'scikit-learn version is less than 0.22') def test_stacking_regressor_fit_score(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) stack = dict(regressors=['kneighborsregressor', 'bayesianridge'], final_regressor='lasso') reg = Regressor(regressor_choice='stackingregressor', pipeline_transform='standardscaler', stacking_options=dict(layers=stack)) reg.fit(X_train, y_train) y_pred = reg.fit(X_train, y_train).predict(X_test) score = reg.score(y_test, y_pred) self.assertCountEqual(y_pred.index, y_test.index) self.assertGreaterEqual(score['mae'].values, 0.0) self.assertGreaterEqual(score['mse'].values, 0.0) self.assertLess(score['mae'].values, 2.8) self.assertLess(score['mse'].values, 19.0) def test_pipeline_clone_fit_score(self): X, y = load_boston(return_X_y=True) X, y = pd.DataFrame(X), pd.Series(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) transformer_list = [('pca', PCA(n_components=1)), ('svd', TruncatedSVD(n_components=2))] union = FeatureUnion(transformer_list=transformer_list, n_jobs=-1) params = dict(alpha=1.2, positive=True) reg = Regressor(regressor_choice='lasso', pipeline_transform=('tr', union), params=params) reg.get_pipeline(y=y_train) _class_before_clone = reg.pipe.__class__ reg.pipe = clone(reg.pipe) y_pred = reg.fit(X_train, 
y_train).predict(X_test) score = reg.score(y_test, y_pred) self.assertEqual(_class_before_clone, reg.pipe.__class__) self.assertCountEqual(y_pred.index, y_test.index) self.assertGreaterEqual(score['mae'].values, 0.0) self.assertGreaterEqual(score['mse'].values, 0.0) self.assertLess(score['mae'].values, 11.0) self.assertLess(score['mse'].values, 232.0) def test_shap_explainer(self): X_train, _, y_train, _ = load_benchmark(return_split=True) index = 3 interpret = ShapInterpret(regressor_choice='ridgecv', target_index=index) interpret.fit(X=X_train, y=y_train, index=index) explainer, shap_values = interpret.explainer(X=X_train) self.assertEqual(X_train.shape, shap_values.shape) def test_estimator_choice(self): choices = ['ridge', 'Ridge', 'RIDGE', 'rIdGe'] estimator_choices = [_check_estimator_choice(estimator_choice=choice, estimator_type='regression') for choice in choices] self.assertListEqual(estimator_choices, ['ridge'] * 4) def test_stacking_choice(self): reg_test = dict(regressors=['kneighborsregressor', 'bayesianridge'], final_regressor='lasso') est_test = dict(estimators=['kneighborsregressor', 'bayesianridge'], final_estimator='lasso') reg = dict(regressors=['KnEiGhBoRsReGrEsSoR', 'BAYESIANRIDGE'], final_regressor='Lasso') est = dict(estimators=['KnEiGhBoRsReGrEsSoR', 'BAYESIANRIDGE'], final_estimator='Lasso') check_reg_stack = _check_stacking_layer(stacking_layer=reg, estimator_type='regression') check_est_stack = _check_stacking_layer(stacking_layer=est, estimator_type='regression') self.assertDictEqual(reg_test, check_reg_stack) self.assertDictEqual(est_test, check_est_stack) # sklearn < 0.23 does not have as_frame parameter @unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23') def test_score_uniform_average(self): bunch = load_linnerud(as_frame=True) # returns a Bunch instance X, y = bunch['data'], bunch['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg = 
Regressor(regressor_choice='ridge', pipeline_transform='standardscaler', score_multioutput='uniform_average') y_pred = reg.fit(X_train, y_train).predict(X_test) score = reg.score(y_test, y_pred) self.assertCountEqual(y_pred.index, y_test.index) self.assertGreaterEqual(score['mae'], 0.0) self.assertGreaterEqual(score['mse'], 0.0) self.assertLess(score['mae'], 11.0) self.assertLess(score['mse'], 237.0) if __name__ == '__main__': unittest.main()
0.837121
0.653224
import os from typing import NamedTuple, Optional, Dict, Tuple, List, Sequence, Union, \ Set, Any, Callable import PIL.Image import numpy as np import pandas as pd import torch import torchvision as tv from tqdm import tqdm from .. import transforms as trafo from ..base import BaseDataset class BrodenLabel(NamedTuple): """Information needed to load the annotation of a Broden label.""" name: str """The (unique) name of the label in the annotations.""" number: int """The label ID.""" category: str """The category from which to select samples for the label""" class BrodenHandle(BaseDataset): """Handle to collect a sub-dataset of a dataset following Broden format. .. note:: The original Broden dataset is not required for usage of this handle. Used datasets just must use a format as is used by the Broden dataset. In the following, the format specifics relevant for the datasets that can be handled are explained, using the original Broden Dataset as role model. *(No code from the original datasets was used.)* **About the Original Broden Dataset** The Broden dataset is the broad and densely labeled dataset initially prepared for the paper `Network Dissection <http://arxiv.org/abs/1704.05796>`_. It is a combination of the following datasets: - `ADE (scene, object, part) <https://groups.csail.mit.edu/vision/datasets/ADE20K/>`_ - `Pascal-Context (object) <https://cs.stanford.edu/~roozbeh/pascal-context/>`_ - `Pascal-Part (part) <http://roozbehm.info/pascal-parts/pascal-parts.html>`_ - `OpenSurfaces (material) <http://opensurfaces.cs.cornell.edu/>`_ - `DTD (texture) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_ - and a generated color dataset, with 11 human selected colors The original Broden data features both pixel-level semantic segmentation annotations (for categories see :py:attr:`SEG_CATS`), and image-level classification annotations (for categories see :py:attr:`CLS_CATS`). 
The :py:attr:`annotations` attribute stores the raw annotation information as :py:class:`pandas.DataFrame` as it is loaded from the index file (see :py:attr:`INDEX_CSV_FILE`) within the :py:attr:`~hybrid_learning.datasets.base.BaseDataset.dataset_root`. For the format of the annotations see :py:attr:`annotations` directly. .. note:: To create sub-sets, one can also provide the annotations information on init. **Default Output Format** The :py:meth:`~hybrid_learning.datasets.base.BaseDataset.getitem` method yields tuples of input image and a dictionary ``{label_name: annotation}`` containing the annotations for all specified labels. For the exact output format of the annotations have a look at the :py:meth:`getitem` doc. By default, for classification, the annotation is ``bool``, and for segmentation, it is a :py:class:`numpy.ndarray` binary mask for the label. If the label information is missing for the selected item, ``None`` is returned instead. This output is transformed by :py:attr:`~hybrid_learning.datasets.base.BaseDataset.transforms` before yielding it as output of :py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__`. .. note:: - To collect a single custom label/merged annotations from the Broden dataset, refer to the :py:meth:`custom_label` builder. - To modify the internal annotations table after init, use :py:meth:`prune` or directly modify :py:attr:`annotations`. """ CAT_SEP = ">>" """Separator string if the category is specified for a label. Then the format is ``"{label}{sep}{category}"``.""" LABEL_CSV_FILE: str = "label.csv" """Path to the file containing meta-information about the labels, relative to a dataset root. For details on the encoding see :py:meth:`label_info_for`.""" INDEX_CSV_FILE: str = "index.csv" """Path to the file containing the annotation information, relative to a dataset root. For the encoding see the documentation of this class.""" IMAGES_ROOT: str = "images" """Root directory for annotated image files. 
Relative to the :py:attr:`~hybrid_learning.datasets.base.BaseDataset.dataset_root`. Annotations can be found in :py:attr:`INDEX_CSV_FILE`. """ SEG_CATS = ('object', 'part', 'color', 'material') """Categories that provide segmentation data.""" CLS_CATS = ('scene', 'texture') """Categories that provide classification data.""" def __init__(self, labels: Sequence[BrodenLabel], dataset_root: str, annotations: pd.DataFrame = None, prune_na: bool = True, prune_na_rule: str = 'all', broden_split: Optional[str] = None, max_num_samples: Optional[int] = None, shuffle: bool = False, **dataset_args): """Init. For further arguments see the details in :py:meth:`standard_prune`. .. warning:: Currently, no labels with duplicate names are allowed. Therefore, a label may only occur for one category. :param labels: list of labels to collect for each sample. :param dataset_root: the path to the root directory holding the annotation files and the images/ directory with the images and segmentations :param annotations: optional initializer for :py:attr:`annotations`, which is by default loaded from :py:const:`INDEX_CSV_FILE`; use to create sub-sets :param dataset_args: arguments to :py:class:`~hybrid_learning.datasets.base.BaseDataset`. """ if annotations is not None and len(annotations) <= 0: raise ValueError("Empty annotations!") if len(labels) == 0: raise ValueError("Empty labels!") self._default_transforms = self.datum_to_tens """The default transformation will return tensors.""" super(BrodenHandle, self).__init__(dataset_root=dataset_root, **dataset_args) self.annotations: pd.DataFrame = annotations \ if annotations is not None \ else self.load_annotations_table(self.dataset_root) """The actual annotation (meta-)information. The columns used here are described below. .. rubric:: Preliminary Remarks - All file-paths are relative to :py:attr:`~hybrid_learning.datasets.base.BaseDataset.dataset_root` ``/images``. - Several files or class labels may be given, separated by semi-colon. 
- A mask for a category is an RGB-image encoding segmentation masks for all different labels of that category. For the encoding see :py:meth:`process_seg_mask`. - An annotation may have labels in different categories (i.e. entries in these category columns). If annotation information for a category is missing, this column is ``None``. .. rubric:: The Columns The following columns are used here: - *image*: The file-path to the original image file of this annotation - *split*: The dataset split for which this annotation was used (``train`` or ``val``) - category columns: - *color*: color mask file-path - *object*: object mask file-path (semantic object segmentation) - *part*: part mask file-path (same as object masks, only parts belong to a super-object) - *material*: material mask file-path - *scene*: label number of the depicted scene - *texture*: texture label numbers """ if len(self) == 0: raise RuntimeError("Loaded annotations information is empty!") label_infos: pd.DataFrame = pd.read_csv( os.path.join(self.dataset_root, self.LABEL_CSV_FILE)) self.labels: List[BrodenLabel] = \ [self.parse_label(label_spec, label_infos) for label_spec in labels] """The labels to load the values for in each line of the Broden annotations.""" # Check for duplicate labels: for label in self.labels: duplicates: List[BrodenLabel] = [lab for lab in self.labels if lab.name == label.name] if self.labels.count(label) > 1: raise ValueError( "Duplicate label names for labels {}".format(duplicates)) # Prune annotations self.standard_prune(max_num_samples=max_num_samples, prune_na=prune_na, prune_na_rule=prune_na_rule, broden_split=broden_split, shuffle=shuffle) def standard_prune(self, max_num_samples: Optional[int] = None, prune_na: bool = True, prune_na_rule: str = 'all', broden_split: Optional[str] = None, shuffle: bool = False) -> 'BrodenHandle': """Apply the specified standard pruning operations. Pruning is applied to the :py:attr:`annotations` table. 
:param prune_na: whether to prune all entries (rows) from the :py:attr:`annotations` table in which ``'all'`` or ``'any'`` of the covered label categories are ``NaN`` (see also ``prune_rule``) :param prune_na_rule: if ``prune_na`` is ``True``, rule by which to select candidates for pruning: - ``'all'``: all categories occurring in the specified labels must be ``NaN`` - ``'any'``: any must be ``NaN`` :param broden_split: the original dataset had a fix split into training and validation data; choose the corresponding original split (see also :py:attr:`annotations`, where the split meta-information is stored in) :param max_num_samples: the maximum number of samples to select; if set to ``None``, no restriction is applied :param shuffle: whether to shuffle the dataset (before restricting to ``max_num_samples``) :return: self """ # region Value checks if broden_split is not None and broden_split not in ('train', 'val'): raise ValueError(("broden_split must be one of ('train', 'val'), " "but was: {}").format(broden_split)) if prune_na and prune_na_rule not in ('all', 'any'): raise ValueError(("prune_na_rule must be one of ('all', 'any'), " "but was {}").format(prune_na_rule)) # endregion # Prune NaN values if prune_na: na_selector = \ self.annotations[{la.category for la in self.labels}].isna() if prune_na_rule == 'all': na_selector = na_selector.all(axis=1) else: na_selector = na_selector.any(axis=1) self.annotations: pd.DataFrame = self.annotations.loc[~na_selector] # Restrict to the selected split if broden_split is not None: self.annotations = \ self.annotations.loc[self.annotations['split'] == broden_split] # Restrict to the selected number of samples (and shuffle) if max_num_samples is None or max_num_samples <= 0 or \ max_num_samples > len(self.annotations): max_num_samples = len(self.annotations) if shuffle: self.annotations = self.annotations.sample(n=max_num_samples ).reset_index(drop=True) self.annotations = self.annotations.iloc[:max_num_samples] # Final 
sanity check if len(self) == 0: raise RuntimeError("Annotations information is now empty after " "standard pruning!") return self @classmethod def load_annotations_table(cls, dataset_root: str, index_file: str = None) -> pd.DataFrame: """Load the annotation information from the ``index_file`` under ``dataset_root``. For simplicity of parsing, all category and the ``"image"`` column are parsed to string. :param dataset_root: the root directory under which to find the index file :param index_file: the file name / relative path under ``dataset_root`` of the index CSV file to load the annotations from; defaults to :py:attr:`INDEX_CSV_FILE` :return: annotations table with correct types of the category columns """ index_file = index_file or cls.INDEX_CSV_FILE return pd.read_csv(os.path.join(dataset_root, index_file), dtype={col: str for col in [*cls.CLS_CATS, *cls.SEG_CATS, "image"]}) def parse_label(self, label_spec: Union[str, BrodenLabel], label_infos: pd.DataFrame, ) -> BrodenLabel: """Given a label specifier, parse it to a :py:class:`BrodenLabel` given ``label_infos``. 
:param label_spec: the label specifier to turn into a :py:class:`BrodenLabel` :param label_infos: the meta-information about all Broden labels; contains the information about available labels :return: the :py:class:`BrodenLabel` instance with information of the ``label_spec`` """ # Already in correct format: if isinstance(label_spec, BrodenLabel): return label_spec category: Optional[str] = None # region collect category information from label_spec if available if self.CAT_SEP not in label_spec: label_name: str = label_spec elif label_spec.split(self.CAT_SEP) == 2: label_name, category = label_spec.split(self.CAT_SEP) else: raise ValueError( ("Wrong label format of label specifier {}: expected exactly 1 " "occurrence of {}").format(label_spec, self.CAT_SEP)) # endregion # select category label_info: pd.Series = self.label_info_for(label_name, label_infos) categories: Dict[str, int] = self._to_cat_info(label_info['category']) category: str = category or list(categories.keys())[0] # region validate category if category not in categories: raise ValueError(("Category {} not available for labels {}; " "choose one of {}" ).format(category, self.labels, categories)) if category not in [*self.SEG_CATS, *self.CLS_CATS]: raise ValueError("Label {} has invalid category {}; allowed: {}" .format(label_spec, category, [*self.SEG_CATS, *self.CLS_CATS])) if category not in self.annotations.columns: raise ValueError(("Category {} of label {} not available in " "annotations; found cols: {}" ).format(category, label_spec, self.annotations.columns)) # endregion return BrodenLabel(name=label_name, number=label_info.number, category=category) @staticmethod def label_info_for(label_name: str, label_infos: pd.DataFrame) -> pd.Series: """Obtain information for label given by name from label information. A label may have samples in different categories. 
The output features the following information (compare Broden README): :number: the label ID (used for annotation in the segmentation masks) :name: the trivial unique name :category: the categories the labels have samples in, specified as semi-colon separated list of entries in ``{'color', 'object', 'material', 'part', 'scene', 'texture'}``, each entry followed by the total amount of samples for the label for that category; use :py:meth:`_to_cat_info` to process those :frequency: total number of images having that label over all categories :coverage: the mean(?) pixels per image :syns: synonyms :param label_name: the name of the label :param label_infos: the meta-information on all Broden labels as can by default be loaded from :py:const:`LABEL_CSV_FILE`. :returns: :py:class:`pandas.Series` with above fields filled :raises: :py:exc:`ValueError` if the label is not unique or cannot be found """ label_info = label_infos[label_infos['name'] == label_name] if len(label_info) < 1: raise ValueError("Label {} not found".format(label_name)) if len(label_info) > 1: raise ValueError("Label {} ambiguous: {} occurrences" .format(label_name, len(label_info))) label_info = label_info.iloc[0] return label_info @staticmethod def _to_cat_info(cat_info_str: str): """Transform category info str of cat1(freq1);cat2(freq2);... to a dict. :meta public: """ cats_freq: List[Tuple[str, ...]] = [tuple(cf.split('(')) for cf in cat_info_str.split(';')] for cat_freq in (cf for cf in cats_freq if not len(cf) == 2): raise ValueError(("Unknown format for category: {} (full category" "info: {})").format('('.join(cat_freq), cat_info_str)) return {c: f.rstrip(')') for c, f in cats_freq} def __len__(self): return len(self.annotations) def getitem(self, i: int) -> Tuple[PIL.Image.Image, Dict[str, Union[bool, np.ndarray]]]: """Provide tuple of input image and dictionary with annotations for all labels. (See :py:attr:`labels`). Used for :py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__`. 
The output format is a tuple of ``(input_image, {label_name: annotation})``. The return type is as follows: The input image is an RGB image as :py:class:`~PIL.Image.Image`; For the annotations dictionary holds: - Each label from :py:attr:`labels` is considered, and the annotation for a label is - for classification: a ``bool`` value - for segmentation: a binary mask as :py:class:`numpy.ndarray` - In case the label is not available, its value in the annotations dict is ``None``. is a tuple of the input :py:class:`~PIL.Image.Image` and the annotations dict. :return: tuple of input image and annotations dict """ img: PIL.Image.Image = PIL.Image.open(self.image_filepath(i)) anns: Dict[str, Union[bool, np.ndarray]] = self.load_anns(i) return img, anns def load_anns(self, i: int) -> Dict[str, Union[bool, np.ndarray]]: """Load all annotation information for row ``i``. Information is retrieved from :py:attr:`annotations`. For details on the output format see :py:meth:`load_ann`.""" loaded_rgb_masks = {} raw_ann_row: pd.Series = self.annotations.iloc[i] anns: Dict[str, Union[bool, np.ndarray]] = { label.name: self.load_ann(label, raw_ann_row=raw_ann_row, loaded_rgb_masks=loaded_rgb_masks) for label in self.labels } return anns @staticmethod def datum_to_tens(img: PIL.Image.Image, anns: Dict[bool, np.ndarray] ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: """This transformation will convert an output tuple of image, label dict to a tensor. For the input format see :py:meth:`getitem`. Any ``None`` entries in the annotations dictionary will remain ``None``. """ img_t = tv.transforms.ToTensor()(img) # pylint: disable=no-member anns_t = {k: (torch.as_tensor(a, dtype=torch.float) if a is not None else None) for k, a in anns.items()} # pylint: enable=no-member return img_t, anns_t def image_filepath(self, i: int) -> str: """Get the path to the image file for row ``i``. 
Information is retrieved from :py:attr:`annotations`.""" return os.path.join(self.dataset_root, self.IMAGES_ROOT, self.annotations.iloc[i]['image']) def load_ann(self, label: BrodenLabel, i: Optional[int] = None, raw_ann_row: pd.Series = None, loaded_rgb_masks: Dict[str, List[PIL.Image.Image]] = None ) -> Optional[Union[bool, np.ndarray]]: """Load the annotation information for ``label`` at row ``i``. Information is retrieved from :py:attr:`annotations`. If the annotation information is missing for the given label category, return ``None``. .. note:: If ``loaded_rgb_masks`` is given, this function has the side effect of updating this dict with newly loaded masks! This is used to speed up loading of several labels from the same mask. :param label: the label to restrict the annotation to :param i: the index of the row in the annotations information :py:attr:`annotations` which holds the information for this single annotation of interest :param raw_ann_row: optionally directly hand over the row of interest instead of providing its index (see ``i``) :param loaded_rgb_masks: RGB segmentation masks loaded so far (for speed-up); gets updated with any newly loaded masks :return: One of - ``None`` if category information is missing, - the binary segmentation mask for the label in case of a segmentation category, - the boolean truth value whether the label holds for the image in case of a classification category """ if i is None and raw_ann_row is None: raise ValueError("Either index i or the annotation row raw_ann_row" " must be given but both were None") if loaded_rgb_masks is None: loaded_rgb_masks: Dict[str, List[PIL.Image.Image]] = {} if raw_ann_row is None: raw_ann_row: pd.Series = self.annotations.iloc[i] raw_ann: Union[str, float] = raw_ann_row[label.category] # Missing annotation: return None if pd.isnull(raw_ann): return None raw_anns: List[str] = raw_ann.split(';') # raw_anns is list of file paths: if label.category in self.SEG_CATS: # RGB masks with label 
information encoded in red and green channel if label.category not in loaded_rgb_masks: # Update loaded mask list with newly loaded mask loaded_rgb_masks[label.category] = [ PIL.Image.open( os.path.join(self.dataset_root, self.IMAGES_ROOT, fp)) for fp in raw_anns] ann = self.process_seg_mask(label, loaded_rgb_masks[label.category]) return ann if label.category in self.CLS_CATS: # raw_anns is list of classification label numbers return str(label.number) in raw_anns raise ValueError("Unknown category for label {}; known ones: {}" .format(label, [*self.SEG_CATS, *self.CLS_CATS])) def process_seg_mask(self, label: BrodenLabel, rgb_masks: List[PIL.Image.Image]) -> np.ndarray: """Collect the binary segmentation mask for ``label`` from given relative file paths. Pixels belonging to the given ``label`` are 1, others 0. :param label: the label to look for (:py:attr:`~BrodenLabel.number` needed) :param rgb_masks: a list of RGB masks with label information encoded in red and green channel; for details on encoding see :py:meth:`to_seg_mask` :return: binary segmentation mask for ``label`` merged from the segmentation masks at given file paths :raises: :py:exc:`ValueError` for invalid label category """ if len(rgb_masks) == 0: raise ValueError("Empty relative file path list rel_fp!") # Convert to binary masks only for self.label: masks_np = [self.to_seg_mask(ext_mask, label_num=label.number) for ext_mask in rgb_masks] # Add up masks return (np.sum(masks_np, axis=0) > 0) \ if len(masks_np) > 1 else masks_np[0] @staticmethod def to_seg_mask(seg: PIL.Image.Image, label_num: int) -> np.ndarray: """Given a Broden RGB segmentation, reduce it to a binary mask for ``label_num``. Broden segmentations are saved as RGB images, where the the label number of a pixel is ``(256 * green + red)`` with ``red`` the red channel value of the pixel, and ``green`` its green channel value. A label number of ``0`` means background. 
The label number is the ``'number'`` field from :py:attr:`label_info_for` respectively the :py:attr:`BrodenLabel.number` attribute. One can either specify a single label number as ``int``, or an iterable of label numbers. :param seg: the original RGB segmentation mask encoded as described above :param label_num: the label number to restrict the mask to :return: union of binary segmentation masks for given label numbers """ # noinspection PyTypeChecker seg_np = np.array(seg) red, green = seg_np[..., 0], seg_np[..., 1] binary_seg_np = (256 * green + red) == label_num return binary_seg_np def prune(self, condition: Callable[[Tuple[Any, Any]], bool], by_target: bool = False, show_progress_bar: bool = False) -> 'BrodenHandle': """Prune all items that fulfill ``condition`` from this dataset. For this, :py:attr:`annotations` is modified accordingly. :param condition: callable that accepts the output of :py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__` and returns a ``bool`` stating whether this item is to be pruned :param show_progress_bar: whether to show a progress bar while collecting the selector for ``condition`` :param by_target: only load the target annotations of each item (the :py:attr:`~hybrid_learning.datasets.base.BaseDataset.transforms` are applied with dummy input) and apply ``condition`` to the target; asserts that transforms yields a tuple of ``(input, target)``; this is useful to avoid the costly loading of input images if they do not contribute to the transformations or the ``condition``. 
:return: this instance (with modified :py:attr:`annotations`) """ selector: np.ndarray = self._selector_for( condition, show_progress_bar=show_progress_bar, by_target=by_target) self.annotations = self.annotations[~selector] return self def balance(self, condition: Callable[[Tuple[Any, Any]], bool], proportion: float = 0.5, by_target: bool = False, show_progress_bar: bool = False) -> 'BrodenHandle': """Restrict this dataset to a subset with an exact ``proportion`` fulfilling ``condition``. For this, :py:attr:`annotations` is modified accordingly. After splitting the dataset by ``condition``, the half which is too large to fulfill ``proportion`` is reduced by random sub-sampling, determining the final size of the dataset. If there is only one class in the dataset, only shuffling is applied. :param condition: callable that accepts the output of :py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__` and returns a ``bool`` stating whether this item belongs to the first split :param proportion: the aimed-for proportion of the first split on the final dataset :param show_progress_bar: whether to show a progress bar while collecting the selector for ``condition`` :param by_target: only load the target annotations of each item (the :py:attr:`~hybrid_learning.datasets.base.BaseDataset.transforms` are applied with dummy input) and apply ``condition`` to the target; asserts that transforms yields a tuple of ``(input, target)``; this is useful to avoid the costly loading of input images if they do not contribute to the transformations or the ``condition``. 
:return: self """ selector: np.ndarray = self._selector_for( condition, by_target=by_target, show_progress_bar=show_progress_bar) # Reduce positives pos: pd.DataFrame = self.annotations.loc[selector] if len(pos) / len(self.annotations) > proportion: to_reduce: pd.DataFrame = pos to_keep: pd.DataFrame = self.annotations.loc[~selector] prop_to_keep: float = 1 - proportion # Reduce negatives else: to_reduce: pd.DataFrame = self.annotations.loc[~selector] to_keep: pd.DataFrame = pos prop_to_keep: float = proportion # Is there only one class in the dataset? if np.allclose(prop_to_keep, 0): return self.shuffle() # Calc the final amounts of samples for each slice num_to_keep: int = len(to_keep) num_all: int = int(num_to_keep / prop_to_keep) num_to_reduce: int = max(1, num_all - num_to_keep) # Subsample, shuffle: self.annotations: pd.DataFrame = pd.concat( [to_reduce.sample(n=num_to_reduce), to_keep.sample(n=num_to_keep)], ignore_index=True) self.shuffle() return self def _selector_for(self, condition: Callable[[Tuple[Any, Any]], bool], show_progress_bar: bool = False, by_target: bool = False) -> np.ndarray: """Provide ``bool`` list matching indices of this dataset for which ``condition`` holds. Optionally show a progress bar while processing the data. 
:param by_target: only load target (transforms is applied with dummy input) and apply condition to target; asserts that transforms yields a tuple of ``(input, target)`` """ if by_target: dummy_img: PIL.Image.Image = PIL.Image.open(self.image_filepath(0)) load_fn: Callable[[int], Any] = \ (lambda i: self.transforms(dummy_img, self.load_anns(i))[1]) else: load_fn: Callable[[int], Any] = lambda i: self[i] selector: List[bool] = [] iterator = range(len(self)) if show_progress_bar: iterator = tqdm(iterator, desc="Iterating " + self.__class__.__name__) for i in iterator: selector.append(condition(load_fn(i))) return np.array(selector, dtype=bool) def shuffle(self) -> 'BrodenHandle': """Shuffle the held annotations and return self.""" self.annotations = self.annotations.sample(frac=1 ).reset_index(drop=True) return self @classmethod def custom_label(cls, dataset_root: str, label: str, prune_empty: Union[bool, str] = True, balance_pos_to: Optional[float] = None, verbose: bool = False, **init_args): # pylint: disable=line-too-long """Return a :py:class:`BrodenHandle` instance with output restricted to single ``label``. The transformations in :py:attr:`~hybrid_learning.datasets.base.BaseDataset.transforms` will be chosen such that :py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__` outputs a tuple of ``(input_image, annotation)`` where - ``input_image`` is encoded as :py:class:`torch.Tensor` - ``annotation`` is a :py:class:`torch.Tensor` holding either the binary mask for the specified label or the bool classification value. The label may either be a label as would be specified in :py:class:`__init__ <BrodenHandle>` or a string representation of a :py:class:`~hybrid_learning.datasets.transforms.dict_transforms.Merge` operation. 
:param dataset_root: the ``dataset_root`` parameter for init of the :py:class:`BrodenHandle` :param label: the label to restrict to; may either be a valid string label name, a valid :py:class:`BrodenLabel`, or a valid string representation of a :py:class:`~hybrid_learning.datasets.transforms.dict_transforms.Merge` operation the :py:class:`~hybrid_learning.datasets.transforms.dict_transforms.Merge.all_in_keys` of which are all valid string label names; :param init_args: further init arguments to the :py:class:`BrodenHandle` :param balance_pos_to: if a value given, balance the resulting :py:class:`BrodenHandle` instance such that the proportion of the ``True`` entries is this value; only use for classification examples :param prune_empty: whether to prune empty entries (``None`` values and empty masks) using :py:meth:`prune` :param verbose: show progress bars :return: :py:class:`BrodenHandle` instance for ``dataset_root`` with :py:attr:`~hybrid_learning.datasets.base.BaseDataset.transforms` and :py:class:`~BrodenHandle.labels` selected such that the output of :py:meth:`getitem` is transformed to the format specified above """ # pylint: enable=line-too-long # region Value checks if "labels" in init_args: raise ValueError(("init_args must not contain labels key, " "but were {}").format(init_args)) # endregion merge_op: Optional[trafo.Merge] = None # Merge op before flatten # region Parse the label (and collect Merge operation if necessary): # collect: labels, merge_op, final_key (=the final key to which to # restrict the dict) if isinstance(label, BrodenLabel): labels: List[BrodenLabel] = [label] final_key: str = label.name elif isinstance(label, trafo.Merge): merge_op: trafo.Merge = label labels: Set[str] = merge_op.all_in_keys final_key: str = merge_op.out_key elif isinstance(label, str): # Can be parsed to merge operation? 
parsed_label: Union[str, trafo.Merge] = trafo.Merge.parse(label) if isinstance(parsed_label, str): labels: List[str] = [label] final_key: str = label else: merge_op: trafo.Merge = parsed_label labels: Set[str] = merge_op.all_in_keys final_key: str = merge_op.out_key else: raise ValueError("label {} has unknown format {}" .format(label, type(label))) assert final_key != "" assert len(labels) > 0 # endregion # region Collect the transformation trafos: List[trafo.TupleTransforms] = [] trafos += [trafo.OnTarget(merge_op), trafo.OnTarget(trafo.RestrictDict([final_key]))] \ if merge_op else [] trafos += [cls.datum_to_tens, trafo.OnTarget(trafo.FlattenDict(final_key))] user_defined_trafo = init_args.pop('transforms', None) # endregion broden_inst = cls(dataset_root=dataset_root, labels=labels, **init_args) # specify separately for IDE type inference: broden_inst.transforms = trafo.Compose(trafos) if prune_empty: broden_inst.prune( lambda a: a is None or (a.dim() > 0 and a.sum() == 0), by_target=True, show_progress_bar=verbose) if balance_pos_to is not None: broden_inst.balance(lambda a: a, proportion=balance_pos_to, by_target=True, show_progress_bar=verbose) # Append the user-defined transforms # (after pruning, since this requires control over the output format!) if user_defined_trafo is not None: broden_inst.transforms.append(user_defined_trafo) return broden_inst
hybrid_learning/datasets/custom/broden.py
import os
from typing import NamedTuple, Optional, Dict, Tuple, List, Sequence, Union, \
    Set, Any, Callable

import PIL.Image
import numpy as np
import pandas as pd
import torch
import torchvision as tv
from tqdm import tqdm

from .. import transforms as trafo
from ..base import BaseDataset


class BrodenLabel(NamedTuple):
    """Information needed to load the annotation of a Broden label."""
    name: str
    """The (unique) name of the label in the annotations."""
    number: int
    """The label ID (encoded in segmentation masks, see
    :py:meth:`BrodenHandle.to_seg_mask`)."""
    category: str
    """The category from which to select samples for the label."""


class BrodenHandle(BaseDataset):
    """Handle to collect a sub-dataset of a dataset following Broden format.

    .. note::
        The original Broden dataset is not required for usage of this
        handle; any dataset using the same format can be handled.

    The Broden dataset is the broad and densely labeled dataset prepared
    for the paper `Network Dissection <http://arxiv.org/abs/1704.05796>`_,
    combining ADE, Pascal-Context, Pascal-Part, OpenSurfaces, DTD, and a
    generated color dataset. It features pixel-level segmentation
    annotations (categories in :py:attr:`SEG_CATS`) and image-level
    classification annotations (categories in :py:attr:`CLS_CATS`).

    The :py:attr:`annotations` attribute stores the raw annotation
    information as :py:class:`pandas.DataFrame`, loaded from the index
    file (see :py:attr:`INDEX_CSV_FILE`) within the
    :py:attr:`~hybrid_learning.datasets.base.BaseDataset.dataset_root`.
    To create sub-sets, the annotations table can also be provided on init.

    **Default output format**: :py:meth:`getitem` yields tuples of input
    image and a dict ``{label_name: annotation}`` with one entry per
    entry of :py:attr:`labels`. For classification the annotation is a
    ``bool``, for segmentation a binary :py:class:`numpy.ndarray` mask;
    missing label information yields ``None``. This output is passed
    through
    :py:attr:`~hybrid_learning.datasets.base.BaseDataset.transforms`
    before being yielded by
    :py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__`.

    .. note::
        - To collect a single custom label/merged annotations, refer to
          the :py:meth:`custom_label` builder.
        - To modify the internal annotations table after init, use
          :py:meth:`prune` or directly modify :py:attr:`annotations`.
    """

    CAT_SEP = ">>"
    """Separator string if the category is specified for a label.
    Then the format is ``"{label}{sep}{category}"``."""

    LABEL_CSV_FILE: str = "label.csv"
    """Path to the file containing meta-information about the labels,
    relative to a dataset root. For the encoding see
    :py:meth:`label_info_for`."""

    INDEX_CSV_FILE: str = "index.csv"
    """Path to the file containing the annotation information, relative
    to a dataset root. For the encoding see :py:attr:`annotations`."""

    IMAGES_ROOT: str = "images"
    """Root directory for annotated image files, relative to the
    :py:attr:`~hybrid_learning.datasets.base.BaseDataset.dataset_root`.
    Annotations can be found in :py:attr:`INDEX_CSV_FILE`."""

    SEG_CATS = ('object', 'part', 'color', 'material')
    """Categories that provide segmentation data."""

    CLS_CATS = ('scene', 'texture')
    """Categories that provide classification data."""

    def __init__(self, labels: Sequence[BrodenLabel],
                 dataset_root: str,
                 annotations: pd.DataFrame = None,
                 prune_na: bool = True, prune_na_rule: str = 'all',
                 broden_split: Optional[str] = None,
                 max_num_samples: Optional[int] = None,
                 shuffle: bool = False,
                 **dataset_args):
        """Init. For pruning arguments see :py:meth:`standard_prune`.

        .. warning::
            Currently, no labels with duplicate names are allowed.
            Therefore, a label may only occur for one category.

        :param labels: list of labels to collect for each sample
        :param dataset_root: path to the root directory holding the
            annotation files and the images/ directory with the images
            and segmentations
        :param annotations: optional initializer for
            :py:attr:`annotations`, which is by default loaded from
            :py:const:`INDEX_CSV_FILE`; use to create sub-sets
        :param dataset_args: arguments to
            :py:class:`~hybrid_learning.datasets.base.BaseDataset`
        :raises: :py:exc:`ValueError` for empty ``labels`` or
            ``annotations``, or duplicate label names;
            :py:exc:`RuntimeError` for an empty loaded annotations table
        """
        if annotations is not None and len(annotations) <= 0:
            raise ValueError("Empty annotations!")
        if len(labels) == 0:
            raise ValueError("Empty labels!")

        # The default transformation will return tensors.
        self._default_transforms = self.datum_to_tens
        super(BrodenHandle, self).__init__(dataset_root=dataset_root,
                                           **dataset_args)

        self.annotations: pd.DataFrame = annotations \
            if annotations is not None \
            else self.load_annotations_table(self.dataset_root)
        """The actual annotation (meta-)information.

        All file-paths are relative to
        :py:attr:`~hybrid_learning.datasets.base.BaseDataset.dataset_root`
        ``/images``; several files or class labels may be given,
        separated by semi-colon. A mask for a category is an RGB image
        encoding segmentation masks for all labels of that category (see
        :py:meth:`process_seg_mask`). Missing annotation information for
        a category is ``None``.

        Columns: *image* (input image file-path), *split* (original
        ``train``/``val`` split), and the category columns *color*,
        *object*, *part*, *material* (mask file-paths), *scene*,
        *texture* (label numbers).
        """
        if len(self) == 0:
            raise RuntimeError("Loaded annotations information is empty!")

        label_infos: pd.DataFrame = pd.read_csv(
            os.path.join(self.dataset_root, self.LABEL_CSV_FILE))
        self.labels: List[BrodenLabel] = \
            [self.parse_label(label_spec, label_infos)
             for label_spec in labels]
        """The labels to load the values for in each line of the Broden
        annotations."""

        # Check for duplicate label names (names must be unique, see
        # class docstring; comparing by name, not by full tuple, so that
        # same-name labels from different categories are also rejected):
        for label in self.labels:
            duplicates: List[BrodenLabel] = [lab for lab in self.labels
                                             if lab.name == label.name]
            if len(duplicates) > 1:
                raise ValueError(
                    "Duplicate label names for labels {}".format(duplicates))

        # Prune annotations
        self.standard_prune(max_num_samples=max_num_samples,
                            prune_na=prune_na, prune_na_rule=prune_na_rule,
                            broden_split=broden_split, shuffle=shuffle)

    def standard_prune(self, max_num_samples: Optional[int] = None,
                       prune_na: bool = True, prune_na_rule: str = 'all',
                       broden_split: Optional[str] = None,
                       shuffle: bool = False) -> 'BrodenHandle':
        """Apply the specified standard pruning operations to
        :py:attr:`annotations`.

        :param prune_na: whether to prune all rows from
            :py:attr:`annotations` in which ``'all'`` or ``'any'`` of
            the covered label categories are ``NaN``
        :param prune_na_rule: if ``prune_na``, rule by which to select
            pruning candidates: ``'all'`` (all categories occurring in
            the labels must be ``NaN``) or ``'any'``
        :param broden_split: restrict to the original fixed ``'train'``
            or ``'val'`` split (see :py:attr:`annotations`)
        :param max_num_samples: maximum number of samples to select;
            ``None`` means no restriction
        :param shuffle: whether to shuffle the dataset (before
            restricting to ``max_num_samples``)
        :return: self
        :raises: :py:exc:`ValueError` for invalid ``broden_split`` or
            ``prune_na_rule``; :py:exc:`RuntimeError` if pruning empties
            the dataset
        """
        # region Value checks
        if broden_split is not None and broden_split not in ('train', 'val'):
            raise ValueError(("broden_split must be one of ('train', 'val'), "
                              "but was: {}").format(broden_split))
        if prune_na and prune_na_rule not in ('all', 'any'):
            raise ValueError(("prune_na_rule must be one of ('all', 'any'), "
                              "but was {}").format(prune_na_rule))
        # endregion

        # Prune NaN values
        if prune_na:
            # list(...) since indexing with a plain set is deprecated in
            # modern pandas; duplicates are removed by the set first
            na_selector = self.annotations[
                list({la.category for la in self.labels})].isna()
            if prune_na_rule == 'all':
                na_selector = na_selector.all(axis=1)
            else:
                na_selector = na_selector.any(axis=1)
            self.annotations: pd.DataFrame = self.annotations.loc[~na_selector]

        # Restrict to the selected split
        if broden_split is not None:
            self.annotations = \
                self.annotations.loc[self.annotations['split'] == broden_split]

        # Restrict to the selected number of samples (and shuffle)
        if max_num_samples is None or max_num_samples <= 0 or \
                max_num_samples > len(self.annotations):
            max_num_samples = len(self.annotations)
        if shuffle:
            self.annotations = self.annotations.sample(n=max_num_samples
                                                       ).reset_index(drop=True)
        self.annotations = self.annotations.iloc[:max_num_samples]

        # Final sanity check
        if len(self) == 0:
            raise RuntimeError("Annotations information is now empty after "
                               "standard pruning!")
        return self

    @classmethod
    def load_annotations_table(cls, dataset_root: str,
                               index_file: str = None) -> pd.DataFrame:
        """Load the annotation information from the ``index_file`` under
        ``dataset_root``.

        For simplicity of parsing, all category columns and the
        ``"image"`` column are parsed to string.

        :param dataset_root: the root directory under which to find the
            index file
        :param index_file: the file name / relative path under
            ``dataset_root`` of the index CSV file to load the
            annotations from; defaults to :py:attr:`INDEX_CSV_FILE`
        :return: annotations table with correct types of the category
            columns
        """
        index_file = index_file or cls.INDEX_CSV_FILE
        return pd.read_csv(os.path.join(dataset_root, index_file),
                           dtype={col: str for col in
                                  [*cls.CLS_CATS, *cls.SEG_CATS, "image"]})

    def parse_label(self, label_spec: Union[str, BrodenLabel],
                    label_infos: pd.DataFrame,
                    ) -> BrodenLabel:
        """Given a label specifier, parse it to a :py:class:`BrodenLabel`
        given ``label_infos``.

        A plain label name selects the first category listed for that
        label; the category can be forced with the format
        ``"{label}{CAT_SEP}{category}"``.

        :param label_spec: the label specifier to turn into a
            :py:class:`BrodenLabel`
        :param label_infos: the meta-information about all Broden
            labels; contains the information about available labels
        :return: the :py:class:`BrodenLabel` instance with information
            of the ``label_spec``
        :raises: :py:exc:`ValueError` for malformed specifiers or
            unavailable/invalid categories
        """
        # Already in correct format:
        if isinstance(label_spec, BrodenLabel):
            return label_spec

        category: Optional[str] = None
        # region collect category information from label_spec if available
        if self.CAT_SEP not in label_spec:
            label_name: str = label_spec
        elif len(label_spec.split(self.CAT_SEP)) == 2:
            label_name, category = label_spec.split(self.CAT_SEP)
        else:
            raise ValueError(
                ("Wrong label format of label specifier {}: expected exactly 1 "
                 "occurrence of {}").format(label_spec, self.CAT_SEP))
        # endregion

        # select category (default: first category listed for the label)
        label_info: pd.Series = self.label_info_for(label_name, label_infos)
        categories: Dict[str, int] = self._to_cat_info(label_info['category'])
        category: str = category or list(categories.keys())[0]

        # region validate category
        # NOTE: do not reference self.labels here -- it does not exist
        # yet while __init__ is building it via parse_label.
        if category not in categories:
            raise ValueError(("Category {} not available for label {}; "
                              "choose one of {}"
                              ).format(category, label_name, categories))
        if category not in [*self.SEG_CATS, *self.CLS_CATS]:
            raise ValueError("Label {} has invalid category {}; allowed: {}"
                             .format(label_spec, category,
                                     [*self.SEG_CATS, *self.CLS_CATS]))
        if category not in self.annotations.columns:
            raise ValueError(("Category {} of label {} not available in "
                              "annotations; found cols: {}"
                              ).format(category, label_spec,
                                       self.annotations.columns))
        # endregion

        return BrodenLabel(name=label_name, number=label_info.number,
                           category=category)

    @staticmethod
    def label_info_for(label_name: str, label_infos: pd.DataFrame) -> pd.Series:
        """Obtain information for label given by name from label information.

        A label may have samples in different categories.
        The output features the following information (compare Broden
        README):

        :number: the label ID (used for annotation in the segmentation
            masks)
        :name: the trivial unique name
        :category: the categories the labels have samples in, specified
            as semi-colon separated list of entries in ``{'color',
            'object', 'material', 'part', 'scene', 'texture'}``, each
            entry followed by the total amount of samples for the label
            for that category; use :py:meth:`_to_cat_info` to process
        :frequency: total number of images having that label over all
            categories
        :coverage: the mean(?) pixels per image
        :syns: synonyms

        :param label_name: the name of the label
        :param label_infos: the meta-information on all Broden labels as
            can by default be loaded from :py:const:`LABEL_CSV_FILE`
        :returns: :py:class:`pandas.Series` with above fields filled
        :raises: :py:exc:`ValueError` if the label is not unique or
            cannot be found
        """
        label_info = label_infos[label_infos['name'] == label_name]
        if len(label_info) < 1:
            raise ValueError("Label {} not found".format(label_name))
        if len(label_info) > 1:
            raise ValueError("Label {} ambiguous: {} occurrences"
                             .format(label_name, len(label_info)))
        label_info = label_info.iloc[0]
        return label_info

    @staticmethod
    def _to_cat_info(cat_info_str: str):
        """Transform category info str of cat1(freq1);cat2(freq2);...
        to a dict.

        :meta public:
        """
        cats_freq: List[Tuple[str, ...]] = [tuple(cf.split('('))
                                            for cf in cat_info_str.split(';')]
        for cat_freq in (cf for cf in cats_freq if not len(cf) == 2):
            raise ValueError(("Unknown format for category: {} (full category"
                              "info: {})").format('('.join(cat_freq),
                                                  cat_info_str))
        return {c: f.rstrip(')') for c, f in cats_freq}

    def __len__(self):
        return len(self.annotations)

    def getitem(self, i: int) -> Tuple[PIL.Image.Image,
                                       Dict[str, Union[bool, np.ndarray]]]:
        """Provide tuple of input image and dictionary with annotations
        for all labels (see :py:attr:`labels`).

        Used for
        :py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__`.
        The input image is an RGB :py:class:`~PIL.Image.Image`.
        For the annotations dictionary holds:

        - Each label from :py:attr:`labels` is considered, and the
          annotation for a label is

          - for classification: a ``bool`` value
          - for segmentation: a binary mask as :py:class:`numpy.ndarray`

        - In case the label is not available, its value in the
          annotations dict is ``None``.

        :return: tuple of input image and annotations dict
        """
        img: PIL.Image.Image = PIL.Image.open(self.image_filepath(i))
        anns: Dict[str, Union[bool, np.ndarray]] = self.load_anns(i)
        return img, anns

    def load_anns(self, i: int) -> Dict[str, Union[bool, np.ndarray]]:
        """Load all annotation information for row ``i``.

        Information is retrieved from :py:attr:`annotations`.
        For details on the output format see :py:meth:`load_ann`."""
        # Cache of RGB masks shared between labels of the same category
        # (load_ann updates it as a side effect):
        loaded_rgb_masks = {}
        raw_ann_row: pd.Series = self.annotations.iloc[i]
        anns: Dict[str, Union[bool, np.ndarray]] = {
            label.name: self.load_ann(label, raw_ann_row=raw_ann_row,
                                      loaded_rgb_masks=loaded_rgb_masks)
            for label in self.labels
        }
        return anns

    @staticmethod
    def datum_to_tens(img: PIL.Image.Image,
                      anns: Dict[str, Union[bool, np.ndarray]]
                      ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        """This transformation will convert an output tuple of
        image, label dict to a tensor.

        For the input format see :py:meth:`getitem`.
        Any ``None`` entries in the annotations dictionary will remain
        ``None``.
        """
        img_t = tv.transforms.ToTensor()(img)
        # pylint: disable=no-member
        anns_t = {k: (torch.as_tensor(a, dtype=torch.float)
                      if a is not None else None)
                  for k, a in anns.items()}
        # pylint: enable=no-member
        return img_t, anns_t

    def image_filepath(self, i: int) -> str:
        """Get the path to the image file for row ``i``.

        Information is retrieved from :py:attr:`annotations`."""
        return os.path.join(self.dataset_root, self.IMAGES_ROOT,
                            self.annotations.iloc[i]['image'])

    def load_ann(self, label: BrodenLabel,
                 i: Optional[int] = None, raw_ann_row: pd.Series = None,
                 loaded_rgb_masks: Dict[str, List[PIL.Image.Image]] = None
                 ) -> Optional[Union[bool, np.ndarray]]:
        """Load the annotation information for ``label`` at row ``i``.

        Information is retrieved from :py:attr:`annotations`.
        If the annotation information is missing for the given label
        category, return ``None``.

        .. note::
            If ``loaded_rgb_masks`` is given, this function has the side
            effect of updating this dict with newly loaded masks! This
            is used to speed up loading of several labels from the same
            mask.

        :param label: the label to restrict the annotation to
        :param i: the index of the row in :py:attr:`annotations` which
            holds the information for this single annotation of interest
        :param raw_ann_row: optionally directly hand over the row of
            interest instead of providing its index (see ``i``)
        :param loaded_rgb_masks: RGB segmentation masks loaded so far
            (for speed-up); gets updated with any newly loaded masks
        :return: One of

            - ``None`` if category information is missing,
            - the binary segmentation mask for the label in case of a
              segmentation category,
            - the boolean truth value whether the label holds for the
              image in case of a classification category
        :raises: :py:exc:`ValueError` if neither ``i`` nor
            ``raw_ann_row`` is given, or for an unknown label category
        """
        if i is None and raw_ann_row is None:
            raise ValueError("Either index i or the annotation row raw_ann_row"
                             " must be given but both were None")
        if loaded_rgb_masks is None:
            loaded_rgb_masks: Dict[str, List[PIL.Image.Image]] = {}
        if raw_ann_row is None:
            raw_ann_row: pd.Series = self.annotations.iloc[i]

        raw_ann: Union[str, float] = raw_ann_row[label.category]
        # Missing annotation: return None
        if pd.isnull(raw_ann):
            return None
        raw_anns: List[str] = raw_ann.split(';')

        if label.category in self.SEG_CATS:
            # raw_anns is a list of file paths of RGB masks with label
            # information encoded in red and green channel
            if label.category not in loaded_rgb_masks:
                # Update loaded mask list with newly loaded mask
                loaded_rgb_masks[label.category] = [
                    PIL.Image.open(
                        os.path.join(self.dataset_root, self.IMAGES_ROOT, fp))
                    for fp in raw_anns]
            ann = self.process_seg_mask(label, loaded_rgb_masks[label.category])
            return ann
        if label.category in self.CLS_CATS:
            # raw_anns is list of classification label numbers
            return str(label.number) in raw_anns
        raise ValueError("Unknown category for label {}; known ones: {}"
                         .format(label, [*self.SEG_CATS, *self.CLS_CATS]))

    def process_seg_mask(self, label: BrodenLabel,
                         rgb_masks: List[PIL.Image.Image]) -> np.ndarray:
        """Collect the binary segmentation mask for ``label`` from given
        RGB masks.

        Pixels belonging to the given ``label`` are 1, others 0.

        :param label: the label to look for
            (:py:attr:`~BrodenLabel.number` needed)
        :param rgb_masks: a list of RGB masks with label information
            encoded in red and green channel; for details on encoding
            see :py:meth:`to_seg_mask`
        :return: binary segmentation mask for ``label`` merged (by
            union) from the given masks
        :raises: :py:exc:`ValueError` for an empty mask list
        """
        if len(rgb_masks) == 0:
            raise ValueError("Empty RGB mask list rgb_masks!")
        # Convert to binary masks only for self.label:
        masks_np = [self.to_seg_mask(ext_mask, label_num=label.number)
                    for ext_mask in rgb_masks]
        # Add up masks
        return (np.sum(masks_np, axis=0) > 0) \
            if len(masks_np) > 1 else masks_np[0]

    @staticmethod
    def to_seg_mask(seg: PIL.Image.Image, label_num: int) -> np.ndarray:
        """Given a Broden RGB segmentation, reduce it to a binary mask
        for ``label_num``.

        Broden segmentations are saved as RGB images, where the label
        number of a pixel is ``(256 * green + red)`` with ``red`` the
        red channel value of the pixel, and ``green`` its green channel
        value. A label number of ``0`` means background.
        The label number is the ``'number'`` field from
        :py:attr:`label_info_for` respectively the
        :py:attr:`BrodenLabel.number` attribute.

        :param seg: the original RGB segmentation mask encoded as
            described above
        :param label_num: the label number to restrict the mask to
        :return: binary segmentation mask for the given label number
        """
        # noinspection PyTypeChecker
        seg_np = np.array(seg)
        red, green = seg_np[..., 0], seg_np[..., 1]
        binary_seg_np = (256 * green + red) == label_num
        return binary_seg_np

    def prune(self, condition: Callable[[Tuple[Any, Any]], bool],
              by_target: bool = False,
              show_progress_bar: bool = False) -> 'BrodenHandle':
        """Prune all items that fulfill ``condition`` from this dataset.

        For this, :py:attr:`annotations` is modified accordingly.

        :param condition: callable that accepts the output of
            :py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__`
            and returns a ``bool`` stating whether this item is to be
            pruned
        :param show_progress_bar: whether to show a progress bar while
            collecting the selector for ``condition``
        :param by_target: only load the target annotations of each item
            (the transforms are applied with dummy input) and apply
            ``condition`` to the target; asserts that transforms yields
            a tuple of ``(input, target)``; useful to avoid costly
            loading of input images when they do not contribute
        :return: this instance (with modified :py:attr:`annotations`)
        """
        selector: np.ndarray = self._selector_for(
            condition, show_progress_bar=show_progress_bar,
            by_target=by_target)
        self.annotations = self.annotations[~selector]
        return self

    def balance(self, condition: Callable[[Tuple[Any, Any]], bool],
                proportion: float = 0.5, by_target: bool = False,
                show_progress_bar: bool = False) -> 'BrodenHandle':
        """Restrict this dataset to a subset with an exact
        ``proportion`` fulfilling ``condition``.

        For this, :py:attr:`annotations` is modified accordingly.
        After splitting the dataset by ``condition``, the half which is
        too large to fulfill ``proportion`` is reduced by random
        sub-sampling, determining the final size of the dataset.
        If there is only one class in the dataset, only shuffling is
        applied.

        :param condition: callable that accepts the output of
            :py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__`
            and returns a ``bool`` stating whether this item belongs to
            the first split
        :param proportion: the aimed-for proportion of the first split
            on the final dataset
        :param show_progress_bar: whether to show a progress bar while
            collecting the selector for ``condition``
        :param by_target: see :py:meth:`prune`
        :return: self
        """
        selector: np.ndarray = self._selector_for(
            condition, by_target=by_target,
            show_progress_bar=show_progress_bar)

        # Decide which split must be sub-sampled to meet the proportion:
        pos: pd.DataFrame = self.annotations.loc[selector]
        if len(pos) / len(self.annotations) > proportion:
            # Reduce positives
            to_reduce: pd.DataFrame = pos
            to_keep: pd.DataFrame = self.annotations.loc[~selector]
            prop_to_keep: float = 1 - proportion
        else:
            # Reduce negatives
            to_reduce: pd.DataFrame = self.annotations.loc[~selector]
            to_keep: pd.DataFrame = pos
            prop_to_keep: float = proportion

        # Is there only one class in the dataset?
        if np.allclose(prop_to_keep, 0):
            return self.shuffle()

        # Calc the final amounts of samples for each slice
        num_to_keep: int = len(to_keep)
        num_all: int = int(num_to_keep / prop_to_keep)
        num_to_reduce: int = max(1, num_all - num_to_keep)

        # Subsample, shuffle:
        self.annotations: pd.DataFrame = pd.concat(
            [to_reduce.sample(n=num_to_reduce), to_keep.sample(n=num_to_keep)],
            ignore_index=True)
        self.shuffle()
        return self

    def _selector_for(self, condition: Callable[[Tuple[Any, Any]], bool],
                      show_progress_bar: bool = False,
                      by_target: bool = False) -> np.ndarray:
        """Provide ``bool`` array matching indices of this dataset for
        which ``condition`` holds.

        Optionally show a progress bar while processing the data.

        :param by_target: only load target (transforms is applied with a
            dummy input image) and apply condition to target; asserts
            that transforms yields a tuple of ``(input, target)``
        """
        if by_target:
            dummy_img: PIL.Image.Image = PIL.Image.open(self.image_filepath(0))
            load_fn: Callable[[int], Any] = \
                (lambda i: self.transforms(dummy_img, self.load_anns(i))[1])
        else:
            load_fn: Callable[[int], Any] = lambda i: self[i]
        selector: List[bool] = []
        iterator = range(len(self))
        if show_progress_bar:
            iterator = tqdm(iterator,
                            desc="Iterating " + self.__class__.__name__)
        for i in iterator:
            selector.append(condition(load_fn(i)))
        return np.array(selector, dtype=bool)

    def shuffle(self) -> 'BrodenHandle':
        """Shuffle the held annotations and return self."""
        self.annotations = self.annotations.sample(frac=1
                                                   ).reset_index(drop=True)
        return self

    @classmethod
    def custom_label(cls, dataset_root: str,
                     label: str,
                     prune_empty: Union[bool, str] = True,
                     balance_pos_to: Optional[float] = None,
                     verbose: bool = False,
                     **init_args):
        # pylint: disable=line-too-long
        """Return a :py:class:`BrodenHandle` instance with output
        restricted to single ``label``.

        The transformations in
        :py:attr:`~hybrid_learning.datasets.base.BaseDataset.transforms`
        will be chosen such that
        :py:meth:`~hybrid_learning.datasets.base.BaseDataset.__getitem__`
        outputs a tuple of ``(input_image, annotation)`` where

        - ``input_image`` is encoded as :py:class:`torch.Tensor`
        - ``annotation`` is a :py:class:`torch.Tensor` holding either
          the binary mask for the specified label or the bool
          classification value.

        :param dataset_root: the ``dataset_root`` parameter for init of
            the :py:class:`BrodenHandle`
        :param label: the label to restrict to; may either be a valid
            string label name, a valid :py:class:`BrodenLabel`, or a
            valid string representation of a
            :py:class:`~hybrid_learning.datasets.transforms.dict_transforms.Merge`
            operation the
            :py:class:`~hybrid_learning.datasets.transforms.dict_transforms.Merge.all_in_keys`
            of which are all valid string label names
        :param init_args: further init arguments to the
            :py:class:`BrodenHandle`
        :param balance_pos_to: if a value given, balance the resulting
            :py:class:`BrodenHandle` instance such that the proportion
            of the ``True`` entries is this value; only use for
            classification examples
        :param prune_empty: whether to prune empty entries (``None``
            values and empty masks) using :py:meth:`prune`
        :param verbose: show progress bars
        :return: :py:class:`BrodenHandle` instance for ``dataset_root``
            with transforms and labels selected such that the output of
            :py:meth:`getitem` is transformed to the format specified
            above
        :raises: :py:exc:`ValueError` if ``init_args`` contains
            ``labels`` or ``label`` has an unknown format
        """
        # pylint: enable=line-too-long
        # region Value checks
        if "labels" in init_args:
            raise ValueError(("init_args must not contain labels key, "
                              "but were {}").format(init_args))
        # endregion

        merge_op: Optional[trafo.Merge] = None  # Merge op before flatten
        # region Parse the label (and collect Merge operation if necessary):
        # collect: labels, merge_op, final_key (=the final key to which to
        # restrict the dict)
        if isinstance(label, BrodenLabel):
            labels: List[BrodenLabel] = [label]
            final_key: str = label.name
        elif isinstance(label, trafo.Merge):
            merge_op: trafo.Merge = label
            labels: Set[str] = merge_op.all_in_keys
            final_key: str = merge_op.out_key
        elif isinstance(label, str):
            # Can be parsed to merge operation?
            parsed_label: Union[str, trafo.Merge] = trafo.Merge.parse(label)
            if isinstance(parsed_label, str):
                labels: List[str] = [label]
                final_key: str = label
            else:
                merge_op: trafo.Merge = parsed_label
                labels: Set[str] = merge_op.all_in_keys
                final_key: str = merge_op.out_key
        else:
            raise ValueError("label {} has unknown format {}"
                             .format(label, type(label)))
        assert final_key != ""
        assert len(labels) > 0
        # endregion

        # region Collect the transformation
        trafos: List[trafo.TupleTransforms] = []
        trafos += [trafo.OnTarget(merge_op),
                   trafo.OnTarget(trafo.RestrictDict([final_key]))] \
            if merge_op else []
        trafos += [cls.datum_to_tens,
                   trafo.OnTarget(trafo.FlattenDict(final_key))]
        user_defined_trafo = init_args.pop('transforms', None)
        # endregion

        broden_inst = cls(dataset_root=dataset_root, labels=labels,
                          **init_args)
        # specify separately for IDE type inference:
        broden_inst.transforms = trafo.Compose(trafos)
        if prune_empty:
            broden_inst.prune(
                lambda a: a is None or (a.dim() > 0 and a.sum() == 0),
                by_target=True, show_progress_bar=verbose)
        if balance_pos_to is not None:
            broden_inst.balance(lambda a: a, proportion=balance_pos_to,
                                by_target=True, show_progress_bar=verbose)

        # Append the user-defined transforms
        # (after pruning, since this requires control over the output format!)
        if user_defined_trafo is not None:
            broden_inst.transforms.append(user_defined_trafo)
        return broden_inst
0.940092
0.68892
from __future__ import unicode_literals import django.contrib.gis.db.models.fields from django.db import migrations, models import django.db.models.deletion import django_date_extensions.fields import tinymce.models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Correspondent', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_name', models.CharField(blank=True, max_length=50)), ('married_name', models.CharField(blank=True, max_length=50)), ('first_names', models.CharField(blank=True, max_length=75)), ('suffix', models.CharField(blank=True, max_length=10)), ('aliases', models.CharField(blank=True, max_length=75)), ('description', models.TextField(blank=True)), ], ), migrations.CreateModel( name='DocumentImage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('description', models.CharField(blank=True, max_length=75)), ('type', models.CharField(choices=[('L', 'Letter'), ('E', 'Envelope'), ('T', 'Transcription'), ('D', 'Other document')], max_length=1)), ('image_file', models.ImageField(upload_to='letter_images')), ], ), migrations.CreateModel( name='DocumentSource', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=75)), ('description', models.TextField(blank=True)), ('url', models.URLField(blank=True)), ('images', models.ManyToManyField(blank=True, to='letters.DocumentImage')), ], ), migrations.CreateModel( name='Envelope', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('description', models.CharField(blank=True, max_length=75)), ('date', django_date_extensions.fields.ApproximateDateField(null=True)), ('contents', tinymce.models.HTMLField(blank=True, null=True)), ('notes', 
tinymce.models.HTMLField(blank=True)), ], ), migrations.CreateModel( name='Letter', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date', django_date_extensions.fields.ApproximateDateField(null=True)), ('heading', models.TextField(blank=True, null=True)), ('greeting', models.TextField(blank=True, null=True)), ('body', tinymce.models.HTMLField(blank=True, null=True)), ('closing', models.TextField(blank=True, null=True)), ('signature', models.TextField(blank=True, null=True)), ('ps', models.TextField(blank=True, null=True)), ('complete_transcription', models.BooleanField(default=False)), ('notes', tinymce.models.HTMLField(blank=True)), ('language', models.CharField(blank=True, default='EN', max_length=2)), ('images', models.ManyToManyField(blank=True, to='letters.DocumentImage')), ], ), migrations.CreateModel( name='MiscDocument', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('description', models.CharField(max_length=75)), ('date', django_date_extensions.fields.ApproximateDateField(null=True)), ('contents', tinymce.models.HTMLField(blank=True, null=True)), ('notes', tinymce.models.HTMLField(blank=True)), ('images', models.ManyToManyField(blank=True, to='letters.DocumentImage')), ], ), migrations.CreateModel( name='Place', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=75)), ('state', models.CharField(blank=True, max_length=2)), ('country', models.CharField(blank=True, default='US', max_length=2)), ('point', django.contrib.gis.db.models.fields.PointField(blank=True, help_text='Represented as (longitude, latitude)', null=True, srid=4326)), ('notes', models.TextField(blank=True)), ], ), migrations.AddField( model_name='miscdocument', name='place', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='letters.Place'), 
), migrations.AddField( model_name='miscdocument', name='source', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='letters.DocumentSource'), ), migrations.AddField( model_name='miscdocument', name='writer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='miscdoc_writer', to='letters.Correspondent'), ), migrations.AddField( model_name='letter', name='place', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='letters.Place'), ), migrations.AddField( model_name='letter', name='recipient', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recipient', to='letters.Correspondent'), ), migrations.AddField( model_name='letter', name='source', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='letters.DocumentSource'), ), migrations.AddField( model_name='letter', name='writer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='writer', to='letters.Correspondent'), ), migrations.AddField( model_name='envelope', name='destination', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='destination', to='letters.Place'), ), migrations.AddField( model_name='envelope', name='images', field=models.ManyToManyField(blank=True, to='letters.DocumentImage'), ), migrations.AddField( model_name='envelope', name='origin', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='origin', to='letters.Place'), ), migrations.AddField( model_name='envelope', name='recipient', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='envelope_recipient', to='letters.Correspondent'), ), migrations.AddField( model_name='envelope', name='source', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='letters.DocumentSource'), ), migrations.AddField( model_name='envelope', name='writer', 
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='envelope_writer', to='letters.Correspondent'), ), migrations.AddField( model_name='correspondent', name='images', field=models.ManyToManyField(blank=True, to='letters.DocumentImage'), ), ]
letters/migrations/0001_initial.py
from __future__ import unicode_literals import django.contrib.gis.db.models.fields from django.db import migrations, models import django.db.models.deletion import django_date_extensions.fields import tinymce.models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Correspondent', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_name', models.CharField(blank=True, max_length=50)), ('married_name', models.CharField(blank=True, max_length=50)), ('first_names', models.CharField(blank=True, max_length=75)), ('suffix', models.CharField(blank=True, max_length=10)), ('aliases', models.CharField(blank=True, max_length=75)), ('description', models.TextField(blank=True)), ], ), migrations.CreateModel( name='DocumentImage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('description', models.CharField(blank=True, max_length=75)), ('type', models.CharField(choices=[('L', 'Letter'), ('E', 'Envelope'), ('T', 'Transcription'), ('D', 'Other document')], max_length=1)), ('image_file', models.ImageField(upload_to='letter_images')), ], ), migrations.CreateModel( name='DocumentSource', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=75)), ('description', models.TextField(blank=True)), ('url', models.URLField(blank=True)), ('images', models.ManyToManyField(blank=True, to='letters.DocumentImage')), ], ), migrations.CreateModel( name='Envelope', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('description', models.CharField(blank=True, max_length=75)), ('date', django_date_extensions.fields.ApproximateDateField(null=True)), ('contents', tinymce.models.HTMLField(blank=True, null=True)), ('notes', 
tinymce.models.HTMLField(blank=True)), ], ), migrations.CreateModel( name='Letter', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date', django_date_extensions.fields.ApproximateDateField(null=True)), ('heading', models.TextField(blank=True, null=True)), ('greeting', models.TextField(blank=True, null=True)), ('body', tinymce.models.HTMLField(blank=True, null=True)), ('closing', models.TextField(blank=True, null=True)), ('signature', models.TextField(blank=True, null=True)), ('ps', models.TextField(blank=True, null=True)), ('complete_transcription', models.BooleanField(default=False)), ('notes', tinymce.models.HTMLField(blank=True)), ('language', models.CharField(blank=True, default='EN', max_length=2)), ('images', models.ManyToManyField(blank=True, to='letters.DocumentImage')), ], ), migrations.CreateModel( name='MiscDocument', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('description', models.CharField(max_length=75)), ('date', django_date_extensions.fields.ApproximateDateField(null=True)), ('contents', tinymce.models.HTMLField(blank=True, null=True)), ('notes', tinymce.models.HTMLField(blank=True)), ('images', models.ManyToManyField(blank=True, to='letters.DocumentImage')), ], ), migrations.CreateModel( name='Place', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=75)), ('state', models.CharField(blank=True, max_length=2)), ('country', models.CharField(blank=True, default='US', max_length=2)), ('point', django.contrib.gis.db.models.fields.PointField(blank=True, help_text='Represented as (longitude, latitude)', null=True, srid=4326)), ('notes', models.TextField(blank=True)), ], ), migrations.AddField( model_name='miscdocument', name='place', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='letters.Place'), 
), migrations.AddField( model_name='miscdocument', name='source', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='letters.DocumentSource'), ), migrations.AddField( model_name='miscdocument', name='writer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='miscdoc_writer', to='letters.Correspondent'), ), migrations.AddField( model_name='letter', name='place', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='letters.Place'), ), migrations.AddField( model_name='letter', name='recipient', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recipient', to='letters.Correspondent'), ), migrations.AddField( model_name='letter', name='source', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='letters.DocumentSource'), ), migrations.AddField( model_name='letter', name='writer', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='writer', to='letters.Correspondent'), ), migrations.AddField( model_name='envelope', name='destination', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='destination', to='letters.Place'), ), migrations.AddField( model_name='envelope', name='images', field=models.ManyToManyField(blank=True, to='letters.DocumentImage'), ), migrations.AddField( model_name='envelope', name='origin', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='origin', to='letters.Place'), ), migrations.AddField( model_name='envelope', name='recipient', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='envelope_recipient', to='letters.Correspondent'), ), migrations.AddField( model_name='envelope', name='source', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='letters.DocumentSource'), ), migrations.AddField( model_name='envelope', name='writer', 
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='envelope_writer', to='letters.Correspondent'), ), migrations.AddField( model_name='correspondent', name='images', field=models.ManyToManyField(blank=True, to='letters.DocumentImage'), ), ]
0.575349
0.233171
import os, sys, getopt, string from Bio.Seq import Seq from Bio.Blast import NCBIXML from Bio.Alphabet import IUPAC #============================================================================== def show_help(): print """%s parses BLASTX XML output to STDOUT Options: -f:\tBLASTX output in XML format -n:\tnumber of best hits to be parsed (default: 1) -e:\tmaximum e-value to accept hits (default: 1e-5) What this program does: It takes the best hit's start and endposition from BLAST, applies it to the sequence in your query (e.g. the CAP3-output), and translates to the left resp. right from the start resp. end of your CAP3-output, until a Start-orStopcodon appears. """ % sys.argv[0] sys.exit(1) # ============================================================================= def handle_arguments(): """ verifies the presence of all necessary arguments and returns the data dir """ if len ( sys.argv ) == 1: sys.stderr.write( "no arguments provided.\n" ) show_help() try: # check for the right arguments keys, values = getopt.getopt( sys.argv[1:], "hf:n:e:" ) except getopt.GetoptError: sys.stderr.write( "invalid arguments provided.\n" ) show_help() args = {} args['numhits'] = 1 args['evalue'] = float('1e-5') for key, value in keys: if key == '-f': args['blastfile'] = value if key == '-n': args['numhits'] = int(value) if key == '-e': args['evalue'] = float(value) if not args.has_key('blastfile'): sys.stderr.write( "blastx XML file argument missing.\n" ) show_help() elif not os.path.exists( args.get('blastfile') ) or not os.path.isfile( args.get('blastfile') ): sys.stderr.write( "blastx XML file does not exist.\n" ) show_help() return args #============================================================================== def main(args): #print "Working..." 
header = ['query', 'hit', 'frame', 'query_startpos', 'query_endpos', 'subject_startpos', 'subject_endpos', 'evalue', 'score'] print '#', string.join(header, "\t") XML = open( args.get('blastfile') ) blast_records = NCBIXML.parse(XML) for i in blast_records: # print i.query count = 0 while count < args.get('numhits'): count += 1 hit = i.alignments.pop(0) hsp = hit.hsps[0] if hsp.expect > args.get('evalue'): break # print i.query, hit.title.split()[0], hsp.frame[0], hsp.query_start, hsp.query_start -1+ len(hsp.query)*3, hsp.sbjct_start, hsp.sbjct_start -1+ len(hsp.sbjct), hsp.expect, hsp.score print string.join([i.query, hit.title.split()[0], str(hsp.frame[0]), str(hsp.query_start), str(hsp.query_start -1+ len(hsp.query.replace('-', ''))*3), str(hsp.sbjct_start), str(hsp.sbjct_start -1+ len(hsp.sbjct)), str(hsp.expect), str(hsp.score)], "\t") # ============================================================================= args = handle_arguments() main( args )
python/blast/parse-blastout-xml.py
import os, sys, getopt, string from Bio.Seq import Seq from Bio.Blast import NCBIXML from Bio.Alphabet import IUPAC #============================================================================== def show_help(): print """%s parses BLASTX XML output to STDOUT Options: -f:\tBLASTX output in XML format -n:\tnumber of best hits to be parsed (default: 1) -e:\tmaximum e-value to accept hits (default: 1e-5) What this program does: It takes the best hit's start and endposition from BLAST, applies it to the sequence in your query (e.g. the CAP3-output), and translates to the left resp. right from the start resp. end of your CAP3-output, until a Start-orStopcodon appears. """ % sys.argv[0] sys.exit(1) # ============================================================================= def handle_arguments(): """ verifies the presence of all necessary arguments and returns the data dir """ if len ( sys.argv ) == 1: sys.stderr.write( "no arguments provided.\n" ) show_help() try: # check for the right arguments keys, values = getopt.getopt( sys.argv[1:], "hf:n:e:" ) except getopt.GetoptError: sys.stderr.write( "invalid arguments provided.\n" ) show_help() args = {} args['numhits'] = 1 args['evalue'] = float('1e-5') for key, value in keys: if key == '-f': args['blastfile'] = value if key == '-n': args['numhits'] = int(value) if key == '-e': args['evalue'] = float(value) if not args.has_key('blastfile'): sys.stderr.write( "blastx XML file argument missing.\n" ) show_help() elif not os.path.exists( args.get('blastfile') ) or not os.path.isfile( args.get('blastfile') ): sys.stderr.write( "blastx XML file does not exist.\n" ) show_help() return args #============================================================================== def main(args): #print "Working..." 
header = ['query', 'hit', 'frame', 'query_startpos', 'query_endpos', 'subject_startpos', 'subject_endpos', 'evalue', 'score'] print '#', string.join(header, "\t") XML = open( args.get('blastfile') ) blast_records = NCBIXML.parse(XML) for i in blast_records: # print i.query count = 0 while count < args.get('numhits'): count += 1 hit = i.alignments.pop(0) hsp = hit.hsps[0] if hsp.expect > args.get('evalue'): break # print i.query, hit.title.split()[0], hsp.frame[0], hsp.query_start, hsp.query_start -1+ len(hsp.query)*3, hsp.sbjct_start, hsp.sbjct_start -1+ len(hsp.sbjct), hsp.expect, hsp.score print string.join([i.query, hit.title.split()[0], str(hsp.frame[0]), str(hsp.query_start), str(hsp.query_start -1+ len(hsp.query.replace('-', ''))*3), str(hsp.sbjct_start), str(hsp.sbjct_start -1+ len(hsp.sbjct)), str(hsp.expect), str(hsp.score)], "\t") # ============================================================================= args = handle_arguments() main( args )
0.257205
0.259808
import json import argparse import csv import sys parser = argparse.ArgumentParser() parser.add_argument('-s', '--state', default = 'open', choices = ['open' ,'closed', 'all'], help = 'state of issues to fetch') parser.add_argument('-l', '--label', help = 'comma-separated labels to filter on') parser.add_argument('-m', '--milestone', help = 'comma-separated milestones to filter on') parser.add_argument('-c', '--columns', default='nalmt', help = 'columns to include in csv output') parser.add_argument('-o', '--output', type=str, help = 'output file location') parser.add_argument('input', type=str, help = 'location of input json data') args = parser.parse_args() with open(args.input, 'r') as f: data = json.load(f) outfile = open(args.output, 'w') if args.output else sys.stdout cw = csv.writer(outfile) header_names = { 'n': 'Issue', #number 'a': 'Assignees', 'l': 'Labels', 'm': 'Milestone', 't': 'Title', } cw.writerow(header_names.get(c, c) for c in args.columns) # helpers to pull things out of issues in list-of-strings form def assignees(i): return list(a.get('login', '??') for a in i.get('assignees', [])) def labels(i): return list(l.get('name', '??') for l in i.get('labels', [])) def milestones(i): return [ i['milestone'].get('title', '??') ] if i.get('milestone') else [] col_values = { 'n': lambda i: str(i['number']), 'a': lambda i: ','.join(assignees(i)), 'l': lambda i: ','.join(labels(i)), 'm': lambda i: ','.join(milestones(i)), 't': lambda i: i['title'], } def list_match(mlist, vlist): first = True accept = True for mterm in mlist.split(','): if mterm.startswith('-'): if mterm[1:] in vlist: accept = False else: if first: accept = False # default is reject if mterm in vlist: accept = True first = False return accept for num in sorted(data['issues']): i = data['issues'][num] if args.state and (i['state'] != args.state): continue if args.label and not list_match(args.label, labels(i)): continue if args.milestone and not list_match(args.milestone, milestones(i)): 
continue cw.writerow(col_values[c](i) for c in args.columns) if args.output: outfile.close()
tools/issues2csv.py
import json import argparse import csv import sys parser = argparse.ArgumentParser() parser.add_argument('-s', '--state', default = 'open', choices = ['open' ,'closed', 'all'], help = 'state of issues to fetch') parser.add_argument('-l', '--label', help = 'comma-separated labels to filter on') parser.add_argument('-m', '--milestone', help = 'comma-separated milestones to filter on') parser.add_argument('-c', '--columns', default='nalmt', help = 'columns to include in csv output') parser.add_argument('-o', '--output', type=str, help = 'output file location') parser.add_argument('input', type=str, help = 'location of input json data') args = parser.parse_args() with open(args.input, 'r') as f: data = json.load(f) outfile = open(args.output, 'w') if args.output else sys.stdout cw = csv.writer(outfile) header_names = { 'n': 'Issue', #number 'a': 'Assignees', 'l': 'Labels', 'm': 'Milestone', 't': 'Title', } cw.writerow(header_names.get(c, c) for c in args.columns) # helpers to pull things out of issues in list-of-strings form def assignees(i): return list(a.get('login', '??') for a in i.get('assignees', [])) def labels(i): return list(l.get('name', '??') for l in i.get('labels', [])) def milestones(i): return [ i['milestone'].get('title', '??') ] if i.get('milestone') else [] col_values = { 'n': lambda i: str(i['number']), 'a': lambda i: ','.join(assignees(i)), 'l': lambda i: ','.join(labels(i)), 'm': lambda i: ','.join(milestones(i)), 't': lambda i: i['title'], } def list_match(mlist, vlist): first = True accept = True for mterm in mlist.split(','): if mterm.startswith('-'): if mterm[1:] in vlist: accept = False else: if first: accept = False # default is reject if mterm in vlist: accept = True first = False return accept for num in sorted(data['issues']): i = data['issues'][num] if args.state and (i['state'] != args.state): continue if args.label and not list_match(args.label, labels(i)): continue if args.milestone and not list_match(args.milestone, milestones(i)): 
continue cw.writerow(col_values[c](i) for c in args.columns) if args.output: outfile.close()
0.159348
0.112405
from PyObjCTools.TestSupport import * from Foundation import * class TestNSDateFormatter (TestCase): def testOutput(self): formatter = NSDateFormatter.alloc().init() formatter.setDateFormat_("yyyy/mm/dd") self.assertResultIsBOOL(NSDateFormatter.getObjectValue_forString_range_error_) self.assertArgIsOut(NSDateFormatter.getObjectValue_forString_range_error_, 0) self.assertArgIsInOut(NSDateFormatter.getObjectValue_forString_range_error_, 2) self.assertArgIsOut(NSDateFormatter.getObjectValue_forString_range_error_, 3) ok, val, range, err = formatter.getObjectValue_forString_range_error_( None, "2008/10/12", NSRange(0, 10), None) self.assertTrue(ok) self.assertIsInstance(val, NSDate) self.assertEqual(range , NSRange(0, 10)) self.assertIs(err, None) self.assertResultIsBOOL(NSDateFormatter.getObjectValue_forString_range_error_) self.assertArgIsInOut(NSDateFormatter.getObjectValue_forString_range_error_, 2) self.assertArgIsOut(NSDateFormatter.getObjectValue_forString_range_error_, 3) def testConstants(self): self.assertEqual(NSDateFormatterNoStyle, kCFDateFormatterNoStyle) self.assertEqual(NSDateFormatterShortStyle, kCFDateFormatterShortStyle) self.assertEqual(NSDateFormatterMediumStyle, kCFDateFormatterMediumStyle) self.assertEqual(NSDateFormatterLongStyle, kCFDateFormatterLongStyle) self.assertEqual(NSDateFormatterFullStyle, kCFDateFormatterFullStyle) self.assertEqual(NSDateFormatterBehaviorDefault, 0) self.assertEqual(NSDateFormatterBehavior10_0, 1000) self.assertEqual(NSDateFormatterBehavior10_4, 1040) def testMethods(self): self.assertResultIsBOOL(NSDateFormatter.generatesCalendarDates) self.assertArgIsBOOL(NSDateFormatter.setGeneratesCalendarDates_, 0) self.assertResultIsBOOL(NSDateFormatter.isLenient) self.assertArgIsBOOL(NSDateFormatter.setLenient_, 0) self.assertResultIsBOOL(NSDateFormatter.isLenient) self.assertArgIsBOOL(NSDateFormatter.initWithDateFormat_allowNaturalLanguage_, 1) self.assertResultIsBOOL(NSDateFormatter.allowsNaturalLanguage) 
@min_os_level('10.6') def testMethods10_6(self): self.assertResultIsBOOL(NSDateFormatter.doesRelativeDateFormatting) self.assertArgIsBOOL(NSDateFormatter.setDoesRelativeDateFormatting_, 0) if __name__ == "__main__": main()
PyObjCTest/test_nsdateformatter.py
from PyObjCTools.TestSupport import * from Foundation import * class TestNSDateFormatter (TestCase): def testOutput(self): formatter = NSDateFormatter.alloc().init() formatter.setDateFormat_("yyyy/mm/dd") self.assertResultIsBOOL(NSDateFormatter.getObjectValue_forString_range_error_) self.assertArgIsOut(NSDateFormatter.getObjectValue_forString_range_error_, 0) self.assertArgIsInOut(NSDateFormatter.getObjectValue_forString_range_error_, 2) self.assertArgIsOut(NSDateFormatter.getObjectValue_forString_range_error_, 3) ok, val, range, err = formatter.getObjectValue_forString_range_error_( None, "2008/10/12", NSRange(0, 10), None) self.assertTrue(ok) self.assertIsInstance(val, NSDate) self.assertEqual(range , NSRange(0, 10)) self.assertIs(err, None) self.assertResultIsBOOL(NSDateFormatter.getObjectValue_forString_range_error_) self.assertArgIsInOut(NSDateFormatter.getObjectValue_forString_range_error_, 2) self.assertArgIsOut(NSDateFormatter.getObjectValue_forString_range_error_, 3) def testConstants(self): self.assertEqual(NSDateFormatterNoStyle, kCFDateFormatterNoStyle) self.assertEqual(NSDateFormatterShortStyle, kCFDateFormatterShortStyle) self.assertEqual(NSDateFormatterMediumStyle, kCFDateFormatterMediumStyle) self.assertEqual(NSDateFormatterLongStyle, kCFDateFormatterLongStyle) self.assertEqual(NSDateFormatterFullStyle, kCFDateFormatterFullStyle) self.assertEqual(NSDateFormatterBehaviorDefault, 0) self.assertEqual(NSDateFormatterBehavior10_0, 1000) self.assertEqual(NSDateFormatterBehavior10_4, 1040) def testMethods(self): self.assertResultIsBOOL(NSDateFormatter.generatesCalendarDates) self.assertArgIsBOOL(NSDateFormatter.setGeneratesCalendarDates_, 0) self.assertResultIsBOOL(NSDateFormatter.isLenient) self.assertArgIsBOOL(NSDateFormatter.setLenient_, 0) self.assertResultIsBOOL(NSDateFormatter.isLenient) self.assertArgIsBOOL(NSDateFormatter.initWithDateFormat_allowNaturalLanguage_, 1) self.assertResultIsBOOL(NSDateFormatter.allowsNaturalLanguage) 
@min_os_level('10.6') def testMethods10_6(self): self.assertResultIsBOOL(NSDateFormatter.doesRelativeDateFormatting) self.assertArgIsBOOL(NSDateFormatter.setDoesRelativeDateFormatting_, 0) if __name__ == "__main__": main()
0.588653
0.432962
from datetime import datetime from unittest import TestCase from dhooks_lite.embed import Embed, Author, Footer, Field, Image, Thumbnail from . import set_test_logger MODULE_PATH = "dhooks_lite.embed" logger = set_test_logger(MODULE_PATH, __file__) class TestEmbedObjectComparing(TestCase): def setUp(self): self.x1 = Author("Bruce", "url-1") self.x2 = Author("Bruce", "url-1") self.y1 = Author("Bruce", "url-2") self.y2 = Author("Clark", "url-1") self.z = Author("Clark", "url-2") def test_objects_are_equal(self): self.assertEqual(self.x1, self.x1) self.assertEqual(self.x1, self.x2) def test_objects_are_not_equal(self): self.assertNotEqual(self.x1, self.y1) self.assertNotEqual(self.x1, self.y2) self.assertNotEqual(self.x1, self.z) self.assertNotEqual(self.x1, Footer("Bruce", "url-1")) class TestEmbedObject(TestCase): def test_fromdict(self): field = Field(name="fruit", value="orange", inline=False) field_dict = field.asdict() field2 = Field.from_dict(field_dict) self.assertEqual(field, field2) class TestAuthor(TestCase): def test_detect_missing_params_on_create(self): with self.assertRaises(ValueError): Author(None) def test_create_with_name_only(self): x = Author("<NAME>") self.assertEqual(x.name, "<NAME>") self.assertDictEqual(x.asdict(), {"name": "<NAME>"}) def test_create_with_all_params(self): x = Author("<NAME>", url="url-1", icon_url="url-2", proxy_icon_url="url-3") self.assertEqual(x.name, "<NAME>") self.assertEqual(x.url, "url-1") self.assertEqual(x.icon_url, "url-2") self.assertEqual(x.proxy_icon_url, "url-3") self.assertDictEqual( x.asdict(), { "name": "<NAME>", "url": "url-1", "icon_url": "url-2", "proxy_icon_url": "url-3", }, ) class TestField(TestCase): def test_detect_missing_params_on_create(self): with self.assertRaises(ValueError): Field(name=None, value=None) def test_detects_name_limit(self): large_string = "x" * 257 with self.assertRaises(ValueError): Field(large_string, value="Batman") def test_detects_value_limit(self): large_string = "x" * 1025 
with self.assertRaises(ValueError): Field(name="<NAME>", value=large_string) def test_detect_missing_value(self): with self.assertRaises(ValueError): Field(name="<NAME>", value=None) def test_detect_missing_name(self): with self.assertRaises(ValueError): Field(name=None, value="Batman") def test_create_with_name_and_value_only(self): x = Field("fruit", "orange") self.assertEqual(x.name, "fruit") self.assertEqual(x.value, "orange") self.assertEqual(x.inline, True) self.assertDictEqual( x.asdict(), {"name": "fruit", "value": "orange", "inline": True} ) def test_create_with_all_params(self): x = Field(name="fruit", value="orange", inline=False) self.assertEqual(x.name, "fruit") self.assertEqual(x.value, "orange") self.assertEqual(x.inline, False) self.assertDictEqual( x.asdict(), {"name": "fruit", "value": "orange", "inline": False} ) def test_detect_invalid_inline_type(self): with self.assertRaises(TypeError): Field(name="fruit", value="orange", inline=int(5)) class TestFooter(TestCase): def test_detect_missing_params_on_create(self): with self.assertRaises(ValueError): Footer(None) def test_detects_wrong_type_inline(self): with self.assertRaises(TypeError): Footer("Justice League", inline=int(1)) def test_create_with_name_only(self): x = Footer("Justice League") self.assertEqual(x.text, "Justice League") self.assertDictEqual(x.asdict(), {"text": "Justice League"}) def test_create_with_all_params(self): x = Footer("Justice League", icon_url="url-1", proxy_icon_url="url-2") self.assertEqual(x.text, "Justice League") self.assertEqual(x.icon_url, "url-1") self.assertEqual(x.proxy_icon_url, "url-2") self.assertDictEqual( x.asdict(), {"text": "Justice League", "icon_url": "url-1", "proxy_icon_url": "url-2"}, ) class TestImage(TestCase): def test_detect_missing_params_on_create(self): with self.assertRaises(ValueError): Image(None) def test_create_with_url_only(self): x = Image("my-url") self.assertEqual(x.url, "my-url") self.assertDictEqual(x.asdict(), {"url": "my-url"}) 
def test_create_with_all_params(self): x = Image(url="url-1", proxy_url="url-2", width=500, height=400) self.assertEqual(x.url, "url-1") self.assertEqual(x.proxy_url, "url-2") self.assertEqual(x.width, 500) self.assertEqual(x.height, 400) self.assertDictEqual( x.asdict(), {"url": "url-1", "proxy_url": "url-2", "width": 500, "height": 400}, ) def test_detect_invalid_width(self): with self.assertRaises(ValueError): Image("my-url", width=-5) def test_detect_invalid_height(self): with self.assertRaises(ValueError): Image("my-url", height=-5) class TestEmbed(TestCase): def setUp(self) -> None: self.now = datetime.utcnow() def test_create_with_description_only(self): x = Embed(description="They said the age of heroes would never come again.") self.assertEqual( x.description, "They said the age of heroes would never come again." ) self.assertEqual(x.type, "rich") self.assertDictEqual( x.asdict(), { "type": "rich", "description": "They said the age of heroes would never come again.", }, ) def test_create_with_full_params(self): obj = Embed( title="Justice League", description="They said the age of heroes would never come again.", url="url-1", timestamp=self.now, color=0x5CDBF0, footer=Footer("TOP SECRET", "url-2", "url-11"), image=Image("url-3", "url-4", height=200, width=150), thumbnail=Thumbnail("url-5", "url-6", height=100, width=80), author=Author("<NAME>", "url-8", "url-9"), fields=[Field("fruit", "orange", False), Field("vegetable", "onion", True)], ) self.assertEqual(obj.title, "Justice League") self.assertEqual( obj.description, "They said the age of heroes would never come again." 
) self.assertEqual(obj.type, "rich") self.assertEqual(obj.url, "url-1") self.assertEqual(obj.timestamp, self.now) self.assertEqual(obj.color, 0x5CDBF0) self.assertEqual(obj.footer, Footer("TOP SECRET", "url-2", "url-11")) self.assertEqual(obj.image, Image("url-3", "url-4", height=200, width=150)) self.assertEqual( obj.thumbnail, Thumbnail("url-5", "url-6", height=100, width=80) ) self.assertEqual(obj.author, Author("<NAME>", "url-8", "url-9")) self.assertEqual( obj.fields, [Field("fruit", "orange", False), Field("vegetable", "onion", True)], ) self.maxDiff = None self.assertDictEqual( obj.asdict(), { "title": "Justice League", "type": "rich", "description": "They said the age of heroes would never come again.", "url": "url-1", "timestamp": self.now, "color": 0x5CDBF0, "image": { "url": "url-3", "proxy_url": "url-4", "height": 200, "width": 150, }, "thumbnail": { "url": "url-5", "proxy_url": "url-6", "height": 100, "width": 80, }, "footer": { "text": "TOP SECRET", "icon_url": "url-2", "proxy_icon_url": "url-11", }, "author": {"name": "<NAME>", "url": "url-8", "icon_url": "url-9"}, "fields": [ {"name": "fruit", "value": "orange", "inline": False}, {"name": "vegetable", "value": "onion", "inline": True}, ], }, ) def test_from_dict_minimal(self): embed = Embed(description="Dummy") embed_dict = embed.asdict() embed_2 = Embed.from_dict(embed_dict) self.assertEqual(embed, embed_2) def test_from_dict_full(self): embed = Embed( title="Justice League", description="They said the age of heroes would never come again.", url="url-1", timestamp=self.now, color=0x5CDBF0, footer=Footer("TOP SECRET", "url-2", "url-11"), image=Image("url-3", "url-4", height=200, width=150), thumbnail=Thumbnail("url-5", "url-6", height=100, width=80), author=Author("<NAME>", "url-8", "url-9"), fields=[Field("fruit", "orange", False), Field("vegetable", "onion", True)], ) embed_dict = embed.asdict() embed_2 = Embed.from_dict(embed_dict) self.assertEqual(embed, embed_2) def 
test_detects_wrong_type_timestamp(self): with self.assertRaises(TypeError): Embed(timestamp=int(1)) def test_detects_wrong_type_footer(self): with self.assertRaises(TypeError): Embed(footer=int(1)) def test_detects_wrong_type_image(self): with self.assertRaises(TypeError): Embed(image=int(1)) def test_detects_wrong_type_thumbnail(self): with self.assertRaises(TypeError): Embed(thumbnail=int(1)) def test_detects_wrong_type_author(self): with self.assertRaises(TypeError): Embed(author=int(1)) def test_detects_wrong_type_fields_list(self): with self.assertRaises(TypeError): Embed(fields=int(1)) def test_detects_wrong_type_fields_content(self): with self.assertRaises(TypeError): Embed(fields=[int(1), Field("x", 1)]) def test_detects_max_embed_limit(self): description = "x" * 2000 fields = list() for x in range(5): fields.append(Field(name="name" + str(x), value="value" + "x" * 1000)) with self.assertRaises(ValueError): x = Embed(description=description, fields=fields) def test_detects_max_description_limit(self): large_string = "x" * 2049 with self.assertRaises(ValueError): Embed(description=large_string) def test_detects_max_title_limit(self): large_string = "x" * 257 with self.assertRaises(ValueError): Embed(title=large_string) def test_detects_max_fields_limit(self): fields = list() for x in range(26): fields.append(Field(name="name {}".format(x), value="value {}".format(x))) with self.assertRaises(ValueError): x = Embed(fields=fields)
tests/test_embed.py
from datetime import datetime from unittest import TestCase from dhooks_lite.embed import Embed, Author, Footer, Field, Image, Thumbnail from . import set_test_logger MODULE_PATH = "dhooks_lite.embed" logger = set_test_logger(MODULE_PATH, __file__) class TestEmbedObjectComparing(TestCase): def setUp(self): self.x1 = Author("Bruce", "url-1") self.x2 = Author("Bruce", "url-1") self.y1 = Author("Bruce", "url-2") self.y2 = Author("Clark", "url-1") self.z = Author("Clark", "url-2") def test_objects_are_equal(self): self.assertEqual(self.x1, self.x1) self.assertEqual(self.x1, self.x2) def test_objects_are_not_equal(self): self.assertNotEqual(self.x1, self.y1) self.assertNotEqual(self.x1, self.y2) self.assertNotEqual(self.x1, self.z) self.assertNotEqual(self.x1, Footer("Bruce", "url-1")) class TestEmbedObject(TestCase): def test_fromdict(self): field = Field(name="fruit", value="orange", inline=False) field_dict = field.asdict() field2 = Field.from_dict(field_dict) self.assertEqual(field, field2) class TestAuthor(TestCase): def test_detect_missing_params_on_create(self): with self.assertRaises(ValueError): Author(None) def test_create_with_name_only(self): x = Author("<NAME>") self.assertEqual(x.name, "<NAME>") self.assertDictEqual(x.asdict(), {"name": "<NAME>"}) def test_create_with_all_params(self): x = Author("<NAME>", url="url-1", icon_url="url-2", proxy_icon_url="url-3") self.assertEqual(x.name, "<NAME>") self.assertEqual(x.url, "url-1") self.assertEqual(x.icon_url, "url-2") self.assertEqual(x.proxy_icon_url, "url-3") self.assertDictEqual( x.asdict(), { "name": "<NAME>", "url": "url-1", "icon_url": "url-2", "proxy_icon_url": "url-3", }, ) class TestField(TestCase): def test_detect_missing_params_on_create(self): with self.assertRaises(ValueError): Field(name=None, value=None) def test_detects_name_limit(self): large_string = "x" * 257 with self.assertRaises(ValueError): Field(large_string, value="Batman") def test_detects_value_limit(self): large_string = "x" * 1025 
with self.assertRaises(ValueError): Field(name="<NAME>", value=large_string) def test_detect_missing_value(self): with self.assertRaises(ValueError): Field(name="<NAME>", value=None) def test_detect_missing_name(self): with self.assertRaises(ValueError): Field(name=None, value="Batman") def test_create_with_name_and_value_only(self): x = Field("fruit", "orange") self.assertEqual(x.name, "fruit") self.assertEqual(x.value, "orange") self.assertEqual(x.inline, True) self.assertDictEqual( x.asdict(), {"name": "fruit", "value": "orange", "inline": True} ) def test_create_with_all_params(self): x = Field(name="fruit", value="orange", inline=False) self.assertEqual(x.name, "fruit") self.assertEqual(x.value, "orange") self.assertEqual(x.inline, False) self.assertDictEqual( x.asdict(), {"name": "fruit", "value": "orange", "inline": False} ) def test_detect_invalid_inline_type(self): with self.assertRaises(TypeError): Field(name="fruit", value="orange", inline=int(5)) class TestFooter(TestCase): def test_detect_missing_params_on_create(self): with self.assertRaises(ValueError): Footer(None) def test_detects_wrong_type_inline(self): with self.assertRaises(TypeError): Footer("Justice League", inline=int(1)) def test_create_with_name_only(self): x = Footer("Justice League") self.assertEqual(x.text, "Justice League") self.assertDictEqual(x.asdict(), {"text": "Justice League"}) def test_create_with_all_params(self): x = Footer("Justice League", icon_url="url-1", proxy_icon_url="url-2") self.assertEqual(x.text, "Justice League") self.assertEqual(x.icon_url, "url-1") self.assertEqual(x.proxy_icon_url, "url-2") self.assertDictEqual( x.asdict(), {"text": "Justice League", "icon_url": "url-1", "proxy_icon_url": "url-2"}, ) class TestImage(TestCase): def test_detect_missing_params_on_create(self): with self.assertRaises(ValueError): Image(None) def test_create_with_url_only(self): x = Image("my-url") self.assertEqual(x.url, "my-url") self.assertDictEqual(x.asdict(), {"url": "my-url"}) 
def test_create_with_all_params(self): x = Image(url="url-1", proxy_url="url-2", width=500, height=400) self.assertEqual(x.url, "url-1") self.assertEqual(x.proxy_url, "url-2") self.assertEqual(x.width, 500) self.assertEqual(x.height, 400) self.assertDictEqual( x.asdict(), {"url": "url-1", "proxy_url": "url-2", "width": 500, "height": 400}, ) def test_detect_invalid_width(self): with self.assertRaises(ValueError): Image("my-url", width=-5) def test_detect_invalid_height(self): with self.assertRaises(ValueError): Image("my-url", height=-5) class TestEmbed(TestCase): def setUp(self) -> None: self.now = datetime.utcnow() def test_create_with_description_only(self): x = Embed(description="They said the age of heroes would never come again.") self.assertEqual( x.description, "They said the age of heroes would never come again." ) self.assertEqual(x.type, "rich") self.assertDictEqual( x.asdict(), { "type": "rich", "description": "They said the age of heroes would never come again.", }, ) def test_create_with_full_params(self): obj = Embed( title="Justice League", description="They said the age of heroes would never come again.", url="url-1", timestamp=self.now, color=0x5CDBF0, footer=Footer("TOP SECRET", "url-2", "url-11"), image=Image("url-3", "url-4", height=200, width=150), thumbnail=Thumbnail("url-5", "url-6", height=100, width=80), author=Author("<NAME>", "url-8", "url-9"), fields=[Field("fruit", "orange", False), Field("vegetable", "onion", True)], ) self.assertEqual(obj.title, "Justice League") self.assertEqual( obj.description, "They said the age of heroes would never come again." 
) self.assertEqual(obj.type, "rich") self.assertEqual(obj.url, "url-1") self.assertEqual(obj.timestamp, self.now) self.assertEqual(obj.color, 0x5CDBF0) self.assertEqual(obj.footer, Footer("TOP SECRET", "url-2", "url-11")) self.assertEqual(obj.image, Image("url-3", "url-4", height=200, width=150)) self.assertEqual( obj.thumbnail, Thumbnail("url-5", "url-6", height=100, width=80) ) self.assertEqual(obj.author, Author("<NAME>", "url-8", "url-9")) self.assertEqual( obj.fields, [Field("fruit", "orange", False), Field("vegetable", "onion", True)], ) self.maxDiff = None self.assertDictEqual( obj.asdict(), { "title": "Justice League", "type": "rich", "description": "They said the age of heroes would never come again.", "url": "url-1", "timestamp": self.now, "color": 0x5CDBF0, "image": { "url": "url-3", "proxy_url": "url-4", "height": 200, "width": 150, }, "thumbnail": { "url": "url-5", "proxy_url": "url-6", "height": 100, "width": 80, }, "footer": { "text": "TOP SECRET", "icon_url": "url-2", "proxy_icon_url": "url-11", }, "author": {"name": "<NAME>", "url": "url-8", "icon_url": "url-9"}, "fields": [ {"name": "fruit", "value": "orange", "inline": False}, {"name": "vegetable", "value": "onion", "inline": True}, ], }, ) def test_from_dict_minimal(self): embed = Embed(description="Dummy") embed_dict = embed.asdict() embed_2 = Embed.from_dict(embed_dict) self.assertEqual(embed, embed_2) def test_from_dict_full(self): embed = Embed( title="Justice League", description="They said the age of heroes would never come again.", url="url-1", timestamp=self.now, color=0x5CDBF0, footer=Footer("TOP SECRET", "url-2", "url-11"), image=Image("url-3", "url-4", height=200, width=150), thumbnail=Thumbnail("url-5", "url-6", height=100, width=80), author=Author("<NAME>", "url-8", "url-9"), fields=[Field("fruit", "orange", False), Field("vegetable", "onion", True)], ) embed_dict = embed.asdict() embed_2 = Embed.from_dict(embed_dict) self.assertEqual(embed, embed_2) def 
test_detects_wrong_type_timestamp(self): with self.assertRaises(TypeError): Embed(timestamp=int(1)) def test_detects_wrong_type_footer(self): with self.assertRaises(TypeError): Embed(footer=int(1)) def test_detects_wrong_type_image(self): with self.assertRaises(TypeError): Embed(image=int(1)) def test_detects_wrong_type_thumbnail(self): with self.assertRaises(TypeError): Embed(thumbnail=int(1)) def test_detects_wrong_type_author(self): with self.assertRaises(TypeError): Embed(author=int(1)) def test_detects_wrong_type_fields_list(self): with self.assertRaises(TypeError): Embed(fields=int(1)) def test_detects_wrong_type_fields_content(self): with self.assertRaises(TypeError): Embed(fields=[int(1), Field("x", 1)]) def test_detects_max_embed_limit(self): description = "x" * 2000 fields = list() for x in range(5): fields.append(Field(name="name" + str(x), value="value" + "x" * 1000)) with self.assertRaises(ValueError): x = Embed(description=description, fields=fields) def test_detects_max_description_limit(self): large_string = "x" * 2049 with self.assertRaises(ValueError): Embed(description=large_string) def test_detects_max_title_limit(self): large_string = "x" * 257 with self.assertRaises(ValueError): Embed(title=large_string) def test_detects_max_fields_limit(self): fields = list() for x in range(26): fields.append(Field(name="name {}".format(x), value="value {}".format(x))) with self.assertRaises(ValueError): x = Embed(fields=fields)
0.660829
0.531088
from libqtile import layout import libqtile.manager import libqtile.config from ..utils import Xephyr class StackConfig: auto_fullscreen = True main = None groups = [ libqtile.config.Group("a"), libqtile.config.Group("b"), libqtile.config.Group("c"), libqtile.config.Group("d") ] layouts = [ layout.Stack(num_stacks=2), layout.Stack(num_stacks=1), ] floating_layout = libqtile.layout.floating.Floating() keys = [] mouse = [] screens = [] follow_mouse_focus = False def _stacks(self): stacks = [] for i in self.c.layout.info()["stacks"]: windows = i["clients"] current = i["current"] stacks.append(windows[current:] + windows[:current]) return stacks @Xephyr(False, StackConfig()) def test_stack_commands(self): assert self.c.layout.info()["current_stack"] == 0 self.testWindow("one") assert _stacks(self) == [["one"], []] assert self.c.layout.info()["current_stack"] == 0 self.testWindow("two") assert _stacks(self) == [["one"], ["two"]] assert self.c.layout.info()["current_stack"] == 1 self.testWindow("three") assert _stacks(self) == [["one"], ["three", "two"]] assert self.c.layout.info()["current_stack"] == 1 self.c.layout.delete() assert _stacks(self) == [["one", "three", "two"]] info = self.c.groups()["a"] assert info["focus"] == "one" self.c.layout.delete() assert len(_stacks(self)) == 1 self.c.layout.add() assert _stacks(self) == [["one", "three", "two"], []] self.c.layout.rotate() assert _stacks(self) == [[], ["one", "three", "two"]] @Xephyr(False, StackConfig()) def test_stack_cmd_down(self): self.c.layout.down() @Xephyr(False, StackConfig()) def test_stack_addremove(self): one = self.testWindow("one") self.c.layout.next() two = self.testWindow("two") three = self.testWindow("three") assert _stacks(self) == [['one'], ['three', 'two']] assert self.c.layout.info()["current_stack"] == 1 self.kill(three) assert self.c.layout.info()["current_stack"] == 1 self.kill(two) assert self.c.layout.info()["current_stack"] == 0 self.c.layout.next() two = self.testWindow("two") 
self.c.layout.next() assert self.c.layout.info()["current_stack"] == 0 self.kill(one) assert self.c.layout.info()["current_stack"] == 1 @Xephyr(False, StackConfig()) def test_stack_rotation(self): self.c.layout.delete() self.testWindow("one") self.testWindow("two") self.testWindow("three") assert _stacks(self) == [["three", "two", "one"]] self.c.layout.down() assert _stacks(self) == [["one", "three", "two"]] self.c.layout.up() assert _stacks(self) == [["three", "two", "one"]] self.c.layout.down() self.c.layout.down() assert _stacks(self) == [["two", "one", "three"]] @Xephyr(False, StackConfig()) def test_stack_nextprev(self): self.c.layout.add() one = self.testWindow("one") two = self.testWindow("two") three = self.testWindow("three") assert self.c.groups()["a"]["focus"] == "three" self.c.layout.next() assert self.c.groups()["a"]["focus"] == "one" self.c.layout.previous() assert self.c.groups()["a"]["focus"] == "three" self.c.layout.previous() assert self.c.groups()["a"]["focus"] == "two" self.c.layout.next() self.c.layout.next() self.c.layout.next() assert self.c.groups()["a"]["focus"] == "two" self.kill(three) self.c.layout.next() assert self.c.groups()["a"]["focus"] == "one" self.c.layout.previous() assert self.c.groups()["a"]["focus"] == "two" self.c.layout.next() self.kill(two) self.c.layout.next() assert self.c.groups()["a"]["focus"] == "one" self.kill(one) self.c.layout.next() assert self.c.groups()["a"]["focus"] == None self.c.layout.previous() assert self.c.groups()["a"]["focus"] == None @Xephyr(False, StackConfig()) def test_stack_window_removal(self): self.c.layout.next() one = self.testWindow("one") two = self.testWindow("two") self.c.layout.down() self.kill(two) @Xephyr(False, StackConfig()) def test_stack_split(self): one = self.testWindow("one") two = self.testWindow("two") three = self.testWindow("three") stacks = self.c.layout.info()["stacks"] assert not stacks[1]["split"] self.c.layout.toggle_split() stacks = self.c.layout.info()["stacks"] assert 
stacks[1]["split"] @Xephyr(False, StackConfig()) def test_stack_shuffle(self): self.c.next_layout() one = self.testWindow("one") two = self.testWindow("two") three = self.testWindow("three") stack = self.c.layout.info()["stacks"][0] assert stack["clients"][stack["current"]] == "three" for i in range(5): self.c.layout.shuffle_up() stack = self.c.layout.info()["stacks"][0] assert stack["clients"][stack["current"]] == "three" for i in range(5): self.c.layout.shuffle_down() stack = self.c.layout.info()["stacks"][0] assert stack["clients"][stack["current"]] == "three" @Xephyr(False, StackConfig()) def test_stack_client_to(self): one = self.testWindow("one") two = self.testWindow("two") assert self.c.layout.info()["stacks"][0]["clients"] == ["one"] self.c.layout.client_to_previous() assert self.c.layout.info()["stacks"][0]["clients"] == ["two", "one"] self.c.layout.client_to_previous() assert self.c.layout.info()["stacks"][0]["clients"] == ["one"] assert self.c.layout.info()["stacks"][1]["clients"] == ["two"] self.c.layout.client_to_next() assert self.c.layout.info()["stacks"][0]["clients"] == ["two", "one"] @Xephyr(False, StackConfig()) def test_stack_info(self): one = self.testWindow("one") assert self.c.layout.info()["stacks"]
test/layouts/test_stack.py
from libqtile import layout import libqtile.manager import libqtile.config from ..utils import Xephyr class StackConfig: auto_fullscreen = True main = None groups = [ libqtile.config.Group("a"), libqtile.config.Group("b"), libqtile.config.Group("c"), libqtile.config.Group("d") ] layouts = [ layout.Stack(num_stacks=2), layout.Stack(num_stacks=1), ] floating_layout = libqtile.layout.floating.Floating() keys = [] mouse = [] screens = [] follow_mouse_focus = False def _stacks(self): stacks = [] for i in self.c.layout.info()["stacks"]: windows = i["clients"] current = i["current"] stacks.append(windows[current:] + windows[:current]) return stacks @Xephyr(False, StackConfig()) def test_stack_commands(self): assert self.c.layout.info()["current_stack"] == 0 self.testWindow("one") assert _stacks(self) == [["one"], []] assert self.c.layout.info()["current_stack"] == 0 self.testWindow("two") assert _stacks(self) == [["one"], ["two"]] assert self.c.layout.info()["current_stack"] == 1 self.testWindow("three") assert _stacks(self) == [["one"], ["three", "two"]] assert self.c.layout.info()["current_stack"] == 1 self.c.layout.delete() assert _stacks(self) == [["one", "three", "two"]] info = self.c.groups()["a"] assert info["focus"] == "one" self.c.layout.delete() assert len(_stacks(self)) == 1 self.c.layout.add() assert _stacks(self) == [["one", "three", "two"], []] self.c.layout.rotate() assert _stacks(self) == [[], ["one", "three", "two"]] @Xephyr(False, StackConfig()) def test_stack_cmd_down(self): self.c.layout.down() @Xephyr(False, StackConfig()) def test_stack_addremove(self): one = self.testWindow("one") self.c.layout.next() two = self.testWindow("two") three = self.testWindow("three") assert _stacks(self) == [['one'], ['three', 'two']] assert self.c.layout.info()["current_stack"] == 1 self.kill(three) assert self.c.layout.info()["current_stack"] == 1 self.kill(two) assert self.c.layout.info()["current_stack"] == 0 self.c.layout.next() two = self.testWindow("two") 
self.c.layout.next() assert self.c.layout.info()["current_stack"] == 0 self.kill(one) assert self.c.layout.info()["current_stack"] == 1 @Xephyr(False, StackConfig()) def test_stack_rotation(self): self.c.layout.delete() self.testWindow("one") self.testWindow("two") self.testWindow("three") assert _stacks(self) == [["three", "two", "one"]] self.c.layout.down() assert _stacks(self) == [["one", "three", "two"]] self.c.layout.up() assert _stacks(self) == [["three", "two", "one"]] self.c.layout.down() self.c.layout.down() assert _stacks(self) == [["two", "one", "three"]] @Xephyr(False, StackConfig()) def test_stack_nextprev(self): self.c.layout.add() one = self.testWindow("one") two = self.testWindow("two") three = self.testWindow("three") assert self.c.groups()["a"]["focus"] == "three" self.c.layout.next() assert self.c.groups()["a"]["focus"] == "one" self.c.layout.previous() assert self.c.groups()["a"]["focus"] == "three" self.c.layout.previous() assert self.c.groups()["a"]["focus"] == "two" self.c.layout.next() self.c.layout.next() self.c.layout.next() assert self.c.groups()["a"]["focus"] == "two" self.kill(three) self.c.layout.next() assert self.c.groups()["a"]["focus"] == "one" self.c.layout.previous() assert self.c.groups()["a"]["focus"] == "two" self.c.layout.next() self.kill(two) self.c.layout.next() assert self.c.groups()["a"]["focus"] == "one" self.kill(one) self.c.layout.next() assert self.c.groups()["a"]["focus"] == None self.c.layout.previous() assert self.c.groups()["a"]["focus"] == None @Xephyr(False, StackConfig()) def test_stack_window_removal(self): self.c.layout.next() one = self.testWindow("one") two = self.testWindow("two") self.c.layout.down() self.kill(two) @Xephyr(False, StackConfig()) def test_stack_split(self): one = self.testWindow("one") two = self.testWindow("two") three = self.testWindow("three") stacks = self.c.layout.info()["stacks"] assert not stacks[1]["split"] self.c.layout.toggle_split() stacks = self.c.layout.info()["stacks"] assert 
stacks[1]["split"] @Xephyr(False, StackConfig()) def test_stack_shuffle(self): self.c.next_layout() one = self.testWindow("one") two = self.testWindow("two") three = self.testWindow("three") stack = self.c.layout.info()["stacks"][0] assert stack["clients"][stack["current"]] == "three" for i in range(5): self.c.layout.shuffle_up() stack = self.c.layout.info()["stacks"][0] assert stack["clients"][stack["current"]] == "three" for i in range(5): self.c.layout.shuffle_down() stack = self.c.layout.info()["stacks"][0] assert stack["clients"][stack["current"]] == "three" @Xephyr(False, StackConfig()) def test_stack_client_to(self): one = self.testWindow("one") two = self.testWindow("two") assert self.c.layout.info()["stacks"][0]["clients"] == ["one"] self.c.layout.client_to_previous() assert self.c.layout.info()["stacks"][0]["clients"] == ["two", "one"] self.c.layout.client_to_previous() assert self.c.layout.info()["stacks"][0]["clients"] == ["one"] assert self.c.layout.info()["stacks"][1]["clients"] == ["two"] self.c.layout.client_to_next() assert self.c.layout.info()["stacks"][0]["clients"] == ["two", "one"] @Xephyr(False, StackConfig()) def test_stack_info(self): one = self.testWindow("one") assert self.c.layout.info()["stacks"]
0.540196
0.359786
import http.client, urllib.request, urllib.parse, urllib.error import requests import time import os import urllib.request, urllib.error, urllib.parse import socket import paho.mqtt.client as mqtt import RPi.GPIO as GPIO import sys import smtplib #SWITCH 1 (Door) #home/OpenMQTTGateway/SRFBtoMQTT 3151714 #SWITCH 2 (Deck) #home/OpenMQTTGateway/SRFBtoMQTT 10867362 #Remote 1(Dorian) A #home/OpenMQTTGateway/SRFBtoMQTT 6454993 #Remote 1(Dorian) B #home/OpenMQTTGateway/SRFBtoMQTT 6454994 #Remote 2(Gael) A #home/OpenMQTTGateway/SRFBtoMQTT 14993777 #Remote 2(Gael) B #home/OpenMQTTGateway/SRFBtoMQTT 14993778 #move/detected #home/OpenMQTTGateway/SRFBtoMQTT 14786398 #Door bell #home/OpenMQTTGateway/SRFBtoMQTT 16276098 #button bell 2 #home/OpenMQTTGateway/SRFBtoMQTT 4462722 deck_state = 0 class MyException(Exception): pass try: server = smtplib.SMTP('smtp.gmail.com', 587) server.ehlo() except: print('Something went wrong') def getImageFromCamera1(): if os.path.exists("/home/pi/camera1.jpg"): os.remove("/home/pi/camera1.jpg") #url = "http://192.168.2.122:554/snapshot" #url = "http://192.168.2.80:554/snapshot" url = "http://192.168.2.29/snap.jpg?usr=admin&pwd=<PASSWORD>" try: r = requests.get(url, verify = False, timeout=2) open("/home/pi/camera1.jpg", 'w+b').write(r.content) except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as error: print("Time out! or connection error :") print(error) os.system('cp /home/pi/camera_disconnected.jpg /home/pi/camera1.jpg') def getImageFromCamera2(): if os.path.exists("/home/pi/camera2.jpg"): os.remove("/home/pi/camera2.jpg") url = "http://192.168.2.13/snap.jpg?usr=admin&pwd=<PASSWORD>" try: r = requests.get(url, verify = False, timeout=2) open("/home/pi/camera2.jpg", 'w+b').write(r.content) except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as error: print("Time out! 
or connection error :") print(error) os.system('cp /home/pi/camera_disconnected.jpg /home/pi/camera2.jpg') # The callback for when the client receives a CONNACK response from the server. def on_connect(client, userdata, flags, rc): print(("Connected with result code "+str(rc))) # Subscribing in on_connect() means that if we lose the connection and # reconnect then subscriptions will be renewed. client.subscribe("home/OpenMQTTGateway/SRFBtoMQTT") # The callback for when a PUBLISH message is received from the server. def on_message(client, userdata, msg): global deck_state print((msg.topic+" "+str(msg.payload)+"\n")) # if msg.topic == "Door/open": # print msg.topic if msg.payload == b'3151714': print("Switch 1 trigger received") print("trigger external door") client.publish("Door/open", payload='2', qos=1, retain=False) #Dorian if msg.payload == b'6454993': print("Remote 1 A trigger received") print("trigger external door") client.publish("Door/open", payload='12', qos=1, retain=False) #getImageFromCamera2(); getImageFromCamera1(); time.sleep(1) client.publish("Door/dorian", payload='<NAME>', qos=1, retain=False) if msg.payload == b'6454994': print("Remote 1 B trigger received") print("trigger external door") client.publish("Door/open", payload='2', qos=1, retain=False) getImageFromCamera1(); time.sleep(1) client.publish("Door/dorian", payload='<NAME>', qos=1, retain=False) #Gael if msg.payload == b'14993777': print("Remote 2 A trigger received") print("trigger external door") client.publish("Door/open", payload='12', qos=1, retain=False) #getImageFromCamera2(); getImageFromCamera1(); time.sleep(1) client.publish("Door/gael", payload='<NAME>', qos=1, retain=False) if msg.payload == b'14993778': print("Remote 2 B trigger received") print("trigger external door") client.publish("Door/open", payload='2', qos=1, retain=False) time.sleep(1) getImageFromCamera1(); client.publish("Door/gael", payload='<NAME>', qos=1, retain=False) if msg.payload == b'16276098': print("Door 
bell trigger received") print("trigger Ring") getImageFromCamera1(); time.sleep(1) client.publish("Door/bell", payload='1', qos=1, retain=False) if msg.payload == b'14786398': print("Move trigger received") print("trigger move") client.publish("move/detected", payload='1', qos=1, retain=False) if msg.payload == b'10867362': print("Switch 2 trigger received") print("trigger Deck light") if(deck_state == 1): newValue = '1' deck_state = 0 else: newValue = '2' deck_state = 1 client.publish("DECK_LEDS/stairs", payload=newValue, qos=1, retain=False) client.publish("DECK/LIGHT/command", payload=newValue, qos=1, retain=False) client = mqtt.Client() client.on_connect = on_connect client.on_message = on_message client.connect("localhost") # Blocking call that processes network traffic, dispatches callbacks and # handles reconnecting. # Other loop*() functions are available that give a threaded interface and a # manual interface. client.loop_forever()
server/RFbridge.py
import http.client, urllib.request, urllib.parse, urllib.error import requests import time import os import urllib.request, urllib.error, urllib.parse import socket import paho.mqtt.client as mqtt import RPi.GPIO as GPIO import sys import smtplib #SWITCH 1 (Door) #home/OpenMQTTGateway/SRFBtoMQTT 3151714 #SWITCH 2 (Deck) #home/OpenMQTTGateway/SRFBtoMQTT 10867362 #Remote 1(Dorian) A #home/OpenMQTTGateway/SRFBtoMQTT 6454993 #Remote 1(Dorian) B #home/OpenMQTTGateway/SRFBtoMQTT 6454994 #Remote 2(Gael) A #home/OpenMQTTGateway/SRFBtoMQTT 14993777 #Remote 2(Gael) B #home/OpenMQTTGateway/SRFBtoMQTT 14993778 #move/detected #home/OpenMQTTGateway/SRFBtoMQTT 14786398 #Door bell #home/OpenMQTTGateway/SRFBtoMQTT 16276098 #button bell 2 #home/OpenMQTTGateway/SRFBtoMQTT 4462722 deck_state = 0 class MyException(Exception): pass try: server = smtplib.SMTP('smtp.gmail.com', 587) server.ehlo() except: print('Something went wrong') def getImageFromCamera1(): if os.path.exists("/home/pi/camera1.jpg"): os.remove("/home/pi/camera1.jpg") #url = "http://192.168.2.122:554/snapshot" #url = "http://192.168.2.80:554/snapshot" url = "http://192.168.2.29/snap.jpg?usr=admin&pwd=<PASSWORD>" try: r = requests.get(url, verify = False, timeout=2) open("/home/pi/camera1.jpg", 'w+b').write(r.content) except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as error: print("Time out! or connection error :") print(error) os.system('cp /home/pi/camera_disconnected.jpg /home/pi/camera1.jpg') def getImageFromCamera2(): if os.path.exists("/home/pi/camera2.jpg"): os.remove("/home/pi/camera2.jpg") url = "http://192.168.2.13/snap.jpg?usr=admin&pwd=<PASSWORD>" try: r = requests.get(url, verify = False, timeout=2) open("/home/pi/camera2.jpg", 'w+b').write(r.content) except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as error: print("Time out! 
or connection error :") print(error) os.system('cp /home/pi/camera_disconnected.jpg /home/pi/camera2.jpg') # The callback for when the client receives a CONNACK response from the server. def on_connect(client, userdata, flags, rc): print(("Connected with result code "+str(rc))) # Subscribing in on_connect() means that if we lose the connection and # reconnect then subscriptions will be renewed. client.subscribe("home/OpenMQTTGateway/SRFBtoMQTT") # The callback for when a PUBLISH message is received from the server. def on_message(client, userdata, msg): global deck_state print((msg.topic+" "+str(msg.payload)+"\n")) # if msg.topic == "Door/open": # print msg.topic if msg.payload == b'3151714': print("Switch 1 trigger received") print("trigger external door") client.publish("Door/open", payload='2', qos=1, retain=False) #Dorian if msg.payload == b'6454993': print("Remote 1 A trigger received") print("trigger external door") client.publish("Door/open", payload='12', qos=1, retain=False) #getImageFromCamera2(); getImageFromCamera1(); time.sleep(1) client.publish("Door/dorian", payload='<NAME>', qos=1, retain=False) if msg.payload == b'6454994': print("Remote 1 B trigger received") print("trigger external door") client.publish("Door/open", payload='2', qos=1, retain=False) getImageFromCamera1(); time.sleep(1) client.publish("Door/dorian", payload='<NAME>', qos=1, retain=False) #Gael if msg.payload == b'14993777': print("Remote 2 A trigger received") print("trigger external door") client.publish("Door/open", payload='12', qos=1, retain=False) #getImageFromCamera2(); getImageFromCamera1(); time.sleep(1) client.publish("Door/gael", payload='<NAME>', qos=1, retain=False) if msg.payload == b'14993778': print("Remote 2 B trigger received") print("trigger external door") client.publish("Door/open", payload='2', qos=1, retain=False) time.sleep(1) getImageFromCamera1(); client.publish("Door/gael", payload='<NAME>', qos=1, retain=False) if msg.payload == b'16276098': print("Door 
bell trigger received") print("trigger Ring") getImageFromCamera1(); time.sleep(1) client.publish("Door/bell", payload='1', qos=1, retain=False) if msg.payload == b'14786398': print("Move trigger received") print("trigger move") client.publish("move/detected", payload='1', qos=1, retain=False) if msg.payload == b'10867362': print("Switch 2 trigger received") print("trigger Deck light") if(deck_state == 1): newValue = '1' deck_state = 0 else: newValue = '2' deck_state = 1 client.publish("DECK_LEDS/stairs", payload=newValue, qos=1, retain=False) client.publish("DECK/LIGHT/command", payload=newValue, qos=1, retain=False) client = mqtt.Client() client.on_connect = on_connect client.on_message = on_message client.connect("localhost") # Blocking call that processes network traffic, dispatches callbacks and # handles reconnecting. # Other loop*() functions are available that give a threaded interface and a # manual interface. client.loop_forever()
0.09277
0.050447
__description__ = 'Script to search for Mac applications vulnerable to Sparkle.framework MITM attacks' __see__ = 'https://vulnsec.com/2016/osx-apps-vulnerabilities/' __author__ = '<NAME> (http://go16.fr)' __version__ = '0.0.1' __date__ = '2016/03/06' """ Source code put in public domain by <NAME>, no copyright Use at your own risk ! Credits: - https://macmule.com/ for the Mac-specific app discovery command History: 2016/03/06: initial commit """ import os import sys import glob import plistlib import subprocess import platform from urlparse import urlparse try: import biplist nobiplist = False except: nobiplist = True # Coloring definition INFO = '\033[94m' OK = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' AppPlist = '/Contents/Info.plist' SparklePlist = '/Contents/Frameworks/Sparkle.framework/Versions/A/Resources/Info.plist' # Quick check for platform (Mac-only) if platform.system() != 'Darwin': print '\n' + FAIL + 'ERROR :' + ENDC + ' Are you sure we\'re on a Mac ? ;)' sys.exit(1) # Get all registered apps apps = subprocess.check_output(['/usr/bin/mdfind', 'kind:app']) print '\n## SparkleCheck ##\n\nChecking Mac applications for vulnerable Sparkle Framework installs\n(see https://vulnsec.com/2016/osx-apps-vulnerabilities/)\n' print 'Usage : This script lists your applications embeding the Sparkle.framework.\nSparkle versions before 1.13.1 may be vulnerable if they update through HTTP instead of the more secure HTTPS protocol.\nPlease consider using/updating those through a secure VPN connexion first.\n' # biplist allows to read binary plist files used by some applications # see https://pypi.python.org/pypi/biplist/1.0.1 if nobiplist: print WARNING + 'NOTE : biplist library not installed : I might not be able to report on all applications \nConsider running pip install biplist\n' + ENDC else: print "NOTE : biplist library installed. 
That's good :)\n" # Let's go : iterate through /Applications/ for app in apps.splitlines(): # Look for Sparkle.framework plist file fullSparkelPath = app+SparklePlist if os.path.isfile(fullSparkelPath): pl = plistlib.readPlist(fullSparkelPath) # Try to detect Sparkle Version try: sparkleVersion = INFO + pl['CFBundleShortVersionString'] + ENDC except: try: sparkleVersion = INFO + pl['CFBundleVersion'] + '(Build version only)' + ENDC except: sparkleVersion = WARNING + '??' + ENDC # Version numbering is too unpredictable. # No safe way to decide automagically if version is vulnerable # (distutils.version or pkg_resources.parse_version do not seem reliable enough) # So we'll just display the version number extracted from the plist and let the user be judge # Look for App plist file fullAppPlist = app+AppPlist if os.path.isfile(fullAppPlist): try: apl = plistlib.readPlist(fullAppPlist) except: try: apl = biplist.readPlist(fullAppPlist) except: apl = False # Get app pretty name & version # Name if apl: try: appName = apl['BundleDisplayName'] except: try: appName = apl['CFBundleName'] except: try: appName = apl['CFBundleExecutable'] except: appName = app # Version try: appVersion = apl['CFBundleShortVersionString'] except: try: appVersion = apl['CFBundleVersion'] except: appVersion = '' else: appName = app # Try to detect update URL & extract protocol try: updateURL = apl['SUFeedURL'] proto = urlparse(updateURL).scheme if proto == 'https': proto = OK + 'https' + ENDC else: proto = FAIL + 'http' + ENDC except: proto = WARNING + '??' if nobiplist: proto = proto + ' (try installing biplist)' else: proto = proto + ' (no Sparkle.framework update URL found)' proto = proto+ENDC # Display result line print '## ' + INFO + appName + ' ' + appVersion + ENDC + ' has Sparkle version ' + sparkleVersion + ' and updates via ' + proto # exit with no errors sys.exit(0)
SparkleCheck.py
__description__ = 'Script to search for Mac applications vulnerable to Sparkle.framework MITM attacks' __see__ = 'https://vulnsec.com/2016/osx-apps-vulnerabilities/' __author__ = '<NAME> (http://go16.fr)' __version__ = '0.0.1' __date__ = '2016/03/06' """ Source code put in public domain by <NAME>, no copyright Use at your own risk ! Credits: - https://macmule.com/ for the Mac-specific app discovery command History: 2016/03/06: initial commit """ import os import sys import glob import plistlib import subprocess import platform from urlparse import urlparse try: import biplist nobiplist = False except: nobiplist = True # Coloring definition INFO = '\033[94m' OK = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' AppPlist = '/Contents/Info.plist' SparklePlist = '/Contents/Frameworks/Sparkle.framework/Versions/A/Resources/Info.plist' # Quick check for platform (Mac-only) if platform.system() != 'Darwin': print '\n' + FAIL + 'ERROR :' + ENDC + ' Are you sure we\'re on a Mac ? ;)' sys.exit(1) # Get all registered apps apps = subprocess.check_output(['/usr/bin/mdfind', 'kind:app']) print '\n## SparkleCheck ##\n\nChecking Mac applications for vulnerable Sparkle Framework installs\n(see https://vulnsec.com/2016/osx-apps-vulnerabilities/)\n' print 'Usage : This script lists your applications embeding the Sparkle.framework.\nSparkle versions before 1.13.1 may be vulnerable if they update through HTTP instead of the more secure HTTPS protocol.\nPlease consider using/updating those through a secure VPN connexion first.\n' # biplist allows to read binary plist files used by some applications # see https://pypi.python.org/pypi/biplist/1.0.1 if nobiplist: print WARNING + 'NOTE : biplist library not installed : I might not be able to report on all applications \nConsider running pip install biplist\n' + ENDC else: print "NOTE : biplist library installed. 
That's good :)\n" # Let's go : iterate through /Applications/ for app in apps.splitlines(): # Look for Sparkle.framework plist file fullSparkelPath = app+SparklePlist if os.path.isfile(fullSparkelPath): pl = plistlib.readPlist(fullSparkelPath) # Try to detect Sparkle Version try: sparkleVersion = INFO + pl['CFBundleShortVersionString'] + ENDC except: try: sparkleVersion = INFO + pl['CFBundleVersion'] + '(Build version only)' + ENDC except: sparkleVersion = WARNING + '??' + ENDC # Version numbering is too unpredictable. # No safe way to decide automagically if version is vulnerable # (distutils.version or pkg_resources.parse_version do not seem reliable enough) # So we'll just display the version number extracted from the plist and let the user be judge # Look for App plist file fullAppPlist = app+AppPlist if os.path.isfile(fullAppPlist): try: apl = plistlib.readPlist(fullAppPlist) except: try: apl = biplist.readPlist(fullAppPlist) except: apl = False # Get app pretty name & version # Name if apl: try: appName = apl['BundleDisplayName'] except: try: appName = apl['CFBundleName'] except: try: appName = apl['CFBundleExecutable'] except: appName = app # Version try: appVersion = apl['CFBundleShortVersionString'] except: try: appVersion = apl['CFBundleVersion'] except: appVersion = '' else: appName = app # Try to detect update URL & extract protocol try: updateURL = apl['SUFeedURL'] proto = urlparse(updateURL).scheme if proto == 'https': proto = OK + 'https' + ENDC else: proto = FAIL + 'http' + ENDC except: proto = WARNING + '??' if nobiplist: proto = proto + ' (try installing biplist)' else: proto = proto + ' (no Sparkle.framework update URL found)' proto = proto+ENDC # Display result line print '## ' + INFO + appName + ' ' + appVersion + ENDC + ' has Sparkle version ' + sparkleVersion + ' and updates via ' + proto # exit with no errors sys.exit(0)
0.336004
0.107813
from __future__ import absolute_import from __future__ import unicode_literals import os from django.conf import settings from django.db import connections from django.template import engines from corehq.sql_db.routers import db_for_read_write from corehq.warehouse.utils import django_batch_records from corehq.warehouse.models.meta import Batch from io import open class BaseETLMixin(object): @classmethod def load(cls, batch): raise NotImplementedError class CustomSQLETLMixin(BaseETLMixin): ''' Mixin for transferring data from a SQL store to another SQL store using a custom SQL script. ''' @classmethod def additional_sql_context(cls): ''' Override this method to provide additional context vars to the SQL script ''' return {} @classmethod def load(cls, batch): from corehq.warehouse.models.shared import WarehouseTable ''' Bulk loads records for a dim or fact table from their corresponding dependencies ''' assert issubclass(cls, WarehouseTable) database = db_for_read_write(cls) with connections[database].cursor() as cursor: cursor.execute(cls._sql_query_template(cls.slug, batch)) @classmethod def _table_context(cls, batch): ''' Get a dict of slugs to table name mapping :returns: Dict of slug to table_name { <slug>: <table_name>, ... 
} ''' from corehq.warehouse.models import get_cls_by_slug context = {cls.slug: cls._meta.db_table} for dep in cls.dependencies(): dep_cls = get_cls_by_slug(dep) context[dep] = dep_cls._meta.db_table context['start_datetime'] = batch.start_datetime.isoformat() context['end_datetime'] = batch.end_datetime.isoformat() context['batch_id'] = batch.id context.update(cls.additional_sql_context()) return context @classmethod def _sql_query_template(cls, template_name, batch): path = os.path.join( settings.BASE_DIR, 'corehq', 'warehouse', 'transforms', 'sql', '{}.sql'.format(template_name), ) if not os.path.exists(path): raise NotImplementedError( 'You must define {} in order to load data'.format(path) ) return _render_template(path, cls._table_context(batch)) class HQToWarehouseETLMixin(BaseETLMixin): ''' Mixin for transferring docs from Couch to a Django model. ''' @classmethod def field_mapping(cls): # Map source model fields to staging table fields # ( <source field>, <staging field> ) raise NotImplementedError @classmethod def record_iter(cls, start_datetime, end_datetime): raise NotImplementedError @classmethod def load(cls, batch): from corehq.warehouse.models.shared import WarehouseTable assert issubclass(cls, WarehouseTable) record_iter = cls.record_iter(batch.start_datetime, batch.end_datetime) django_batch_records(cls, record_iter, cls.field_mapping(), batch.id) def _render_template(path, context): with open(path, 'rb') as f: template_string = f.read() template = engines['django'].from_string(template_string) return template.render(context)
corehq/warehouse/etl.py
from __future__ import absolute_import from __future__ import unicode_literals import os from django.conf import settings from django.db import connections from django.template import engines from corehq.sql_db.routers import db_for_read_write from corehq.warehouse.utils import django_batch_records from corehq.warehouse.models.meta import Batch from io import open class BaseETLMixin(object): @classmethod def load(cls, batch): raise NotImplementedError class CustomSQLETLMixin(BaseETLMixin): ''' Mixin for transferring data from a SQL store to another SQL store using a custom SQL script. ''' @classmethod def additional_sql_context(cls): ''' Override this method to provide additional context vars to the SQL script ''' return {} @classmethod def load(cls, batch): from corehq.warehouse.models.shared import WarehouseTable ''' Bulk loads records for a dim or fact table from their corresponding dependencies ''' assert issubclass(cls, WarehouseTable) database = db_for_read_write(cls) with connections[database].cursor() as cursor: cursor.execute(cls._sql_query_template(cls.slug, batch)) @classmethod def _table_context(cls, batch): ''' Get a dict of slugs to table name mapping :returns: Dict of slug to table_name { <slug>: <table_name>, ... 
} ''' from corehq.warehouse.models import get_cls_by_slug context = {cls.slug: cls._meta.db_table} for dep in cls.dependencies(): dep_cls = get_cls_by_slug(dep) context[dep] = dep_cls._meta.db_table context['start_datetime'] = batch.start_datetime.isoformat() context['end_datetime'] = batch.end_datetime.isoformat() context['batch_id'] = batch.id context.update(cls.additional_sql_context()) return context @classmethod def _sql_query_template(cls, template_name, batch): path = os.path.join( settings.BASE_DIR, 'corehq', 'warehouse', 'transforms', 'sql', '{}.sql'.format(template_name), ) if not os.path.exists(path): raise NotImplementedError( 'You must define {} in order to load data'.format(path) ) return _render_template(path, cls._table_context(batch)) class HQToWarehouseETLMixin(BaseETLMixin): ''' Mixin for transferring docs from Couch to a Django model. ''' @classmethod def field_mapping(cls): # Map source model fields to staging table fields # ( <source field>, <staging field> ) raise NotImplementedError @classmethod def record_iter(cls, start_datetime, end_datetime): raise NotImplementedError @classmethod def load(cls, batch): from corehq.warehouse.models.shared import WarehouseTable assert issubclass(cls, WarehouseTable) record_iter = cls.record_iter(batch.start_datetime, batch.end_datetime) django_batch_records(cls, record_iter, cls.field_mapping(), batch.id) def _render_template(path, context): with open(path, 'rb') as f: template_string = f.read() template = engines['django'].from_string(template_string) return template.render(context)
0.760295
0.143938
from __future__ import unicode_literals import abc import logging import six import semantic_version from lymph.utils import observables, hash_id from lymph.core.versioning import compatible, serialize_version logger = logging.getLogger(__name__) # Event types propagated by Service when instances change. ADDED = 'ADDED' REMOVED = 'REMOVED' UPDATED = 'UPDATED' class ServiceInstance(object): def __init__(self, id=None, identity=None, **info): self.id = id self.identity = identity if identity else hash_id(info.get('endpoint')) self.info = {} self.update(**info) def update(self, **info): version = info.pop('version', None) if version: version = semantic_version.Version(version) self.version = version self.info.update(info) def __getattr__(self, name): try: return self.info[name] except KeyError: raise AttributeError(name) def serialize(self): d = { 'id': self.id, 'identity': self.identity, 'version': serialize_version(self.version), } d.update(self.info) return d @six.add_metaclass(abc.ABCMeta) class InstanceSet(observables.Observable): @abc.abstractmethod def __iter__(self): raise NotImplementedError() def match_version(self, version): return VersionedServiceView(self, version) class Service(InstanceSet): def __init__(self, name=None, instances=()): super(Service, self).__init__() self.name = name self.instances = {i.id: i for i in instances} self.version = None def __str__(self): return self.name def __iter__(self): return six.itervalues(self.instances) def __len__(self): return len(self.instances) def get_instance(self, prefix): for instance in six.itervalues(self.instances): if instance.id.startswith(prefix): return instance def identities(self): return list(self.instances.keys()) def remove(self, instance_id): try: instance = self.instances.pop(instance_id) except KeyError: pass else: self.notify_observers(REMOVED, instance) def update(self, instance_id, **info): try: instance = self.instances[instance_id] except KeyError: instance = self.instances[instance_id] = 
ServiceInstance(**info) self.notify_observers(ADDED, instance) else: instance.update(**info) self.notify_observers(UPDATED, instance) class VersionedServiceView(InstanceSet): def __init__(self, service, version): self.service = service self.spec = compatible(version) self.version = version def __str__(self): return '%s@%s' % (self.name, self.version) @property def name(self): return self.service.name def __iter__(self): for instance in self.service: if instance.version in self.spec: yield instance def observe(self, *args, **kwargs): return self.service.observe(*args, **kwargs)
lymph/core/services.py
from __future__ import unicode_literals import abc import logging import six import semantic_version from lymph.utils import observables, hash_id from lymph.core.versioning import compatible, serialize_version logger = logging.getLogger(__name__) # Event types propagated by Service when instances change. ADDED = 'ADDED' REMOVED = 'REMOVED' UPDATED = 'UPDATED' class ServiceInstance(object): def __init__(self, id=None, identity=None, **info): self.id = id self.identity = identity if identity else hash_id(info.get('endpoint')) self.info = {} self.update(**info) def update(self, **info): version = info.pop('version', None) if version: version = semantic_version.Version(version) self.version = version self.info.update(info) def __getattr__(self, name): try: return self.info[name] except KeyError: raise AttributeError(name) def serialize(self): d = { 'id': self.id, 'identity': self.identity, 'version': serialize_version(self.version), } d.update(self.info) return d @six.add_metaclass(abc.ABCMeta) class InstanceSet(observables.Observable): @abc.abstractmethod def __iter__(self): raise NotImplementedError() def match_version(self, version): return VersionedServiceView(self, version) class Service(InstanceSet): def __init__(self, name=None, instances=()): super(Service, self).__init__() self.name = name self.instances = {i.id: i for i in instances} self.version = None def __str__(self): return self.name def __iter__(self): return six.itervalues(self.instances) def __len__(self): return len(self.instances) def get_instance(self, prefix): for instance in six.itervalues(self.instances): if instance.id.startswith(prefix): return instance def identities(self): return list(self.instances.keys()) def remove(self, instance_id): try: instance = self.instances.pop(instance_id) except KeyError: pass else: self.notify_observers(REMOVED, instance) def update(self, instance_id, **info): try: instance = self.instances[instance_id] except KeyError: instance = self.instances[instance_id] = 
ServiceInstance(**info) self.notify_observers(ADDED, instance) else: instance.update(**info) self.notify_observers(UPDATED, instance) class VersionedServiceView(InstanceSet): def __init__(self, service, version): self.service = service self.spec = compatible(version) self.version = version def __str__(self): return '%s@%s' % (self.name, self.version) @property def name(self): return self.service.name def __iter__(self): for instance in self.service: if instance.version in self.spec: yield instance def observe(self, *args, **kwargs): return self.service.observe(*args, **kwargs)
0.582254
0.078536
import os, glob, sys, subprocess import PIL.Image from joblib import Parallel, delayed if len(sys.argv)<3: print >>sys.stderr, "Error: Wanted an RLV filename, a mode, and parameters for the modes" print >>sys.stderr, "Modes are:" print >>sys.stderr, "- GIVE <digit> - Obtain an arbitrary image that yields the number" print >>sys.stderr, "- GIVESTRONG <digit> <minDifference>- Obtain an arbitrary image that strongly yields the number, i.e., the other digit values in the output layer must be at least <minDifference> smaller." print >>sys.stderr, "- ROBUST <digitFile> <targetDigit> <maxDifferencePerPixel> <maxUnsmoothnessInNoise>- Obtain a digit image that is close to a given one, that resolves to the given target digit, where every pixel is at most maxDifferencePerPixel away from the initial image, and the maximal noise difference between two adjacent pixels is maxUnsmoothnessInNoise. The last two parameters should be >=0 and <=1 (such as, e.g., 0.05 for 5% deviation)" sys.exit(1) rlvFile = sys.argv[1] mode = sys.argv[2] # Read RLV Lines with open(rlvFile,"r") as inRLV: rlvLines = inRLV.readlines() if mode=="ANY": # Set the boundaries verifierProcess = subprocess.Popen("../../src/planet /dev/stdin",shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE) for a in rlvLines: verifierProcess.stdin.write(a) for i in range(0,28*28): verifierProcess.stdin.write("Assert <= 0.0 1.0 inX"+str(i)+"\n") verifierProcess.stdin.write("Assert >= 0.0 1.0 inX"+str(i)+"\n") verifierProcess.stdin.close() foundSATLine = False foundValuationLine = False values = {} for a in verifierProcess.stdout.readlines(): sys.stdout.write(a) a = a.strip() if a=="SAT": foundSATLine = True elif a=="Valuation:": foundValuationLine = True elif a.startswith("- ") and foundValuationLine: parts = a.split(" ") assert parts[0]=="-" assert parts[3]=="/" values[parts[1][0:len(parts[1])-1]] = float(parts[2]) assert verifierProcess.wait()==0 if not foundSATLine: print "No digit found." 
else: outFileName = "/tmp/outImage"+str(os.getpid())+"-any.png" outImage = PIL.Image.new("L", (28, 28)) for y in range(0,28): for x in range(0,28): outImage.putpixel((x,y),int(256*values["inX"+str(y*28+x)])) outImage.save(outFileName) print "Result is found in image file:",outFileName # Give mode elif mode=="GIVE" or mode=="GIVESTRONG": if len(sys.argv)<4: print >>sys.stderr, "Error: GIVE and GIVESTRING mode requires a digit number." sys.exit(1) digit = int(sys.argv[3]) if mode=="GIVESTRONG": if len(sys.argv)<5: print >>sys.stderr, "Error: GIVESTRING requires a minimum difference." sys.exit(1) minDifference = -1*float(sys.argv[4]) else: minDifference = -0.0000001 # Set the boundaries verifierProcess = subprocess.Popen("../../src/planet /dev/stdin",shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE) for a in rlvLines: verifierProcess.stdin.write(a) for i in range(0,28*28): verifierProcess.stdin.write("Assert <= 0.0 1.0 inX"+str(i)+"\n") verifierProcess.stdin.write("Assert >= 1.0 1.0 inX"+str(i)+"\n") # Set the output for i in range(0,10): if i!=digit: verifierProcess.stdin.write("Assert >= "+str(minDifference)+" 1.0 outX"+str(i)+" -1.0 outX"+str(digit)+"\n") verifierProcess.stdin.close() foundSATLine = False foundValuationLine = False values = {} for a in verifierProcess.stdout.readlines(): sys.stdout.write(a) a = a.strip() if a=="SAT": foundSATLine = True elif a=="Valuation:": foundValuationLine = True elif a.startswith("- ") and foundValuationLine: parts = a.split(" ") assert parts[0]=="-" assert parts[3]=="/" values[parts[1][0:len(parts[1])-1]] = float(parts[2]) assert verifierProcess.wait()==0 if not foundSATLine: print "No digit found." 
else: outFileName = "/tmp/outImage"+str(os.getpid())+"-"+str(digit)+str(minDifference)+".png" outImage = PIL.Image.new("L", (28, 28)) for y in range(0,28): for x in range(0,28): outImage.putpixel((x,y),int(256*values["inX"+str(y*28+x)])) outImage.save(outFileName) print "Result is found in image file:",outFileName # ROBUST mode elif mode=="ROBUST": if len(sys.argv)<7: print >>sys.stderr, "Error: ROBUST needs many parameters." sys.exit(1) digitFile = sys.argv[3] targetDigit = int(sys.argv[4]) maxDifferencePerPixel = float(sys.argv[5]) maxUnsmoothnessInNoise = float(sys.argv[6]) # Read image file im = PIL.Image.open(digitFile) #Can be many different formats. pix = im.load() assert im.size==(28,28) pixels = [pix[x,y] for y in range(0,28) for x in range(0,28)] # Set the boundaries verifierProcess = subprocess.Popen("../../src/planet /dev/stdin",shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE) for a in rlvLines: verifierProcess.stdin.write(a) for i in range(0,28*28): # Outermost pixels need to be excluded x = i % 28 y = int(i / 28) if x<3 or x>24 or y<3 or y>24: border = 0.0 else: border = maxDifferencePerPixel mini = max(0.0,pixels[i]/256.0-border) maxi = min(1.0,pixels[i]/256.0+border) verifierProcess.stdin.write("Assert <= "+str(mini)+" 1.0 inX"+str(i)+"\n") verifierProcess.stdin.write("Assert >= "+str(maxi)+" 1.0 inX"+str(i)+"\n") # Set the output for i in range(0,10): if i!=targetDigit: verifierProcess.stdin.write("Assert >= -0.000001 1.0 outX"+str(i)+" -1.0 outX"+str(targetDigit)+"\n") # Set the smoothness if maxUnsmoothnessInNoise<1.0: for x in range(0,28): for y in range(0,28): # Smooth down if (y<27): pixelDiff = (pixels[y*28+x]-pixels[(y+1)*28+x])/256.0 verifierProcess.stdin.write("Assert <= "+str(pixelDiff-maxUnsmoothnessInNoise)+" 1.0 inX"+str(y*28+x)+" -1.0 inX"+str((y+1)*28+x)+"\n") verifierProcess.stdin.write("Assert >= "+str(pixelDiff+maxUnsmoothnessInNoise)+" 1.0 inX"+str(y*28+x)+" -1.0 inX"+str((y+1)*28+x)+"\n") # Smooth right if (x<27): 
pixelDiff = (pixels[y*28+x]-pixels[y*28+x+1])/256.0 verifierProcess.stdin.write("Assert <= "+str(pixelDiff-maxUnsmoothnessInNoise)+" 1.0 inX"+str(y*28+x)+" -1.0 inX"+str(y*28+x+1)+"\n") verifierProcess.stdin.write("Assert >= "+str(pixelDiff+maxUnsmoothnessInNoise)+" 1.0 inX"+str(y*28+x)+" -1.0 inX"+str(y*28+x+1)+"\n") # Done with the input instance verifierProcess.stdin.close() foundSATLine = False foundValuationLine = False values = {} for a in verifierProcess.stdout.readlines(): sys.stdout.write(a) a = a.strip() if a=="SAT": foundSATLine = True elif a=="Valuation:": foundValuationLine = True elif a.startswith("- ") and foundValuationLine: parts = a.split(" ") assert parts[0]=="-" assert parts[3]=="/" values[parts[1][0:len(parts[1])-1]] = float(parts[2]) assert verifierProcess.wait()==0 if not foundSATLine: print "No digit found." else: outFileName = "/tmp/outImage"+str(os.getpid())+"-"+str(targetDigit)+"-"+str(maxDifferencePerPixel)+"-"+str(maxUnsmoothnessInNoise)+".png" outImage = PIL.Image.new("L", (28, 28)) for y in range(0,28): for x in range(0,28): outImage.putpixel((x,y),int(256*values["inX"+str(y*28+x)])) outImage.save(outFileName) print "Result is found in image file:",outFileName else: # Unknown mode. print >>sys.stderr, "Unknown 'prodNetwork' operation mode: ",mode sys.exit(1)
casestudies/MNIST/prodNetwork.py
import os, glob, sys, subprocess import PIL.Image from joblib import Parallel, delayed if len(sys.argv)<3: print >>sys.stderr, "Error: Wanted an RLV filename, a mode, and parameters for the modes" print >>sys.stderr, "Modes are:" print >>sys.stderr, "- GIVE <digit> - Obtain an arbitrary image that yields the number" print >>sys.stderr, "- GIVESTRONG <digit> <minDifference>- Obtain an arbitrary image that strongly yields the number, i.e., the other digit values in the output layer must be at least <minDifference> smaller." print >>sys.stderr, "- ROBUST <digitFile> <targetDigit> <maxDifferencePerPixel> <maxUnsmoothnessInNoise>- Obtain a digit image that is close to a given one, that resolves to the given target digit, where every pixel is at most maxDifferencePerPixel away from the initial image, and the maximal noise difference between two adjacent pixels is maxUnsmoothnessInNoise. The last two parameters should be >=0 and <=1 (such as, e.g., 0.05 for 5% deviation)" sys.exit(1) rlvFile = sys.argv[1] mode = sys.argv[2] # Read RLV Lines with open(rlvFile,"r") as inRLV: rlvLines = inRLV.readlines() if mode=="ANY": # Set the boundaries verifierProcess = subprocess.Popen("../../src/planet /dev/stdin",shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE) for a in rlvLines: verifierProcess.stdin.write(a) for i in range(0,28*28): verifierProcess.stdin.write("Assert <= 0.0 1.0 inX"+str(i)+"\n") verifierProcess.stdin.write("Assert >= 0.0 1.0 inX"+str(i)+"\n") verifierProcess.stdin.close() foundSATLine = False foundValuationLine = False values = {} for a in verifierProcess.stdout.readlines(): sys.stdout.write(a) a = a.strip() if a=="SAT": foundSATLine = True elif a=="Valuation:": foundValuationLine = True elif a.startswith("- ") and foundValuationLine: parts = a.split(" ") assert parts[0]=="-" assert parts[3]=="/" values[parts[1][0:len(parts[1])-1]] = float(parts[2]) assert verifierProcess.wait()==0 if not foundSATLine: print "No digit found." 
else: outFileName = "/tmp/outImage"+str(os.getpid())+"-any.png" outImage = PIL.Image.new("L", (28, 28)) for y in range(0,28): for x in range(0,28): outImage.putpixel((x,y),int(256*values["inX"+str(y*28+x)])) outImage.save(outFileName) print "Result is found in image file:",outFileName # Give mode elif mode=="GIVE" or mode=="GIVESTRONG": if len(sys.argv)<4: print >>sys.stderr, "Error: GIVE and GIVESTRING mode requires a digit number." sys.exit(1) digit = int(sys.argv[3]) if mode=="GIVESTRONG": if len(sys.argv)<5: print >>sys.stderr, "Error: GIVESTRING requires a minimum difference." sys.exit(1) minDifference = -1*float(sys.argv[4]) else: minDifference = -0.0000001 # Set the boundaries verifierProcess = subprocess.Popen("../../src/planet /dev/stdin",shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE) for a in rlvLines: verifierProcess.stdin.write(a) for i in range(0,28*28): verifierProcess.stdin.write("Assert <= 0.0 1.0 inX"+str(i)+"\n") verifierProcess.stdin.write("Assert >= 1.0 1.0 inX"+str(i)+"\n") # Set the output for i in range(0,10): if i!=digit: verifierProcess.stdin.write("Assert >= "+str(minDifference)+" 1.0 outX"+str(i)+" -1.0 outX"+str(digit)+"\n") verifierProcess.stdin.close() foundSATLine = False foundValuationLine = False values = {} for a in verifierProcess.stdout.readlines(): sys.stdout.write(a) a = a.strip() if a=="SAT": foundSATLine = True elif a=="Valuation:": foundValuationLine = True elif a.startswith("- ") and foundValuationLine: parts = a.split(" ") assert parts[0]=="-" assert parts[3]=="/" values[parts[1][0:len(parts[1])-1]] = float(parts[2]) assert verifierProcess.wait()==0 if not foundSATLine: print "No digit found." 
else: outFileName = "/tmp/outImage"+str(os.getpid())+"-"+str(digit)+str(minDifference)+".png" outImage = PIL.Image.new("L", (28, 28)) for y in range(0,28): for x in range(0,28): outImage.putpixel((x,y),int(256*values["inX"+str(y*28+x)])) outImage.save(outFileName) print "Result is found in image file:",outFileName # ROBUST mode elif mode=="ROBUST": if len(sys.argv)<7: print >>sys.stderr, "Error: ROBUST needs many parameters." sys.exit(1) digitFile = sys.argv[3] targetDigit = int(sys.argv[4]) maxDifferencePerPixel = float(sys.argv[5]) maxUnsmoothnessInNoise = float(sys.argv[6]) # Read image file im = PIL.Image.open(digitFile) #Can be many different formats. pix = im.load() assert im.size==(28,28) pixels = [pix[x,y] for y in range(0,28) for x in range(0,28)] # Set the boundaries verifierProcess = subprocess.Popen("../../src/planet /dev/stdin",shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE) for a in rlvLines: verifierProcess.stdin.write(a) for i in range(0,28*28): # Outermost pixels need to be excluded x = i % 28 y = int(i / 28) if x<3 or x>24 or y<3 or y>24: border = 0.0 else: border = maxDifferencePerPixel mini = max(0.0,pixels[i]/256.0-border) maxi = min(1.0,pixels[i]/256.0+border) verifierProcess.stdin.write("Assert <= "+str(mini)+" 1.0 inX"+str(i)+"\n") verifierProcess.stdin.write("Assert >= "+str(maxi)+" 1.0 inX"+str(i)+"\n") # Set the output for i in range(0,10): if i!=targetDigit: verifierProcess.stdin.write("Assert >= -0.000001 1.0 outX"+str(i)+" -1.0 outX"+str(targetDigit)+"\n") # Set the smoothness if maxUnsmoothnessInNoise<1.0: for x in range(0,28): for y in range(0,28): # Smooth down if (y<27): pixelDiff = (pixels[y*28+x]-pixels[(y+1)*28+x])/256.0 verifierProcess.stdin.write("Assert <= "+str(pixelDiff-maxUnsmoothnessInNoise)+" 1.0 inX"+str(y*28+x)+" -1.0 inX"+str((y+1)*28+x)+"\n") verifierProcess.stdin.write("Assert >= "+str(pixelDiff+maxUnsmoothnessInNoise)+" 1.0 inX"+str(y*28+x)+" -1.0 inX"+str((y+1)*28+x)+"\n") # Smooth right if (x<27): 
pixelDiff = (pixels[y*28+x]-pixels[y*28+x+1])/256.0 verifierProcess.stdin.write("Assert <= "+str(pixelDiff-maxUnsmoothnessInNoise)+" 1.0 inX"+str(y*28+x)+" -1.0 inX"+str(y*28+x+1)+"\n") verifierProcess.stdin.write("Assert >= "+str(pixelDiff+maxUnsmoothnessInNoise)+" 1.0 inX"+str(y*28+x)+" -1.0 inX"+str(y*28+x+1)+"\n") # Done with the input instance verifierProcess.stdin.close() foundSATLine = False foundValuationLine = False values = {} for a in verifierProcess.stdout.readlines(): sys.stdout.write(a) a = a.strip() if a=="SAT": foundSATLine = True elif a=="Valuation:": foundValuationLine = True elif a.startswith("- ") and foundValuationLine: parts = a.split(" ") assert parts[0]=="-" assert parts[3]=="/" values[parts[1][0:len(parts[1])-1]] = float(parts[2]) assert verifierProcess.wait()==0 if not foundSATLine: print "No digit found." else: outFileName = "/tmp/outImage"+str(os.getpid())+"-"+str(targetDigit)+"-"+str(maxDifferencePerPixel)+"-"+str(maxUnsmoothnessInNoise)+".png" outImage = PIL.Image.new("L", (28, 28)) for y in range(0,28): for x in range(0,28): outImage.putpixel((x,y),int(256*values["inX"+str(y*28+x)])) outImage.save(outFileName) print "Result is found in image file:",outFileName else: # Unknown mode. print >>sys.stderr, "Unknown 'prodNetwork' operation mode: ",mode sys.exit(1)
0.18769
0.459501
from .matrixgroup import MatrixGroup from .figure import Figure class LaserPath(MatrixGroup): """The main class of the module for coherent laser beams: it is the combination of Matrix() or MatrixGroup() to be used as a laser path with a laser beam (GaussianBeam) at the entrance. Usage is to create the LaserPath(), then append() elements and display(). You may change the inputBeam to any GaussianBeam(), or provide one to display(beam=GaussianBeam()) Parameters ---------- elements : list of elements A list of ABCD matrices in the imaging path label : string the label for the imaging path (Optional) Attributes ---------- inputBeam : object of GaussianBeam class the input beam of the imaging path is defined using this parameter. showElementLabels : bool If True, the labels of the elements will be shown on display. (default=True) showPointsOfInterest : bool If True, the points of interest will be shown on display. (default=True) showPointsOfInterestLabels : bool If True, the labels of the points of interest will be shown on display. (default=True) showPlanesAcrossPointsOfInterest : bool If True, the planes across the points of interest will be shown on display. (default=True) See Also -------- raytracing.GaussianBeam Notes ----- Gaussian laser beams are not "blocked" by aperture. The formalism does not explicitly allow that. However, if it appears that a GaussianBeam() would be clipped by finite aperture, a property is set to indicate it, but it will propagate nevertheless and without diffraction due to that aperture. 
""" def __init__(self, elements=None, label=""): self.inputBeam = None self.showElementLabels = True self.showPointsOfInterest = True self.showPointsOfInterestLabels = True self.showPlanesAcrossPointsOfInterest = True self.figure = Figure(opticalPath=self) self.design = self.figure.design super(LaserPath, self).__init__(elements=elements, label=label) def display(self, beams=None, comments=None): # pragma: no cover """ Display the optical system and trace the laser beam. If comments are included they will be displayed on a graph in the bottom half of the plot. Parameters ---------- inputBeam : object of GaussianBeam class inputBeams : list of object of GaussianBeam class A list of Gaussian beams comments : string If comments are included they will be displayed on a graph in the bottom half of the plot. (default=None) """ if beams is None: beams = [self.inputBeam] self.figure.displayGaussianBeam(beams=beams, comments=comments, title=self.label, backend='matplotlib', display3D=False)
raytracing/laserpath.py
from .matrixgroup import MatrixGroup from .figure import Figure class LaserPath(MatrixGroup): """The main class of the module for coherent laser beams: it is the combination of Matrix() or MatrixGroup() to be used as a laser path with a laser beam (GaussianBeam) at the entrance. Usage is to create the LaserPath(), then append() elements and display(). You may change the inputBeam to any GaussianBeam(), or provide one to display(beam=GaussianBeam()) Parameters ---------- elements : list of elements A list of ABCD matrices in the imaging path label : string the label for the imaging path (Optional) Attributes ---------- inputBeam : object of GaussianBeam class the input beam of the imaging path is defined using this parameter. showElementLabels : bool If True, the labels of the elements will be shown on display. (default=True) showPointsOfInterest : bool If True, the points of interest will be shown on display. (default=True) showPointsOfInterestLabels : bool If True, the labels of the points of interest will be shown on display. (default=True) showPlanesAcrossPointsOfInterest : bool If True, the planes across the points of interest will be shown on display. (default=True) See Also -------- raytracing.GaussianBeam Notes ----- Gaussian laser beams are not "blocked" by aperture. The formalism does not explicitly allow that. However, if it appears that a GaussianBeam() would be clipped by finite aperture, a property is set to indicate it, but it will propagate nevertheless and without diffraction due to that aperture. 
""" def __init__(self, elements=None, label=""): self.inputBeam = None self.showElementLabels = True self.showPointsOfInterest = True self.showPointsOfInterestLabels = True self.showPlanesAcrossPointsOfInterest = True self.figure = Figure(opticalPath=self) self.design = self.figure.design super(LaserPath, self).__init__(elements=elements, label=label) def display(self, beams=None, comments=None): # pragma: no cover """ Display the optical system and trace the laser beam. If comments are included they will be displayed on a graph in the bottom half of the plot. Parameters ---------- inputBeam : object of GaussianBeam class inputBeams : list of object of GaussianBeam class A list of Gaussian beams comments : string If comments are included they will be displayed on a graph in the bottom half of the plot. (default=None) """ if beams is None: beams = [self.inputBeam] self.figure.displayGaussianBeam(beams=beams, comments=comments, title=self.label, backend='matplotlib', display3D=False)
0.936503
0.733738
from main import * _logger = logging.getLogger(__name__) # List of REST resources in current file: # (url prefix) (method) (action) # /api/account.invoice.line GET - Read all (with optional filters, offset, limit, order) # /api/account.invoice.line/<id> GET - Read one # /api/account.invoice.line POST - Create one # /api/account.invoice.line/<id> PUT - Update one # /api/account.invoice.line/<id> DELETE - Delete one # /api/account.invoice.line/<id>/<method> PUT - Call method (with optional parameters) # List of IN/OUT data (json data and HTTP-headers) for each REST resource: # /api/account.invoice.line GET - Read all (with optional filters, offset, limit, order) # IN data: # HEADERS: # 'access_token' # JSON: # (optional filters (Odoo domain), offset, limit, order) # { # editable # "filters": "[('some_field_1', '=', some_value_1), ('some_field_2', '!=', some_value_2), ...]", # "offset": XXX, # "limit": XXX, # "order": "list_of_fields" # default 'name asc' # } # OUT data: OUT__account_invoice_line__read_all__SUCCESS_CODE = 200 # editable # JSON: # { # "count": XXX, # number of returned records # "results": [ OUT__account_invoice_line__read_all__JSON = ( # editable 'id', 'invoice_id', ('product_id', ( # many2one 'id', 'name', )), 'name', 'price_subtotal', ) # ] # } # /api/account.invoice.line/<id> GET - Read one # IN data: # HEADERS: # 'access_token' # JSON: # (optional parameter 'search_field' for search object not by 'id' field) # {"search_field": "some_field_name"} # editable # OUT data: OUT__account_invoice_line__read_one__SUCCESS_CODE = 200 # editable OUT__account_invoice_line__read_one__JSON = ( # editable # (The order of fields of different types maybe arbitrary) # simple fields (non relational): 'id', 'name', 'quantity', 'price_unit', 'price_subtotal', # many2one fields: ('invoice_id', ( 'id', 'number', )), ('product_id', ( 'id', 'name', 'type', ('categ_id', ( # many2one 'id', 'name', )), )), ('account_id', ( 'id', 'display_name', )), # many2many fields: 
('invoice_line_tax_ids', [( 'id', 'name', 'type', 'amount', 'price_include', )]), ) # /api/account.invoice.line POST - Create one # IN data: # HEADERS: # 'access_token' # DEFAULTS: # (optional default values of fields) DEFAULTS__account_invoice_line__create_one__JSON = { # editable #"some_field_1": some_value_1, #"some_field_2": some_value_2, #... } # JSON: # (fields and its values of created object; # don't forget about model's mandatory fields!) # ... # editable # OUT data: OUT__account_invoice_line__create_one__SUCCESS_CODE = 200 # editable OUT__account_invoice_line__create_one__JSON = ( # editable 'id', ) # /api/account.invoice.line/<id> PUT - Update one # IN data: # HEADERS: # 'access_token' # JSON: # (fields and new values of updated object) # editable # ... # OUT data: OUT__account_invoice_line__update_one__SUCCESS_CODE = 200 # editable # /api/account.invoice.line/<id> DELETE - Delete one # IN data: # HEADERS: # 'access_token' # OUT data: OUT__account_invoice_line__delete_one__SUCCESS_CODE = 200 # editable # /api/account.invoice.line/<id>/<method> PUT - Call method (with optional parameters) # IN data: # HEADERS: # 'access_token' # JSON: # (named parameters of method) # editable # ... 
# OUT data: OUT__account_invoice_line__call_method__SUCCESS_CODE = 200 # editable # HTTP controller of REST resources: class ControllerREST(http.Controller): # Read all (with optional filters, offset, limit, order): @http.route('/api/account.invoice.line', methods=['GET'], type='http', auth='none') @check_permissions def api__account_invoice_line__GET(self): return wrap__resource__read_all( modelname = 'account.invoice.line', default_domain = [], success_code = OUT__account_invoice_line__read_all__SUCCESS_CODE, OUT_fields = OUT__account_invoice_line__read_all__JSON ) # Read one: @http.route('/api/account.invoice.line/<id>', methods=['GET'], type='http', auth='none') @check_permissions def api__account_invoice_line__id_GET(self, id): return wrap__resource__read_one( modelname = 'account.invoice.line', id = id, success_code = OUT__account_invoice_line__read_one__SUCCESS_CODE, OUT_fields = OUT__account_invoice_line__read_one__JSON ) # Create one: @http.route('/api/account.invoice.line', methods=['POST'], type='http', auth='none', csrf=False) @check_permissions def api__account_invoice_line__POST(self): return wrap__resource__create_one( modelname = 'account.invoice.line', default_vals = DEFAULTS__account_invoice_line__create_one__JSON, success_code = OUT__account_invoice_line__create_one__SUCCESS_CODE, OUT_fields = OUT__account_invoice_line__create_one__JSON ) # Update one: @http.route('/api/account.invoice.line/<id>', methods=['PUT'], type='http', auth='none', csrf=False) @check_permissions def api__account_invoice_line__id_PUT(self, id): return wrap__resource__update_one( modelname = 'account.invoice.line', id = id, success_code = OUT__account_invoice_line__update_one__SUCCESS_CODE ) # Delete one: @http.route('/api/account.invoice.line/<id>', methods=['DELETE'], type='http', auth='none', csrf=False) @check_permissions def api__account_invoice_line__id_DELETE(self, id): return wrap__resource__delete_one( modelname = 'account.invoice.line', id = id, success_code = 
OUT__account_invoice_line__delete_one__SUCCESS_CODE ) # Call method (with optional parameters): @http.route('/api/account.invoice.line/<id>/<method>', methods=['PUT'], type='http', auth='none', csrf=False) @check_permissions def api__account_invoice_line__id__method_PUT(self, id, method): return wrap__resource__call_method( modelname = 'account.invoice.line', id = id, method = method, success_code = OUT__account_invoice_line__call_method__SUCCESS_CODE )
addons/rest_api/controllers/model__account_invoice_line.py
from main import * _logger = logging.getLogger(__name__) # List of REST resources in current file: # (url prefix) (method) (action) # /api/account.invoice.line GET - Read all (with optional filters, offset, limit, order) # /api/account.invoice.line/<id> GET - Read one # /api/account.invoice.line POST - Create one # /api/account.invoice.line/<id> PUT - Update one # /api/account.invoice.line/<id> DELETE - Delete one # /api/account.invoice.line/<id>/<method> PUT - Call method (with optional parameters) # List of IN/OUT data (json data and HTTP-headers) for each REST resource: # /api/account.invoice.line GET - Read all (with optional filters, offset, limit, order) # IN data: # HEADERS: # 'access_token' # JSON: # (optional filters (Odoo domain), offset, limit, order) # { # editable # "filters": "[('some_field_1', '=', some_value_1), ('some_field_2', '!=', some_value_2), ...]", # "offset": XXX, # "limit": XXX, # "order": "list_of_fields" # default 'name asc' # } # OUT data: OUT__account_invoice_line__read_all__SUCCESS_CODE = 200 # editable # JSON: # { # "count": XXX, # number of returned records # "results": [ OUT__account_invoice_line__read_all__JSON = ( # editable 'id', 'invoice_id', ('product_id', ( # many2one 'id', 'name', )), 'name', 'price_subtotal', ) # ] # } # /api/account.invoice.line/<id> GET - Read one # IN data: # HEADERS: # 'access_token' # JSON: # (optional parameter 'search_field' for search object not by 'id' field) # {"search_field": "some_field_name"} # editable # OUT data: OUT__account_invoice_line__read_one__SUCCESS_CODE = 200 # editable OUT__account_invoice_line__read_one__JSON = ( # editable # (The order of fields of different types maybe arbitrary) # simple fields (non relational): 'id', 'name', 'quantity', 'price_unit', 'price_subtotal', # many2one fields: ('invoice_id', ( 'id', 'number', )), ('product_id', ( 'id', 'name', 'type', ('categ_id', ( # many2one 'id', 'name', )), )), ('account_id', ( 'id', 'display_name', )), # many2many fields: 
('invoice_line_tax_ids', [( 'id', 'name', 'type', 'amount', 'price_include', )]), ) # /api/account.invoice.line POST - Create one # IN data: # HEADERS: # 'access_token' # DEFAULTS: # (optional default values of fields) DEFAULTS__account_invoice_line__create_one__JSON = { # editable #"some_field_1": some_value_1, #"some_field_2": some_value_2, #... } # JSON: # (fields and its values of created object; # don't forget about model's mandatory fields!) # ... # editable # OUT data: OUT__account_invoice_line__create_one__SUCCESS_CODE = 200 # editable OUT__account_invoice_line__create_one__JSON = ( # editable 'id', ) # /api/account.invoice.line/<id> PUT - Update one # IN data: # HEADERS: # 'access_token' # JSON: # (fields and new values of updated object) # editable # ... # OUT data: OUT__account_invoice_line__update_one__SUCCESS_CODE = 200 # editable # /api/account.invoice.line/<id> DELETE - Delete one # IN data: # HEADERS: # 'access_token' # OUT data: OUT__account_invoice_line__delete_one__SUCCESS_CODE = 200 # editable # /api/account.invoice.line/<id>/<method> PUT - Call method (with optional parameters) # IN data: # HEADERS: # 'access_token' # JSON: # (named parameters of method) # editable # ... 
# OUT data: OUT__account_invoice_line__call_method__SUCCESS_CODE = 200 # editable # HTTP controller of REST resources: class ControllerREST(http.Controller): # Read all (with optional filters, offset, limit, order): @http.route('/api/account.invoice.line', methods=['GET'], type='http', auth='none') @check_permissions def api__account_invoice_line__GET(self): return wrap__resource__read_all( modelname = 'account.invoice.line', default_domain = [], success_code = OUT__account_invoice_line__read_all__SUCCESS_CODE, OUT_fields = OUT__account_invoice_line__read_all__JSON ) # Read one: @http.route('/api/account.invoice.line/<id>', methods=['GET'], type='http', auth='none') @check_permissions def api__account_invoice_line__id_GET(self, id): return wrap__resource__read_one( modelname = 'account.invoice.line', id = id, success_code = OUT__account_invoice_line__read_one__SUCCESS_CODE, OUT_fields = OUT__account_invoice_line__read_one__JSON ) # Create one: @http.route('/api/account.invoice.line', methods=['POST'], type='http', auth='none', csrf=False) @check_permissions def api__account_invoice_line__POST(self): return wrap__resource__create_one( modelname = 'account.invoice.line', default_vals = DEFAULTS__account_invoice_line__create_one__JSON, success_code = OUT__account_invoice_line__create_one__SUCCESS_CODE, OUT_fields = OUT__account_invoice_line__create_one__JSON ) # Update one: @http.route('/api/account.invoice.line/<id>', methods=['PUT'], type='http', auth='none', csrf=False) @check_permissions def api__account_invoice_line__id_PUT(self, id): return wrap__resource__update_one( modelname = 'account.invoice.line', id = id, success_code = OUT__account_invoice_line__update_one__SUCCESS_CODE ) # Delete one: @http.route('/api/account.invoice.line/<id>', methods=['DELETE'], type='http', auth='none', csrf=False) @check_permissions def api__account_invoice_line__id_DELETE(self, id): return wrap__resource__delete_one( modelname = 'account.invoice.line', id = id, success_code = 
OUT__account_invoice_line__delete_one__SUCCESS_CODE ) # Call method (with optional parameters): @http.route('/api/account.invoice.line/<id>/<method>', methods=['PUT'], type='http', auth='none', csrf=False) @check_permissions def api__account_invoice_line__id__method_PUT(self, id, method): return wrap__resource__call_method( modelname = 'account.invoice.line', id = id, method = method, success_code = OUT__account_invoice_line__call_method__SUCCESS_CODE )
0.393385
0.095265
from _TFL import TFL from _TFL.portable_repr import portable_repr from _TFL.predicate import sorted from _TFL.pyk import pyk import _TFL._Meta.Object class Record (TFL.Meta.Object) : """Class emulating a struct/record (but dynamically). >>> r = Record (x = "y", kw = dict (foo = 42)) >>> print (r.x) y >>> r.kw {'foo': 42} >>> bool (r) True >>> bool (Record ()) False >>> r Record (kw = {'foo' : 42}, x = 'y') >>> from _TFL.json_dump import to_string as jsonified >>> jsonified (r) '{"kw": {"foo": 42}, "x": "y"}' """ _properties = () def __init__ (self, * ds, ** kw) : _kw = dict () for d in ds : _kw.update (d) _kw.update (kw) assert "_kw" not in _kw assert "copy" not in _kw assert "_formatted_kw" not in _kw assert "_properties" not in _kw assert not any (p in _kw for p in self._properties) self.__dict__ ["_kw"] = _kw # end def __init__ def copy (self, ** kw) : result = self.__class__ (** self._kw) result._kw.update (kw) return result # end def copy def _formatted_kw (self, seen = None) : if seen is None : seen = set ([id (self)]) return ", ".join \ ( ( "%s = %s" % (k, portable_repr.call (v, seen)) for (k, v) in sorted (pyk.iteritems (self._kw)) ) ) # end def _formatted_kw def __bool__ (self) : return bool (self._kw) # end def __bool__ def __contains__ (self, item) : return item in self._kw # end def __contains__ def __delattr__ (self, name) : del self._kw [name] # end def __delattr__ def __getattr__ (self, name) : try : return self._kw [name] except KeyError : raise AttributeError (name) # end def __getattr__ def __getitem__ (self, key) : return self._kw [key] # end def __getitem__ def __getstate__ (self) : return self._kw # end def __getstate__ def __iter__ (self) : return iter (self._kw) # end def __iter__ def __len__ (self) : return len (self._kw) # end def __len__ def __repr__ (self) : return portable_repr (self) # end def __repr__ def __setattr__ (self, name, value) : if name in self._properties : self.__super.__setattr__ (name, value) elif "." 
in name : this = self names = name.split (".") for name in names [:-1] : nested = this.__class__ () setattr (this, name, nested) this = nested setattr (this, names [-1], value) else : self._kw [name] = value # end def __setattr__ def __setitem__ (self, name, value) : if name in self._properties : self.__super.__setattr__ (name, value) else : self._kw [name] = value # end def __setitem__ def __setstate__ (self, state) : self.__dict__ ["_kw"] = state # end def __setstate__ def __str__ (self) : return "(%s)" % (self._formatted_kw (), ) # end def __str__ # end class Record class Record_S (Record) : """Record usable as dict for %-interpolation with nested attributes. >>> c = Record_S (x = 1) >>> o = Record_S (a = 42, b = Record_S (a = 137, b = "foo", c = c)) >>> print ("o.a = %(a)s, o.b.a = %(b.a)s, o.b.c.x = %(b.c.x)s" % o) o.a = 42, o.b.a = 137, o.b.c.x = 1 >>> c Record_S (x = 1) >>> o Record_S (a = 42, b = Record_S (a = 137, b = 'foo', c = Record_S (x = 1))) >>> from _TFL.json_dump import to_string as jsonified >>> jsonified (o) '{"a": 42, "b": {"a": 137, "b": "foo", "c": {"x": 1}}}' >>> c.y = o >>> c Record_S (x = 1, y = Record_S (a = 42, b = Record_S (a = 137, b = 'foo', c = Record_S (...)))) >>> o Record_S (a = 42, b = Record_S (a = 137, b = 'foo', c = Record_S (x = 1, y = Record_S (...)))) >>> jsonified (o) Traceback (most recent call last): ... 
ValueError: Circular reference detected """ def __getitem__ (self, key) : try : return self.__super.__getitem__ (key) except KeyError : o = self for k in key.split (".") : try : o = getattr (o, k) except AttributeError : raise KeyError (key) return o # end def __getitem__ # end class Record_S @portable_repr.add_type (Record) @portable_repr.recurses def _portable_repr_Record (obj, seen) : return "%s (%s)" % (obj.__class__.__name__, obj._formatted_kw (seen)) # end def _portable_repr_Record @portable_repr.recursion_repr.add_type (Record) def _recursion_repr_Record (obj) : return "%s (...)" % (obj.__class__.__name__, ) # end def _recursion_repr_Record @TFL._Add_Import_Callback ("_TFL.json_dump") def _import_cb_json_dump (module) : @module.default.add_type (Record) def json_encode_record (o) : return o._kw # end def _import_cb_json_dump if __name__ != "__main__" : TFL._Export ("*") ### __END__ TFL.Record
venv/Lib/site-packages/_TFL/Record.py
from _TFL import TFL from _TFL.portable_repr import portable_repr from _TFL.predicate import sorted from _TFL.pyk import pyk import _TFL._Meta.Object class Record (TFL.Meta.Object) : """Class emulating a struct/record (but dynamically). >>> r = Record (x = "y", kw = dict (foo = 42)) >>> print (r.x) y >>> r.kw {'foo': 42} >>> bool (r) True >>> bool (Record ()) False >>> r Record (kw = {'foo' : 42}, x = 'y') >>> from _TFL.json_dump import to_string as jsonified >>> jsonified (r) '{"kw": {"foo": 42}, "x": "y"}' """ _properties = () def __init__ (self, * ds, ** kw) : _kw = dict () for d in ds : _kw.update (d) _kw.update (kw) assert "_kw" not in _kw assert "copy" not in _kw assert "_formatted_kw" not in _kw assert "_properties" not in _kw assert not any (p in _kw for p in self._properties) self.__dict__ ["_kw"] = _kw # end def __init__ def copy (self, ** kw) : result = self.__class__ (** self._kw) result._kw.update (kw) return result # end def copy def _formatted_kw (self, seen = None) : if seen is None : seen = set ([id (self)]) return ", ".join \ ( ( "%s = %s" % (k, portable_repr.call (v, seen)) for (k, v) in sorted (pyk.iteritems (self._kw)) ) ) # end def _formatted_kw def __bool__ (self) : return bool (self._kw) # end def __bool__ def __contains__ (self, item) : return item in self._kw # end def __contains__ def __delattr__ (self, name) : del self._kw [name] # end def __delattr__ def __getattr__ (self, name) : try : return self._kw [name] except KeyError : raise AttributeError (name) # end def __getattr__ def __getitem__ (self, key) : return self._kw [key] # end def __getitem__ def __getstate__ (self) : return self._kw # end def __getstate__ def __iter__ (self) : return iter (self._kw) # end def __iter__ def __len__ (self) : return len (self._kw) # end def __len__ def __repr__ (self) : return portable_repr (self) # end def __repr__ def __setattr__ (self, name, value) : if name in self._properties : self.__super.__setattr__ (name, value) elif "." 
in name : this = self names = name.split (".") for name in names [:-1] : nested = this.__class__ () setattr (this, name, nested) this = nested setattr (this, names [-1], value) else : self._kw [name] = value # end def __setattr__ def __setitem__ (self, name, value) : if name in self._properties : self.__super.__setattr__ (name, value) else : self._kw [name] = value # end def __setitem__ def __setstate__ (self, state) : self.__dict__ ["_kw"] = state # end def __setstate__ def __str__ (self) : return "(%s)" % (self._formatted_kw (), ) # end def __str__ # end class Record class Record_S (Record) : """Record usable as dict for %-interpolation with nested attributes. >>> c = Record_S (x = 1) >>> o = Record_S (a = 42, b = Record_S (a = 137, b = "foo", c = c)) >>> print ("o.a = %(a)s, o.b.a = %(b.a)s, o.b.c.x = %(b.c.x)s" % o) o.a = 42, o.b.a = 137, o.b.c.x = 1 >>> c Record_S (x = 1) >>> o Record_S (a = 42, b = Record_S (a = 137, b = 'foo', c = Record_S (x = 1))) >>> from _TFL.json_dump import to_string as jsonified >>> jsonified (o) '{"a": 42, "b": {"a": 137, "b": "foo", "c": {"x": 1}}}' >>> c.y = o >>> c Record_S (x = 1, y = Record_S (a = 42, b = Record_S (a = 137, b = 'foo', c = Record_S (...)))) >>> o Record_S (a = 42, b = Record_S (a = 137, b = 'foo', c = Record_S (x = 1, y = Record_S (...)))) >>> jsonified (o) Traceback (most recent call last): ... 
ValueError: Circular reference detected """ def __getitem__ (self, key) : try : return self.__super.__getitem__ (key) except KeyError : o = self for k in key.split (".") : try : o = getattr (o, k) except AttributeError : raise KeyError (key) return o # end def __getitem__ # end class Record_S @portable_repr.add_type (Record) @portable_repr.recurses def _portable_repr_Record (obj, seen) : return "%s (%s)" % (obj.__class__.__name__, obj._formatted_kw (seen)) # end def _portable_repr_Record @portable_repr.recursion_repr.add_type (Record) def _recursion_repr_Record (obj) : return "%s (...)" % (obj.__class__.__name__, ) # end def _recursion_repr_Record @TFL._Add_Import_Callback ("_TFL.json_dump") def _import_cb_json_dump (module) : @module.default.add_type (Record) def json_encode_record (o) : return o._kw # end def _import_cb_json_dump if __name__ != "__main__" : TFL._Export ("*") ### __END__ TFL.Record
0.522202
0.273828
import unittest class MatMulTests(unittest.TestCase): def test_matmul(self): class A: def __init__(self,x): self.x = x def __matmul__(self,other): return self.x*(other.x) a = A(2) b = A(3) c = a @ b self.assertEqual(c, 6) self.assertEqual((a.x, b.x), (2,3)) self.assertEqual(a.__matmul__(b), 6) #self.assertEqual(repr(a.__matmul__), '<bound method A.__matmul__ of <__main__.A object>>') class B: def __init__(self,x): self.x = x a = B(2) b = B(3) self.assertRaises(TypeError, lambda x, y: x @ y, a, b) self.assertRaises(AttributeError, lambda x: x.__matmul__, a) def test_imatmul(self): class A: def __init__(self,x): self.x = x def __imatmul__(self,other): return A(self.x * other.x) a = A(2) b = A(3) a @= b self.assertEqual(a.x, 6) self.assertEqual(a.__imatmul__(b).x, 18) class B: def __init__(self,x): self.x = x a = B(2) b = B(3) def foo(x, y): x @= y return x self.assertRaises(TypeError, foo, a, b) self.assertRaises(AttributeError, lambda x: x.__imatmul__, a) class C: def __init__(self,x): self.x = x def __matmult__(self, other): return self.x * other.x def __imatmul__(self,other): return C(self.x * other.x) a = C(2) b = C(3) a @= b self.assertEqual(a.x, 6) self.assertEqual(type(a), C) self.assertEqual(a.__imatmul__(b).x, 18) class D: def __init__(self,x): self.x = x def __matmul__(self, other): return self.x * other.x a = D(2) b = D(3) a @= b self.assertEqual(a, 6) self.assertEqual(type(a), int) self.assertRaises(AttributeError, lambda x: x.__imatmul__, a) def test_rmatmul(self): class A: def __init__(self,x): self.x = x class B: def __init__(self, x): self.x = x def __rmatmul__(self, other): return self.x * other.x a = A(2) b = B(3) c = a @ b self.assertEqual(c, 6) def foo(x, y): z = x @ y return z self.assertRaises(TypeError, foo, b, a) self.assertRaises(AttributeError, lambda x: x.__rmatmul__, a) class C: def __init__(self,x): self.x = x def __matmul__(self,other): return self.x*(other.x) def __rmatmul__(self, other): return 0 class D: def __init__(self, x): self.x 
= x def __matmul__(self, other): return -1 def __rmatmul__(self, other): return -2 a = C(2) b = D(3) c = a @ b self.assertEqual(c, 6) d = b @ a self.assertEqual(d, -1) if __name__ == '__main__': unittest.main()
test/unit3/test_matmul.py
import unittest class MatMulTests(unittest.TestCase): def test_matmul(self): class A: def __init__(self,x): self.x = x def __matmul__(self,other): return self.x*(other.x) a = A(2) b = A(3) c = a @ b self.assertEqual(c, 6) self.assertEqual((a.x, b.x), (2,3)) self.assertEqual(a.__matmul__(b), 6) #self.assertEqual(repr(a.__matmul__), '<bound method A.__matmul__ of <__main__.A object>>') class B: def __init__(self,x): self.x = x a = B(2) b = B(3) self.assertRaises(TypeError, lambda x, y: x @ y, a, b) self.assertRaises(AttributeError, lambda x: x.__matmul__, a) def test_imatmul(self): class A: def __init__(self,x): self.x = x def __imatmul__(self,other): return A(self.x * other.x) a = A(2) b = A(3) a @= b self.assertEqual(a.x, 6) self.assertEqual(a.__imatmul__(b).x, 18) class B: def __init__(self,x): self.x = x a = B(2) b = B(3) def foo(x, y): x @= y return x self.assertRaises(TypeError, foo, a, b) self.assertRaises(AttributeError, lambda x: x.__imatmul__, a) class C: def __init__(self,x): self.x = x def __matmult__(self, other): return self.x * other.x def __imatmul__(self,other): return C(self.x * other.x) a = C(2) b = C(3) a @= b self.assertEqual(a.x, 6) self.assertEqual(type(a), C) self.assertEqual(a.__imatmul__(b).x, 18) class D: def __init__(self,x): self.x = x def __matmul__(self, other): return self.x * other.x a = D(2) b = D(3) a @= b self.assertEqual(a, 6) self.assertEqual(type(a), int) self.assertRaises(AttributeError, lambda x: x.__imatmul__, a) def test_rmatmul(self): class A: def __init__(self,x): self.x = x class B: def __init__(self, x): self.x = x def __rmatmul__(self, other): return self.x * other.x a = A(2) b = B(3) c = a @ b self.assertEqual(c, 6) def foo(x, y): z = x @ y return z self.assertRaises(TypeError, foo, b, a) self.assertRaises(AttributeError, lambda x: x.__rmatmul__, a) class C: def __init__(self,x): self.x = x def __matmul__(self,other): return self.x*(other.x) def __rmatmul__(self, other): return 0 class D: def __init__(self, x): self.x 
= x def __matmul__(self, other): return -1 def __rmatmul__(self, other): return -2 a = C(2) b = D(3) c = a @ b self.assertEqual(c, 6) d = b @ a self.assertEqual(d, -1) if __name__ == '__main__': unittest.main()
0.698535
0.715896
from discord.ext import commands from motor.motor_asyncio import AsyncIOMotorClient from main import config_var cluster = AsyncIOMotorClient(config_var['mango_link']) cursor = cluster["react_role"]['reaction_roles'] # code base on https://github.com/AdvicSaha443/Discord.py-Self-Role-Bot/blob/main/main.py class Reaction(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_raw_reaction_add(self, payload): if payload.member.bot: return else: check = await cursor.find_one({"id": payload.message_id}) if payload.message_id == check['id']: emojis = [] roles = [] for emoji in check['emojis']: emojis.append(emoji) for role in check['roles']: roles.append(role) guild = self.bot.get_guild(payload.guild_id) for i in range(len(emojis)): chose_emoji = str(payload.emoji) if chose_emoji == emojis[i]: selected_role = roles[i] role = guild.get_role(int(selected_role)) await payload.member.add_roles(role) @commands.Cog.listener() async def on_raw_reaction_remove(self, payload): check = await cursor.find_one({"id": payload.message_id}) if payload.message_id == check['id']: emojis = [] roles = [] for emoji in check['emojis']: emojis.append(emoji) for role in check['roles']: roles.append(role) guild = self.bot.get_guild(payload.guild_id) for i in range(len(emojis)): chose_emoji = str(payload.emoji) if chose_emoji == emojis[i]: selected_role = roles[i] role = guild.get_role(int(selected_role)) member = await(guild.fetch_member(payload.user_id)) if member is not None: await member.remove_roles(role) @commands.Cog.listener() async def on_raw_message_delete(self, payload): check = await cursor.find_one({"id": payload.message_id}) if check is not None: await cursor.delete_one(check)
cogs/settings/reaction.py
from discord.ext import commands from motor.motor_asyncio import AsyncIOMotorClient from main import config_var cluster = AsyncIOMotorClient(config_var['mango_link']) cursor = cluster["react_role"]['reaction_roles'] # code base on https://github.com/AdvicSaha443/Discord.py-Self-Role-Bot/blob/main/main.py class Reaction(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_raw_reaction_add(self, payload): if payload.member.bot: return else: check = await cursor.find_one({"id": payload.message_id}) if payload.message_id == check['id']: emojis = [] roles = [] for emoji in check['emojis']: emojis.append(emoji) for role in check['roles']: roles.append(role) guild = self.bot.get_guild(payload.guild_id) for i in range(len(emojis)): chose_emoji = str(payload.emoji) if chose_emoji == emojis[i]: selected_role = roles[i] role = guild.get_role(int(selected_role)) await payload.member.add_roles(role) @commands.Cog.listener() async def on_raw_reaction_remove(self, payload): check = await cursor.find_one({"id": payload.message_id}) if payload.message_id == check['id']: emojis = [] roles = [] for emoji in check['emojis']: emojis.append(emoji) for role in check['roles']: roles.append(role) guild = self.bot.get_guild(payload.guild_id) for i in range(len(emojis)): chose_emoji = str(payload.emoji) if chose_emoji == emojis[i]: selected_role = roles[i] role = guild.get_role(int(selected_role)) member = await(guild.fetch_member(payload.user_id)) if member is not None: await member.remove_roles(role) @commands.Cog.listener() async def on_raw_message_delete(self, payload): check = await cursor.find_one({"id": payload.message_id}) if check is not None: await cursor.delete_one(check)
0.503418
0.094135
import requests from requests.exceptions import RequestException from typing import Dict def check_response(request_func): """ Verify the resquest response, checking for errors. Args: resp (Dict): The response Raises: BadRequest: Bad request, check your request Unauthorized: Invalid token Forbidden: You do not have permition to this resource. NotFound: Resource not found. Check the endpoint UnProcessableEntity: The server was unable to process the request UnknownServerError: Internal server error Returns: Dict: The response """ def wrapper(*args, **kwargs): from timeit import default_timer as timer try: start = timer() resp = request_func(*args, **kwargs) end = timer() print( f'The request to ClickSign api lasted {(end - start):.3f} seconds' ) except RequestException: raise if resp.status_code == 400: raise BadRequest() elif resp.status_code == 401: raise Unauthorized() elif resp.status_code == 403: raise Forbidden() elif resp.status_code == 404: raise NotFound() elif resp.status_code == 422: raise UnProcessableEntity(resp.json().get('errors')) elif resp.status_code == 500: raise UnknownServerError() return resp return wrapper @check_response def make_response(method, url, params, timeout, json=None): return requests.request(method=method, url=url, json=json, params=params, timeout=timeout) class Forbidden(Exception): def __init__(self): pass def __str__(self): return 'ClickSign API Error: Forbidden! Please verify if your token is valid and if you are in the right environment.' class Unauthorized(Exception): def __init__(self): pass def __str__(self): return 'ClickSign API Error: Unauthorized! Invalid token.' class BadRequest(Exception): def __init__(self): pass def __str__(self): return 'ClickSign API Error: BadRequest! The request you send is not valid.' class UnProcessableEntity(Exception): def __init__(self, error): self.error = error def __str__(self): return f'ClickSign API Error: UnProcessableEntity! The server was not able to process the request. 
The server error was: {self.error}.' class NotFound(Exception): def __init__(self): pass def __str__(self): return 'ClickSign API Error: NotFound! The server was not able to find this recurse.' class UnknownServerError(Exception): def __init__(self): pass def __str__(self): return 'ClickSign API Error: UnknownServerError! The server was not able to process the request. Internal server error.'
clicksign_api_wrapper/exceptions.py
import requests from requests.exceptions import RequestException from typing import Dict def check_response(request_func): """ Verify the resquest response, checking for errors. Args: resp (Dict): The response Raises: BadRequest: Bad request, check your request Unauthorized: Invalid token Forbidden: You do not have permition to this resource. NotFound: Resource not found. Check the endpoint UnProcessableEntity: The server was unable to process the request UnknownServerError: Internal server error Returns: Dict: The response """ def wrapper(*args, **kwargs): from timeit import default_timer as timer try: start = timer() resp = request_func(*args, **kwargs) end = timer() print( f'The request to ClickSign api lasted {(end - start):.3f} seconds' ) except RequestException: raise if resp.status_code == 400: raise BadRequest() elif resp.status_code == 401: raise Unauthorized() elif resp.status_code == 403: raise Forbidden() elif resp.status_code == 404: raise NotFound() elif resp.status_code == 422: raise UnProcessableEntity(resp.json().get('errors')) elif resp.status_code == 500: raise UnknownServerError() return resp return wrapper @check_response def make_response(method, url, params, timeout, json=None): return requests.request(method=method, url=url, json=json, params=params, timeout=timeout) class Forbidden(Exception): def __init__(self): pass def __str__(self): return 'ClickSign API Error: Forbidden! Please verify if your token is valid and if you are in the right environment.' class Unauthorized(Exception): def __init__(self): pass def __str__(self): return 'ClickSign API Error: Unauthorized! Invalid token.' class BadRequest(Exception): def __init__(self): pass def __str__(self): return 'ClickSign API Error: BadRequest! The request you send is not valid.' class UnProcessableEntity(Exception): def __init__(self, error): self.error = error def __str__(self): return f'ClickSign API Error: UnProcessableEntity! The server was not able to process the request. 
The server error was: {self.error}.' class NotFound(Exception): def __init__(self): pass def __str__(self): return 'ClickSign API Error: NotFound! The server was not able to find this recurse.' class UnknownServerError(Exception): def __init__(self): pass def __str__(self): return 'ClickSign API Error: UnknownServerError! The server was not able to process the request. Internal server error.'
0.751739
0.102439
from __future__ import print_function import pickle import os.path from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from os import path import argparse parser = argparse.ArgumentParser(description='Setup credentials and tokens for Google API') parser.add_argument("-drive",action="store_true",default=False,dest="drive",help="Setup for Drive API") parser.add_argument("-docs",action="store_true",default=False,dest="docs",help="Setup for Docs API") parser.add_argument("-sheets",action="store_true",default=False,dest="sheets",help="Setup for Sheet API") CURDIR = path.dirname(path.abspath(__file__)) SCOPES_DOCS = ['https://www.googleapis.com/auth/documents'] SCOPES_DRIVE = ['https://www.googleapis.com/auth/drive'] SCOPES_SHEETS = ['https://www.googleapis.com/auth/spreadsheets'] def generate_token_docs(): creds = None if os.path.exists(path.join(CURDIR,"tokens",'token_docs.pickle')): with open(path.join(CURDIR,"tokens",'token_docs.pickle'), 'rb') as token: creds = pickle.load(token) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path.join(CURDIR,'credentials','credentials_docs.json'), SCOPES_DOCS) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open(path.join(CURDIR,"tokens",'token_docs.pickle'), 'wb') as token: pickle.dump(creds, token) service = build('docs', 'v1', credentials=creds) def generate_token_sheets(): creds = None if os.path.exists(path.join(CURDIR,"tokens",'token_sheets.pickle')): with open(path.join(CURDIR,"tokens",'token_sheets.pickle'), 'rb') as token: creds = pickle.load(token) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path.join(CURDIR,'credentials','credentials_sheets.json'), 
SCOPES_SHEETS) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open(path.join(CURDIR,"tokens",'token_sheets.pickle'), 'wb') as token: pickle.dump(creds, token) service = build('sheets', 'v4', credentials=creds) def generate_token_drive(): creds = None if os.path.exists(path.join(CURDIR,"tokens",'token_drive.pickle')): with open(path.join(CURDIR,"tokens",'token_drive.pickle'), 'rb') as token: creds = pickle.load(token) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path.join(CURDIR,'credentials','credentials_drive.json'), SCOPES_DRIVE) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open(path.join(CURDIR,"tokens",'token_drive.pickle'), 'wb') as token: pickle.dump(creds, token) if __name__ == '__main__': args = parser.parse_args() if args.drive: generate_token_drive() if args.sheets: generate_token_sheets() if args.docs: generate_token_docs()
setup.py
from __future__ import print_function import pickle import os.path from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from os import path import argparse parser = argparse.ArgumentParser(description='Setup credentials and tokens for Google API') parser.add_argument("-drive",action="store_true",default=False,dest="drive",help="Setup for Drive API") parser.add_argument("-docs",action="store_true",default=False,dest="docs",help="Setup for Docs API") parser.add_argument("-sheets",action="store_true",default=False,dest="sheets",help="Setup for Sheet API") CURDIR = path.dirname(path.abspath(__file__)) SCOPES_DOCS = ['https://www.googleapis.com/auth/documents'] SCOPES_DRIVE = ['https://www.googleapis.com/auth/drive'] SCOPES_SHEETS = ['https://www.googleapis.com/auth/spreadsheets'] def generate_token_docs(): creds = None if os.path.exists(path.join(CURDIR,"tokens",'token_docs.pickle')): with open(path.join(CURDIR,"tokens",'token_docs.pickle'), 'rb') as token: creds = pickle.load(token) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path.join(CURDIR,'credentials','credentials_docs.json'), SCOPES_DOCS) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open(path.join(CURDIR,"tokens",'token_docs.pickle'), 'wb') as token: pickle.dump(creds, token) service = build('docs', 'v1', credentials=creds) def generate_token_sheets(): creds = None if os.path.exists(path.join(CURDIR,"tokens",'token_sheets.pickle')): with open(path.join(CURDIR,"tokens",'token_sheets.pickle'), 'rb') as token: creds = pickle.load(token) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path.join(CURDIR,'credentials','credentials_sheets.json'), 
SCOPES_SHEETS) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open(path.join(CURDIR,"tokens",'token_sheets.pickle'), 'wb') as token: pickle.dump(creds, token) service = build('sheets', 'v4', credentials=creds) def generate_token_drive(): creds = None if os.path.exists(path.join(CURDIR,"tokens",'token_drive.pickle')): with open(path.join(CURDIR,"tokens",'token_drive.pickle'), 'rb') as token: creds = pickle.load(token) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path.join(CURDIR,'credentials','credentials_drive.json'), SCOPES_DRIVE) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open(path.join(CURDIR,"tokens",'token_drive.pickle'), 'wb') as token: pickle.dump(creds, token) if __name__ == '__main__': args = parser.parse_args() if args.drive: generate_token_drive() if args.sheets: generate_token_sheets() if args.docs: generate_token_docs()
0.27474
0.094552
class Shield: def __init__(self): self.s2 = 0 self.s1 = 0 self.s0 = 0 def tick(self, inputs): sensor_right = inputs[0] sensor_down = inputs[1] sensor_left = inputs[2] sensor_up = inputs[3] b = inputs[4] o3_1 = inputs[5] o2_1 = inputs[6] o1_1 = inputs[7] o3_2 = inputs[8] o2_2 = inputs[9] o1_2 = inputs[10] o3_3 = inputs[11] o2_3 = inputs[12] o1_3 = inputs[13] s2 = self.s2 s1 = self.s1 s0 = self.s0 tmp11 = (1 - (1 if o1_3 else 0)) tmp10 = (1 if o3_3 else tmp11) tmp12 = (1 - (1 if o1_2 else 0)) tmp9 = (tmp10 if o3_2 else tmp12) tmp13 = (1 - (1 if o1_1 else 0)) tmp8 = (tmp9 if o3_1 else tmp13) tmp7 = (tmp8 if sensor_up else 0) tmp19 = (1 if o1_3 else 0) tmp18 = (1 - (tmp19 if o2_3 else 0)) tmp17 = (1 if o3_3 else tmp18) tmp22 = (1 - (1 if o3_3 else tmp18)) tmp21 = (1 if o1_2 else tmp22) tmp23 = (1 - (tmp17 if o1_2 else 1)) tmp20 = (1 - (tmp21 if o2_2 else tmp23)) tmp16 = (tmp17 if o3_2 else tmp20) tmp26 = (1 - (tmp17 if o3_2 else tmp20)) tmp25 = (1 if o1_1 else tmp26) tmp27 = (1 - (tmp16 if o1_1 else 1)) tmp24 = (1 - (tmp25 if o2_1 else tmp27)) tmp15 = (tmp16 if o3_1 else tmp24) tmp14 = (1 if sensor_up else tmp15) tmp6 = (tmp7 if sensor_right else tmp14) tmp33 = (1 if o1_2 else 0) tmp32 = (1 - (tmp33 if o2_2 else tmp22)) tmp31 = (tmp17 if o3_2 else tmp32) tmp35 = (1 if o1_1 else 0) tmp36 = (1 - (tmp17 if o3_2 else tmp32)) tmp34 = (1 - (tmp35 if o2_1 else tmp36)) tmp30 = (tmp31 if o3_1 else tmp34) tmp29 = (1 if sensor_up else tmp30) tmp40 = (1 - (tmp33 if o2_2 else tmp23)) tmp39 = (tmp17 if o3_2 else tmp40) tmp42 = (1 - (tmp39 if o1_1 else 1)) tmp41 = (1 - (tmp35 if o2_1 else tmp42)) tmp38 = (tmp39 if o3_1 else tmp41) tmp37 = (1 if sensor_up else tmp38) tmp28 = (tmp29 if sensor_right else tmp37) tmp5 = (tmp6 if sensor_left else tmp28) tmp49 = (1 if o2_3 else tmp11) tmp48 = (1 if o3_3 else tmp49) tmp50 = (tmp48 if o2_2 else tmp12) tmp47 = (tmp48 if o3_2 else tmp50) tmp51 = (tmp47 if o2_1 else tmp13) tmp46 = (tmp47 if o3_1 else tmp51) tmp56 = (1 - (1 if o3_3 else tmp11)) 
tmp55 = (1 if o1_2 else tmp56) tmp54 = (1 - (tmp55 if o2_2 else tmp33)) tmp53 = (tmp10 if o3_2 else tmp54) tmp59 = (1 - (tmp10 if o3_2 else tmp54)) tmp58 = (1 if o1_1 else tmp59) tmp57 = (1 - (tmp58 if o2_1 else tmp35)) tmp52 = (tmp53 if o3_1 else tmp57) tmp45 = (1 - (tmp46 if sensor_up else tmp52)) tmp44 = (1 if sensor_right else tmp45) tmp65 = (tmp48 if o1_2 else 1) tmp67 = (1 - (1 if o3_3 else tmp49)) tmp66 = (1 - (1 if o1_2 else tmp67)) tmp64 = (tmp65 if o2_2 else tmp66) tmp63 = (tmp48 if o3_2 else tmp64) tmp69 = (tmp63 if o1_1 else 1) tmp71 = (1 - (tmp48 if o3_2 else tmp64)) tmp70 = (1 - (1 if o1_1 else tmp71)) tmp68 = (tmp69 if o2_1 else tmp70) tmp62 = (tmp63 if o3_1 else tmp68) tmp74 = (1 - (tmp33 if o2_2 else tmp55)) tmp73 = (tmp10 if o3_2 else tmp74) tmp77 = (1 - (tmp10 if o3_2 else tmp74)) tmp76 = (1 if o1_1 else tmp77) tmp75 = (1 - (tmp35 if o2_1 else tmp76)) tmp72 = (tmp73 if o3_1 else tmp75) tmp61 = (tmp62 if sensor_up else tmp72) tmp81 = (tmp65 if o2_2 else tmp12) tmp80 = (tmp48 if o3_2 else tmp81) tmp83 = (tmp80 if o1_1 else 1) tmp82 = (tmp83 if o2_1 else tmp13) tmp79 = (tmp80 if o3_1 else tmp82) tmp78 = (tmp79 if sensor_up else tmp8) tmp60 = (1 - (tmp61 if sensor_right else tmp78)) tmp43 = (1 - (tmp44 if sensor_left else tmp60)) tmp4 = (tmp5 if sensor_down else tmp43) tmp92 = (tmp10 if o1_2 else 1) tmp91 = (tmp10 if o2_2 else tmp92) tmp90 = (tmp91 if o3_2 else tmp12) tmp93 = (tmp90 if o1_1 else 1) tmp89 = (tmp90 if o2_1 else tmp93) tmp88 = (tmp89 if o3_1 else tmp13) tmp98 = (tmp17 if o1_2 else 1) tmp97 = (tmp17 if o2_2 else tmp98) tmp99 = (1 - (tmp21 if o2_2 else tmp22)) tmp96 = (tmp97 if o3_2 else tmp99) tmp100 = (tmp96 if o1_1 else 1) tmp95 = (tmp96 if o2_1 else tmp100) tmp103 = (1 - (tmp97 if o3_2 else tmp99)) tmp102 = (1 if o1_1 else tmp103) tmp101 = (1 - (tmp102 if o2_1 else tmp103)) tmp94 = (tmp95 if o3_1 else tmp101) tmp87 = (tmp88 if sensor_up else tmp94) tmp107 = (tmp97 if o3_2 else tmp20) tmp108 = (tmp107 if o1_1 else 1) tmp106 = (tmp107 
if o2_1 else tmp108) tmp111 = (1 - (tmp97 if o3_2 else tmp20)) tmp110 = (1 if o1_1 else tmp111) tmp112 = (1 - (tmp107 if o1_1 else 1)) tmp109 = (1 - (tmp110 if o2_1 else tmp112)) tmp105 = (tmp106 if o3_1 else tmp109) tmp104 = (1 if sensor_up else tmp105) tmp86 = (tmp87 if sensor_right else tmp104) tmp117 = (tmp97 if o3_2 else tmp32) tmp118 = (tmp117 if o1_1 else 1) tmp116 = (tmp117 if o2_1 else tmp118) tmp120 = (1 - (tmp97 if o3_2 else tmp32)) tmp119 = (1 - (tmp35 if o2_1 else tmp120)) tmp115 = (tmp116 if o3_1 else tmp119) tmp114 = (1 if sensor_up else tmp115) tmp124 = (tmp97 if o3_2 else tmp40) tmp125 = (tmp124 if o1_1 else 1) tmp123 = (tmp124 if o2_1 else tmp125) tmp127 = (1 - (tmp124 if o1_1 else 1)) tmp126 = (1 - (tmp35 if o2_1 else tmp127)) tmp122 = (tmp123 if o3_1 else tmp126) tmp121 = (1 if sensor_up else tmp122) tmp113 = (tmp114 if sensor_right else tmp121) tmp85 = (tmp86 if sensor_left else tmp113) tmp134 = (tmp48 if o2_2 else tmp65) tmp135 = (tmp48 if o2_2 else tmp66) tmp133 = (tmp134 if o3_2 else tmp135) tmp136 = (tmp133 if o1_1 else 1) tmp132 = (tmp133 if o2_1 else tmp136) tmp139 = (1 - (tmp134 if o3_2 else tmp135)) tmp138 = (1 - (1 if o1_1 else tmp139)) tmp137 = (tmp133 if o2_1 else tmp138) tmp131 = (tmp132 if o3_1 else tmp137) tmp143 = (1 - (1 if o1_2 else tmp56)) tmp142 = (tmp91 if o3_2 else tmp143) tmp144 = (tmp142 if o1_1 else 1) tmp141 = (tmp142 if o2_1 else tmp144) tmp146 = (1 - (tmp91 if o3_2 else tmp143)) tmp145 = (1 - (1 if o1_1 else tmp146)) tmp140 = (tmp141 if o3_1 else tmp145) tmp130 = (tmp131 if sensor_up else tmp140) tmp150 = (tmp134 if o3_2 else tmp50) tmp151 = (tmp150 if o1_1 else 1) tmp149 = (tmp150 if o2_1 else tmp151) tmp152 = (tmp150 if o2_1 else tmp13) tmp148 = (tmp149 if o3_1 else tmp152) tmp155 = (tmp91 if o3_2 else tmp54) tmp156 = (tmp155 if o1_1 else 1) tmp154 = (tmp155 if o2_1 else tmp156) tmp159 = (1 - (tmp91 if o3_2 else tmp54)) tmp158 = (1 if o1_1 else tmp159) tmp157 = (1 - (tmp158 if o2_1 else tmp35)) tmp153 = (tmp154 if 
o3_1 else tmp157) tmp147 = (tmp148 if sensor_up else tmp153) tmp129 = (tmp130 if sensor_right else tmp147) tmp164 = (tmp134 if o3_2 else tmp64) tmp165 = (tmp164 if o1_1 else 1) tmp163 = (tmp164 if o2_1 else tmp165) tmp168 = (1 - (tmp134 if o3_2 else tmp64)) tmp167 = (1 - (1 if o1_1 else tmp168)) tmp166 = (tmp165 if o2_1 else tmp167) tmp162 = (tmp163 if o3_1 else tmp166) tmp171 = (tmp91 if o3_2 else tmp74) tmp172 = (tmp171 if o1_1 else 1) tmp170 = (tmp171 if o2_1 else tmp172) tmp175 = (1 - (tmp91 if o3_2 else tmp74)) tmp174 = (1 if o1_1 else tmp175) tmp173 = (1 - (tmp35 if o2_1 else tmp174)) tmp169 = (tmp170 if o3_1 else tmp173) tmp161 = (tmp162 if sensor_up else tmp169) tmp179 = (tmp134 if o3_2 else tmp81) tmp180 = (tmp179 if o1_1 else 1) tmp178 = (tmp179 if o2_1 else tmp180) tmp181 = (tmp180 if o2_1 else tmp13) tmp177 = (tmp178 if o3_1 else tmp181) tmp176 = (tmp177 if sensor_up else tmp88) tmp160 = (tmp161 if sensor_right else tmp176) tmp128 = (tmp129 if sensor_left else tmp160) tmp84 = (tmp85 if sensor_down else tmp128) tmp3 = (tmp4 if b else tmp84) tmp2 = (1 if s0 else tmp3) tmp1 = (1 - (tmp2 if s1 else tmp84)) o1__s = tmp1 tmp192 = (1 - (1 if o2_3 else 0)) tmp191 = (1 if o3_3 else tmp192) tmp193 = (1 - (1 if o2_2 else 0)) tmp190 = (tmp191 if o3_2 else tmp193) tmp194 = (1 - (1 if o2_1 else 0)) tmp189 = (tmp190 if o3_1 else tmp194) tmp188 = (tmp189 if sensor_up else 0) tmp200 = (1 if o1_3 else 0) tmp199 = (1 - (tmp200 if o2_3 else 0)) tmp198 = (1 if o3_3 else tmp199) tmp203 = (1 - (1 if o3_3 else tmp199)) tmp202 = (1 if o1_2 else tmp203) tmp204 = (1 - (tmp198 if o1_2 else 1)) tmp201 = (1 - (tmp202 if o2_2 else tmp204)) tmp197 = (tmp198 if o3_2 else tmp201) tmp207 = (1 - (tmp198 if o3_2 else tmp201)) tmp206 = (1 if o1_1 else tmp207) tmp208 = (1 - (tmp197 if o1_1 else 1)) tmp205 = (1 - (tmp206 if o2_1 else tmp208)) tmp196 = (tmp197 if o3_1 else tmp205) tmp195 = (1 if sensor_up else tmp196) tmp187 = (tmp188 if sensor_right else tmp195) tmp214 = (tmp200 if o2_3 else 
1) tmp213 = (1 if o3_3 else tmp214) tmp216 = (tmp213 if o1_2 else 0) tmp217 = (tmp213 if o1_2 else 1) tmp215 = (tmp216 if o2_2 else tmp217) tmp212 = (tmp213 if o3_2 else tmp215) tmp219 = (tmp212 if o1_1 else 0) tmp220 = (tmp212 if o1_1 else 1) tmp218 = (tmp219 if o2_1 else tmp220) tmp211 = (tmp212 if o3_1 else tmp218) tmp224 = (1 - (tmp191 if o1_2 else 1)) tmp223 = (1 - (1 if o2_2 else tmp224)) tmp222 = (tmp191 if o3_2 else tmp223) tmp226 = (1 - (tmp222 if o1_1 else 1)) tmp225 = (1 - (1 if o2_1 else tmp226)) tmp221 = (tmp222 if o3_1 else tmp225) tmp210 = (1 - (tmp211 if sensor_up else tmp221)) tmp209 = (1 - (1 if sensor_right else tmp210)) tmp186 = (tmp187 if sensor_left else tmp209) tmp233 = (1 - (1 if o1_2 else tmp198)) tmp232 = (1 - (tmp202 if o2_2 else tmp233)) tmp231 = (tmp198 if o3_2 else tmp232) tmp236 = (1 - (tmp198 if o3_2 else tmp232)) tmp235 = (1 if o1_1 else tmp236) tmp237 = (1 - (1 if o1_1 else tmp231)) tmp234 = (1 - (tmp235 if o2_1 else tmp237)) tmp230 = (tmp231 if o3_1 else tmp234) tmp229 = (1 if sensor_up else tmp230) tmp241 = (1 - (tmp202 if o2_2 else 0)) tmp240 = (tmp198 if o3_2 else tmp241) tmp244 = (1 - (tmp198 if o3_2 else tmp241)) tmp243 = (1 if o1_1 else tmp244) tmp242 = (1 - (tmp243 if o2_1 else 0)) tmp239 = (tmp240 if o3_1 else tmp242) tmp238 = (1 if sensor_up else tmp239) tmp228 = (tmp229 if sensor_right else tmp238) tmp251 = (1 - (1 if o1_3 else 0)) tmp250 = (1 if o2_3 else tmp251) tmp249 = (1 if o3_3 else tmp250) tmp253 = (tmp249 if o1_2 else 1) tmp255 = (1 - (1 if o3_3 else tmp250)) tmp254 = (1 - (1 if o1_2 else tmp255)) tmp252 = (tmp253 if o2_2 else tmp254) tmp248 = (tmp249 if o3_2 else tmp252) tmp257 = (tmp248 if o1_1 else 1) tmp259 = (1 - (tmp249 if o3_2 else tmp252)) tmp258 = (1 - (1 if o1_1 else tmp259)) tmp256 = (tmp257 if o2_1 else tmp258) tmp247 = (tmp248 if o3_1 else tmp256) tmp262 = (1 if o2_2 else tmp254) tmp261 = (tmp249 if o3_2 else tmp262) tmp265 = (1 - (tmp249 if o3_2 else tmp262)) tmp264 = (1 - (1 if o1_1 else tmp265)) 
tmp263 = (1 if o2_1 else tmp264) tmp260 = (tmp261 if o3_1 else tmp263) tmp246 = (tmp247 if sensor_up else tmp260) tmp269 = (tmp216 if o2_2 else 1) tmp268 = (tmp213 if o3_2 else tmp269) tmp271 = (tmp268 if o1_1 else 0) tmp270 = (tmp271 if o2_1 else 1) tmp267 = (tmp268 if o3_1 else tmp270) tmp266 = (1 - (tmp267 if sensor_up else tmp189)) tmp245 = (1 - (tmp246 if sensor_right else tmp266)) tmp227 = (tmp228 if sensor_left else tmp245) tmp185 = (tmp186 if sensor_down else tmp227) tmp280 = (tmp191 if o1_2 else 1) tmp279 = (tmp191 if o2_2 else tmp280) tmp278 = (tmp279 if o3_2 else tmp193) tmp281 = (tmp278 if o1_1 else 1) tmp277 = (tmp278 if o2_1 else tmp281) tmp276 = (tmp277 if o3_1 else tmp194) tmp286 = (tmp198 if o1_2 else 1) tmp285 = (tmp198 if o2_2 else tmp286) tmp287 = (1 - (tmp202 if o2_2 else tmp203)) tmp284 = (tmp285 if o3_2 else tmp287) tmp288 = (tmp284 if o1_1 else 1) tmp283 = (tmp284 if o2_1 else tmp288) tmp291 = (1 - (tmp285 if o3_2 else tmp287)) tmp290 = (1 if o1_1 else tmp291) tmp289 = (1 - (tmp290 if o2_1 else tmp291)) tmp282 = (tmp283 if o3_1 else tmp289) tmp275 = (tmp276 if sensor_up else tmp282) tmp295 = (tmp285 if o3_2 else tmp201) tmp296 = (tmp295 if o1_1 else 1) tmp294 = (tmp295 if o2_1 else tmp296) tmp299 = (1 - (tmp285 if o3_2 else tmp201)) tmp298 = (1 if o1_1 else tmp299) tmp300 = (1 - (tmp295 if o1_1 else 1)) tmp297 = (1 - (tmp298 if o2_1 else tmp300)) tmp293 = (tmp294 if o3_1 else tmp297) tmp292 = (1 if sensor_up else tmp293) tmp274 = (tmp275 if sensor_right else tmp292) tmp306 = (tmp213 if o2_2 else tmp217) tmp307 = (tmp216 if o2_2 else tmp213) tmp305 = (tmp306 if o3_2 else tmp307) tmp308 = (tmp305 if o1_1 else 1) tmp304 = (tmp305 if o2_1 else tmp308) tmp310 = (tmp305 if o1_1 else 0) tmp309 = (tmp310 if o2_1 else tmp305) tmp303 = (tmp304 if o3_1 else tmp309) tmp315 = (1 - (1 if o3_3 else tmp192)) tmp314 = (1 - (1 if o2_2 else tmp315)) tmp313 = (tmp279 if o3_2 else tmp314) tmp316 = (tmp313 if o1_1 else 1) tmp312 = (tmp313 if o2_1 else tmp316) 
tmp318 = (1 - (tmp279 if o3_2 else tmp314)) tmp317 = (1 - (1 if o2_1 else tmp318)) tmp311 = (tmp312 if o3_1 else tmp317) tmp302 = (tmp303 if sensor_up else tmp311) tmp322 = (tmp306 if o3_2 else tmp215) tmp323 = (tmp322 if o1_1 else 1) tmp321 = (tmp322 if o2_1 else tmp323) tmp325 = (tmp322 if o1_1 else 0) tmp324 = (tmp325 if o2_1 else tmp323) tmp320 = (tmp321 if o3_1 else tmp324) tmp328 = (tmp279 if o3_2 else tmp223) tmp329 = (tmp328 if o1_1 else 1) tmp327 = (tmp328 if o2_1 else tmp329) tmp331 = (1 - (tmp328 if o1_1 else 1)) tmp330 = (1 - (1 if o2_1 else tmp331)) tmp326 = (tmp327 if o3_1 else tmp330) tmp319 = (tmp320 if sensor_up else tmp326) tmp301 = (tmp302 if sensor_right else tmp319) tmp273 = (tmp274 if sensor_left else tmp301) tmp337 = (tmp285 if o3_2 else tmp232) tmp338 = (tmp337 if o1_1 else 1) tmp336 = (tmp337 if o2_1 else tmp338) tmp341 = (1 - (tmp285 if o3_2 else tmp232)) tmp340 = (1 if o1_1 else tmp341) tmp342 = (1 - (1 if o1_1 else tmp337)) tmp339 = (1 - (tmp340 if o2_1 else tmp342)) tmp335 = (tmp336 if o3_1 else tmp339) tmp334 = (1 if sensor_up else tmp335) tmp346 = (tmp285 if o3_2 else tmp241) tmp347 = (tmp346 if o1_1 else 1) tmp345 = (tmp346 if o2_1 else tmp347) tmp350 = (1 - (tmp285 if o3_2 else tmp241)) tmp349 = (1 if o1_1 else tmp350) tmp348 = (1 - (tmp349 if o2_1 else 0)) tmp344 = (tmp345 if o3_1 else tmp348) tmp343 = (1 if sensor_up else tmp344) tmp333 = (tmp334 if sensor_right else tmp343) tmp357 = (1 if o1_2 else tmp213) tmp356 = (tmp216 if o2_2 else tmp357) tmp355 = (tmp306 if o3_2 else tmp356) tmp358 = (tmp355 if o1_1 else 1) tmp354 = (tmp355 if o2_1 else tmp358) tmp360 = (tmp355 if o1_1 else 0) tmp361 = (1 if o1_1 else tmp355) tmp359 = (tmp360 if o2_1 else tmp361) tmp353 = (tmp354 if o3_1 else tmp359) tmp366 = (1 - (1 if o1_2 else tmp191)) tmp365 = (1 - (1 if o2_2 else tmp366)) tmp364 = (tmp279 if o3_2 else tmp365) tmp367 = (tmp364 if o1_1 else 1) tmp363 = (tmp364 if o2_1 else tmp367) tmp369 = (1 - (1 if o1_1 else tmp364)) tmp368 = (1 - (1 
if o2_1 else tmp369)) tmp362 = (tmp363 if o3_1 else tmp368) tmp352 = (tmp353 if sensor_up else tmp362) tmp373 = (tmp306 if o3_2 else tmp269) tmp374 = (tmp373 if o1_1 else 1) tmp372 = (tmp373 if o2_1 else tmp374) tmp376 = (tmp373 if o1_1 else 0) tmp375 = (tmp376 if o2_1 else 1) tmp371 = (tmp372 if o3_1 else tmp375) tmp370 = (tmp371 if sensor_up else tmp276) tmp351 = (tmp352 if sensor_right else tmp370) tmp332 = (tmp333 if sensor_left else tmp351) tmp272 = (tmp273 if sensor_down else tmp332) tmp184 = (tmp185 if b else tmp272) tmp183 = (1 if s0 else tmp184) tmp182 = (1 - (tmp183 if s1 else tmp272)) o2__s = tmp182 tmp387 = (1 if o3_3 else 0) tmp388 = (tmp387 if o1_2 else 1) tmp386 = (tmp387 if o2_2 else tmp388) tmp385 = (tmp386 if o3_2 else 0) tmp389 = (tmp385 if o1_1 else 1) tmp384 = (tmp385 if o2_1 else tmp389) tmp383 = (tmp384 if o3_1 else 0) tmp396 = (1 if o1_3 else 0) tmp395 = (1 - (tmp396 if o2_3 else 0)) tmp394 = (1 if o3_3 else tmp395) tmp397 = (tmp394 if o1_2 else 1) tmp393 = (tmp394 if o2_2 else tmp397) tmp400 = (1 - (1 if o3_3 else tmp395)) tmp399 = (1 if o1_2 else tmp400) tmp398 = (1 - (tmp399 if o2_2 else tmp400)) tmp392 = (tmp393 if o3_2 else tmp398) tmp401 = (tmp392 if o1_1 else 1) tmp391 = (tmp392 if o2_1 else tmp401) tmp404 = (1 - (tmp393 if o3_2 else tmp398)) tmp403 = (1 if o1_1 else tmp404) tmp402 = (1 - (tmp403 if o2_1 else tmp404)) tmp390 = (tmp391 if o3_1 else tmp402) tmp382 = (tmp383 if sensor_up else tmp390) tmp410 = (tmp387 if o1_2 else 0) tmp409 = (tmp387 if o2_2 else tmp410) tmp408 = (tmp386 if o3_2 else tmp409) tmp411 = (tmp408 if o1_1 else 1) tmp407 = (tmp408 if o2_1 else tmp411) tmp413 = (tmp408 if o1_1 else 0) tmp412 = (tmp408 if o2_1 else tmp413) tmp406 = (tmp407 if o3_1 else tmp412) tmp419 = (1 - (1 if o3_3 else 0)) tmp418 = (1 if o1_2 else tmp419) tmp420 = (1 - (tmp387 if o1_2 else 0)) tmp417 = (1 - (tmp418 if o2_2 else tmp420)) tmp416 = (tmp386 if o3_2 else tmp417) tmp421 = (tmp416 if o1_1 else 1) tmp415 = (tmp416 if o2_1 else tmp421) 
tmp424 = (1 - (tmp386 if o3_2 else tmp417)) tmp423 = (1 if o1_1 else tmp424) tmp425 = (1 - (tmp416 if o1_1 else 0)) tmp422 = (1 - (tmp423 if o2_1 else tmp425)) tmp414 = (tmp415 if o3_1 else tmp422) tmp405 = (tmp406 if sensor_up else tmp414) tmp381 = (tmp382 if sensor_right else tmp405) tmp433 = (tmp396 if o2_3 else 1) tmp432 = (1 if o3_3 else tmp433) tmp434 = (tmp432 if o1_2 else 1) tmp431 = (tmp432 if o2_2 else tmp434) tmp436 = (tmp432 if o1_2 else 0) tmp435 = (tmp436 if o2_2 else tmp432) tmp430 = (tmp431 if o3_2 else tmp435) tmp437 = (tmp430 if o1_1 else 1) tmp429 = (tmp430 if o2_1 else tmp437) tmp439 = (tmp430 if o1_1 else 0) tmp438 = (tmp439 if o2_1 else tmp430) tmp428 = (tmp429 if o3_1 else tmp438) tmp445 = (1 - (1 if o2_3 else 0)) tmp444 = (1 if o3_3 else tmp445) tmp446 = (tmp444 if o1_2 else 1) tmp443 = (tmp444 if o2_2 else tmp446) tmp448 = (1 - (1 if o3_3 else tmp445)) tmp447 = (1 - (1 if o2_2 else tmp448)) tmp442 = (tmp443 if o3_2 else tmp447) tmp449 = (tmp442 if o1_1 else 1) tmp441 = (tmp442 if o2_1 else tmp449) tmp451 = (1 - (tmp443 if o3_2 else tmp447)) tmp450 = (1 - (1 if o2_1 else tmp451)) tmp440 = (tmp441 if o3_1 else tmp450) tmp427 = (tmp428 if sensor_up else tmp440) tmp455 = (tmp386 if o3_2 else tmp410) tmp456 = (tmp455 if o1_1 else 1) tmp454 = (tmp455 if o2_1 else tmp456) tmp457 = (tmp455 if o1_1 else 0) tmp453 = (tmp454 if o3_1 else tmp457) tmp461 = (1 - (1 if o2_2 else tmp420)) tmp460 = (tmp386 if o3_2 else tmp461) tmp462 = (tmp460 if o1_1 else 1) tmp459 = (tmp460 if o2_1 else tmp462) tmp464 = (1 - (tmp460 if o1_1 else 0)) tmp463 = (1 - (1 if o2_1 else tmp464)) tmp458 = (tmp459 if o3_1 else tmp463) tmp452 = (tmp453 if sensor_up else tmp458) tmp426 = (tmp427 if sensor_right else tmp452) tmp380 = (tmp381 if sensor_left else tmp426) tmp474 = (1 - (1 if o1_3 else 0)) tmp473 = (1 if o2_3 else tmp474) tmp472 = (1 if o3_3 else tmp473) tmp475 = (tmp472 if o1_2 else 1) tmp471 = (tmp472 if o2_2 else tmp475) tmp478 = (1 - (1 if o3_3 else tmp473)) tmp477 = 
(1 - (1 if o1_2 else tmp478)) tmp476 = (tmp472 if o2_2 else tmp477) tmp470 = (tmp471 if o3_2 else tmp476) tmp479 = (tmp470 if o1_1 else 1) tmp469 = (tmp470 if o2_1 else tmp479) tmp482 = (1 - (tmp471 if o3_2 else tmp476)) tmp481 = (1 - (1 if o1_1 else tmp482)) tmp480 = (tmp470 if o2_1 else tmp481) tmp468 = (tmp469 if o3_1 else tmp480) tmp487 = (1 if o3_3 else tmp474) tmp488 = (tmp487 if o1_2 else 1) tmp486 = (tmp487 if o2_2 else tmp488) tmp490 = (1 - (1 if o3_3 else tmp474)) tmp489 = (1 - (1 if o1_2 else tmp490)) tmp485 = (tmp486 if o3_2 else tmp489) tmp491 = (tmp485 if o1_1 else 1) tmp484 = (tmp485 if o2_1 else tmp491) tmp493 = (1 - (tmp486 if o3_2 else tmp489)) tmp492 = (1 - (1 if o1_1 else tmp493)) tmp483 = (tmp484 if o3_1 else tmp492) tmp467 = (tmp468 if sensor_up else tmp483) tmp498 = (tmp387 if o2_2 else 0) tmp497 = (tmp386 if o3_2 else tmp498) tmp499 = (tmp497 if o1_1 else 1) tmp496 = (tmp497 if o2_1 else tmp499) tmp500 = (tmp497 if o2_1 else 0) tmp495 = (tmp496 if o3_1 else tmp500) tmp504 = (1 - (tmp418 if o2_2 else 1)) tmp503 = (tmp386 if o3_2 else tmp504) tmp505 = (tmp503 if o1_1 else 1) tmp502 = (tmp503 if o2_1 else tmp505) tmp508 = (1 - (tmp386 if o3_2 else tmp504)) tmp507 = (1 if o1_1 else tmp508) tmp506 = (1 - (tmp507 if o2_1 else 1)) tmp501 = (tmp502 if o3_1 else tmp506) tmp494 = (tmp495 if sensor_up else tmp501) tmp466 = (tmp467 if sensor_right else tmp494) tmp516 = (tmp396 if o2_3 else tmp474) tmp515 = (1 if o3_3 else tmp516) tmp517 = (tmp515 if o1_2 else 1) tmp514 = (tmp515 if o2_2 else tmp517) tmp519 = (tmp515 if o1_2 else 0) tmp521 = (1 - (1 if o3_3 else tmp516)) tmp520 = (1 - (1 if o1_2 else tmp521)) tmp518 = (tmp519 if o2_2 else tmp520) tmp513 = (tmp514 if o3_2 else tmp518) tmp522 = (tmp513 if o1_1 else 1) tmp512 = (tmp513 if o2_1 else tmp522) tmp524 = (tmp513 if o1_1 else 0) tmp526 = (1 - (tmp514 if o3_2 else tmp518)) tmp525 = (1 - (1 if o1_1 else tmp526)) tmp523 = (tmp524 if o2_1 else tmp525) tmp511 = (tmp512 if o3_1 else tmp523) tmp532 = (1 
- (1 if o2_3 else tmp396)) tmp531 = (1 if o3_3 else tmp532) tmp533 = (tmp531 if o1_2 else 1) tmp530 = (tmp531 if o2_2 else tmp533) tmp536 = (1 - (1 if o3_3 else tmp532)) tmp535 = (1 if o1_2 else tmp536) tmp534 = (1 - (1 if o2_2 else tmp535)) tmp529 = (tmp530 if o3_2 else tmp534) tmp537 = (tmp529 if o1_1 else 1) tmp528 = (tmp529 if o2_1 else tmp537) tmp540 = (1 - (tmp530 if o3_2 else tmp534)) tmp539 = (1 if o1_1 else tmp540) tmp538 = (1 - (1 if o2_1 else tmp539)) tmp527 = (tmp528 if o3_1 else tmp538) tmp510 = (tmp511 if sensor_up else tmp527) tmp545 = (tmp410 if o2_2 else 0) tmp544 = (tmp386 if o3_2 else tmp545) tmp546 = (tmp544 if o1_1 else 1) tmp543 = (tmp544 if o2_1 else tmp546) tmp548 = (tmp544 if o1_1 else 0) tmp547 = (tmp548 if o2_1 else 0) tmp542 = (tmp543 if o3_1 else tmp547) tmp541 = (tmp542 if sensor_up else tmp383) tmp509 = (tmp510 if sensor_right else tmp541) tmp465 = (tmp466 if sensor_left else tmp509) tmp379 = (1 - (tmp380 if sensor_down else tmp465)) tmp378 = (1 if b else tmp379) tmp377 = (1 - (tmp378 if s1 else tmp379)) o3__s = tmp377 recovery__s = 0 tmp558 = (1 if o3_3 else 0) tmp557 = (tmp558 if o3_2 else 0) tmp556 = (tmp557 if o3_1 else 0) tmp563 = (1 if o1_3 else 0) tmp562 = (1 - (tmp563 if o2_3 else 0)) tmp561 = (1 if o3_3 else tmp562) tmp566 = (1 - (1 if o3_3 else tmp562)) tmp565 = (1 if o1_2 else tmp566) tmp564 = (1 - (tmp565 if o2_2 else tmp566)) tmp560 = (tmp561 if o3_2 else tmp564) tmp569 = (1 - (tmp561 if o3_2 else tmp564)) tmp568 = (1 if o1_1 else tmp569) tmp567 = (1 - (tmp568 if o2_1 else tmp569)) tmp559 = (tmp560 if o3_1 else tmp567) tmp555 = (tmp556 if sensor_up else tmp559) tmp574 = (1 if o2_3 else tmp563) tmp573 = (1 if o3_3 else tmp574) tmp576 = (tmp573 if o1_2 else 0) tmp575 = (tmp573 if o2_2 else tmp576) tmp572 = (tmp573 if o3_2 else tmp575) tmp578 = (tmp572 if o1_1 else 0) tmp577 = (tmp572 if o2_1 else tmp578) tmp571 = (tmp572 if o3_1 else tmp577) tmp583 = (1 - (1 if o1_3 else 0)) tmp582 = (1 - (tmp563 if o2_3 else tmp583)) 
tmp581 = (1 if o3_3 else tmp582) tmp586 = (1 - (1 if o3_3 else tmp582)) tmp585 = (1 if o1_2 else tmp586) tmp587 = (1 - (tmp581 if o1_2 else 0)) tmp584 = (1 - (tmp585 if o2_2 else tmp587)) tmp580 = (tmp581 if o3_2 else tmp584) tmp590 = (1 - (tmp581 if o3_2 else tmp584)) tmp589 = (1 if o1_1 else tmp590) tmp591 = (1 - (tmp580 if o1_1 else 0)) tmp588 = (1 - (tmp589 if o2_1 else tmp591)) tmp579 = (tmp580 if o3_1 else tmp588) tmp570 = (tmp571 if sensor_up else tmp579) tmp554 = (tmp555 if sensor_right else tmp570) tmp597 = (tmp563 if o2_3 else 1) tmp596 = (1 if o3_3 else tmp597) tmp599 = (tmp596 if o1_2 else 0) tmp598 = (tmp599 if o2_2 else tmp596) tmp595 = (tmp596 if o3_2 else tmp598) tmp601 = (tmp595 if o1_1 else 0) tmp600 = (tmp601 if o2_1 else tmp595) tmp594 = (tmp595 if o3_1 else tmp600) tmp605 = (1 - (1 if o2_3 else 0)) tmp604 = (1 if o3_3 else tmp605) tmp607 = (1 - (1 if o3_3 else tmp605)) tmp606 = (1 - (1 if o2_2 else tmp607)) tmp603 = (tmp604 if o3_2 else tmp606) tmp609 = (1 - (tmp604 if o3_2 else tmp606)) tmp608 = (1 - (1 if o2_1 else tmp609)) tmp602 = (tmp603 if o3_1 else tmp608) tmp593 = (tmp594 if sensor_up else tmp602) tmp613 = (1 if o3_3 else tmp563) tmp614 = (tmp613 if o1_2 else 0) tmp612 = (tmp613 if o3_2 else tmp614) tmp615 = (tmp612 if o1_1 else 0) tmp611 = (tmp612 if o3_1 else tmp615) tmp619 = (1 - (1 if o2_3 else tmp583)) tmp618 = (1 if o3_3 else tmp619) tmp621 = (1 - (tmp618 if o1_2 else 0)) tmp620 = (1 - (1 if o2_2 else tmp621)) tmp617 = (tmp618 if o3_2 else tmp620) tmp623 = (1 - (tmp617 if o1_1 else 0)) tmp622 = (1 - (1 if o2_1 else tmp623)) tmp616 = (tmp617 if o3_1 else tmp622) tmp610 = (tmp611 if sensor_up else tmp616) tmp592 = (tmp593 if sensor_right else tmp610) tmp553 = (tmp554 if sensor_left else tmp592) tmp630 = (1 if o2_3 else tmp583) tmp629 = (1 if o3_3 else tmp630) tmp633 = (1 - (1 if o3_3 else tmp630)) tmp632 = (1 - (1 if o1_2 else tmp633)) tmp631 = (tmp629 if o2_2 else tmp632) tmp628 = (tmp629 if o3_2 else tmp631) tmp636 = (1 - (tmp629 
if o3_2 else tmp631)) tmp635 = (1 - (1 if o1_1 else tmp636)) tmp634 = (tmp628 if o2_1 else tmp635) tmp627 = (tmp628 if o3_1 else tmp634) tmp639 = (1 if o3_3 else tmp583) tmp641 = (1 - (1 if o3_3 else tmp583)) tmp640 = (1 - (1 if o1_2 else tmp641)) tmp638 = (tmp639 if o3_2 else tmp640) tmp643 = (1 - (tmp639 if o3_2 else tmp640)) tmp642 = (1 - (1 if o1_1 else tmp643)) tmp637 = (tmp638 if o3_1 else tmp642) tmp626 = (tmp627 if sensor_up else tmp637) tmp648 = (1 if o2_3 else 0) tmp647 = (1 if o3_3 else tmp648) tmp649 = (tmp647 if o2_2 else 0) tmp646 = (tmp647 if o3_2 else tmp649) tmp650 = (tmp646 if o2_1 else 0) tmp645 = (tmp646 if o3_1 else tmp650) tmp654 = (1 - (tmp563 if o2_3 else 1)) tmp653 = (1 if o3_3 else tmp654) tmp657 = (1 - (1 if o3_3 else tmp654)) tmp656 = (1 if o1_2 else tmp657) tmp655 = (1 - (tmp656 if o2_2 else 1)) tmp652 = (tmp653 if o3_2 else tmp655) tmp660 = (1 - (tmp653 if o3_2 else tmp655)) tmp659 = (1 if o1_1 else tmp660) tmp658 = (1 - (tmp659 if o2_1 else 1)) tmp651 = (tmp652 if o3_1 else tmp658) tmp644 = (tmp645 if sensor_up else tmp651) tmp625 = (tmp626 if sensor_right else tmp644) tmp666 = (tmp563 if o2_3 else tmp583) tmp665 = (1 if o3_3 else tmp666) tmp668 = (tmp665 if o1_2 else 0) tmp670 = (1 - (1 if o3_3 else tmp666)) tmp669 = (1 - (1 if o1_2 else tmp670)) tmp667 = (tmp668 if o2_2 else tmp669) tmp664 = (tmp665 if o3_2 else tmp667) tmp672 = (tmp664 if o1_1 else 0) tmp674 = (1 - (tmp665 if o3_2 else tmp667)) tmp673 = (1 - (1 if o1_1 else tmp674)) tmp671 = (tmp672 if o2_1 else tmp673) tmp663 = (tmp664 if o3_1 else tmp671) tmp678 = (1 - (1 if o2_3 else tmp563)) tmp677 = (1 if o3_3 else tmp678) tmp681 = (1 - (1 if o3_3 else tmp678)) tmp680 = (1 if o1_2 else tmp681) tmp679 = (1 - (1 if o2_2 else tmp680)) tmp676 = (tmp677 if o3_2 else tmp679) tmp684 = (1 - (tmp677 if o3_2 else tmp679)) tmp683 = (1 if o1_1 else tmp684) tmp682 = (1 - (1 if o2_1 else tmp683)) tmp675 = (tmp676 if o3_1 else tmp682) tmp662 = (tmp663 if sensor_up else tmp675) tmp689 = 
(tmp563 if o2_3 else 0) tmp688 = (1 if o3_3 else tmp689) tmp691 = (tmp688 if o1_2 else 0) tmp690 = (tmp691 if o2_2 else 0) tmp687 = (tmp688 if o3_2 else tmp690) tmp693 = (tmp687 if o1_1 else 0) tmp692 = (tmp693 if o2_1 else 0) tmp686 = (tmp687 if o3_1 else tmp692) tmp685 = (tmp686 if sensor_up else tmp556) tmp661 = (tmp662 if sensor_right else tmp685) tmp624 = (tmp625 if sensor_left else tmp661) tmp552 = (tmp553 if sensor_down else tmp624) tmp702 = (tmp574 if o3_3 else 0) tmp703 = (tmp702 if o1_2 else 0) tmp701 = (tmp702 if o2_2 else tmp703) tmp700 = (tmp701 if o3_2 else 0) tmp704 = (tmp700 if o1_1 else 0) tmp699 = (tmp700 if o2_1 else tmp704) tmp698 = (tmp699 if o3_1 else 0) tmp709 = (tmp574 if o3_3 else tmp562) tmp710 = (tmp709 if o1_2 else 0) tmp708 = (tmp709 if o2_2 else tmp710) tmp713 = (1 - (tmp574 if o3_3 else tmp562)) tmp712 = (1 if o1_2 else tmp713) tmp711 = (1 - (tmp712 if o2_2 else tmp713)) tmp707 = (tmp708 if o3_2 else tmp711) tmp714 = (tmp707 if o1_1 else 0) tmp706 = (tmp707 if o2_1 else tmp714) tmp717 = (1 - (tmp708 if o3_2 else tmp711)) tmp716 = (1 if o1_1 else tmp717) tmp715 = (1 - (tmp716 if o2_1 else tmp717)) tmp705 = (tmp706 if o3_1 else tmp715) tmp697 = (tmp698 if sensor_up else tmp705) tmp721 = (tmp574 if o1_2 else 0) tmp720 = (tmp574 if o2_2 else tmp721) tmp722 = (tmp720 if o1_1 else 0) tmp719 = (tmp720 if o2_1 else tmp722) tmp727 = (tmp574 if o3_3 else tmp582) tmp728 = (tmp727 if o1_2 else 0) tmp726 = (tmp727 if o2_2 else tmp728) tmp731 = (1 - (tmp574 if o3_3 else tmp582)) tmp730 = (1 if o1_2 else tmp731) tmp732 = (1 - (tmp727 if o1_2 else 0)) tmp729 = (1 - (tmp730 if o2_2 else tmp732)) tmp725 = (tmp726 if o3_2 else tmp729) tmp733 = (tmp725 if o1_1 else 0) tmp724 = (tmp725 if o2_1 else tmp733) tmp736 = (1 - (tmp726 if o3_2 else tmp729)) tmp735 = (1 if o1_1 else tmp736) tmp737 = (1 - (tmp725 if o1_1 else 0)) tmp734 = (1 - (tmp735 if o2_1 else tmp737)) tmp723 = (tmp724 if o3_1 else tmp734) tmp718 = (tmp719 if sensor_up else tmp723) tmp696 = 
(tmp697 if sensor_right else tmp718) tmp744 = (tmp574 if o3_3 else tmp597) tmp745 = (tmp744 if o1_2 else 0) tmp743 = (tmp744 if o2_2 else tmp745) tmp746 = (tmp745 if o2_2 else tmp744) tmp742 = (tmp743 if o3_2 else tmp746) tmp747 = (tmp742 if o1_1 else 0) tmp741 = (tmp742 if o2_1 else tmp747) tmp748 = (tmp747 if o2_1 else tmp742) tmp740 = (tmp741 if o3_1 else tmp748) tmp753 = (tmp574 if o3_3 else tmp605) tmp754 = (tmp753 if o1_2 else 0) tmp752 = (tmp753 if o2_2 else tmp754) tmp756 = (1 - (tmp574 if o3_3 else tmp605)) tmp755 = (1 - (1 if o2_2 else tmp756)) tmp751 = (tmp752 if o3_2 else tmp755) tmp757 = (tmp751 if o1_1 else 0) tmp750 = (tmp751 if o2_1 else tmp757) tmp759 = (1 - (tmp752 if o3_2 else tmp755)) tmp758 = (1 - (1 if o2_1 else tmp759)) tmp749 = (tmp750 if o3_1 else tmp758) tmp739 = (tmp740 if sensor_up else tmp749) tmp765 = (tmp574 if o3_3 else tmp563) tmp766 = (tmp765 if o1_2 else 0) tmp764 = (tmp765 if o2_2 else tmp766) tmp763 = (tmp764 if o3_2 else tmp766) tmp767 = (tmp763 if o1_1 else 0) tmp762 = (tmp763 if o2_1 else tmp767) tmp761 = (tmp762 if o3_1 else tmp767) tmp772 = (tmp574 if o3_3 else tmp619) tmp773 = (tmp772 if o1_2 else 0) tmp771 = (tmp772 if o2_2 else tmp773) tmp775 = (1 - (tmp772 if o1_2 else 0)) tmp774 = (1 - (1 if o2_2 else tmp775)) tmp770 = (tmp771 if o3_2 else tmp774) tmp776 = (tmp770 if o1_1 else 0) tmp769 = (tmp770 if o2_1 else tmp776) tmp778 = (1 - (tmp770 if o1_1 else 0)) tmp777 = (1 - (1 if o2_1 else tmp778)) tmp768 = (tmp769 if o3_1 else tmp777) tmp760 = (tmp761 if sensor_up else tmp768) tmp738 = (tmp739 if sensor_right else tmp760) tmp695 = (tmp696 if sensor_left else tmp738) tmp786 = (tmp574 if o3_3 else tmp630) tmp787 = (tmp786 if o1_2 else 0) tmp785 = (tmp786 if o2_2 else tmp787) tmp790 = (1 - (tmp574 if o3_3 else tmp630)) tmp789 = (1 - (1 if o1_2 else tmp790)) tmp788 = (tmp786 if o2_2 else tmp789) tmp784 = (tmp785 if o3_2 else tmp788) tmp791 = (tmp784 if o1_1 else 0) tmp783 = (tmp784 if o2_1 else tmp791) tmp794 = (1 - (tmp785 if 
o3_2 else tmp788)) tmp793 = (1 - (1 if o1_1 else tmp794)) tmp792 = (tmp784 if o2_1 else tmp793) tmp782 = (tmp783 if o3_1 else tmp792) tmp799 = (tmp574 if o3_3 else tmp583) tmp800 = (tmp799 if o1_2 else 0) tmp798 = (tmp799 if o2_2 else tmp800) tmp802 = (1 - (tmp574 if o3_3 else tmp583)) tmp801 = (1 - (1 if o1_2 else tmp802)) tmp797 = (tmp798 if o3_2 else tmp801) tmp803 = (tmp797 if o1_1 else 0) tmp796 = (tmp797 if o2_1 else tmp803) tmp805 = (1 - (tmp798 if o3_2 else tmp801)) tmp804 = (1 - (1 if o1_1 else tmp805)) tmp795 = (tmp796 if o3_1 else tmp804) tmp781 = (tmp782 if sensor_up else tmp795) tmp811 = (tmp574 if o3_3 else tmp648) tmp812 = (tmp811 if o1_2 else 0) tmp810 = (tmp811 if o2_2 else tmp812) tmp813 = (tmp811 if o2_2 else 0) tmp809 = (tmp810 if o3_2 else tmp813) tmp814 = (tmp809 if o1_1 else 0) tmp808 = (tmp809 if o2_1 else tmp814) tmp815 = (tmp809 if o2_1 else 0) tmp807 = (tmp808 if o3_1 else tmp815) tmp820 = (tmp574 if o3_3 else tmp654) tmp821 = (tmp820 if o1_2 else 0) tmp819 = (tmp820 if o2_2 else tmp821) tmp824 = (1 - (tmp574 if o3_3 else tmp654)) tmp823 = (1 if o1_2 else tmp824) tmp822 = (1 - (tmp823 if o2_2 else 1)) tmp818 = (tmp819 if o3_2 else tmp822) tmp825 = (tmp818 if o1_1 else 0) tmp817 = (tmp818 if o2_1 else tmp825) tmp828 = (1 - (tmp819 if o3_2 else tmp822)) tmp827 = (1 if o1_1 else tmp828) tmp826 = (1 - (tmp827 if o2_1 else 1)) tmp816 = (tmp817 if o3_1 else tmp826) tmp806 = (tmp807 if sensor_up else tmp816) tmp780 = (tmp781 if sensor_right else tmp806) tmp835 = (tmp574 if o3_3 else tmp666) tmp836 = (tmp835 if o1_2 else 0) tmp834 = (tmp835 if o2_2 else tmp836) tmp839 = (1 - (tmp574 if o3_3 else tmp666)) tmp838 = (1 - (1 if o1_2 else tmp839)) tmp837 = (tmp836 if o2_2 else tmp838) tmp833 = (tmp834 if o3_2 else tmp837) tmp840 = (tmp833 if o1_1 else 0) tmp832 = (tmp833 if o2_1 else tmp840) tmp843 = (1 - (tmp834 if o3_2 else tmp837)) tmp842 = (1 - (1 if o1_1 else tmp843)) tmp841 = (tmp840 if o2_1 else tmp842) tmp831 = (tmp832 if o3_1 else tmp841) 
tmp848 = (tmp574 if o3_3 else tmp678) tmp849 = (tmp848 if o1_2 else 0) tmp847 = (tmp848 if o2_2 else tmp849) tmp852 = (1 - (tmp574 if o3_3 else tmp678)) tmp851 = (1 if o1_2 else tmp852) tmp850 = (1 - (1 if o2_2 else tmp851)) tmp846 = (tmp847 if o3_2 else tmp850) tmp853 = (tmp846 if o1_1 else 0) tmp845 = (tmp846 if o2_1 else tmp853) tmp856 = (1 - (tmp847 if o3_2 else tmp850)) tmp855 = (1 if o1_1 else tmp856) tmp854 = (1 - (1 if o2_1 else tmp855)) tmp844 = (tmp845 if o3_1 else tmp854) tmp830 = (tmp831 if sensor_up else tmp844) tmp862 = (tmp574 if o3_3 else tmp689) tmp863 = (tmp862 if o1_2 else 0) tmp861 = (tmp862 if o2_2 else tmp863) tmp864 = (tmp863 if o2_2 else 0) tmp860 = (tmp861 if o3_2 else tmp864) tmp865 = (tmp860 if o1_1 else 0) tmp859 = (tmp860 if o2_1 else tmp865) tmp866 = (tmp865 if o2_1 else 0) tmp858 = (tmp859 if o3_1 else tmp866) tmp857 = (tmp858 if sensor_up else tmp698) tmp829 = (tmp830 if sensor_right else tmp857) tmp779 = (tmp780 if sensor_left else tmp829) tmp694 = (tmp695 if sensor_down else tmp779) tmp551 = (tmp552 if b else tmp694) tmp550 = (1 - (tmp551 if s1 else tmp694)) tmp549 = (1 - (1 if s2 else tmp550)) s2n = tmp549 tmp878 = (1 if o3_3 else 0) tmp879 = (tmp878 if o1_2 else 1) tmp877 = (tmp878 if o2_2 else tmp879) tmp876 = (tmp877 if o3_2 else 0) tmp880 = (tmp876 if o1_1 else 1) tmp875 = (tmp876 if o2_1 else tmp880) tmp874 = (tmp875 if o3_1 else 0) tmp887 = (1 if o1_3 else 0) tmp886 = (1 - (tmp887 if o2_3 else 0)) tmp885 = (1 if o3_3 else tmp886) tmp888 = (tmp885 if o1_2 else 1) tmp884 = (tmp885 if o2_2 else tmp888) tmp891 = (1 - (1 if o3_3 else tmp886)) tmp890 = (1 if o1_2 else tmp891) tmp889 = (1 - (tmp890 if o2_2 else tmp891)) tmp883 = (tmp884 if o3_2 else tmp889) tmp892 = (tmp883 if o1_1 else 1) tmp882 = (tmp883 if o2_1 else tmp892) tmp895 = (1 - (tmp884 if o3_2 else tmp889)) tmp894 = (1 if o1_1 else tmp895) tmp893 = (1 - (tmp894 if o2_1 else tmp895)) tmp881 = (tmp882 if o3_1 else tmp893) tmp873 = (tmp874 if sensor_up else tmp881) tmp901 
= (tmp878 if o1_2 else 0) tmp900 = (tmp878 if o2_2 else tmp901) tmp899 = (tmp877 if o3_2 else tmp900) tmp902 = (tmp899 if o1_1 else 1) tmp898 = (tmp899 if o2_1 else tmp902) tmp904 = (tmp899 if o1_1 else 0) tmp903 = (tmp899 if o2_1 else tmp904) tmp897 = (tmp898 if o3_1 else tmp903) tmp910 = (1 - (1 if o3_3 else 0)) tmp909 = (1 if o1_2 else tmp910) tmp911 = (1 - (tmp878 if o1_2 else 0)) tmp908 = (1 - (tmp909 if o2_2 else tmp911)) tmp907 = (tmp877 if o3_2 else tmp908) tmp912 = (tmp907 if o1_1 else 1) tmp906 = (tmp907 if o2_1 else tmp912) tmp915 = (1 - (tmp877 if o3_2 else tmp908)) tmp914 = (1 if o1_1 else tmp915) tmp916 = (1 - (tmp907 if o1_1 else 0)) tmp913 = (1 - (tmp914 if o2_1 else tmp916)) tmp905 = (tmp906 if o3_1 else tmp913) tmp896 = (tmp897 if sensor_up else tmp905) tmp872 = (tmp873 if sensor_right else tmp896) tmp924 = (tmp887 if o2_3 else 1) tmp923 = (1 if o3_3 else tmp924) tmp925 = (tmp923 if o1_2 else 1) tmp922 = (tmp923 if o2_2 else tmp925) tmp927 = (tmp923 if o1_2 else 0) tmp926 = (tmp927 if o2_2 else tmp923) tmp921 = (tmp922 if o3_2 else tmp926) tmp928 = (tmp921 if o1_1 else 1) tmp920 = (tmp921 if o2_1 else tmp928) tmp930 = (tmp921 if o1_1 else 0) tmp929 = (tmp930 if o2_1 else tmp921) tmp919 = (tmp920 if o3_1 else tmp929) tmp936 = (1 - (1 if o2_3 else 0)) tmp935 = (1 if o3_3 else tmp936) tmp937 = (tmp935 if o1_2 else 1) tmp934 = (tmp935 if o2_2 else tmp937) tmp939 = (1 - (1 if o3_3 else tmp936)) tmp938 = (1 - (1 if o2_2 else tmp939)) tmp933 = (tmp934 if o3_2 else tmp938) tmp940 = (tmp933 if o1_1 else 1) tmp932 = (tmp933 if o2_1 else tmp940) tmp942 = (1 - (tmp934 if o3_2 else tmp938)) tmp941 = (1 - (1 if o2_1 else tmp942)) tmp931 = (tmp932 if o3_1 else tmp941) tmp918 = (tmp919 if sensor_up else tmp931) tmp946 = (tmp877 if o3_2 else tmp901) tmp947 = (tmp946 if o1_1 else 1) tmp945 = (tmp946 if o2_1 else tmp947) tmp948 = (tmp946 if o1_1 else 0) tmp944 = (tmp945 if o3_1 else tmp948) tmp952 = (1 - (1 if o2_2 else tmp911)) tmp951 = (tmp877 if o3_2 else tmp952) 
tmp953 = (tmp951 if o1_1 else 1) tmp950 = (tmp951 if o2_1 else tmp953) tmp955 = (1 - (tmp951 if o1_1 else 0)) tmp954 = (1 - (1 if o2_1 else tmp955)) tmp949 = (tmp950 if o3_1 else tmp954) tmp943 = (tmp944 if sensor_up else tmp949) tmp917 = (tmp918 if sensor_right else tmp943) tmp871 = (tmp872 if sensor_left else tmp917) tmp965 = (1 - (1 if o1_3 else 0)) tmp964 = (1 if o2_3 else tmp965) tmp963 = (1 if o3_3 else tmp964) tmp966 = (tmp963 if o1_2 else 1) tmp962 = (tmp963 if o2_2 else tmp966) tmp969 = (1 - (1 if o3_3 else tmp964)) tmp968 = (1 - (1 if o1_2 else tmp969)) tmp967 = (tmp963 if o2_2 else tmp968) tmp961 = (tmp962 if o3_2 else tmp967) tmp970 = (tmp961 if o1_1 else 1) tmp960 = (tmp961 if o2_1 else tmp970) tmp973 = (1 - (tmp962 if o3_2 else tmp967)) tmp972 = (1 - (1 if o1_1 else tmp973)) tmp971 = (tmp961 if o2_1 else tmp972) tmp959 = (tmp960 if o3_1 else tmp971) tmp978 = (1 if o3_3 else tmp965) tmp979 = (tmp978 if o1_2 else 1) tmp977 = (tmp978 if o2_2 else tmp979) tmp981 = (1 - (1 if o3_3 else tmp965)) tmp980 = (1 - (1 if o1_2 else tmp981)) tmp976 = (tmp977 if o3_2 else tmp980) tmp982 = (tmp976 if o1_1 else 1) tmp975 = (tmp976 if o2_1 else tmp982) tmp984 = (1 - (tmp977 if o3_2 else tmp980)) tmp983 = (1 - (1 if o1_1 else tmp984)) tmp974 = (tmp975 if o3_1 else tmp983) tmp958 = (tmp959 if sensor_up else tmp974) tmp989 = (tmp878 if o2_2 else 0) tmp988 = (tmp877 if o3_2 else tmp989) tmp990 = (tmp988 if o1_1 else 1) tmp987 = (tmp988 if o2_1 else tmp990) tmp991 = (tmp988 if o2_1 else 0) tmp986 = (tmp987 if o3_1 else tmp991) tmp995 = (1 - (tmp909 if o2_2 else 1)) tmp994 = (tmp877 if o3_2 else tmp995) tmp996 = (tmp994 if o1_1 else 1) tmp993 = (tmp994 if o2_1 else tmp996) tmp999 = (1 - (tmp877 if o3_2 else tmp995)) tmp998 = (1 if o1_1 else tmp999) tmp997 = (1 - (tmp998 if o2_1 else 1)) tmp992 = (tmp993 if o3_1 else tmp997) tmp985 = (tmp986 if sensor_up else tmp992) tmp957 = (tmp958 if sensor_right else tmp985) tmp1007 = (tmp887 if o2_3 else tmp965) tmp1006 = (1 if o3_3 else 
tmp1007) tmp1008 = (tmp1006 if o1_2 else 1) tmp1005 = (tmp1006 if o2_2 else tmp1008) tmp1010 = (tmp1006 if o1_2 else 0) tmp1012 = (1 - (1 if o3_3 else tmp1007)) tmp1011 = (1 - (1 if o1_2 else tmp1012)) tmp1009 = (tmp1010 if o2_2 else tmp1011) tmp1004 = (tmp1005 if o3_2 else tmp1009) tmp1013 = (tmp1004 if o1_1 else 1) tmp1003 = (tmp1004 if o2_1 else tmp1013) tmp1015 = (tmp1004 if o1_1 else 0) tmp1017 = (1 - (tmp1005 if o3_2 else tmp1009)) tmp1016 = (1 - (1 if o1_1 else tmp1017)) tmp1014 = (tmp1015 if o2_1 else tmp1016) tmp1002 = (tmp1003 if o3_1 else tmp1014) tmp1023 = (1 - (1 if o2_3 else tmp887)) tmp1022 = (1 if o3_3 else tmp1023) tmp1024 = (tmp1022 if o1_2 else 1) tmp1021 = (tmp1022 if o2_2 else tmp1024) tmp1027 = (1 - (1 if o3_3 else tmp1023)) tmp1026 = (1 if o1_2 else tmp1027) tmp1025 = (1 - (1 if o2_2 else tmp1026)) tmp1020 = (tmp1021 if o3_2 else tmp1025) tmp1028 = (tmp1020 if o1_1 else 1) tmp1019 = (tmp1020 if o2_1 else tmp1028) tmp1031 = (1 - (tmp1021 if o3_2 else tmp1025)) tmp1030 = (1 if o1_1 else tmp1031) tmp1029 = (1 - (1 if o2_1 else tmp1030)) tmp1018 = (tmp1019 if o3_1 else tmp1029) tmp1001 = (tmp1002 if sensor_up else tmp1018) tmp1036 = (tmp901 if o2_2 else 0) tmp1035 = (tmp877 if o3_2 else tmp1036) tmp1037 = (tmp1035 if o1_1 else 1) tmp1034 = (tmp1035 if o2_1 else tmp1037) tmp1039 = (tmp1035 if o1_1 else 0) tmp1038 = (tmp1039 if o2_1 else 0) tmp1033 = (tmp1034 if o3_1 else tmp1038) tmp1032 = (tmp1033 if sensor_up else tmp874) tmp1000 = (tmp1001 if sensor_right else tmp1032) tmp956 = (tmp957 if sensor_left else tmp1000) tmp870 = (tmp871 if sensor_down else tmp956) tmp869 = (tmp870 if b else 0) tmp868 = (1 - (tmp869 if s0 else 0)) tmp867 = (1 - (1 if s1 else tmp868)) s1n = tmp867 tmp1051 = (1 if o3_3 else 0) tmp1052 = (tmp1051 if o1_2 else 1) tmp1050 = (tmp1051 if o2_2 else tmp1052) tmp1049 = (tmp1050 if o3_2 else 0) tmp1053 = (tmp1049 if o1_1 else 1) tmp1048 = (tmp1049 if o2_1 else tmp1053) tmp1047 = (tmp1048 if o3_1 else 0) tmp1060 = (1 if o1_3 else 
0) tmp1059 = (1 - (tmp1060 if o2_3 else 0)) tmp1058 = (1 if o3_3 else tmp1059) tmp1061 = (tmp1058 if o1_2 else 1) tmp1057 = (tmp1058 if o2_2 else tmp1061) tmp1064 = (1 - (1 if o3_3 else tmp1059)) tmp1063 = (1 if o1_2 else tmp1064) tmp1062 = (1 - (tmp1063 if o2_2 else tmp1064)) tmp1056 = (tmp1057 if o3_2 else tmp1062) tmp1065 = (tmp1056 if o1_1 else 1) tmp1055 = (tmp1056 if o2_1 else tmp1065) tmp1068 = (1 - (tmp1057 if o3_2 else tmp1062)) tmp1067 = (1 if o1_1 else tmp1068) tmp1066 = (1 - (tmp1067 if o2_1 else tmp1068)) tmp1054 = (tmp1055 if o3_1 else tmp1066) tmp1046 = (tmp1047 if sensor_up else tmp1054) tmp1074 = (tmp1051 if o1_2 else 0) tmp1073 = (tmp1051 if o2_2 else tmp1074) tmp1072 = (tmp1050 if o3_2 else tmp1073) tmp1075 = (tmp1072 if o1_1 else 1) tmp1071 = (tmp1072 if o2_1 else tmp1075) tmp1077 = (tmp1072 if o1_1 else 0) tmp1076 = (tmp1072 if o2_1 else tmp1077) tmp1070 = (tmp1071 if o3_1 else tmp1076) tmp1083 = (1 - (1 if o3_3 else 0)) tmp1082 = (1 if o1_2 else tmp1083) tmp1084 = (1 - (tmp1051 if o1_2 else 0)) tmp1081 = (1 - (tmp1082 if o2_2 else tmp1084)) tmp1080 = (tmp1050 if o3_2 else tmp1081) tmp1085 = (tmp1080 if o1_1 else 1) tmp1079 = (tmp1080 if o2_1 else tmp1085) tmp1088 = (1 - (tmp1050 if o3_2 else tmp1081)) tmp1087 = (1 if o1_1 else tmp1088) tmp1089 = (1 - (tmp1080 if o1_1 else 0)) tmp1086 = (1 - (tmp1087 if o2_1 else tmp1089)) tmp1078 = (tmp1079 if o3_1 else tmp1086) tmp1069 = (tmp1070 if sensor_up else tmp1078) tmp1045 = (tmp1046 if sensor_right else tmp1069) tmp1097 = (tmp1060 if o2_3 else 1) tmp1096 = (1 if o3_3 else tmp1097) tmp1098 = (tmp1096 if o1_2 else 1) tmp1095 = (tmp1096 if o2_2 else tmp1098) tmp1100 = (tmp1096 if o1_2 else 0) tmp1099 = (tmp1100 if o2_2 else tmp1096) tmp1094 = (tmp1095 if o3_2 else tmp1099) tmp1101 = (tmp1094 if o1_1 else 1) tmp1093 = (tmp1094 if o2_1 else tmp1101) tmp1103 = (tmp1094 if o1_1 else 0) tmp1102 = (tmp1103 if o2_1 else tmp1094) tmp1092 = (tmp1093 if o3_1 else tmp1102) tmp1109 = (1 - (1 if o2_3 else 0)) 
tmp1108 = (1 if o3_3 else tmp1109) tmp1110 = (tmp1108 if o1_2 else 1) tmp1107 = (tmp1108 if o2_2 else tmp1110) tmp1112 = (1 - (1 if o3_3 else tmp1109)) tmp1111 = (1 - (1 if o2_2 else tmp1112)) tmp1106 = (tmp1107 if o3_2 else tmp1111) tmp1113 = (tmp1106 if o1_1 else 1) tmp1105 = (tmp1106 if o2_1 else tmp1113) tmp1115 = (1 - (tmp1107 if o3_2 else tmp1111)) tmp1114 = (1 - (1 if o2_1 else tmp1115)) tmp1104 = (tmp1105 if o3_1 else tmp1114) tmp1091 = (tmp1092 if sensor_up else tmp1104) tmp1119 = (tmp1050 if o3_2 else tmp1074) tmp1120 = (tmp1119 if o1_1 else 1) tmp1118 = (tmp1119 if o2_1 else tmp1120) tmp1121 = (tmp1119 if o1_1 else 0) tmp1117 = (tmp1118 if o3_1 else tmp1121) tmp1125 = (1 - (1 if o2_2 else tmp1084)) tmp1124 = (tmp1050 if o3_2 else tmp1125) tmp1126 = (tmp1124 if o1_1 else 1) tmp1123 = (tmp1124 if o2_1 else tmp1126) tmp1128 = (1 - (tmp1124 if o1_1 else 0)) tmp1127 = (1 - (1 if o2_1 else tmp1128)) tmp1122 = (tmp1123 if o3_1 else tmp1127) tmp1116 = (tmp1117 if sensor_up else tmp1122) tmp1090 = (tmp1091 if sensor_right else tmp1116) tmp1044 = (tmp1045 if sensor_left else tmp1090) tmp1138 = (1 - (1 if o1_3 else 0)) tmp1137 = (1 if o2_3 else tmp1138) tmp1136 = (1 if o3_3 else tmp1137) tmp1139 = (tmp1136 if o1_2 else 1) tmp1135 = (tmp1136 if o2_2 else tmp1139) tmp1142 = (1 - (1 if o3_3 else tmp1137)) tmp1141 = (1 - (1 if o1_2 else tmp1142)) tmp1140 = (tmp1136 if o2_2 else tmp1141) tmp1134 = (tmp1135 if o3_2 else tmp1140) tmp1143 = (tmp1134 if o1_1 else 1) tmp1133 = (tmp1134 if o2_1 else tmp1143) tmp1146 = (1 - (tmp1135 if o3_2 else tmp1140)) tmp1145 = (1 - (1 if o1_1 else tmp1146)) tmp1144 = (tmp1134 if o2_1 else tmp1145) tmp1132 = (tmp1133 if o3_1 else tmp1144) tmp1151 = (1 if o3_3 else tmp1138) tmp1152 = (tmp1151 if o1_2 else 1) tmp1150 = (tmp1151 if o2_2 else tmp1152) tmp1154 = (1 - (1 if o3_3 else tmp1138)) tmp1153 = (1 - (1 if o1_2 else tmp1154)) tmp1149 = (tmp1150 if o3_2 else tmp1153) tmp1155 = (tmp1149 if o1_1 else 1) tmp1148 = (tmp1149 if o2_1 else 
tmp1155) tmp1157 = (1 - (tmp1150 if o3_2 else tmp1153)) tmp1156 = (1 - (1 if o1_1 else tmp1157)) tmp1147 = (tmp1148 if o3_1 else tmp1156) tmp1131 = (tmp1132 if sensor_up else tmp1147) tmp1162 = (tmp1051 if o2_2 else 0) tmp1161 = (tmp1050 if o3_2 else tmp1162) tmp1163 = (tmp1161 if o1_1 else 1) tmp1160 = (tmp1161 if o2_1 else tmp1163) tmp1164 = (tmp1161 if o2_1 else 0) tmp1159 = (tmp1160 if o3_1 else tmp1164) tmp1168 = (1 - (tmp1082 if o2_2 else 1)) tmp1167 = (tmp1050 if o3_2 else tmp1168) tmp1169 = (tmp1167 if o1_1 else 1) tmp1166 = (tmp1167 if o2_1 else tmp1169) tmp1172 = (1 - (tmp1050 if o3_2 else tmp1168)) tmp1171 = (1 if o1_1 else tmp1172) tmp1170 = (1 - (tmp1171 if o2_1 else 1)) tmp1165 = (tmp1166 if o3_1 else tmp1170) tmp1158 = (tmp1159 if sensor_up else tmp1165) tmp1130 = (tmp1131 if sensor_right else tmp1158) tmp1180 = (tmp1060 if o2_3 else tmp1138) tmp1179 = (1 if o3_3 else tmp1180) tmp1181 = (tmp1179 if o1_2 else 1) tmp1178 = (tmp1179 if o2_2 else tmp1181) tmp1183 = (tmp1179 if o1_2 else 0) tmp1185 = (1 - (1 if o3_3 else tmp1180)) tmp1184 = (1 - (1 if o1_2 else tmp1185)) tmp1182 = (tmp1183 if o2_2 else tmp1184) tmp1177 = (tmp1178 if o3_2 else tmp1182) tmp1186 = (tmp1177 if o1_1 else 1) tmp1176 = (tmp1177 if o2_1 else tmp1186) tmp1188 = (tmp1177 if o1_1 else 0) tmp1190 = (1 - (tmp1178 if o3_2 else tmp1182)) tmp1189 = (1 - (1 if o1_1 else tmp1190)) tmp1187 = (tmp1188 if o2_1 else tmp1189) tmp1175 = (tmp1176 if o3_1 else tmp1187) tmp1196 = (1 - (1 if o2_3 else tmp1060)) tmp1195 = (1 if o3_3 else tmp1196) tmp1197 = (tmp1195 if o1_2 else 1) tmp1194 = (tmp1195 if o2_2 else tmp1197) tmp1200 = (1 - (1 if o3_3 else tmp1196)) tmp1199 = (1 if o1_2 else tmp1200) tmp1198 = (1 - (1 if o2_2 else tmp1199)) tmp1193 = (tmp1194 if o3_2 else tmp1198) tmp1201 = (tmp1193 if o1_1 else 1) tmp1192 = (tmp1193 if o2_1 else tmp1201) tmp1204 = (1 - (tmp1194 if o3_2 else tmp1198)) tmp1203 = (1 if o1_1 else tmp1204) tmp1202 = (1 - (1 if o2_1 else tmp1203)) tmp1191 = (tmp1192 if o3_1 
else tmp1202) tmp1174 = (tmp1175 if sensor_up else tmp1191) tmp1209 = (tmp1074 if o2_2 else 0) tmp1208 = (tmp1050 if o3_2 else tmp1209) tmp1210 = (tmp1208 if o1_1 else 1) tmp1207 = (tmp1208 if o2_1 else tmp1210) tmp1212 = (tmp1208 if o1_1 else 0) tmp1211 = (tmp1212 if o2_1 else 0) tmp1206 = (tmp1207 if o3_1 else tmp1211) tmp1205 = (tmp1206 if sensor_up else tmp1047) tmp1173 = (tmp1174 if sensor_right else tmp1205) tmp1129 = (tmp1130 if sensor_left else tmp1173) tmp1043 = (tmp1044 if sensor_down else tmp1129) tmp1042 = (1 - (tmp1043 if b else 0)) tmp1041 = (1 if s0 else tmp1042) tmp1040 = (1 - (1 if s1 else tmp1041)) s0n = tmp1040 self.s2 = s2n self.s1 = s1n self.s0 = s0n return [ o3__s, o2__s, o1__s, recovery__s]
envs/grid_world/9x9_illustrative/9x9_illustrative_3.py
class Shield: def __init__(self): self.s2 = 0 self.s1 = 0 self.s0 = 0 def tick(self, inputs): sensor_right = inputs[0] sensor_down = inputs[1] sensor_left = inputs[2] sensor_up = inputs[3] b = inputs[4] o3_1 = inputs[5] o2_1 = inputs[6] o1_1 = inputs[7] o3_2 = inputs[8] o2_2 = inputs[9] o1_2 = inputs[10] o3_3 = inputs[11] o2_3 = inputs[12] o1_3 = inputs[13] s2 = self.s2 s1 = self.s1 s0 = self.s0 tmp11 = (1 - (1 if o1_3 else 0)) tmp10 = (1 if o3_3 else tmp11) tmp12 = (1 - (1 if o1_2 else 0)) tmp9 = (tmp10 if o3_2 else tmp12) tmp13 = (1 - (1 if o1_1 else 0)) tmp8 = (tmp9 if o3_1 else tmp13) tmp7 = (tmp8 if sensor_up else 0) tmp19 = (1 if o1_3 else 0) tmp18 = (1 - (tmp19 if o2_3 else 0)) tmp17 = (1 if o3_3 else tmp18) tmp22 = (1 - (1 if o3_3 else tmp18)) tmp21 = (1 if o1_2 else tmp22) tmp23 = (1 - (tmp17 if o1_2 else 1)) tmp20 = (1 - (tmp21 if o2_2 else tmp23)) tmp16 = (tmp17 if o3_2 else tmp20) tmp26 = (1 - (tmp17 if o3_2 else tmp20)) tmp25 = (1 if o1_1 else tmp26) tmp27 = (1 - (tmp16 if o1_1 else 1)) tmp24 = (1 - (tmp25 if o2_1 else tmp27)) tmp15 = (tmp16 if o3_1 else tmp24) tmp14 = (1 if sensor_up else tmp15) tmp6 = (tmp7 if sensor_right else tmp14) tmp33 = (1 if o1_2 else 0) tmp32 = (1 - (tmp33 if o2_2 else tmp22)) tmp31 = (tmp17 if o3_2 else tmp32) tmp35 = (1 if o1_1 else 0) tmp36 = (1 - (tmp17 if o3_2 else tmp32)) tmp34 = (1 - (tmp35 if o2_1 else tmp36)) tmp30 = (tmp31 if o3_1 else tmp34) tmp29 = (1 if sensor_up else tmp30) tmp40 = (1 - (tmp33 if o2_2 else tmp23)) tmp39 = (tmp17 if o3_2 else tmp40) tmp42 = (1 - (tmp39 if o1_1 else 1)) tmp41 = (1 - (tmp35 if o2_1 else tmp42)) tmp38 = (tmp39 if o3_1 else tmp41) tmp37 = (1 if sensor_up else tmp38) tmp28 = (tmp29 if sensor_right else tmp37) tmp5 = (tmp6 if sensor_left else tmp28) tmp49 = (1 if o2_3 else tmp11) tmp48 = (1 if o3_3 else tmp49) tmp50 = (tmp48 if o2_2 else tmp12) tmp47 = (tmp48 if o3_2 else tmp50) tmp51 = (tmp47 if o2_1 else tmp13) tmp46 = (tmp47 if o3_1 else tmp51) tmp56 = (1 - (1 if o3_3 else tmp11)) 
tmp55 = (1 if o1_2 else tmp56) tmp54 = (1 - (tmp55 if o2_2 else tmp33)) tmp53 = (tmp10 if o3_2 else tmp54) tmp59 = (1 - (tmp10 if o3_2 else tmp54)) tmp58 = (1 if o1_1 else tmp59) tmp57 = (1 - (tmp58 if o2_1 else tmp35)) tmp52 = (tmp53 if o3_1 else tmp57) tmp45 = (1 - (tmp46 if sensor_up else tmp52)) tmp44 = (1 if sensor_right else tmp45) tmp65 = (tmp48 if o1_2 else 1) tmp67 = (1 - (1 if o3_3 else tmp49)) tmp66 = (1 - (1 if o1_2 else tmp67)) tmp64 = (tmp65 if o2_2 else tmp66) tmp63 = (tmp48 if o3_2 else tmp64) tmp69 = (tmp63 if o1_1 else 1) tmp71 = (1 - (tmp48 if o3_2 else tmp64)) tmp70 = (1 - (1 if o1_1 else tmp71)) tmp68 = (tmp69 if o2_1 else tmp70) tmp62 = (tmp63 if o3_1 else tmp68) tmp74 = (1 - (tmp33 if o2_2 else tmp55)) tmp73 = (tmp10 if o3_2 else tmp74) tmp77 = (1 - (tmp10 if o3_2 else tmp74)) tmp76 = (1 if o1_1 else tmp77) tmp75 = (1 - (tmp35 if o2_1 else tmp76)) tmp72 = (tmp73 if o3_1 else tmp75) tmp61 = (tmp62 if sensor_up else tmp72) tmp81 = (tmp65 if o2_2 else tmp12) tmp80 = (tmp48 if o3_2 else tmp81) tmp83 = (tmp80 if o1_1 else 1) tmp82 = (tmp83 if o2_1 else tmp13) tmp79 = (tmp80 if o3_1 else tmp82) tmp78 = (tmp79 if sensor_up else tmp8) tmp60 = (1 - (tmp61 if sensor_right else tmp78)) tmp43 = (1 - (tmp44 if sensor_left else tmp60)) tmp4 = (tmp5 if sensor_down else tmp43) tmp92 = (tmp10 if o1_2 else 1) tmp91 = (tmp10 if o2_2 else tmp92) tmp90 = (tmp91 if o3_2 else tmp12) tmp93 = (tmp90 if o1_1 else 1) tmp89 = (tmp90 if o2_1 else tmp93) tmp88 = (tmp89 if o3_1 else tmp13) tmp98 = (tmp17 if o1_2 else 1) tmp97 = (tmp17 if o2_2 else tmp98) tmp99 = (1 - (tmp21 if o2_2 else tmp22)) tmp96 = (tmp97 if o3_2 else tmp99) tmp100 = (tmp96 if o1_1 else 1) tmp95 = (tmp96 if o2_1 else tmp100) tmp103 = (1 - (tmp97 if o3_2 else tmp99)) tmp102 = (1 if o1_1 else tmp103) tmp101 = (1 - (tmp102 if o2_1 else tmp103)) tmp94 = (tmp95 if o3_1 else tmp101) tmp87 = (tmp88 if sensor_up else tmp94) tmp107 = (tmp97 if o3_2 else tmp20) tmp108 = (tmp107 if o1_1 else 1) tmp106 = (tmp107 
if o2_1 else tmp108) tmp111 = (1 - (tmp97 if o3_2 else tmp20)) tmp110 = (1 if o1_1 else tmp111) tmp112 = (1 - (tmp107 if o1_1 else 1)) tmp109 = (1 - (tmp110 if o2_1 else tmp112)) tmp105 = (tmp106 if o3_1 else tmp109) tmp104 = (1 if sensor_up else tmp105) tmp86 = (tmp87 if sensor_right else tmp104) tmp117 = (tmp97 if o3_2 else tmp32) tmp118 = (tmp117 if o1_1 else 1) tmp116 = (tmp117 if o2_1 else tmp118) tmp120 = (1 - (tmp97 if o3_2 else tmp32)) tmp119 = (1 - (tmp35 if o2_1 else tmp120)) tmp115 = (tmp116 if o3_1 else tmp119) tmp114 = (1 if sensor_up else tmp115) tmp124 = (tmp97 if o3_2 else tmp40) tmp125 = (tmp124 if o1_1 else 1) tmp123 = (tmp124 if o2_1 else tmp125) tmp127 = (1 - (tmp124 if o1_1 else 1)) tmp126 = (1 - (tmp35 if o2_1 else tmp127)) tmp122 = (tmp123 if o3_1 else tmp126) tmp121 = (1 if sensor_up else tmp122) tmp113 = (tmp114 if sensor_right else tmp121) tmp85 = (tmp86 if sensor_left else tmp113) tmp134 = (tmp48 if o2_2 else tmp65) tmp135 = (tmp48 if o2_2 else tmp66) tmp133 = (tmp134 if o3_2 else tmp135) tmp136 = (tmp133 if o1_1 else 1) tmp132 = (tmp133 if o2_1 else tmp136) tmp139 = (1 - (tmp134 if o3_2 else tmp135)) tmp138 = (1 - (1 if o1_1 else tmp139)) tmp137 = (tmp133 if o2_1 else tmp138) tmp131 = (tmp132 if o3_1 else tmp137) tmp143 = (1 - (1 if o1_2 else tmp56)) tmp142 = (tmp91 if o3_2 else tmp143) tmp144 = (tmp142 if o1_1 else 1) tmp141 = (tmp142 if o2_1 else tmp144) tmp146 = (1 - (tmp91 if o3_2 else tmp143)) tmp145 = (1 - (1 if o1_1 else tmp146)) tmp140 = (tmp141 if o3_1 else tmp145) tmp130 = (tmp131 if sensor_up else tmp140) tmp150 = (tmp134 if o3_2 else tmp50) tmp151 = (tmp150 if o1_1 else 1) tmp149 = (tmp150 if o2_1 else tmp151) tmp152 = (tmp150 if o2_1 else tmp13) tmp148 = (tmp149 if o3_1 else tmp152) tmp155 = (tmp91 if o3_2 else tmp54) tmp156 = (tmp155 if o1_1 else 1) tmp154 = (tmp155 if o2_1 else tmp156) tmp159 = (1 - (tmp91 if o3_2 else tmp54)) tmp158 = (1 if o1_1 else tmp159) tmp157 = (1 - (tmp158 if o2_1 else tmp35)) tmp153 = (tmp154 if 
o3_1 else tmp157) tmp147 = (tmp148 if sensor_up else tmp153) tmp129 = (tmp130 if sensor_right else tmp147) tmp164 = (tmp134 if o3_2 else tmp64) tmp165 = (tmp164 if o1_1 else 1) tmp163 = (tmp164 if o2_1 else tmp165) tmp168 = (1 - (tmp134 if o3_2 else tmp64)) tmp167 = (1 - (1 if o1_1 else tmp168)) tmp166 = (tmp165 if o2_1 else tmp167) tmp162 = (tmp163 if o3_1 else tmp166) tmp171 = (tmp91 if o3_2 else tmp74) tmp172 = (tmp171 if o1_1 else 1) tmp170 = (tmp171 if o2_1 else tmp172) tmp175 = (1 - (tmp91 if o3_2 else tmp74)) tmp174 = (1 if o1_1 else tmp175) tmp173 = (1 - (tmp35 if o2_1 else tmp174)) tmp169 = (tmp170 if o3_1 else tmp173) tmp161 = (tmp162 if sensor_up else tmp169) tmp179 = (tmp134 if o3_2 else tmp81) tmp180 = (tmp179 if o1_1 else 1) tmp178 = (tmp179 if o2_1 else tmp180) tmp181 = (tmp180 if o2_1 else tmp13) tmp177 = (tmp178 if o3_1 else tmp181) tmp176 = (tmp177 if sensor_up else tmp88) tmp160 = (tmp161 if sensor_right else tmp176) tmp128 = (tmp129 if sensor_left else tmp160) tmp84 = (tmp85 if sensor_down else tmp128) tmp3 = (tmp4 if b else tmp84) tmp2 = (1 if s0 else tmp3) tmp1 = (1 - (tmp2 if s1 else tmp84)) o1__s = tmp1 tmp192 = (1 - (1 if o2_3 else 0)) tmp191 = (1 if o3_3 else tmp192) tmp193 = (1 - (1 if o2_2 else 0)) tmp190 = (tmp191 if o3_2 else tmp193) tmp194 = (1 - (1 if o2_1 else 0)) tmp189 = (tmp190 if o3_1 else tmp194) tmp188 = (tmp189 if sensor_up else 0) tmp200 = (1 if o1_3 else 0) tmp199 = (1 - (tmp200 if o2_3 else 0)) tmp198 = (1 if o3_3 else tmp199) tmp203 = (1 - (1 if o3_3 else tmp199)) tmp202 = (1 if o1_2 else tmp203) tmp204 = (1 - (tmp198 if o1_2 else 1)) tmp201 = (1 - (tmp202 if o2_2 else tmp204)) tmp197 = (tmp198 if o3_2 else tmp201) tmp207 = (1 - (tmp198 if o3_2 else tmp201)) tmp206 = (1 if o1_1 else tmp207) tmp208 = (1 - (tmp197 if o1_1 else 1)) tmp205 = (1 - (tmp206 if o2_1 else tmp208)) tmp196 = (tmp197 if o3_1 else tmp205) tmp195 = (1 if sensor_up else tmp196) tmp187 = (tmp188 if sensor_right else tmp195) tmp214 = (tmp200 if o2_3 else 
1) tmp213 = (1 if o3_3 else tmp214) tmp216 = (tmp213 if o1_2 else 0) tmp217 = (tmp213 if o1_2 else 1) tmp215 = (tmp216 if o2_2 else tmp217) tmp212 = (tmp213 if o3_2 else tmp215) tmp219 = (tmp212 if o1_1 else 0) tmp220 = (tmp212 if o1_1 else 1) tmp218 = (tmp219 if o2_1 else tmp220) tmp211 = (tmp212 if o3_1 else tmp218) tmp224 = (1 - (tmp191 if o1_2 else 1)) tmp223 = (1 - (1 if o2_2 else tmp224)) tmp222 = (tmp191 if o3_2 else tmp223) tmp226 = (1 - (tmp222 if o1_1 else 1)) tmp225 = (1 - (1 if o2_1 else tmp226)) tmp221 = (tmp222 if o3_1 else tmp225) tmp210 = (1 - (tmp211 if sensor_up else tmp221)) tmp209 = (1 - (1 if sensor_right else tmp210)) tmp186 = (tmp187 if sensor_left else tmp209) tmp233 = (1 - (1 if o1_2 else tmp198)) tmp232 = (1 - (tmp202 if o2_2 else tmp233)) tmp231 = (tmp198 if o3_2 else tmp232) tmp236 = (1 - (tmp198 if o3_2 else tmp232)) tmp235 = (1 if o1_1 else tmp236) tmp237 = (1 - (1 if o1_1 else tmp231)) tmp234 = (1 - (tmp235 if o2_1 else tmp237)) tmp230 = (tmp231 if o3_1 else tmp234) tmp229 = (1 if sensor_up else tmp230) tmp241 = (1 - (tmp202 if o2_2 else 0)) tmp240 = (tmp198 if o3_2 else tmp241) tmp244 = (1 - (tmp198 if o3_2 else tmp241)) tmp243 = (1 if o1_1 else tmp244) tmp242 = (1 - (tmp243 if o2_1 else 0)) tmp239 = (tmp240 if o3_1 else tmp242) tmp238 = (1 if sensor_up else tmp239) tmp228 = (tmp229 if sensor_right else tmp238) tmp251 = (1 - (1 if o1_3 else 0)) tmp250 = (1 if o2_3 else tmp251) tmp249 = (1 if o3_3 else tmp250) tmp253 = (tmp249 if o1_2 else 1) tmp255 = (1 - (1 if o3_3 else tmp250)) tmp254 = (1 - (1 if o1_2 else tmp255)) tmp252 = (tmp253 if o2_2 else tmp254) tmp248 = (tmp249 if o3_2 else tmp252) tmp257 = (tmp248 if o1_1 else 1) tmp259 = (1 - (tmp249 if o3_2 else tmp252)) tmp258 = (1 - (1 if o1_1 else tmp259)) tmp256 = (tmp257 if o2_1 else tmp258) tmp247 = (tmp248 if o3_1 else tmp256) tmp262 = (1 if o2_2 else tmp254) tmp261 = (tmp249 if o3_2 else tmp262) tmp265 = (1 - (tmp249 if o3_2 else tmp262)) tmp264 = (1 - (1 if o1_1 else tmp265)) 
tmp263 = (1 if o2_1 else tmp264) tmp260 = (tmp261 if o3_1 else tmp263) tmp246 = (tmp247 if sensor_up else tmp260) tmp269 = (tmp216 if o2_2 else 1) tmp268 = (tmp213 if o3_2 else tmp269) tmp271 = (tmp268 if o1_1 else 0) tmp270 = (tmp271 if o2_1 else 1) tmp267 = (tmp268 if o3_1 else tmp270) tmp266 = (1 - (tmp267 if sensor_up else tmp189)) tmp245 = (1 - (tmp246 if sensor_right else tmp266)) tmp227 = (tmp228 if sensor_left else tmp245) tmp185 = (tmp186 if sensor_down else tmp227) tmp280 = (tmp191 if o1_2 else 1) tmp279 = (tmp191 if o2_2 else tmp280) tmp278 = (tmp279 if o3_2 else tmp193) tmp281 = (tmp278 if o1_1 else 1) tmp277 = (tmp278 if o2_1 else tmp281) tmp276 = (tmp277 if o3_1 else tmp194) tmp286 = (tmp198 if o1_2 else 1) tmp285 = (tmp198 if o2_2 else tmp286) tmp287 = (1 - (tmp202 if o2_2 else tmp203)) tmp284 = (tmp285 if o3_2 else tmp287) tmp288 = (tmp284 if o1_1 else 1) tmp283 = (tmp284 if o2_1 else tmp288) tmp291 = (1 - (tmp285 if o3_2 else tmp287)) tmp290 = (1 if o1_1 else tmp291) tmp289 = (1 - (tmp290 if o2_1 else tmp291)) tmp282 = (tmp283 if o3_1 else tmp289) tmp275 = (tmp276 if sensor_up else tmp282) tmp295 = (tmp285 if o3_2 else tmp201) tmp296 = (tmp295 if o1_1 else 1) tmp294 = (tmp295 if o2_1 else tmp296) tmp299 = (1 - (tmp285 if o3_2 else tmp201)) tmp298 = (1 if o1_1 else tmp299) tmp300 = (1 - (tmp295 if o1_1 else 1)) tmp297 = (1 - (tmp298 if o2_1 else tmp300)) tmp293 = (tmp294 if o3_1 else tmp297) tmp292 = (1 if sensor_up else tmp293) tmp274 = (tmp275 if sensor_right else tmp292) tmp306 = (tmp213 if o2_2 else tmp217) tmp307 = (tmp216 if o2_2 else tmp213) tmp305 = (tmp306 if o3_2 else tmp307) tmp308 = (tmp305 if o1_1 else 1) tmp304 = (tmp305 if o2_1 else tmp308) tmp310 = (tmp305 if o1_1 else 0) tmp309 = (tmp310 if o2_1 else tmp305) tmp303 = (tmp304 if o3_1 else tmp309) tmp315 = (1 - (1 if o3_3 else tmp192)) tmp314 = (1 - (1 if o2_2 else tmp315)) tmp313 = (tmp279 if o3_2 else tmp314) tmp316 = (tmp313 if o1_1 else 1) tmp312 = (tmp313 if o2_1 else tmp316) 
tmp318 = (1 - (tmp279 if o3_2 else tmp314)) tmp317 = (1 - (1 if o2_1 else tmp318)) tmp311 = (tmp312 if o3_1 else tmp317) tmp302 = (tmp303 if sensor_up else tmp311) tmp322 = (tmp306 if o3_2 else tmp215) tmp323 = (tmp322 if o1_1 else 1) tmp321 = (tmp322 if o2_1 else tmp323) tmp325 = (tmp322 if o1_1 else 0) tmp324 = (tmp325 if o2_1 else tmp323) tmp320 = (tmp321 if o3_1 else tmp324) tmp328 = (tmp279 if o3_2 else tmp223) tmp329 = (tmp328 if o1_1 else 1) tmp327 = (tmp328 if o2_1 else tmp329) tmp331 = (1 - (tmp328 if o1_1 else 1)) tmp330 = (1 - (1 if o2_1 else tmp331)) tmp326 = (tmp327 if o3_1 else tmp330) tmp319 = (tmp320 if sensor_up else tmp326) tmp301 = (tmp302 if sensor_right else tmp319) tmp273 = (tmp274 if sensor_left else tmp301) tmp337 = (tmp285 if o3_2 else tmp232) tmp338 = (tmp337 if o1_1 else 1) tmp336 = (tmp337 if o2_1 else tmp338) tmp341 = (1 - (tmp285 if o3_2 else tmp232)) tmp340 = (1 if o1_1 else tmp341) tmp342 = (1 - (1 if o1_1 else tmp337)) tmp339 = (1 - (tmp340 if o2_1 else tmp342)) tmp335 = (tmp336 if o3_1 else tmp339) tmp334 = (1 if sensor_up else tmp335) tmp346 = (tmp285 if o3_2 else tmp241) tmp347 = (tmp346 if o1_1 else 1) tmp345 = (tmp346 if o2_1 else tmp347) tmp350 = (1 - (tmp285 if o3_2 else tmp241)) tmp349 = (1 if o1_1 else tmp350) tmp348 = (1 - (tmp349 if o2_1 else 0)) tmp344 = (tmp345 if o3_1 else tmp348) tmp343 = (1 if sensor_up else tmp344) tmp333 = (tmp334 if sensor_right else tmp343) tmp357 = (1 if o1_2 else tmp213) tmp356 = (tmp216 if o2_2 else tmp357) tmp355 = (tmp306 if o3_2 else tmp356) tmp358 = (tmp355 if o1_1 else 1) tmp354 = (tmp355 if o2_1 else tmp358) tmp360 = (tmp355 if o1_1 else 0) tmp361 = (1 if o1_1 else tmp355) tmp359 = (tmp360 if o2_1 else tmp361) tmp353 = (tmp354 if o3_1 else tmp359) tmp366 = (1 - (1 if o1_2 else tmp191)) tmp365 = (1 - (1 if o2_2 else tmp366)) tmp364 = (tmp279 if o3_2 else tmp365) tmp367 = (tmp364 if o1_1 else 1) tmp363 = (tmp364 if o2_1 else tmp367) tmp369 = (1 - (1 if o1_1 else tmp364)) tmp368 = (1 - (1 
if o2_1 else tmp369)) tmp362 = (tmp363 if o3_1 else tmp368) tmp352 = (tmp353 if sensor_up else tmp362) tmp373 = (tmp306 if o3_2 else tmp269) tmp374 = (tmp373 if o1_1 else 1) tmp372 = (tmp373 if o2_1 else tmp374) tmp376 = (tmp373 if o1_1 else 0) tmp375 = (tmp376 if o2_1 else 1) tmp371 = (tmp372 if o3_1 else tmp375) tmp370 = (tmp371 if sensor_up else tmp276) tmp351 = (tmp352 if sensor_right else tmp370) tmp332 = (tmp333 if sensor_left else tmp351) tmp272 = (tmp273 if sensor_down else tmp332) tmp184 = (tmp185 if b else tmp272) tmp183 = (1 if s0 else tmp184) tmp182 = (1 - (tmp183 if s1 else tmp272)) o2__s = tmp182 tmp387 = (1 if o3_3 else 0) tmp388 = (tmp387 if o1_2 else 1) tmp386 = (tmp387 if o2_2 else tmp388) tmp385 = (tmp386 if o3_2 else 0) tmp389 = (tmp385 if o1_1 else 1) tmp384 = (tmp385 if o2_1 else tmp389) tmp383 = (tmp384 if o3_1 else 0) tmp396 = (1 if o1_3 else 0) tmp395 = (1 - (tmp396 if o2_3 else 0)) tmp394 = (1 if o3_3 else tmp395) tmp397 = (tmp394 if o1_2 else 1) tmp393 = (tmp394 if o2_2 else tmp397) tmp400 = (1 - (1 if o3_3 else tmp395)) tmp399 = (1 if o1_2 else tmp400) tmp398 = (1 - (tmp399 if o2_2 else tmp400)) tmp392 = (tmp393 if o3_2 else tmp398) tmp401 = (tmp392 if o1_1 else 1) tmp391 = (tmp392 if o2_1 else tmp401) tmp404 = (1 - (tmp393 if o3_2 else tmp398)) tmp403 = (1 if o1_1 else tmp404) tmp402 = (1 - (tmp403 if o2_1 else tmp404)) tmp390 = (tmp391 if o3_1 else tmp402) tmp382 = (tmp383 if sensor_up else tmp390) tmp410 = (tmp387 if o1_2 else 0) tmp409 = (tmp387 if o2_2 else tmp410) tmp408 = (tmp386 if o3_2 else tmp409) tmp411 = (tmp408 if o1_1 else 1) tmp407 = (tmp408 if o2_1 else tmp411) tmp413 = (tmp408 if o1_1 else 0) tmp412 = (tmp408 if o2_1 else tmp413) tmp406 = (tmp407 if o3_1 else tmp412) tmp419 = (1 - (1 if o3_3 else 0)) tmp418 = (1 if o1_2 else tmp419) tmp420 = (1 - (tmp387 if o1_2 else 0)) tmp417 = (1 - (tmp418 if o2_2 else tmp420)) tmp416 = (tmp386 if o3_2 else tmp417) tmp421 = (tmp416 if o1_1 else 1) tmp415 = (tmp416 if o2_1 else tmp421) 
tmp424 = (1 - (tmp386 if o3_2 else tmp417)) tmp423 = (1 if o1_1 else tmp424) tmp425 = (1 - (tmp416 if o1_1 else 0)) tmp422 = (1 - (tmp423 if o2_1 else tmp425)) tmp414 = (tmp415 if o3_1 else tmp422) tmp405 = (tmp406 if sensor_up else tmp414) tmp381 = (tmp382 if sensor_right else tmp405) tmp433 = (tmp396 if o2_3 else 1) tmp432 = (1 if o3_3 else tmp433) tmp434 = (tmp432 if o1_2 else 1) tmp431 = (tmp432 if o2_2 else tmp434) tmp436 = (tmp432 if o1_2 else 0) tmp435 = (tmp436 if o2_2 else tmp432) tmp430 = (tmp431 if o3_2 else tmp435) tmp437 = (tmp430 if o1_1 else 1) tmp429 = (tmp430 if o2_1 else tmp437) tmp439 = (tmp430 if o1_1 else 0) tmp438 = (tmp439 if o2_1 else tmp430) tmp428 = (tmp429 if o3_1 else tmp438) tmp445 = (1 - (1 if o2_3 else 0)) tmp444 = (1 if o3_3 else tmp445) tmp446 = (tmp444 if o1_2 else 1) tmp443 = (tmp444 if o2_2 else tmp446) tmp448 = (1 - (1 if o3_3 else tmp445)) tmp447 = (1 - (1 if o2_2 else tmp448)) tmp442 = (tmp443 if o3_2 else tmp447) tmp449 = (tmp442 if o1_1 else 1) tmp441 = (tmp442 if o2_1 else tmp449) tmp451 = (1 - (tmp443 if o3_2 else tmp447)) tmp450 = (1 - (1 if o2_1 else tmp451)) tmp440 = (tmp441 if o3_1 else tmp450) tmp427 = (tmp428 if sensor_up else tmp440) tmp455 = (tmp386 if o3_2 else tmp410) tmp456 = (tmp455 if o1_1 else 1) tmp454 = (tmp455 if o2_1 else tmp456) tmp457 = (tmp455 if o1_1 else 0) tmp453 = (tmp454 if o3_1 else tmp457) tmp461 = (1 - (1 if o2_2 else tmp420)) tmp460 = (tmp386 if o3_2 else tmp461) tmp462 = (tmp460 if o1_1 else 1) tmp459 = (tmp460 if o2_1 else tmp462) tmp464 = (1 - (tmp460 if o1_1 else 0)) tmp463 = (1 - (1 if o2_1 else tmp464)) tmp458 = (tmp459 if o3_1 else tmp463) tmp452 = (tmp453 if sensor_up else tmp458) tmp426 = (tmp427 if sensor_right else tmp452) tmp380 = (tmp381 if sensor_left else tmp426) tmp474 = (1 - (1 if o1_3 else 0)) tmp473 = (1 if o2_3 else tmp474) tmp472 = (1 if o3_3 else tmp473) tmp475 = (tmp472 if o1_2 else 1) tmp471 = (tmp472 if o2_2 else tmp475) tmp478 = (1 - (1 if o3_3 else tmp473)) tmp477 = 
(1 - (1 if o1_2 else tmp478)) tmp476 = (tmp472 if o2_2 else tmp477) tmp470 = (tmp471 if o3_2 else tmp476) tmp479 = (tmp470 if o1_1 else 1) tmp469 = (tmp470 if o2_1 else tmp479) tmp482 = (1 - (tmp471 if o3_2 else tmp476)) tmp481 = (1 - (1 if o1_1 else tmp482)) tmp480 = (tmp470 if o2_1 else tmp481) tmp468 = (tmp469 if o3_1 else tmp480) tmp487 = (1 if o3_3 else tmp474) tmp488 = (tmp487 if o1_2 else 1) tmp486 = (tmp487 if o2_2 else tmp488) tmp490 = (1 - (1 if o3_3 else tmp474)) tmp489 = (1 - (1 if o1_2 else tmp490)) tmp485 = (tmp486 if o3_2 else tmp489) tmp491 = (tmp485 if o1_1 else 1) tmp484 = (tmp485 if o2_1 else tmp491) tmp493 = (1 - (tmp486 if o3_2 else tmp489)) tmp492 = (1 - (1 if o1_1 else tmp493)) tmp483 = (tmp484 if o3_1 else tmp492) tmp467 = (tmp468 if sensor_up else tmp483) tmp498 = (tmp387 if o2_2 else 0) tmp497 = (tmp386 if o3_2 else tmp498) tmp499 = (tmp497 if o1_1 else 1) tmp496 = (tmp497 if o2_1 else tmp499) tmp500 = (tmp497 if o2_1 else 0) tmp495 = (tmp496 if o3_1 else tmp500) tmp504 = (1 - (tmp418 if o2_2 else 1)) tmp503 = (tmp386 if o3_2 else tmp504) tmp505 = (tmp503 if o1_1 else 1) tmp502 = (tmp503 if o2_1 else tmp505) tmp508 = (1 - (tmp386 if o3_2 else tmp504)) tmp507 = (1 if o1_1 else tmp508) tmp506 = (1 - (tmp507 if o2_1 else 1)) tmp501 = (tmp502 if o3_1 else tmp506) tmp494 = (tmp495 if sensor_up else tmp501) tmp466 = (tmp467 if sensor_right else tmp494) tmp516 = (tmp396 if o2_3 else tmp474) tmp515 = (1 if o3_3 else tmp516) tmp517 = (tmp515 if o1_2 else 1) tmp514 = (tmp515 if o2_2 else tmp517) tmp519 = (tmp515 if o1_2 else 0) tmp521 = (1 - (1 if o3_3 else tmp516)) tmp520 = (1 - (1 if o1_2 else tmp521)) tmp518 = (tmp519 if o2_2 else tmp520) tmp513 = (tmp514 if o3_2 else tmp518) tmp522 = (tmp513 if o1_1 else 1) tmp512 = (tmp513 if o2_1 else tmp522) tmp524 = (tmp513 if o1_1 else 0) tmp526 = (1 - (tmp514 if o3_2 else tmp518)) tmp525 = (1 - (1 if o1_1 else tmp526)) tmp523 = (tmp524 if o2_1 else tmp525) tmp511 = (tmp512 if o3_1 else tmp523) tmp532 = (1 
- (1 if o2_3 else tmp396)) tmp531 = (1 if o3_3 else tmp532) tmp533 = (tmp531 if o1_2 else 1) tmp530 = (tmp531 if o2_2 else tmp533) tmp536 = (1 - (1 if o3_3 else tmp532)) tmp535 = (1 if o1_2 else tmp536) tmp534 = (1 - (1 if o2_2 else tmp535)) tmp529 = (tmp530 if o3_2 else tmp534) tmp537 = (tmp529 if o1_1 else 1) tmp528 = (tmp529 if o2_1 else tmp537) tmp540 = (1 - (tmp530 if o3_2 else tmp534)) tmp539 = (1 if o1_1 else tmp540) tmp538 = (1 - (1 if o2_1 else tmp539)) tmp527 = (tmp528 if o3_1 else tmp538) tmp510 = (tmp511 if sensor_up else tmp527) tmp545 = (tmp410 if o2_2 else 0) tmp544 = (tmp386 if o3_2 else tmp545) tmp546 = (tmp544 if o1_1 else 1) tmp543 = (tmp544 if o2_1 else tmp546) tmp548 = (tmp544 if o1_1 else 0) tmp547 = (tmp548 if o2_1 else 0) tmp542 = (tmp543 if o3_1 else tmp547) tmp541 = (tmp542 if sensor_up else tmp383) tmp509 = (tmp510 if sensor_right else tmp541) tmp465 = (tmp466 if sensor_left else tmp509) tmp379 = (1 - (tmp380 if sensor_down else tmp465)) tmp378 = (1 if b else tmp379) tmp377 = (1 - (tmp378 if s1 else tmp379)) o3__s = tmp377 recovery__s = 0 tmp558 = (1 if o3_3 else 0) tmp557 = (tmp558 if o3_2 else 0) tmp556 = (tmp557 if o3_1 else 0) tmp563 = (1 if o1_3 else 0) tmp562 = (1 - (tmp563 if o2_3 else 0)) tmp561 = (1 if o3_3 else tmp562) tmp566 = (1 - (1 if o3_3 else tmp562)) tmp565 = (1 if o1_2 else tmp566) tmp564 = (1 - (tmp565 if o2_2 else tmp566)) tmp560 = (tmp561 if o3_2 else tmp564) tmp569 = (1 - (tmp561 if o3_2 else tmp564)) tmp568 = (1 if o1_1 else tmp569) tmp567 = (1 - (tmp568 if o2_1 else tmp569)) tmp559 = (tmp560 if o3_1 else tmp567) tmp555 = (tmp556 if sensor_up else tmp559) tmp574 = (1 if o2_3 else tmp563) tmp573 = (1 if o3_3 else tmp574) tmp576 = (tmp573 if o1_2 else 0) tmp575 = (tmp573 if o2_2 else tmp576) tmp572 = (tmp573 if o3_2 else tmp575) tmp578 = (tmp572 if o1_1 else 0) tmp577 = (tmp572 if o2_1 else tmp578) tmp571 = (tmp572 if o3_1 else tmp577) tmp583 = (1 - (1 if o1_3 else 0)) tmp582 = (1 - (tmp563 if o2_3 else tmp583)) 
tmp581 = (1 if o3_3 else tmp582) tmp586 = (1 - (1 if o3_3 else tmp582)) tmp585 = (1 if o1_2 else tmp586) tmp587 = (1 - (tmp581 if o1_2 else 0)) tmp584 = (1 - (tmp585 if o2_2 else tmp587)) tmp580 = (tmp581 if o3_2 else tmp584) tmp590 = (1 - (tmp581 if o3_2 else tmp584)) tmp589 = (1 if o1_1 else tmp590) tmp591 = (1 - (tmp580 if o1_1 else 0)) tmp588 = (1 - (tmp589 if o2_1 else tmp591)) tmp579 = (tmp580 if o3_1 else tmp588) tmp570 = (tmp571 if sensor_up else tmp579) tmp554 = (tmp555 if sensor_right else tmp570) tmp597 = (tmp563 if o2_3 else 1) tmp596 = (1 if o3_3 else tmp597) tmp599 = (tmp596 if o1_2 else 0) tmp598 = (tmp599 if o2_2 else tmp596) tmp595 = (tmp596 if o3_2 else tmp598) tmp601 = (tmp595 if o1_1 else 0) tmp600 = (tmp601 if o2_1 else tmp595) tmp594 = (tmp595 if o3_1 else tmp600) tmp605 = (1 - (1 if o2_3 else 0)) tmp604 = (1 if o3_3 else tmp605) tmp607 = (1 - (1 if o3_3 else tmp605)) tmp606 = (1 - (1 if o2_2 else tmp607)) tmp603 = (tmp604 if o3_2 else tmp606) tmp609 = (1 - (tmp604 if o3_2 else tmp606)) tmp608 = (1 - (1 if o2_1 else tmp609)) tmp602 = (tmp603 if o3_1 else tmp608) tmp593 = (tmp594 if sensor_up else tmp602) tmp613 = (1 if o3_3 else tmp563) tmp614 = (tmp613 if o1_2 else 0) tmp612 = (tmp613 if o3_2 else tmp614) tmp615 = (tmp612 if o1_1 else 0) tmp611 = (tmp612 if o3_1 else tmp615) tmp619 = (1 - (1 if o2_3 else tmp583)) tmp618 = (1 if o3_3 else tmp619) tmp621 = (1 - (tmp618 if o1_2 else 0)) tmp620 = (1 - (1 if o2_2 else tmp621)) tmp617 = (tmp618 if o3_2 else tmp620) tmp623 = (1 - (tmp617 if o1_1 else 0)) tmp622 = (1 - (1 if o2_1 else tmp623)) tmp616 = (tmp617 if o3_1 else tmp622) tmp610 = (tmp611 if sensor_up else tmp616) tmp592 = (tmp593 if sensor_right else tmp610) tmp553 = (tmp554 if sensor_left else tmp592) tmp630 = (1 if o2_3 else tmp583) tmp629 = (1 if o3_3 else tmp630) tmp633 = (1 - (1 if o3_3 else tmp630)) tmp632 = (1 - (1 if o1_2 else tmp633)) tmp631 = (tmp629 if o2_2 else tmp632) tmp628 = (tmp629 if o3_2 else tmp631) tmp636 = (1 - (tmp629 
if o3_2 else tmp631)) tmp635 = (1 - (1 if o1_1 else tmp636)) tmp634 = (tmp628 if o2_1 else tmp635) tmp627 = (tmp628 if o3_1 else tmp634) tmp639 = (1 if o3_3 else tmp583) tmp641 = (1 - (1 if o3_3 else tmp583)) tmp640 = (1 - (1 if o1_2 else tmp641)) tmp638 = (tmp639 if o3_2 else tmp640) tmp643 = (1 - (tmp639 if o3_2 else tmp640)) tmp642 = (1 - (1 if o1_1 else tmp643)) tmp637 = (tmp638 if o3_1 else tmp642) tmp626 = (tmp627 if sensor_up else tmp637) tmp648 = (1 if o2_3 else 0) tmp647 = (1 if o3_3 else tmp648) tmp649 = (tmp647 if o2_2 else 0) tmp646 = (tmp647 if o3_2 else tmp649) tmp650 = (tmp646 if o2_1 else 0) tmp645 = (tmp646 if o3_1 else tmp650) tmp654 = (1 - (tmp563 if o2_3 else 1)) tmp653 = (1 if o3_3 else tmp654) tmp657 = (1 - (1 if o3_3 else tmp654)) tmp656 = (1 if o1_2 else tmp657) tmp655 = (1 - (tmp656 if o2_2 else 1)) tmp652 = (tmp653 if o3_2 else tmp655) tmp660 = (1 - (tmp653 if o3_2 else tmp655)) tmp659 = (1 if o1_1 else tmp660) tmp658 = (1 - (tmp659 if o2_1 else 1)) tmp651 = (tmp652 if o3_1 else tmp658) tmp644 = (tmp645 if sensor_up else tmp651) tmp625 = (tmp626 if sensor_right else tmp644) tmp666 = (tmp563 if o2_3 else tmp583) tmp665 = (1 if o3_3 else tmp666) tmp668 = (tmp665 if o1_2 else 0) tmp670 = (1 - (1 if o3_3 else tmp666)) tmp669 = (1 - (1 if o1_2 else tmp670)) tmp667 = (tmp668 if o2_2 else tmp669) tmp664 = (tmp665 if o3_2 else tmp667) tmp672 = (tmp664 if o1_1 else 0) tmp674 = (1 - (tmp665 if o3_2 else tmp667)) tmp673 = (1 - (1 if o1_1 else tmp674)) tmp671 = (tmp672 if o2_1 else tmp673) tmp663 = (tmp664 if o3_1 else tmp671) tmp678 = (1 - (1 if o2_3 else tmp563)) tmp677 = (1 if o3_3 else tmp678) tmp681 = (1 - (1 if o3_3 else tmp678)) tmp680 = (1 if o1_2 else tmp681) tmp679 = (1 - (1 if o2_2 else tmp680)) tmp676 = (tmp677 if o3_2 else tmp679) tmp684 = (1 - (tmp677 if o3_2 else tmp679)) tmp683 = (1 if o1_1 else tmp684) tmp682 = (1 - (1 if o2_1 else tmp683)) tmp675 = (tmp676 if o3_1 else tmp682) tmp662 = (tmp663 if sensor_up else tmp675) tmp689 = 
(tmp563 if o2_3 else 0) tmp688 = (1 if o3_3 else tmp689) tmp691 = (tmp688 if o1_2 else 0) tmp690 = (tmp691 if o2_2 else 0) tmp687 = (tmp688 if o3_2 else tmp690) tmp693 = (tmp687 if o1_1 else 0) tmp692 = (tmp693 if o2_1 else 0) tmp686 = (tmp687 if o3_1 else tmp692) tmp685 = (tmp686 if sensor_up else tmp556) tmp661 = (tmp662 if sensor_right else tmp685) tmp624 = (tmp625 if sensor_left else tmp661) tmp552 = (tmp553 if sensor_down else tmp624) tmp702 = (tmp574 if o3_3 else 0) tmp703 = (tmp702 if o1_2 else 0) tmp701 = (tmp702 if o2_2 else tmp703) tmp700 = (tmp701 if o3_2 else 0) tmp704 = (tmp700 if o1_1 else 0) tmp699 = (tmp700 if o2_1 else tmp704) tmp698 = (tmp699 if o3_1 else 0) tmp709 = (tmp574 if o3_3 else tmp562) tmp710 = (tmp709 if o1_2 else 0) tmp708 = (tmp709 if o2_2 else tmp710) tmp713 = (1 - (tmp574 if o3_3 else tmp562)) tmp712 = (1 if o1_2 else tmp713) tmp711 = (1 - (tmp712 if o2_2 else tmp713)) tmp707 = (tmp708 if o3_2 else tmp711) tmp714 = (tmp707 if o1_1 else 0) tmp706 = (tmp707 if o2_1 else tmp714) tmp717 = (1 - (tmp708 if o3_2 else tmp711)) tmp716 = (1 if o1_1 else tmp717) tmp715 = (1 - (tmp716 if o2_1 else tmp717)) tmp705 = (tmp706 if o3_1 else tmp715) tmp697 = (tmp698 if sensor_up else tmp705) tmp721 = (tmp574 if o1_2 else 0) tmp720 = (tmp574 if o2_2 else tmp721) tmp722 = (tmp720 if o1_1 else 0) tmp719 = (tmp720 if o2_1 else tmp722) tmp727 = (tmp574 if o3_3 else tmp582) tmp728 = (tmp727 if o1_2 else 0) tmp726 = (tmp727 if o2_2 else tmp728) tmp731 = (1 - (tmp574 if o3_3 else tmp582)) tmp730 = (1 if o1_2 else tmp731) tmp732 = (1 - (tmp727 if o1_2 else 0)) tmp729 = (1 - (tmp730 if o2_2 else tmp732)) tmp725 = (tmp726 if o3_2 else tmp729) tmp733 = (tmp725 if o1_1 else 0) tmp724 = (tmp725 if o2_1 else tmp733) tmp736 = (1 - (tmp726 if o3_2 else tmp729)) tmp735 = (1 if o1_1 else tmp736) tmp737 = (1 - (tmp725 if o1_1 else 0)) tmp734 = (1 - (tmp735 if o2_1 else tmp737)) tmp723 = (tmp724 if o3_1 else tmp734) tmp718 = (tmp719 if sensor_up else tmp723) tmp696 = 
(tmp697 if sensor_right else tmp718) tmp744 = (tmp574 if o3_3 else tmp597) tmp745 = (tmp744 if o1_2 else 0) tmp743 = (tmp744 if o2_2 else tmp745) tmp746 = (tmp745 if o2_2 else tmp744) tmp742 = (tmp743 if o3_2 else tmp746) tmp747 = (tmp742 if o1_1 else 0) tmp741 = (tmp742 if o2_1 else tmp747) tmp748 = (tmp747 if o2_1 else tmp742) tmp740 = (tmp741 if o3_1 else tmp748) tmp753 = (tmp574 if o3_3 else tmp605) tmp754 = (tmp753 if o1_2 else 0) tmp752 = (tmp753 if o2_2 else tmp754) tmp756 = (1 - (tmp574 if o3_3 else tmp605)) tmp755 = (1 - (1 if o2_2 else tmp756)) tmp751 = (tmp752 if o3_2 else tmp755) tmp757 = (tmp751 if o1_1 else 0) tmp750 = (tmp751 if o2_1 else tmp757) tmp759 = (1 - (tmp752 if o3_2 else tmp755)) tmp758 = (1 - (1 if o2_1 else tmp759)) tmp749 = (tmp750 if o3_1 else tmp758) tmp739 = (tmp740 if sensor_up else tmp749) tmp765 = (tmp574 if o3_3 else tmp563) tmp766 = (tmp765 if o1_2 else 0) tmp764 = (tmp765 if o2_2 else tmp766) tmp763 = (tmp764 if o3_2 else tmp766) tmp767 = (tmp763 if o1_1 else 0) tmp762 = (tmp763 if o2_1 else tmp767) tmp761 = (tmp762 if o3_1 else tmp767) tmp772 = (tmp574 if o3_3 else tmp619) tmp773 = (tmp772 if o1_2 else 0) tmp771 = (tmp772 if o2_2 else tmp773) tmp775 = (1 - (tmp772 if o1_2 else 0)) tmp774 = (1 - (1 if o2_2 else tmp775)) tmp770 = (tmp771 if o3_2 else tmp774) tmp776 = (tmp770 if o1_1 else 0) tmp769 = (tmp770 if o2_1 else tmp776) tmp778 = (1 - (tmp770 if o1_1 else 0)) tmp777 = (1 - (1 if o2_1 else tmp778)) tmp768 = (tmp769 if o3_1 else tmp777) tmp760 = (tmp761 if sensor_up else tmp768) tmp738 = (tmp739 if sensor_right else tmp760) tmp695 = (tmp696 if sensor_left else tmp738) tmp786 = (tmp574 if o3_3 else tmp630) tmp787 = (tmp786 if o1_2 else 0) tmp785 = (tmp786 if o2_2 else tmp787) tmp790 = (1 - (tmp574 if o3_3 else tmp630)) tmp789 = (1 - (1 if o1_2 else tmp790)) tmp788 = (tmp786 if o2_2 else tmp789) tmp784 = (tmp785 if o3_2 else tmp788) tmp791 = (tmp784 if o1_1 else 0) tmp783 = (tmp784 if o2_1 else tmp791) tmp794 = (1 - (tmp785 if 
o3_2 else tmp788)) tmp793 = (1 - (1 if o1_1 else tmp794)) tmp792 = (tmp784 if o2_1 else tmp793) tmp782 = (tmp783 if o3_1 else tmp792) tmp799 = (tmp574 if o3_3 else tmp583) tmp800 = (tmp799 if o1_2 else 0) tmp798 = (tmp799 if o2_2 else tmp800) tmp802 = (1 - (tmp574 if o3_3 else tmp583)) tmp801 = (1 - (1 if o1_2 else tmp802)) tmp797 = (tmp798 if o3_2 else tmp801) tmp803 = (tmp797 if o1_1 else 0) tmp796 = (tmp797 if o2_1 else tmp803) tmp805 = (1 - (tmp798 if o3_2 else tmp801)) tmp804 = (1 - (1 if o1_1 else tmp805)) tmp795 = (tmp796 if o3_1 else tmp804) tmp781 = (tmp782 if sensor_up else tmp795) tmp811 = (tmp574 if o3_3 else tmp648) tmp812 = (tmp811 if o1_2 else 0) tmp810 = (tmp811 if o2_2 else tmp812) tmp813 = (tmp811 if o2_2 else 0) tmp809 = (tmp810 if o3_2 else tmp813) tmp814 = (tmp809 if o1_1 else 0) tmp808 = (tmp809 if o2_1 else tmp814) tmp815 = (tmp809 if o2_1 else 0) tmp807 = (tmp808 if o3_1 else tmp815) tmp820 = (tmp574 if o3_3 else tmp654) tmp821 = (tmp820 if o1_2 else 0) tmp819 = (tmp820 if o2_2 else tmp821) tmp824 = (1 - (tmp574 if o3_3 else tmp654)) tmp823 = (1 if o1_2 else tmp824) tmp822 = (1 - (tmp823 if o2_2 else 1)) tmp818 = (tmp819 if o3_2 else tmp822) tmp825 = (tmp818 if o1_1 else 0) tmp817 = (tmp818 if o2_1 else tmp825) tmp828 = (1 - (tmp819 if o3_2 else tmp822)) tmp827 = (1 if o1_1 else tmp828) tmp826 = (1 - (tmp827 if o2_1 else 1)) tmp816 = (tmp817 if o3_1 else tmp826) tmp806 = (tmp807 if sensor_up else tmp816) tmp780 = (tmp781 if sensor_right else tmp806) tmp835 = (tmp574 if o3_3 else tmp666) tmp836 = (tmp835 if o1_2 else 0) tmp834 = (tmp835 if o2_2 else tmp836) tmp839 = (1 - (tmp574 if o3_3 else tmp666)) tmp838 = (1 - (1 if o1_2 else tmp839)) tmp837 = (tmp836 if o2_2 else tmp838) tmp833 = (tmp834 if o3_2 else tmp837) tmp840 = (tmp833 if o1_1 else 0) tmp832 = (tmp833 if o2_1 else tmp840) tmp843 = (1 - (tmp834 if o3_2 else tmp837)) tmp842 = (1 - (1 if o1_1 else tmp843)) tmp841 = (tmp840 if o2_1 else tmp842) tmp831 = (tmp832 if o3_1 else tmp841) 
tmp848 = (tmp574 if o3_3 else tmp678) tmp849 = (tmp848 if o1_2 else 0) tmp847 = (tmp848 if o2_2 else tmp849) tmp852 = (1 - (tmp574 if o3_3 else tmp678)) tmp851 = (1 if o1_2 else tmp852) tmp850 = (1 - (1 if o2_2 else tmp851)) tmp846 = (tmp847 if o3_2 else tmp850) tmp853 = (tmp846 if o1_1 else 0) tmp845 = (tmp846 if o2_1 else tmp853) tmp856 = (1 - (tmp847 if o3_2 else tmp850)) tmp855 = (1 if o1_1 else tmp856) tmp854 = (1 - (1 if o2_1 else tmp855)) tmp844 = (tmp845 if o3_1 else tmp854) tmp830 = (tmp831 if sensor_up else tmp844) tmp862 = (tmp574 if o3_3 else tmp689) tmp863 = (tmp862 if o1_2 else 0) tmp861 = (tmp862 if o2_2 else tmp863) tmp864 = (tmp863 if o2_2 else 0) tmp860 = (tmp861 if o3_2 else tmp864) tmp865 = (tmp860 if o1_1 else 0) tmp859 = (tmp860 if o2_1 else tmp865) tmp866 = (tmp865 if o2_1 else 0) tmp858 = (tmp859 if o3_1 else tmp866) tmp857 = (tmp858 if sensor_up else tmp698) tmp829 = (tmp830 if sensor_right else tmp857) tmp779 = (tmp780 if sensor_left else tmp829) tmp694 = (tmp695 if sensor_down else tmp779) tmp551 = (tmp552 if b else tmp694) tmp550 = (1 - (tmp551 if s1 else tmp694)) tmp549 = (1 - (1 if s2 else tmp550)) s2n = tmp549 tmp878 = (1 if o3_3 else 0) tmp879 = (tmp878 if o1_2 else 1) tmp877 = (tmp878 if o2_2 else tmp879) tmp876 = (tmp877 if o3_2 else 0) tmp880 = (tmp876 if o1_1 else 1) tmp875 = (tmp876 if o2_1 else tmp880) tmp874 = (tmp875 if o3_1 else 0) tmp887 = (1 if o1_3 else 0) tmp886 = (1 - (tmp887 if o2_3 else 0)) tmp885 = (1 if o3_3 else tmp886) tmp888 = (tmp885 if o1_2 else 1) tmp884 = (tmp885 if o2_2 else tmp888) tmp891 = (1 - (1 if o3_3 else tmp886)) tmp890 = (1 if o1_2 else tmp891) tmp889 = (1 - (tmp890 if o2_2 else tmp891)) tmp883 = (tmp884 if o3_2 else tmp889) tmp892 = (tmp883 if o1_1 else 1) tmp882 = (tmp883 if o2_1 else tmp892) tmp895 = (1 - (tmp884 if o3_2 else tmp889)) tmp894 = (1 if o1_1 else tmp895) tmp893 = (1 - (tmp894 if o2_1 else tmp895)) tmp881 = (tmp882 if o3_1 else tmp893) tmp873 = (tmp874 if sensor_up else tmp881) tmp901 
= (tmp878 if o1_2 else 0) tmp900 = (tmp878 if o2_2 else tmp901) tmp899 = (tmp877 if o3_2 else tmp900) tmp902 = (tmp899 if o1_1 else 1) tmp898 = (tmp899 if o2_1 else tmp902) tmp904 = (tmp899 if o1_1 else 0) tmp903 = (tmp899 if o2_1 else tmp904) tmp897 = (tmp898 if o3_1 else tmp903) tmp910 = (1 - (1 if o3_3 else 0)) tmp909 = (1 if o1_2 else tmp910) tmp911 = (1 - (tmp878 if o1_2 else 0)) tmp908 = (1 - (tmp909 if o2_2 else tmp911)) tmp907 = (tmp877 if o3_2 else tmp908) tmp912 = (tmp907 if o1_1 else 1) tmp906 = (tmp907 if o2_1 else tmp912) tmp915 = (1 - (tmp877 if o3_2 else tmp908)) tmp914 = (1 if o1_1 else tmp915) tmp916 = (1 - (tmp907 if o1_1 else 0)) tmp913 = (1 - (tmp914 if o2_1 else tmp916)) tmp905 = (tmp906 if o3_1 else tmp913) tmp896 = (tmp897 if sensor_up else tmp905) tmp872 = (tmp873 if sensor_right else tmp896) tmp924 = (tmp887 if o2_3 else 1) tmp923 = (1 if o3_3 else tmp924) tmp925 = (tmp923 if o1_2 else 1) tmp922 = (tmp923 if o2_2 else tmp925) tmp927 = (tmp923 if o1_2 else 0) tmp926 = (tmp927 if o2_2 else tmp923) tmp921 = (tmp922 if o3_2 else tmp926) tmp928 = (tmp921 if o1_1 else 1) tmp920 = (tmp921 if o2_1 else tmp928) tmp930 = (tmp921 if o1_1 else 0) tmp929 = (tmp930 if o2_1 else tmp921) tmp919 = (tmp920 if o3_1 else tmp929) tmp936 = (1 - (1 if o2_3 else 0)) tmp935 = (1 if o3_3 else tmp936) tmp937 = (tmp935 if o1_2 else 1) tmp934 = (tmp935 if o2_2 else tmp937) tmp939 = (1 - (1 if o3_3 else tmp936)) tmp938 = (1 - (1 if o2_2 else tmp939)) tmp933 = (tmp934 if o3_2 else tmp938) tmp940 = (tmp933 if o1_1 else 1) tmp932 = (tmp933 if o2_1 else tmp940) tmp942 = (1 - (tmp934 if o3_2 else tmp938)) tmp941 = (1 - (1 if o2_1 else tmp942)) tmp931 = (tmp932 if o3_1 else tmp941) tmp918 = (tmp919 if sensor_up else tmp931) tmp946 = (tmp877 if o3_2 else tmp901) tmp947 = (tmp946 if o1_1 else 1) tmp945 = (tmp946 if o2_1 else tmp947) tmp948 = (tmp946 if o1_1 else 0) tmp944 = (tmp945 if o3_1 else tmp948) tmp952 = (1 - (1 if o2_2 else tmp911)) tmp951 = (tmp877 if o3_2 else tmp952) 
tmp953 = (tmp951 if o1_1 else 1) tmp950 = (tmp951 if o2_1 else tmp953) tmp955 = (1 - (tmp951 if o1_1 else 0)) tmp954 = (1 - (1 if o2_1 else tmp955)) tmp949 = (tmp950 if o3_1 else tmp954) tmp943 = (tmp944 if sensor_up else tmp949) tmp917 = (tmp918 if sensor_right else tmp943) tmp871 = (tmp872 if sensor_left else tmp917) tmp965 = (1 - (1 if o1_3 else 0)) tmp964 = (1 if o2_3 else tmp965) tmp963 = (1 if o3_3 else tmp964) tmp966 = (tmp963 if o1_2 else 1) tmp962 = (tmp963 if o2_2 else tmp966) tmp969 = (1 - (1 if o3_3 else tmp964)) tmp968 = (1 - (1 if o1_2 else tmp969)) tmp967 = (tmp963 if o2_2 else tmp968) tmp961 = (tmp962 if o3_2 else tmp967) tmp970 = (tmp961 if o1_1 else 1) tmp960 = (tmp961 if o2_1 else tmp970) tmp973 = (1 - (tmp962 if o3_2 else tmp967)) tmp972 = (1 - (1 if o1_1 else tmp973)) tmp971 = (tmp961 if o2_1 else tmp972) tmp959 = (tmp960 if o3_1 else tmp971) tmp978 = (1 if o3_3 else tmp965) tmp979 = (tmp978 if o1_2 else 1) tmp977 = (tmp978 if o2_2 else tmp979) tmp981 = (1 - (1 if o3_3 else tmp965)) tmp980 = (1 - (1 if o1_2 else tmp981)) tmp976 = (tmp977 if o3_2 else tmp980) tmp982 = (tmp976 if o1_1 else 1) tmp975 = (tmp976 if o2_1 else tmp982) tmp984 = (1 - (tmp977 if o3_2 else tmp980)) tmp983 = (1 - (1 if o1_1 else tmp984)) tmp974 = (tmp975 if o3_1 else tmp983) tmp958 = (tmp959 if sensor_up else tmp974) tmp989 = (tmp878 if o2_2 else 0) tmp988 = (tmp877 if o3_2 else tmp989) tmp990 = (tmp988 if o1_1 else 1) tmp987 = (tmp988 if o2_1 else tmp990) tmp991 = (tmp988 if o2_1 else 0) tmp986 = (tmp987 if o3_1 else tmp991) tmp995 = (1 - (tmp909 if o2_2 else 1)) tmp994 = (tmp877 if o3_2 else tmp995) tmp996 = (tmp994 if o1_1 else 1) tmp993 = (tmp994 if o2_1 else tmp996) tmp999 = (1 - (tmp877 if o3_2 else tmp995)) tmp998 = (1 if o1_1 else tmp999) tmp997 = (1 - (tmp998 if o2_1 else 1)) tmp992 = (tmp993 if o3_1 else tmp997) tmp985 = (tmp986 if sensor_up else tmp992) tmp957 = (tmp958 if sensor_right else tmp985) tmp1007 = (tmp887 if o2_3 else tmp965) tmp1006 = (1 if o3_3 else 
tmp1007) tmp1008 = (tmp1006 if o1_2 else 1) tmp1005 = (tmp1006 if o2_2 else tmp1008) tmp1010 = (tmp1006 if o1_2 else 0) tmp1012 = (1 - (1 if o3_3 else tmp1007)) tmp1011 = (1 - (1 if o1_2 else tmp1012)) tmp1009 = (tmp1010 if o2_2 else tmp1011) tmp1004 = (tmp1005 if o3_2 else tmp1009) tmp1013 = (tmp1004 if o1_1 else 1) tmp1003 = (tmp1004 if o2_1 else tmp1013) tmp1015 = (tmp1004 if o1_1 else 0) tmp1017 = (1 - (tmp1005 if o3_2 else tmp1009)) tmp1016 = (1 - (1 if o1_1 else tmp1017)) tmp1014 = (tmp1015 if o2_1 else tmp1016) tmp1002 = (tmp1003 if o3_1 else tmp1014) tmp1023 = (1 - (1 if o2_3 else tmp887)) tmp1022 = (1 if o3_3 else tmp1023) tmp1024 = (tmp1022 if o1_2 else 1) tmp1021 = (tmp1022 if o2_2 else tmp1024) tmp1027 = (1 - (1 if o3_3 else tmp1023)) tmp1026 = (1 if o1_2 else tmp1027) tmp1025 = (1 - (1 if o2_2 else tmp1026)) tmp1020 = (tmp1021 if o3_2 else tmp1025) tmp1028 = (tmp1020 if o1_1 else 1) tmp1019 = (tmp1020 if o2_1 else tmp1028) tmp1031 = (1 - (tmp1021 if o3_2 else tmp1025)) tmp1030 = (1 if o1_1 else tmp1031) tmp1029 = (1 - (1 if o2_1 else tmp1030)) tmp1018 = (tmp1019 if o3_1 else tmp1029) tmp1001 = (tmp1002 if sensor_up else tmp1018) tmp1036 = (tmp901 if o2_2 else 0) tmp1035 = (tmp877 if o3_2 else tmp1036) tmp1037 = (tmp1035 if o1_1 else 1) tmp1034 = (tmp1035 if o2_1 else tmp1037) tmp1039 = (tmp1035 if o1_1 else 0) tmp1038 = (tmp1039 if o2_1 else 0) tmp1033 = (tmp1034 if o3_1 else tmp1038) tmp1032 = (tmp1033 if sensor_up else tmp874) tmp1000 = (tmp1001 if sensor_right else tmp1032) tmp956 = (tmp957 if sensor_left else tmp1000) tmp870 = (tmp871 if sensor_down else tmp956) tmp869 = (tmp870 if b else 0) tmp868 = (1 - (tmp869 if s0 else 0)) tmp867 = (1 - (1 if s1 else tmp868)) s1n = tmp867 tmp1051 = (1 if o3_3 else 0) tmp1052 = (tmp1051 if o1_2 else 1) tmp1050 = (tmp1051 if o2_2 else tmp1052) tmp1049 = (tmp1050 if o3_2 else 0) tmp1053 = (tmp1049 if o1_1 else 1) tmp1048 = (tmp1049 if o2_1 else tmp1053) tmp1047 = (tmp1048 if o3_1 else 0) tmp1060 = (1 if o1_3 else 
0) tmp1059 = (1 - (tmp1060 if o2_3 else 0)) tmp1058 = (1 if o3_3 else tmp1059) tmp1061 = (tmp1058 if o1_2 else 1) tmp1057 = (tmp1058 if o2_2 else tmp1061) tmp1064 = (1 - (1 if o3_3 else tmp1059)) tmp1063 = (1 if o1_2 else tmp1064) tmp1062 = (1 - (tmp1063 if o2_2 else tmp1064)) tmp1056 = (tmp1057 if o3_2 else tmp1062) tmp1065 = (tmp1056 if o1_1 else 1) tmp1055 = (tmp1056 if o2_1 else tmp1065) tmp1068 = (1 - (tmp1057 if o3_2 else tmp1062)) tmp1067 = (1 if o1_1 else tmp1068) tmp1066 = (1 - (tmp1067 if o2_1 else tmp1068)) tmp1054 = (tmp1055 if o3_1 else tmp1066) tmp1046 = (tmp1047 if sensor_up else tmp1054) tmp1074 = (tmp1051 if o1_2 else 0) tmp1073 = (tmp1051 if o2_2 else tmp1074) tmp1072 = (tmp1050 if o3_2 else tmp1073) tmp1075 = (tmp1072 if o1_1 else 1) tmp1071 = (tmp1072 if o2_1 else tmp1075) tmp1077 = (tmp1072 if o1_1 else 0) tmp1076 = (tmp1072 if o2_1 else tmp1077) tmp1070 = (tmp1071 if o3_1 else tmp1076) tmp1083 = (1 - (1 if o3_3 else 0)) tmp1082 = (1 if o1_2 else tmp1083) tmp1084 = (1 - (tmp1051 if o1_2 else 0)) tmp1081 = (1 - (tmp1082 if o2_2 else tmp1084)) tmp1080 = (tmp1050 if o3_2 else tmp1081) tmp1085 = (tmp1080 if o1_1 else 1) tmp1079 = (tmp1080 if o2_1 else tmp1085) tmp1088 = (1 - (tmp1050 if o3_2 else tmp1081)) tmp1087 = (1 if o1_1 else tmp1088) tmp1089 = (1 - (tmp1080 if o1_1 else 0)) tmp1086 = (1 - (tmp1087 if o2_1 else tmp1089)) tmp1078 = (tmp1079 if o3_1 else tmp1086) tmp1069 = (tmp1070 if sensor_up else tmp1078) tmp1045 = (tmp1046 if sensor_right else tmp1069) tmp1097 = (tmp1060 if o2_3 else 1) tmp1096 = (1 if o3_3 else tmp1097) tmp1098 = (tmp1096 if o1_2 else 1) tmp1095 = (tmp1096 if o2_2 else tmp1098) tmp1100 = (tmp1096 if o1_2 else 0) tmp1099 = (tmp1100 if o2_2 else tmp1096) tmp1094 = (tmp1095 if o3_2 else tmp1099) tmp1101 = (tmp1094 if o1_1 else 1) tmp1093 = (tmp1094 if o2_1 else tmp1101) tmp1103 = (tmp1094 if o1_1 else 0) tmp1102 = (tmp1103 if o2_1 else tmp1094) tmp1092 = (tmp1093 if o3_1 else tmp1102) tmp1109 = (1 - (1 if o2_3 else 0)) 
tmp1108 = (1 if o3_3 else tmp1109) tmp1110 = (tmp1108 if o1_2 else 1) tmp1107 = (tmp1108 if o2_2 else tmp1110) tmp1112 = (1 - (1 if o3_3 else tmp1109)) tmp1111 = (1 - (1 if o2_2 else tmp1112)) tmp1106 = (tmp1107 if o3_2 else tmp1111) tmp1113 = (tmp1106 if o1_1 else 1) tmp1105 = (tmp1106 if o2_1 else tmp1113) tmp1115 = (1 - (tmp1107 if o3_2 else tmp1111)) tmp1114 = (1 - (1 if o2_1 else tmp1115)) tmp1104 = (tmp1105 if o3_1 else tmp1114) tmp1091 = (tmp1092 if sensor_up else tmp1104) tmp1119 = (tmp1050 if o3_2 else tmp1074) tmp1120 = (tmp1119 if o1_1 else 1) tmp1118 = (tmp1119 if o2_1 else tmp1120) tmp1121 = (tmp1119 if o1_1 else 0) tmp1117 = (tmp1118 if o3_1 else tmp1121) tmp1125 = (1 - (1 if o2_2 else tmp1084)) tmp1124 = (tmp1050 if o3_2 else tmp1125) tmp1126 = (tmp1124 if o1_1 else 1) tmp1123 = (tmp1124 if o2_1 else tmp1126) tmp1128 = (1 - (tmp1124 if o1_1 else 0)) tmp1127 = (1 - (1 if o2_1 else tmp1128)) tmp1122 = (tmp1123 if o3_1 else tmp1127) tmp1116 = (tmp1117 if sensor_up else tmp1122) tmp1090 = (tmp1091 if sensor_right else tmp1116) tmp1044 = (tmp1045 if sensor_left else tmp1090) tmp1138 = (1 - (1 if o1_3 else 0)) tmp1137 = (1 if o2_3 else tmp1138) tmp1136 = (1 if o3_3 else tmp1137) tmp1139 = (tmp1136 if o1_2 else 1) tmp1135 = (tmp1136 if o2_2 else tmp1139) tmp1142 = (1 - (1 if o3_3 else tmp1137)) tmp1141 = (1 - (1 if o1_2 else tmp1142)) tmp1140 = (tmp1136 if o2_2 else tmp1141) tmp1134 = (tmp1135 if o3_2 else tmp1140) tmp1143 = (tmp1134 if o1_1 else 1) tmp1133 = (tmp1134 if o2_1 else tmp1143) tmp1146 = (1 - (tmp1135 if o3_2 else tmp1140)) tmp1145 = (1 - (1 if o1_1 else tmp1146)) tmp1144 = (tmp1134 if o2_1 else tmp1145) tmp1132 = (tmp1133 if o3_1 else tmp1144) tmp1151 = (1 if o3_3 else tmp1138) tmp1152 = (tmp1151 if o1_2 else 1) tmp1150 = (tmp1151 if o2_2 else tmp1152) tmp1154 = (1 - (1 if o3_3 else tmp1138)) tmp1153 = (1 - (1 if o1_2 else tmp1154)) tmp1149 = (tmp1150 if o3_2 else tmp1153) tmp1155 = (tmp1149 if o1_1 else 1) tmp1148 = (tmp1149 if o2_1 else 
tmp1155) tmp1157 = (1 - (tmp1150 if o3_2 else tmp1153)) tmp1156 = (1 - (1 if o1_1 else tmp1157)) tmp1147 = (tmp1148 if o3_1 else tmp1156) tmp1131 = (tmp1132 if sensor_up else tmp1147) tmp1162 = (tmp1051 if o2_2 else 0) tmp1161 = (tmp1050 if o3_2 else tmp1162) tmp1163 = (tmp1161 if o1_1 else 1) tmp1160 = (tmp1161 if o2_1 else tmp1163) tmp1164 = (tmp1161 if o2_1 else 0) tmp1159 = (tmp1160 if o3_1 else tmp1164) tmp1168 = (1 - (tmp1082 if o2_2 else 1)) tmp1167 = (tmp1050 if o3_2 else tmp1168) tmp1169 = (tmp1167 if o1_1 else 1) tmp1166 = (tmp1167 if o2_1 else tmp1169) tmp1172 = (1 - (tmp1050 if o3_2 else tmp1168)) tmp1171 = (1 if o1_1 else tmp1172) tmp1170 = (1 - (tmp1171 if o2_1 else 1)) tmp1165 = (tmp1166 if o3_1 else tmp1170) tmp1158 = (tmp1159 if sensor_up else tmp1165) tmp1130 = (tmp1131 if sensor_right else tmp1158) tmp1180 = (tmp1060 if o2_3 else tmp1138) tmp1179 = (1 if o3_3 else tmp1180) tmp1181 = (tmp1179 if o1_2 else 1) tmp1178 = (tmp1179 if o2_2 else tmp1181) tmp1183 = (tmp1179 if o1_2 else 0) tmp1185 = (1 - (1 if o3_3 else tmp1180)) tmp1184 = (1 - (1 if o1_2 else tmp1185)) tmp1182 = (tmp1183 if o2_2 else tmp1184) tmp1177 = (tmp1178 if o3_2 else tmp1182) tmp1186 = (tmp1177 if o1_1 else 1) tmp1176 = (tmp1177 if o2_1 else tmp1186) tmp1188 = (tmp1177 if o1_1 else 0) tmp1190 = (1 - (tmp1178 if o3_2 else tmp1182)) tmp1189 = (1 - (1 if o1_1 else tmp1190)) tmp1187 = (tmp1188 if o2_1 else tmp1189) tmp1175 = (tmp1176 if o3_1 else tmp1187) tmp1196 = (1 - (1 if o2_3 else tmp1060)) tmp1195 = (1 if o3_3 else tmp1196) tmp1197 = (tmp1195 if o1_2 else 1) tmp1194 = (tmp1195 if o2_2 else tmp1197) tmp1200 = (1 - (1 if o3_3 else tmp1196)) tmp1199 = (1 if o1_2 else tmp1200) tmp1198 = (1 - (1 if o2_2 else tmp1199)) tmp1193 = (tmp1194 if o3_2 else tmp1198) tmp1201 = (tmp1193 if o1_1 else 1) tmp1192 = (tmp1193 if o2_1 else tmp1201) tmp1204 = (1 - (tmp1194 if o3_2 else tmp1198)) tmp1203 = (1 if o1_1 else tmp1204) tmp1202 = (1 - (1 if o2_1 else tmp1203)) tmp1191 = (tmp1192 if o3_1 
else tmp1202) tmp1174 = (tmp1175 if sensor_up else tmp1191) tmp1209 = (tmp1074 if o2_2 else 0) tmp1208 = (tmp1050 if o3_2 else tmp1209) tmp1210 = (tmp1208 if o1_1 else 1) tmp1207 = (tmp1208 if o2_1 else tmp1210) tmp1212 = (tmp1208 if o1_1 else 0) tmp1211 = (tmp1212 if o2_1 else 0) tmp1206 = (tmp1207 if o3_1 else tmp1211) tmp1205 = (tmp1206 if sensor_up else tmp1047) tmp1173 = (tmp1174 if sensor_right else tmp1205) tmp1129 = (tmp1130 if sensor_left else tmp1173) tmp1043 = (tmp1044 if sensor_down else tmp1129) tmp1042 = (1 - (tmp1043 if b else 0)) tmp1041 = (1 if s0 else tmp1042) tmp1040 = (1 - (1 if s1 else tmp1041)) s0n = tmp1040 self.s2 = s2n self.s1 = s1n self.s0 = s0n return [ o3__s, o2__s, o1__s, recovery__s]
0.134321
0.417628
from lib.common import unix2str from lib.logger import log_types, update_types from multiprocessing.connection import Client from StringIO import StringIO from bitcoin.messages import MsgSerializable from getopt import getopt from sys import argv from apps import COINSCOPED_API_ADDR, COINSCOPED_API_PORT commands = ['getpeer', 'getpeers', 'getpeersinfo', 'getpeerhistory', 'getpeercount', 'getlogs', 'getnetlogs', 'help'] def show_usage(): print "usage: python coinscope-cli.py argument [additional_arguments]." \ "\nArguments:" \ "\ngetpeer <peer_ip:peer_port>: \treturns metadata abut the requested peer." \ "\ngetpeers: \treturns the list of all connected peers with their corresponding coinscope id." \ "\ngetpeersinfo: \treturns a list of all connected peers including all known metadata about them." \ "\ngetpeercount: \treturns the total number of peers." \ "\ngetlogs: \treturns a live parsing of the Bitcoin messages logs." \ "\ngetnetlogs: \treturns a live parsing of the network logs." \ "\nhelp: \t\tshows this message." def deserialize_log(log, sock_type): if sock_type == 'bitcoin_msg_log': output = "[{0}] {1}: handle_id: {2}, is_sender: {3}, bitcoin_msg: {4}".format( unix2str(log.timestamp), log_types.str_mapping[log.log_type], log.handle_id, log.is_sender, MsgSerializable.stream_deserialize(StringIO(response.bitcoin_msg))) elif sock_type == 'bitcoin_log': output = "[{0}] {1}: handle: {2} update_type: {3}, remote: {4}:{5}, local: {6}:{7}, text: {8}".format( unix2str(log.timestamp), log_types.str_mapping[log.log_type], log.handle_id, update_types.str_mapping[log.update_type], log.remote_addr, log.remote_port, log.local_addr, log.local_port, log.text) else: raise Exception("Unknown socket type.") return output if __name__ == '__main__': opts, args = getopt(argv[1:], '', commands) # Get args if len(args) > 0: command = args[0] else: raise Exception("Argument missing. 
Use help for usage information.") if command in ['getpeer', 'getpeerhistory']: if len(args) == 2: arg = args[1] elif command == 'getpeer' and len(args) == 3: arg = args[1:] else: raise Exception("Argument missing. Use help for usage information.") else: arg = None conn = Client((COINSCOPED_API_ADDR, COINSCOPED_API_PORT)) if command in ['help']: show_usage() elif command in ['getpeer', 'getpeers', 'getpeersinfo', 'getpeercount', 'getpeerold', 'getpeerhistory']: if arg: conn.send((command, arg)) else: conn.send(command) response = conn.recv() if command in ['getpeer', 'getpeers', 'getpeercount', 'getpeerold', 'getpeerhistory']: output = response elif command == 'getpeersinfo': output = '{\n' output += ''.join('\t%s:%s\n' % (peer_ip, meta) for peer_ip, meta in response.iteritems()) output += '}' print output conn.close() elif command in ['getlogs', 'getnetlogs']: conn.send(command) while True: response = conn.recv() if command == 'getlogs': try: print deserialize_log(response, 'bitcoin_msg_log') except ValueError: continue elif command == 'getnetlogs': print deserialize_log(response, 'bitcoin_log') elif command == 'getlogerrors': try: deserialize_log(response, 'bitcoin_msg_log') except ValueError: print "[{0}] {1}: handle_id: {2}, is_sender: {3}, error: wrong magic number".format( unix2str(response.timestamp), log_types.str_mapping[response.log_type], response.handle_id, response.is_sender) else: raise Exception("Invalid command. Use help for usage information.")
apps/hyperion-cli.py
from lib.common import unix2str from lib.logger import log_types, update_types from multiprocessing.connection import Client from StringIO import StringIO from bitcoin.messages import MsgSerializable from getopt import getopt from sys import argv from apps import COINSCOPED_API_ADDR, COINSCOPED_API_PORT commands = ['getpeer', 'getpeers', 'getpeersinfo', 'getpeerhistory', 'getpeercount', 'getlogs', 'getnetlogs', 'help'] def show_usage(): print "usage: python coinscope-cli.py argument [additional_arguments]." \ "\nArguments:" \ "\ngetpeer <peer_ip:peer_port>: \treturns metadata abut the requested peer." \ "\ngetpeers: \treturns the list of all connected peers with their corresponding coinscope id." \ "\ngetpeersinfo: \treturns a list of all connected peers including all known metadata about them." \ "\ngetpeercount: \treturns the total number of peers." \ "\ngetlogs: \treturns a live parsing of the Bitcoin messages logs." \ "\ngetnetlogs: \treturns a live parsing of the network logs." \ "\nhelp: \t\tshows this message." def deserialize_log(log, sock_type): if sock_type == 'bitcoin_msg_log': output = "[{0}] {1}: handle_id: {2}, is_sender: {3}, bitcoin_msg: {4}".format( unix2str(log.timestamp), log_types.str_mapping[log.log_type], log.handle_id, log.is_sender, MsgSerializable.stream_deserialize(StringIO(response.bitcoin_msg))) elif sock_type == 'bitcoin_log': output = "[{0}] {1}: handle: {2} update_type: {3}, remote: {4}:{5}, local: {6}:{7}, text: {8}".format( unix2str(log.timestamp), log_types.str_mapping[log.log_type], log.handle_id, update_types.str_mapping[log.update_type], log.remote_addr, log.remote_port, log.local_addr, log.local_port, log.text) else: raise Exception("Unknown socket type.") return output if __name__ == '__main__': opts, args = getopt(argv[1:], '', commands) # Get args if len(args) > 0: command = args[0] else: raise Exception("Argument missing. 
Use help for usage information.") if command in ['getpeer', 'getpeerhistory']: if len(args) == 2: arg = args[1] elif command == 'getpeer' and len(args) == 3: arg = args[1:] else: raise Exception("Argument missing. Use help for usage information.") else: arg = None conn = Client((COINSCOPED_API_ADDR, COINSCOPED_API_PORT)) if command in ['help']: show_usage() elif command in ['getpeer', 'getpeers', 'getpeersinfo', 'getpeercount', 'getpeerold', 'getpeerhistory']: if arg: conn.send((command, arg)) else: conn.send(command) response = conn.recv() if command in ['getpeer', 'getpeers', 'getpeercount', 'getpeerold', 'getpeerhistory']: output = response elif command == 'getpeersinfo': output = '{\n' output += ''.join('\t%s:%s\n' % (peer_ip, meta) for peer_ip, meta in response.iteritems()) output += '}' print output conn.close() elif command in ['getlogs', 'getnetlogs']: conn.send(command) while True: response = conn.recv() if command == 'getlogs': try: print deserialize_log(response, 'bitcoin_msg_log') except ValueError: continue elif command == 'getnetlogs': print deserialize_log(response, 'bitcoin_log') elif command == 'getlogerrors': try: deserialize_log(response, 'bitcoin_msg_log') except ValueError: print "[{0}] {1}: handle_id: {2}, is_sender: {3}, error: wrong magic number".format( unix2str(response.timestamp), log_types.str_mapping[response.log_type], response.handle_id, response.is_sender) else: raise Exception("Invalid command. Use help for usage information.")
0.44553
0.151153
import re from threading import Thread from .pipe import Pipe from newsman.utils import Url from newsman.scraping import UrlFilter import requests #RE_IMG = re.compile(r'<img (?:.+?) src="(.+?)" (?:.+?)>', re.MULTILINE|re.DOTALL) RE_IMG = re.compile(r'<img(?:.*?)src="(.+?)"', re.MULTILINE|re.DOTALL) ACCEPTED_EXTS = ['txt', 'xml', 'json', 'doc', 'docx', 'pdf', 'gif', 'png', 'jpg', 'jpeg', 'png', 'bmp', 'ico', 'svg', 'css', 'js', 'jsp', 'php', 'mp3', 'mp4', 'mov', 'mpeg4', 'flv'] def perform_request(url, results, idx): """Performs a HTTP request for getting image size.""" try: resp = requests.get(url, timeout=3.5, stream=True) results[idx] = resp.headers['Content-Length'] except: results[idx] = 0 return class Html2image(Pipe): """Class for extracting the main image for html code. Selection is based on ulr filtering and largest size. Attributes: urlfilter: UrlFilter object for link validation. """ name = 'html2image' def __init__(self, config): """Object initialization. Args: config: Dict with configuration parameters. """ self.urlfilter = UrlFilter(accepted_exts=ACCEPTED_EXTS, rejected_exts=None, accepted_domains=config['accepted_domains'], rejected_domains=config['rejected_domains']) def set_annotations(self, page, **kwargs): """Extracts image info from html code in asynchronous mode. Args: page: <Page> object. Returns: page: <Page> object. 
""" urlfilter = self.urlfilter html = page.html if html: urls = [] for match in RE_IMG.finditer(html): try: url = Url(match.group(1)) if urlfilter.validate_domain(url) and urlfilter.validate_content(url): urls.append(url) except ValueError: # invalid url, do nothing pass img_sizes = self.get_sizes(urls) page.images = [(url.url, img_size) for url, img_size in zip(urls, img_sizes)] return page def get_sizes(self, urls): num_urls = len(urls) # init threads = [None] * num_urls img_sizes = [None] * num_urls for ii in range(len(threads)): threads[ii] = Thread(target=perform_request, args=(urls[ii].url, img_sizes, ii)) threads[ii].start() for ii in range(len(threads)): threads[ii].join() return img_sizes
newsman/pipeline/html2image.py
import re from threading import Thread from .pipe import Pipe from newsman.utils import Url from newsman.scraping import UrlFilter import requests #RE_IMG = re.compile(r'<img (?:.+?) src="(.+?)" (?:.+?)>', re.MULTILINE|re.DOTALL) RE_IMG = re.compile(r'<img(?:.*?)src="(.+?)"', re.MULTILINE|re.DOTALL) ACCEPTED_EXTS = ['txt', 'xml', 'json', 'doc', 'docx', 'pdf', 'gif', 'png', 'jpg', 'jpeg', 'png', 'bmp', 'ico', 'svg', 'css', 'js', 'jsp', 'php', 'mp3', 'mp4', 'mov', 'mpeg4', 'flv'] def perform_request(url, results, idx): """Performs a HTTP request for getting image size.""" try: resp = requests.get(url, timeout=3.5, stream=True) results[idx] = resp.headers['Content-Length'] except: results[idx] = 0 return class Html2image(Pipe): """Class for extracting the main image for html code. Selection is based on ulr filtering and largest size. Attributes: urlfilter: UrlFilter object for link validation. """ name = 'html2image' def __init__(self, config): """Object initialization. Args: config: Dict with configuration parameters. """ self.urlfilter = UrlFilter(accepted_exts=ACCEPTED_EXTS, rejected_exts=None, accepted_domains=config['accepted_domains'], rejected_domains=config['rejected_domains']) def set_annotations(self, page, **kwargs): """Extracts image info from html code in asynchronous mode. Args: page: <Page> object. Returns: page: <Page> object. 
""" urlfilter = self.urlfilter html = page.html if html: urls = [] for match in RE_IMG.finditer(html): try: url = Url(match.group(1)) if urlfilter.validate_domain(url) and urlfilter.validate_content(url): urls.append(url) except ValueError: # invalid url, do nothing pass img_sizes = self.get_sizes(urls) page.images = [(url.url, img_size) for url, img_size in zip(urls, img_sizes)] return page def get_sizes(self, urls): num_urls = len(urls) # init threads = [None] * num_urls img_sizes = [None] * num_urls for ii in range(len(threads)): threads[ii] = Thread(target=perform_request, args=(urls[ii].url, img_sizes, ii)) threads[ii].start() for ii in range(len(threads)): threads[ii].join() return img_sizes
0.530966
0.170612
import time import ast import os import getpass from passlib.hash import pbkdf2_sha256 def refreshlist(): infile = open('creds.cred', 'r') per_row = [] for line in infile: per_row.append(line.strip().split('\t')) per_column = zip(*per_row) try: global firstname global username global usernames global password global nocontents firstnames = str(per_column[0]) firstnames = firstnames.replace("(", "[") firstnames = firstnames.replace(")", "]") firstname = ast.literal_eval(firstnames) usernames = str(per_column[1]) usernames = usernames.replace("(", "[") usernames = usernames.replace(")", "]") username = ast.literal_eval(usernames) passwords = str(per_column[2]) passwords = passwords.replace("(", "[") passwords = passwords.replace(")", "]") password = ast.literal_eval(passwords) nocontents = 0 except: nocontents = 1 pass refreshlist() print "Welcome to the Simple Login System." print "Your passwords are encrypted with SHA256." print "Coded by <NAME> on 03/13/16" time.sleep(0.2) print "Do you want to:" time.sleep(0.2) print "[1]Create an account" print "[2]Login to an account" selection = input("What do you want to do (eg. 1 or 2)?: ") accountcreation = 1 if selection == 1: while accountcreation == True: fullname = raw_input("What is your full name?: ") if fullname == "": print ("Please enter a name") continue else: a = 1 while a == 1: Username = raw_input("Please enter a username: ") if nocontents == 0: if any(item.lower() == Username.lower() for item in usernames): print ("Sorry, That username is taken, please enter another.") continue elif Username == "": print ("You need to enter a username!") continue elif Username == "": print ("You need to enter a username!") continue #else: b = 1 while b == 1: enteredpassword = getpass.getpass("Please enter a password: ") if enteredpassword == "": print "You need to enter a password!" else: print ("Creating the User. 
Please wait for a few moments") password = <PASSWORD>(enteredpassword, rounds=200000, salt_size=16) tosavetofile = (fullname + "\t" + Username + "\t" + password) credfile = open("creds.cred","a") credfile.write(tosavetofile + "\n") credfile.close() print ("Done! Now taking you to login.") time.sleep(0.25) b = 0 a = 0 selection = 0 accountcreation = 0 os.system('cls') refreshlist() os.system('cls') os.system('clear') loginusername = raw_input("Please enter your username: ") loginpassword = <PASSWORD>("Please enter your password: ") with open("creds.cred") as myFile: for num, line in enumerate(myFile, 1): if loginusername in line: dataloc = num - 1 pwdtoverify = password[dataloc] if (pbkdf2_sha256.verify(loginpassword, pwdtoverify): print ("Welcome " + firstname[dataloc]) print "You have successfully logged in!" time.sleep(2) else: print ("Incorrect username and password!") time.sleep(2)
main.py
import time import ast import os import getpass from passlib.hash import pbkdf2_sha256 def refreshlist(): infile = open('creds.cred', 'r') per_row = [] for line in infile: per_row.append(line.strip().split('\t')) per_column = zip(*per_row) try: global firstname global username global usernames global password global nocontents firstnames = str(per_column[0]) firstnames = firstnames.replace("(", "[") firstnames = firstnames.replace(")", "]") firstname = ast.literal_eval(firstnames) usernames = str(per_column[1]) usernames = usernames.replace("(", "[") usernames = usernames.replace(")", "]") username = ast.literal_eval(usernames) passwords = str(per_column[2]) passwords = passwords.replace("(", "[") passwords = passwords.replace(")", "]") password = ast.literal_eval(passwords) nocontents = 0 except: nocontents = 1 pass refreshlist() print "Welcome to the Simple Login System." print "Your passwords are encrypted with SHA256." print "Coded by <NAME> on 03/13/16" time.sleep(0.2) print "Do you want to:" time.sleep(0.2) print "[1]Create an account" print "[2]Login to an account" selection = input("What do you want to do (eg. 1 or 2)?: ") accountcreation = 1 if selection == 1: while accountcreation == True: fullname = raw_input("What is your full name?: ") if fullname == "": print ("Please enter a name") continue else: a = 1 while a == 1: Username = raw_input("Please enter a username: ") if nocontents == 0: if any(item.lower() == Username.lower() for item in usernames): print ("Sorry, That username is taken, please enter another.") continue elif Username == "": print ("You need to enter a username!") continue elif Username == "": print ("You need to enter a username!") continue #else: b = 1 while b == 1: enteredpassword = getpass.getpass("Please enter a password: ") if enteredpassword == "": print "You need to enter a password!" else: print ("Creating the User. 
Please wait for a few moments") password = <PASSWORD>(enteredpassword, rounds=200000, salt_size=16) tosavetofile = (fullname + "\t" + Username + "\t" + password) credfile = open("creds.cred","a") credfile.write(tosavetofile + "\n") credfile.close() print ("Done! Now taking you to login.") time.sleep(0.25) b = 0 a = 0 selection = 0 accountcreation = 0 os.system('cls') refreshlist() os.system('cls') os.system('clear') loginusername = raw_input("Please enter your username: ") loginpassword = <PASSWORD>("Please enter your password: ") with open("creds.cred") as myFile: for num, line in enumerate(myFile, 1): if loginusername in line: dataloc = num - 1 pwdtoverify = password[dataloc] if (pbkdf2_sha256.verify(loginpassword, pwdtoverify): print ("Welcome " + firstname[dataloc]) print "You have successfully logged in!" time.sleep(2) else: print ("Incorrect username and password!") time.sleep(2)
0.040731
0.110519
import preprocess_helper import tensorflow as tf import numpy as np import gensim def get_model_api(): """Returns lambda function for api""" # 1. initialize model once and for all print("- Load vocabulary list") vocabulary_size = 20000 embedding_dimension = 100 use_word2vec_emb = True vocab, generated_embeddings = preprocess_helper.load_frequent_words_and_embeddings("data/vocab_with_emb.txt") print("- Load checkpoint") checkpoint_file = tf.train.latest_checkpoint("model/pretrained_w2v_hidden_layer_1024/checkpoints/") graph = tf.Graph() with graph.as_default(): session_conf = tf.ConfigProto( allow_soft_placement=True, log_device_placement=False) sess = tf.Session(config=session_conf) with sess.as_default(): # Load the saved meta graph and restore variables print("- Restore the model") saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file)) saver.restore(sess, checkpoint_file) # Get the placeholders from the graph by name inputs = graph.get_operation_by_name("inputs").outputs[0] vocab_embedding = graph.get_operation_by_name("vocab_embedding").outputs[0] discard_last_prediction = graph.get_operation_by_name("discard_last_prediction").outputs[0] # Tensors we want to evaluate probabilities = graph.get_operation_by_name("softmax_layer/Reshape_2").outputs[0] print("- Model successfully restored") # Construct the embedding matrix vocab_emb = np.zeros(shape=(20000, 100)) w2v_model = gensim.models.KeyedVectors.load_word2vec_format("data/wordembeddings-dim100.word2vec", binary=False) for tok, idx in vocab.items(): if tok in w2v_model.vocab: vocab_emb[idx] = w2v_model[tok] else: vocab_emb[idx] = generated_embeddings[tok] print("- Embedding done") def model_api(input_data): """ Args: input_data: submitted to the API, raw string Returns: output_data: after some transformation, to be returned to the API """ # 2. 
process input input_data = input_data.lower() input_data_token = preprocess_helper.replace_unknown_words([input_data], vocab) input_data_token, _ = preprocess_helper.add_tokens_to_sentences(input_data_token, vocab, 30, eos_token=False, pad_sentence=False) continuation_length = 30 for cont in range(continuation_length): all_probabilities = sess.run([probabilities], {inputs: input_data_token, vocab_embedding: vocab_emb, discard_last_prediction: False}) all_probabilities = np.squeeze(all_probabilities) all_probabilities = all_probabilities[-1, :] # artificially set to zero the proba of the token <unk> all_probabilities[19996] = 0 # sort and take the value of Nth largest one... n = 20000 - 10 sorted_proba = np.sort(all_probabilities) thresh = sorted_proba[n] all_probabilities[np.abs(all_probabilities) < thresh] = 0 sum_all_probabilities = np.sum(all_probabilities) all_probabilities = all_probabilities / sum_all_probabilities predicted_word = np.random.choice(20000, 1, p=all_probabilities) input_data_token = np.concatenate((input_data_token, [predicted_word]), axis=1) sentence = '' for i in range(len(input_data_token[0])): word = (list(vocab.keys())[list(vocab.values()).index(input_data_token[0][i])]) sentence += word sentence += ' ' # remove all the brackets sign sentence = sentence.replace("<bos>", "") sentence = sentence.replace("<eos>", "") sentence = sentence.replace("<pad>", "") sentence = sentence.replace("<", "") sentence = sentence.replace(">", "") print(sentence) # 5. return the output for the api return sentence return model_api
serve_lstm.py
import preprocess_helper import tensorflow as tf import numpy as np import gensim def get_model_api(): """Returns lambda function for api""" # 1. initialize model once and for all print("- Load vocabulary list") vocabulary_size = 20000 embedding_dimension = 100 use_word2vec_emb = True vocab, generated_embeddings = preprocess_helper.load_frequent_words_and_embeddings("data/vocab_with_emb.txt") print("- Load checkpoint") checkpoint_file = tf.train.latest_checkpoint("model/pretrained_w2v_hidden_layer_1024/checkpoints/") graph = tf.Graph() with graph.as_default(): session_conf = tf.ConfigProto( allow_soft_placement=True, log_device_placement=False) sess = tf.Session(config=session_conf) with sess.as_default(): # Load the saved meta graph and restore variables print("- Restore the model") saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file)) saver.restore(sess, checkpoint_file) # Get the placeholders from the graph by name inputs = graph.get_operation_by_name("inputs").outputs[0] vocab_embedding = graph.get_operation_by_name("vocab_embedding").outputs[0] discard_last_prediction = graph.get_operation_by_name("discard_last_prediction").outputs[0] # Tensors we want to evaluate probabilities = graph.get_operation_by_name("softmax_layer/Reshape_2").outputs[0] print("- Model successfully restored") # Construct the embedding matrix vocab_emb = np.zeros(shape=(20000, 100)) w2v_model = gensim.models.KeyedVectors.load_word2vec_format("data/wordembeddings-dim100.word2vec", binary=False) for tok, idx in vocab.items(): if tok in w2v_model.vocab: vocab_emb[idx] = w2v_model[tok] else: vocab_emb[idx] = generated_embeddings[tok] print("- Embedding done") def model_api(input_data): """ Args: input_data: submitted to the API, raw string Returns: output_data: after some transformation, to be returned to the API """ # 2. 
process input input_data = input_data.lower() input_data_token = preprocess_helper.replace_unknown_words([input_data], vocab) input_data_token, _ = preprocess_helper.add_tokens_to_sentences(input_data_token, vocab, 30, eos_token=False, pad_sentence=False) continuation_length = 30 for cont in range(continuation_length): all_probabilities = sess.run([probabilities], {inputs: input_data_token, vocab_embedding: vocab_emb, discard_last_prediction: False}) all_probabilities = np.squeeze(all_probabilities) all_probabilities = all_probabilities[-1, :] # artificially set to zero the proba of the token <unk> all_probabilities[19996] = 0 # sort and take the value of Nth largest one... n = 20000 - 10 sorted_proba = np.sort(all_probabilities) thresh = sorted_proba[n] all_probabilities[np.abs(all_probabilities) < thresh] = 0 sum_all_probabilities = np.sum(all_probabilities) all_probabilities = all_probabilities / sum_all_probabilities predicted_word = np.random.choice(20000, 1, p=all_probabilities) input_data_token = np.concatenate((input_data_token, [predicted_word]), axis=1) sentence = '' for i in range(len(input_data_token[0])): word = (list(vocab.keys())[list(vocab.values()).index(input_data_token[0][i])]) sentence += word sentence += ' ' # remove all the brackets sign sentence = sentence.replace("<bos>", "") sentence = sentence.replace("<eos>", "") sentence = sentence.replace("<pad>", "") sentence = sentence.replace("<", "") sentence = sentence.replace(">", "") print(sentence) # 5. return the output for the api return sentence return model_api
0.772788
0.181571
from PyQt4 import QtGui, QtCore import sys from collections import namedtuple class BoolBox(QtGui.QPushButton): myValueChanged = QtCore.pyqtSignal(bool) def __init__(self, value, parent=None): super(BoolBox, self).__init__(parent) self.setCheckable(True) self.state = value self.setChecked(self.state) if self.state: text = 'ON' else: text = 'OFF' self.setText(text) self.clicked.connect(self.handleBoolButtonClicked) stylesheet = ('QPushButton:checked { background-color:' 'rgb(100,255,125); }' 'QPushButton { background-color:' 'rgb(255,125,100); }') self.setStyleSheet(stylesheet) def handleBoolButtonClicked(self, checked): self.state = bool(checked) if self.state: text = 'ON' else: text = 'OFF' self.setText(text) self.myValueChanged.emit(self.state) def mySetValue(self, val): self.state = bool(val) self.setChecked(self.state) if self.state: text = 'ON' else: text = 'OFF' self.setText(text) class IntBox(QtGui.QSpinBox): myValueChanged = QtCore.pyqtSignal(int) def __init__(self, value, parent=None): super(IntBox, self).__init__(parent) self.setValue(value) self.setRange(-2000000000, 2000000000) self.valueChanged.connect(self.myValueChanged) def mySetValue(self, val): self.setValue(val) class FloatBox(QtGui.QDoubleSpinBox): myValueChanged = QtCore.pyqtSignal(float) def __init__(self, value, parent=None): super(FloatBox, self).__init__(parent) self.setRange(-1e100, 1e100) self.setValue(value) self.setDecimals(10) self.valueChanged.connect(self.myValueChanged) def mySetValue(self, val): self.setValue(val) class StringBox(QtGui.QLineEdit): myValueChanged = QtCore.pyqtSignal(str) def __init__(self, value, parent=None): super(StringBox, self).__init__(parent) self.mySetValue(value) self.textChanged.connect(self.handleTextChanged) def mySetValue(self, val): self.setText(val) def handleTextChanged(self): self.myValueChanged.emit(str(self.text())) class FileBox(QtGui.QWidget): myValueChanged = QtCore.pyqtSignal(QtCore.QFileInfo) def __init__(self, value, parent=None): 
super(FileBox, self).__init__(parent) self.hbox = QtGui.QHBoxLayout(self) self.setLayout(self.hbox) self.file_edit = QtGui.QLineEdit(self) self.file_edit.setReadOnly(True) self.open_button = QtGui.QPushButton('Open', self) self.open_button.clicked.connect(self.handleOpenButtonClicked) self.hbox.addWidget(self.file_edit) self.hbox.addWidget(self.open_button) self.mySetValue(value) def mySetValue(self, value): self.value = value # check if original value is a directory. If it is, retstrict return # values to directories only self.isdir = self.value.isDir() self.file_edit.setText(value.canonicalFilePath()) def handleOpenButtonClicked(self): if self.isdir: ged = QtGui.QFileDialog.getExistingDirectory new_path = ged(self, "Open Directory", self.value.canonicalPath()) else: gofn = QtGui.QFileDialog.getOpenFileName new_path = gofn(self, "Open File", self.value.canonicalFilePath()) if new_path != '': new_value = QtCore.QFileInfo(new_path) self.mySetValue(new_value) self.myValueChanged.emit(new_value) class ListBox(QtGui.QPlainTextEdit): myValueChanged = QtCore.pyqtSignal(list) def __init__(self, value, parent=None): super(ListBox, self).__init__(parent) self.mySetValue(value) self.textChanged.connect(self.handleTextChanged) def mySetValue(self, val): self.setPlainText(repr(val)) def handleTextChanged(self): try: new_list = eval(str(self.toPlainText()), {}, {}) except: print "Unexpected error:", sys.exc_info()[0] else: self.myValueChanged.emit(new_list) widgets_for_type = {int: IntBox, float: FloatBox, str: StringBox, list: ListBox, unicode: StringBox, bool: BoolBox, QtCore.QFileInfo: FileBox} class NamedEditor(QtGui.QWidget): valueChaged = QtCore.pyqtSignal(str, object) # key_name, new_value def __init__(self, name, value, grid, row, parent=None): super(NamedEditor, self).__init__(parent) name_label = QtGui.QLabel(name, self) key_type = type(value) sub_widget = widgets_for_type[key_type](value) sub_widget.mySetValue(value) 
sub_widget.myValueChanged.connect(self.handleValueChanged) grid.addWidget(name_label, row, 0) grid.addWidget(sub_widget, row, 1) self.name = name def handleValueChanged(self, new_value): self.valueChaged.emit(self.name, new_value) class DictEditor(QtGui.QDialog): def __init__(self, dct, dct_name=None, parent=None): super(DictEditor, self).__init__(parent) grid = QtGui.QGridLayout() self.setLayout(grid) self.dct = dct self.button_ok = QtGui.QPushButton('Ok', self) self.button_ok.clicked.connect(self.accept) self.button_cancel = QtGui.QPushButton('Cancel', self) self.button_cancel.clicked.connect(self.reject) for i, (key, value) in enumerate(sorted(dct.items())): key_type = type(value) if key_type in widgets_for_type: named_widget = NamedEditor(key, value, grid, i, self) named_widget.valueChaged.connect(self.handleValueChanged) grid.addWidget(named_widget, i, 0) i = len(dct) grid.addWidget(self.button_cancel, i, 0) grid.addWidget(self.button_ok, i, 1) if dct_name is not None: self.setWindowTitle(dct_name) def handleValueChanged(self, key_name, new_value): if type(new_value) is QtCore.QString: new_value = str(new_value) self.dct[str(key_name)] = new_value def main(): app = QtGui.QApplication(sys.argv) dct = {} dct['name'] = "a string" dct['float_val'] = 1.0 dct['int_val'] = 5 dct['bool'] = True dct['list_stuff'] = [0.0, "string", True, 1] edit_dct = dict(dct) d = DictEditor(edit_dct) print(d.exec_()) print dct print edit_dct return if __name__ == '__main__': main()
streamviewer/widgets/DictEditor.py
from PyQt4 import QtGui, QtCore import sys from collections import namedtuple class BoolBox(QtGui.QPushButton): myValueChanged = QtCore.pyqtSignal(bool) def __init__(self, value, parent=None): super(BoolBox, self).__init__(parent) self.setCheckable(True) self.state = value self.setChecked(self.state) if self.state: text = 'ON' else: text = 'OFF' self.setText(text) self.clicked.connect(self.handleBoolButtonClicked) stylesheet = ('QPushButton:checked { background-color:' 'rgb(100,255,125); }' 'QPushButton { background-color:' 'rgb(255,125,100); }') self.setStyleSheet(stylesheet) def handleBoolButtonClicked(self, checked): self.state = bool(checked) if self.state: text = 'ON' else: text = 'OFF' self.setText(text) self.myValueChanged.emit(self.state) def mySetValue(self, val): self.state = bool(val) self.setChecked(self.state) if self.state: text = 'ON' else: text = 'OFF' self.setText(text) class IntBox(QtGui.QSpinBox): myValueChanged = QtCore.pyqtSignal(int) def __init__(self, value, parent=None): super(IntBox, self).__init__(parent) self.setValue(value) self.setRange(-2000000000, 2000000000) self.valueChanged.connect(self.myValueChanged) def mySetValue(self, val): self.setValue(val) class FloatBox(QtGui.QDoubleSpinBox): myValueChanged = QtCore.pyqtSignal(float) def __init__(self, value, parent=None): super(FloatBox, self).__init__(parent) self.setRange(-1e100, 1e100) self.setValue(value) self.setDecimals(10) self.valueChanged.connect(self.myValueChanged) def mySetValue(self, val): self.setValue(val) class StringBox(QtGui.QLineEdit): myValueChanged = QtCore.pyqtSignal(str) def __init__(self, value, parent=None): super(StringBox, self).__init__(parent) self.mySetValue(value) self.textChanged.connect(self.handleTextChanged) def mySetValue(self, val): self.setText(val) def handleTextChanged(self): self.myValueChanged.emit(str(self.text())) class FileBox(QtGui.QWidget): myValueChanged = QtCore.pyqtSignal(QtCore.QFileInfo) def __init__(self, value, parent=None): 
super(FileBox, self).__init__(parent) self.hbox = QtGui.QHBoxLayout(self) self.setLayout(self.hbox) self.file_edit = QtGui.QLineEdit(self) self.file_edit.setReadOnly(True) self.open_button = QtGui.QPushButton('Open', self) self.open_button.clicked.connect(self.handleOpenButtonClicked) self.hbox.addWidget(self.file_edit) self.hbox.addWidget(self.open_button) self.mySetValue(value) def mySetValue(self, value): self.value = value # check if original value is a directory. If it is, retstrict return # values to directories only self.isdir = self.value.isDir() self.file_edit.setText(value.canonicalFilePath()) def handleOpenButtonClicked(self): if self.isdir: ged = QtGui.QFileDialog.getExistingDirectory new_path = ged(self, "Open Directory", self.value.canonicalPath()) else: gofn = QtGui.QFileDialog.getOpenFileName new_path = gofn(self, "Open File", self.value.canonicalFilePath()) if new_path != '': new_value = QtCore.QFileInfo(new_path) self.mySetValue(new_value) self.myValueChanged.emit(new_value) class ListBox(QtGui.QPlainTextEdit): myValueChanged = QtCore.pyqtSignal(list) def __init__(self, value, parent=None): super(ListBox, self).__init__(parent) self.mySetValue(value) self.textChanged.connect(self.handleTextChanged) def mySetValue(self, val): self.setPlainText(repr(val)) def handleTextChanged(self): try: new_list = eval(str(self.toPlainText()), {}, {}) except: print "Unexpected error:", sys.exc_info()[0] else: self.myValueChanged.emit(new_list) widgets_for_type = {int: IntBox, float: FloatBox, str: StringBox, list: ListBox, unicode: StringBox, bool: BoolBox, QtCore.QFileInfo: FileBox} class NamedEditor(QtGui.QWidget): valueChaged = QtCore.pyqtSignal(str, object) # key_name, new_value def __init__(self, name, value, grid, row, parent=None): super(NamedEditor, self).__init__(parent) name_label = QtGui.QLabel(name, self) key_type = type(value) sub_widget = widgets_for_type[key_type](value) sub_widget.mySetValue(value) 
sub_widget.myValueChanged.connect(self.handleValueChanged) grid.addWidget(name_label, row, 0) grid.addWidget(sub_widget, row, 1) self.name = name def handleValueChanged(self, new_value): self.valueChaged.emit(self.name, new_value) class DictEditor(QtGui.QDialog): def __init__(self, dct, dct_name=None, parent=None): super(DictEditor, self).__init__(parent) grid = QtGui.QGridLayout() self.setLayout(grid) self.dct = dct self.button_ok = QtGui.QPushButton('Ok', self) self.button_ok.clicked.connect(self.accept) self.button_cancel = QtGui.QPushButton('Cancel', self) self.button_cancel.clicked.connect(self.reject) for i, (key, value) in enumerate(sorted(dct.items())): key_type = type(value) if key_type in widgets_for_type: named_widget = NamedEditor(key, value, grid, i, self) named_widget.valueChaged.connect(self.handleValueChanged) grid.addWidget(named_widget, i, 0) i = len(dct) grid.addWidget(self.button_cancel, i, 0) grid.addWidget(self.button_ok, i, 1) if dct_name is not None: self.setWindowTitle(dct_name) def handleValueChanged(self, key_name, new_value): if type(new_value) is QtCore.QString: new_value = str(new_value) self.dct[str(key_name)] = new_value def main(): app = QtGui.QApplication(sys.argv) dct = {} dct['name'] = "a string" dct['float_val'] = 1.0 dct['int_val'] = 5 dct['bool'] = True dct['list_stuff'] = [0.0, "string", True, 1] edit_dct = dict(dct) d = DictEditor(edit_dct) print(d.exec_()) print dct print edit_dct return if __name__ == '__main__': main()
0.358016
0.095983
import os import re import pickle PAD = "PAD" EOS = "EOS" START_VOCAB = [PAD, EOS] try: PROJECT_ROOT = os.environ['HH_ROOT'] except KeyError: raise Exception("Please export HH_ROOT as the project directory") VOCAB_SET_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/pkl/vocab_set.pkl') INPUT2ID_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/pkl/input2id.pkl') TARGET2ID_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/pkl/target2id.pkl') ID2INPUT_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/pkl/id2input.pkl') ID2TARGET_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/pkl/id2target.pkl') TRAINING_INPUT_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/training.in') TRAINING_TARGET_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/training.tgt') TESTING_INPUT_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/testing.in') TESTING_TARGET_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/testing.tgt') def sentence_to_token_ids(sentence, word2id): """ Gets token id's of each word in the sentence and returns a list of those words. Is called by data_to_token_ids and the Lexer. Args: sentence: A list of word tokens. word2id: A dictionary that maps words to its given id. This can be for the input or target vocabulary. """ tokenized_sentence = [] for word in sentence: tokenized_sentence.append(str(word2id[word])) return tokenized_sentence def data_to_token_ids(data_path, use_existing_vocab=True): """ Convert a set of training pairs to their respective token id's. Args: data_path: Path to where training pairs are located. use_existing_vocab: A boolean that determines whether or not to use a pre-existing vocabulary or not. If false, then a new vocabulary set is created. 
""" input2id_exists = os.path.isfile(INPUT2ID_PATH) target2id_exists = os.path.isfile(TARGET2ID_PATH) id2input_exists = os.path.isfile(ID2INPUT_PATH) id2target_exists = os.path.isfile(ID2TARGET_PATH) all_files_exist = (input2id_exists and target2id_exists and id2input_exists and id2target_exists and use_existing_vocab) # Create the vocabulary files if they do not exist if not all_files_exist: input_vocab, target_vocab = initialize_vocabulary(data_path) create_vocabulary(input_vocab, target_vocab) else: print("* Using an already existing vocabulary.") print("* Saving token ID's...") input_list, target_list = [], [] input2id = pickle.load(open(INPUT2ID_PATH, 'rb')) target2id = pickle.load(open(TARGET2ID_PATH, 'rb')) with open(data_path, 'r') as f: for line in f: data_pair = tuple(map(eval, line.split('\t'))) input_list.append(sentence_to_token_ids(data_pair[0], input2id)) target_list.append(sentence_to_token_ids(data_pair[1], target2id)) input_path = TRAINING_INPUT_PATH target_path = TRAINING_TARGET_PATH with open(input_path, 'w') as input_file: for line in input_list: input_file.write(" ".join(line)+'\n') with open(target_path, 'w') as target_file: for line in target_list: target_file.write(" ".join(line)+'\n') print("* Data preparation complete!") def create_vocabulary(input_vocab, target_vocab): """ Creates vocabulary files for converting source/target data to their id's, and vice versa. Args: input_vocab: A list of input vocabulary. target_vocab: A list of target vocabulary. 
""" print("* Creating vocabulary files...") id2input = START_VOCAB + list(input_vocab) id2target = START_VOCAB + list(target_vocab) input2id, target2id = {}, {} for i in range(len(id2input)): input2id[id2input[i]] = i for i in range(len(id2target)): target2id[id2target[i]] = i pickle.dump(id2input, open(ID2INPUT_PATH, 'wb')) pickle.dump(id2target, open(ID2TARGET_PATH, 'wb')) pickle.dump(input2id, open(INPUT2ID_PATH, 'wb')) pickle.dump(target2id, open(TARGET2ID_PATH, 'wb')) print(" * All vocabulary files created.") def initialize_vocabulary(data_path): """ Initializes sets of vocabulary based on the training data. Args: data_path: Path to where the training/testing data is located. Returns: A 2-item tuple with the input and target vocabulary sets. """ print("* Initializing vocabulary...") input_vocab, target_vocab = set(), set() with open(data_path, 'r') as f: for line in f: data_pair = tuple(map(eval, line.split('\t'))) input_vocab = input_vocab.union(data_pair[0]) target_vocab = target_vocab.union(data_pair[1]) print("* Created vocabulary sets") print(" * Input vocab size: {}".format(len(input_vocab))) print(" * Target vocab size: {}".format(len(target_vocab))) print("* Writing input vocab as vocab_set.pkl.") pickle.dump(input_vocab, open(VOCAB_SET_PATH, 'wb')) return input_vocab, target_vocab
hooperhub/util/data_utils.py
import os import re import pickle PAD = "PAD" EOS = "EOS" START_VOCAB = [PAD, EOS] try: PROJECT_ROOT = os.environ['HH_ROOT'] except KeyError: raise Exception("Please export HH_ROOT as the project directory") VOCAB_SET_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/pkl/vocab_set.pkl') INPUT2ID_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/pkl/input2id.pkl') TARGET2ID_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/pkl/target2id.pkl') ID2INPUT_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/pkl/id2input.pkl') ID2TARGET_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/pkl/id2target.pkl') TRAINING_INPUT_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/training.in') TRAINING_TARGET_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/training.tgt') TESTING_INPUT_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/testing.in') TESTING_TARGET_PATH = os.path.join(PROJECT_ROOT, 'hooperhub/data/testing.tgt') def sentence_to_token_ids(sentence, word2id): """ Gets token id's of each word in the sentence and returns a list of those words. Is called by data_to_token_ids and the Lexer. Args: sentence: A list of word tokens. word2id: A dictionary that maps words to its given id. This can be for the input or target vocabulary. """ tokenized_sentence = [] for word in sentence: tokenized_sentence.append(str(word2id[word])) return tokenized_sentence def data_to_token_ids(data_path, use_existing_vocab=True): """ Convert a set of training pairs to their respective token id's. Args: data_path: Path to where training pairs are located. use_existing_vocab: A boolean that determines whether or not to use a pre-existing vocabulary or not. If false, then a new vocabulary set is created. 
""" input2id_exists = os.path.isfile(INPUT2ID_PATH) target2id_exists = os.path.isfile(TARGET2ID_PATH) id2input_exists = os.path.isfile(ID2INPUT_PATH) id2target_exists = os.path.isfile(ID2TARGET_PATH) all_files_exist = (input2id_exists and target2id_exists and id2input_exists and id2target_exists and use_existing_vocab) # Create the vocabulary files if they do not exist if not all_files_exist: input_vocab, target_vocab = initialize_vocabulary(data_path) create_vocabulary(input_vocab, target_vocab) else: print("* Using an already existing vocabulary.") print("* Saving token ID's...") input_list, target_list = [], [] input2id = pickle.load(open(INPUT2ID_PATH, 'rb')) target2id = pickle.load(open(TARGET2ID_PATH, 'rb')) with open(data_path, 'r') as f: for line in f: data_pair = tuple(map(eval, line.split('\t'))) input_list.append(sentence_to_token_ids(data_pair[0], input2id)) target_list.append(sentence_to_token_ids(data_pair[1], target2id)) input_path = TRAINING_INPUT_PATH target_path = TRAINING_TARGET_PATH with open(input_path, 'w') as input_file: for line in input_list: input_file.write(" ".join(line)+'\n') with open(target_path, 'w') as target_file: for line in target_list: target_file.write(" ".join(line)+'\n') print("* Data preparation complete!") def create_vocabulary(input_vocab, target_vocab): """ Creates vocabulary files for converting source/target data to their id's, and vice versa. Args: input_vocab: A list of input vocabulary. target_vocab: A list of target vocabulary. 
""" print("* Creating vocabulary files...") id2input = START_VOCAB + list(input_vocab) id2target = START_VOCAB + list(target_vocab) input2id, target2id = {}, {} for i in range(len(id2input)): input2id[id2input[i]] = i for i in range(len(id2target)): target2id[id2target[i]] = i pickle.dump(id2input, open(ID2INPUT_PATH, 'wb')) pickle.dump(id2target, open(ID2TARGET_PATH, 'wb')) pickle.dump(input2id, open(INPUT2ID_PATH, 'wb')) pickle.dump(target2id, open(TARGET2ID_PATH, 'wb')) print(" * All vocabulary files created.") def initialize_vocabulary(data_path): """ Initializes sets of vocabulary based on the training data. Args: data_path: Path to where the training/testing data is located. Returns: A 2-item tuple with the input and target vocabulary sets. """ print("* Initializing vocabulary...") input_vocab, target_vocab = set(), set() with open(data_path, 'r') as f: for line in f: data_pair = tuple(map(eval, line.split('\t'))) input_vocab = input_vocab.union(data_pair[0]) target_vocab = target_vocab.union(data_pair[1]) print("* Created vocabulary sets") print(" * Input vocab size: {}".format(len(input_vocab))) print(" * Target vocab size: {}".format(len(target_vocab))) print("* Writing input vocab as vocab_set.pkl.") pickle.dump(input_vocab, open(VOCAB_SET_PATH, 'wb')) return input_vocab, target_vocab
0.493653
0.171061
from copy import copy from ..blast.BlastHsp import BlastHsp class BlastHit: def __init__(self, subject_id, subject_length, hsps): self.subject_id = subject_id self.subject_length = subject_length self.hsps = hsps @classmethod def from_rec(cls, rec): subject_id = rec.hit_id subject_length = rec.length hsps = [] for hsp in rec.hsps: hsps.append(BlastHsp.from_rec(hsp)) return cls(subject_id, subject_length, hsps) def merge_hsps(self, max_space): hsps = copy(self.hsps) change = True merged_idx = [] while change: change = False merged = [] for i, hi in enumerate(hsps): for j, hj in enumerate(hsps): if i != j and not (i, j) in merged_idx \ and hi.query_end <= hj.query_start \ and hi.subject_end <= hj.subject_start \ and hi.subject_strand == hj.subject_strand \ and hj.query_start - hi.query_end + 1 <= max_space \ and hj.subject_start - hi.subject_end + 1 <= max_space: merged.append(hi+hj) change = True merged_idx.append((i, j)) hsps = hsps + merged return hsps def get_best_hsp(self, query_range=(0, 0), min_overlap=1, positive_subject_strand_only=False): bhsp = BlastHsp() for hsp in self.hsps: if positive_subject_strand_only and hsp.subject_strand == "-": continue if query_range == (0, 0): if bhsp < hsp: bhsp = hsp else: max_start = max(hsp.query_start, query_range[0]) min_end = min(hsp.query_end, query_range[1]) overlap_len = min_end - max_start + 1 if bhsp < hsp and overlap_len >= min_overlap: bhsp = hsp return bhsp def get_max_hsp(self, query_range=(0, 0), min_overlap=1, positive_subject_strand_only=False): query_start, query_end = 0,0 for hsp in self.hsps: if positive_subject_strand_only and hsp.subject_strand == "-": continue max_start = max(hsp.query_start, query_range[0]) min_end = min(hsp.query_end, query_range[1]) overlap_len = min_end - max_start + 1 if query_range == (0, 0) or overlap_len >= min_overlap: if query_start == 0 and query_end == 0: query_start = hsp.query_start query_end = hsp.query_end else: query_start = min(query_start, hsp.query_start) query_end 
= max(query_end, hsp.query_end) return query_start, query_end def __str__(self): hsps = '\n'.join(list(str(i) for i in self.hsps)) return "{}, {}\n".format(self.subject_id, self.subject_length) + hsps
src/blast/BlastHit.py
from copy import copy from ..blast.BlastHsp import BlastHsp class BlastHit: def __init__(self, subject_id, subject_length, hsps): self.subject_id = subject_id self.subject_length = subject_length self.hsps = hsps @classmethod def from_rec(cls, rec): subject_id = rec.hit_id subject_length = rec.length hsps = [] for hsp in rec.hsps: hsps.append(BlastHsp.from_rec(hsp)) return cls(subject_id, subject_length, hsps) def merge_hsps(self, max_space): hsps = copy(self.hsps) change = True merged_idx = [] while change: change = False merged = [] for i, hi in enumerate(hsps): for j, hj in enumerate(hsps): if i != j and not (i, j) in merged_idx \ and hi.query_end <= hj.query_start \ and hi.subject_end <= hj.subject_start \ and hi.subject_strand == hj.subject_strand \ and hj.query_start - hi.query_end + 1 <= max_space \ and hj.subject_start - hi.subject_end + 1 <= max_space: merged.append(hi+hj) change = True merged_idx.append((i, j)) hsps = hsps + merged return hsps def get_best_hsp(self, query_range=(0, 0), min_overlap=1, positive_subject_strand_only=False): bhsp = BlastHsp() for hsp in self.hsps: if positive_subject_strand_only and hsp.subject_strand == "-": continue if query_range == (0, 0): if bhsp < hsp: bhsp = hsp else: max_start = max(hsp.query_start, query_range[0]) min_end = min(hsp.query_end, query_range[1]) overlap_len = min_end - max_start + 1 if bhsp < hsp and overlap_len >= min_overlap: bhsp = hsp return bhsp def get_max_hsp(self, query_range=(0, 0), min_overlap=1, positive_subject_strand_only=False): query_start, query_end = 0,0 for hsp in self.hsps: if positive_subject_strand_only and hsp.subject_strand == "-": continue max_start = max(hsp.query_start, query_range[0]) min_end = min(hsp.query_end, query_range[1]) overlap_len = min_end - max_start + 1 if query_range == (0, 0) or overlap_len >= min_overlap: if query_start == 0 and query_end == 0: query_start = hsp.query_start query_end = hsp.query_end else: query_start = min(query_start, hsp.query_start) query_end 
= max(query_end, hsp.query_end) return query_start, query_end def __str__(self): hsps = '\n'.join(list(str(i) for i in self.hsps)) return "{}, {}\n".format(self.subject_id, self.subject_length) + hsps
0.561215
0.255442
import numpy as np import pandas as pd from sklearn.preprocessing import LabelEncoder, MinMaxScaler import os def load_schizophrenia_data(data_folder='data', verbose=True): """Load the functional connectivity data from the MLSP Schizophrenia classification challange on Kaggle: http://www.kaggle.com/c/mlsp-2014-mri/data Parameters: ---------- data_folder: string Path to the data verbose: bool Return: ------ X, y: (ndarray, array) The dataset with the class labels """ train_FNC = os.path.join(data_folder, 'MLSP_Kaggle', 'train_FNC.csv') train_labels = os.path.join(data_folder, 'MLSP_Kaggle', 'train_labels.csv') X = np.loadtxt(train_FNC, delimiter=',', skiprows=1)[:, 1:] y = np.loadtxt(train_labels, delimiter=',', skiprows=1)[:, 1:].reshape(-1) if verbose: print 'n_samples: %s, n_samples_by_class: (%s - %s)' % (len(y), len(y[y == 0]), len(y[y == 1])) # Dealing with negative correlations X = np.where(X > 0, X, 0.) # X = np.abs(X) # Being sure that labels are 0-1 le = LabelEncoder() y = le.fit_transform(y) # Sorting data by labels ast = np.argsort(y) X = X[ast] y = y[ast] mm = MinMaxScaler() X = mm.fit_transform(X.T).T return X, y def load_1000_funct_connectome(data_folder='data', location='Baltimore', verbose=True): """Load the functional connectivity dataset 1000_functional connectomes available at: http://umcd.humanconnectomeproject.org/umcd/default/browse_studies Parameters: ---------- data_folder: string Path to the folder containing all the data files verbose: bool Return: ------ X, y: (ndarray, array) The dataset with the class labels """ path_folder = os.path.join(data_folder, 'Functional_Connectomes', 'Locations', location) desc_file = os.path.join(data_folder, 'Functional_Connectomes', '1000_Functional_Connectomes.csv') desc = pd.read_csv(desc_file, sep=',') name = desc['upload_data.network_name'] pool = desc['upload_data.gender'] dt_name = dict() for i, v in enumerate(name): dt_name[v] = i dt_cls = dict() cs = np.unique(pool) for i, v in enumerate(cs): 
dt_cls[v] = i dirs = os.listdir(path_folder) dirs.sort() connects = [] names = [] y = [] for i, v in enumerate(dirs): spl = v.split('_') if spl[-2] == 'matrix': connects.append(np.loadtxt(os.path.join(path_folder, v))) nm = '_'.join(spl[:-3]) names.append(nm) y.append(dt_cls[pool[dt_name[nm]]]) assert cs[y[-1]] == pool[dt_name[nm]], 'wrong class assignment' X = np.zeros((len(connects), (connects[0].shape[0] * (connects[0].shape[1] - 1)) / 2)) triu_idx = np.triu_indices(connects[0].shape[0], k=1) for i, v in enumerate(connects): X[i] = v[triu_idx] # Dealing with negative correlations X = np.where(X > 0, X, 0.) # X = np.abs(X) # Being sure that labels are 0-1 le = LabelEncoder() y = le.fit_transform(y) # Sorting data by labels ast = np.argsort(y) X = X[ast] y = y[ast] mm = MinMaxScaler() X = mm.fit_transform(X.T).T if verbose: print 'n_samples: %s, n_samples_by_class: (%s - %s)' % (len(y), len(y[y == 0]), len(y[y == 1])) return X, y def load_kernel_matrix(data_path='data', study='wl_kernel', verbose=True): """Loading already computed kernel matrix. Parameters: --------- data_path: string Path to the data folder. study: string Name of the folder containing the study, e.g. 'wl_kernel', which contains the WL kernel matrix. verbose: bool """ path_k_matrix = os.path.join(data_path, 'precomputed_kernels', study, 'k_matrix.csv') path_cls = os.path.join(data_path, 'precomputed_kernels', study, 'class_labels.csv') K = np.loadtxt(path_k_matrix) y = np.loadtxt(path_cls) le = LabelEncoder() y = le.fit_transform(y) if verbose: print 'n_samples: %s, n_samples_by_class: (%s - %s)' % (len(y), len(y[y == 0]), len(y[y == 1])) return K, y
load_data.py
import numpy as np import pandas as pd from sklearn.preprocessing import LabelEncoder, MinMaxScaler import os def load_schizophrenia_data(data_folder='data', verbose=True): """Load the functional connectivity data from the MLSP Schizophrenia classification challange on Kaggle: http://www.kaggle.com/c/mlsp-2014-mri/data Parameters: ---------- data_folder: string Path to the data verbose: bool Return: ------ X, y: (ndarray, array) The dataset with the class labels """ train_FNC = os.path.join(data_folder, 'MLSP_Kaggle', 'train_FNC.csv') train_labels = os.path.join(data_folder, 'MLSP_Kaggle', 'train_labels.csv') X = np.loadtxt(train_FNC, delimiter=',', skiprows=1)[:, 1:] y = np.loadtxt(train_labels, delimiter=',', skiprows=1)[:, 1:].reshape(-1) if verbose: print 'n_samples: %s, n_samples_by_class: (%s - %s)' % (len(y), len(y[y == 0]), len(y[y == 1])) # Dealing with negative correlations X = np.where(X > 0, X, 0.) # X = np.abs(X) # Being sure that labels are 0-1 le = LabelEncoder() y = le.fit_transform(y) # Sorting data by labels ast = np.argsort(y) X = X[ast] y = y[ast] mm = MinMaxScaler() X = mm.fit_transform(X.T).T return X, y def load_1000_funct_connectome(data_folder='data', location='Baltimore', verbose=True): """Load the functional connectivity dataset 1000_functional connectomes available at: http://umcd.humanconnectomeproject.org/umcd/default/browse_studies Parameters: ---------- data_folder: string Path to the folder containing all the data files verbose: bool Return: ------ X, y: (ndarray, array) The dataset with the class labels """ path_folder = os.path.join(data_folder, 'Functional_Connectomes', 'Locations', location) desc_file = os.path.join(data_folder, 'Functional_Connectomes', '1000_Functional_Connectomes.csv') desc = pd.read_csv(desc_file, sep=',') name = desc['upload_data.network_name'] pool = desc['upload_data.gender'] dt_name = dict() for i, v in enumerate(name): dt_name[v] = i dt_cls = dict() cs = np.unique(pool) for i, v in enumerate(cs): 
dt_cls[v] = i dirs = os.listdir(path_folder) dirs.sort() connects = [] names = [] y = [] for i, v in enumerate(dirs): spl = v.split('_') if spl[-2] == 'matrix': connects.append(np.loadtxt(os.path.join(path_folder, v))) nm = '_'.join(spl[:-3]) names.append(nm) y.append(dt_cls[pool[dt_name[nm]]]) assert cs[y[-1]] == pool[dt_name[nm]], 'wrong class assignment' X = np.zeros((len(connects), (connects[0].shape[0] * (connects[0].shape[1] - 1)) / 2)) triu_idx = np.triu_indices(connects[0].shape[0], k=1) for i, v in enumerate(connects): X[i] = v[triu_idx] # Dealing with negative correlations X = np.where(X > 0, X, 0.) # X = np.abs(X) # Being sure that labels are 0-1 le = LabelEncoder() y = le.fit_transform(y) # Sorting data by labels ast = np.argsort(y) X = X[ast] y = y[ast] mm = MinMaxScaler() X = mm.fit_transform(X.T).T if verbose: print 'n_samples: %s, n_samples_by_class: (%s - %s)' % (len(y), len(y[y == 0]), len(y[y == 1])) return X, y def load_kernel_matrix(data_path='data', study='wl_kernel', verbose=True): """Loading already computed kernel matrix. Parameters: --------- data_path: string Path to the data folder. study: string Name of the folder containing the study, e.g. 'wl_kernel', which contains the WL kernel matrix. verbose: bool """ path_k_matrix = os.path.join(data_path, 'precomputed_kernels', study, 'k_matrix.csv') path_cls = os.path.join(data_path, 'precomputed_kernels', study, 'class_labels.csv') K = np.loadtxt(path_k_matrix) y = np.loadtxt(path_cls) le = LabelEncoder() y = le.fit_transform(y) if verbose: print 'n_samples: %s, n_samples_by_class: (%s - %s)' % (len(y), len(y[y == 0]), len(y[y == 1])) return K, y
0.777384
0.524334
import os import re import json import hashlib import zipfile from io import StringIO from django.core.exceptions import ObjectDoesNotExist from django.core.files import File as NewFile from django.db import transaction from django.template import loader from bridge.vars import COVERAGE_FILE from reports.models import CoverageFile, CoverageData, CoverageDataValue, CoverageDataStatistics, CoverageArchive from reports.utils import get_parents from reports.etv import TAB_LENGTH, KEY1_WORDS, KEY2_WORDS SOURCE_CLASSES = { 'comment': "COVComment", 'number': "COVNumber", 'text': "COVText", 'key1': "COVKey1", 'key2': "COVKey2" } COLOR = { 'grey': '#bcbcbc', 'purple': '#a478e9', 'lightgrey': '#f4f7ff' } TABLE_STAT_COLOR = ['#f18fa6', '#f1c0b2', '#f9e19b', '#e4f495', '#acf1a8'] ROOT_DIRS_ORDER = ['source files', 'specifications', 'generated models'] def coverage_color(curr_cov, max_cov, delta=0): if curr_cov == 0: return 'rgb(200, 190, 255)' green = 140 + int(100 * (1 - curr_cov / max_cov)) blue = 140 + int(100 * (1 - curr_cov / max_cov)) - delta return 'rgb(255, %s, %s)' % (green, blue) def get_legend(max_cov, leg_type, number=5, with_zero=False): if max_cov == 0: return [] elif max_cov > 100: rounded_max = 100 * int(max_cov/100) else: rounded_max = max_cov delta = 0 if leg_type == 'funcs': delta = 40 colors = [] divisions = number - 1 for i in reversed(range(divisions)): curr_cov = int(i * rounded_max / divisions) if curr_cov == 0: curr_cov = 1 colors.append((curr_cov, coverage_color(curr_cov, max_cov, delta))) colors.insert(0, (rounded_max, coverage_color(rounded_max, max_cov, delta))) if with_zero: colors.append((0, coverage_color(0, max_cov, delta))) new_colors = [] for i in reversed(range(len(colors))): if colors[i] not in new_colors: new_colors.insert(0, colors[i]) return new_colors def json_to_html(data): data = json.dumps(data, indent=2, sort_keys=True, ensure_ascii=False) def wrap_text(text): return '<span class="COVJsonText">{0}</span>'.format(text) def 
wrap_number(number): return '<span class="COVJsonNum">{0}</span>'.format(number) def wrap_string(string): return '<span class="COVJsonLine">{0}</span><br>'.format(string) data_html = '' for line in data.split('\n'): line = line.replace('\t', ' ' * TAB_LENGTH).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;') m = re.match('^(\s*)(\".*?\"):\s(.*)$', line) if m is not None: if m.group(3) in {'{', '['}: new_line = '{0}{1}: {2}'.format(m.group(1), wrap_text(m.group(2)), m.group(3)) data_html += wrap_string(new_line) continue m2 = re.match('^(\d.*?)(,?)$', m.group(3)) if m2 is not None: new_line = '{0}{1}: {2}{3}'.format( m.group(1), wrap_text(m.group(2)), wrap_number(m2.group(1)), m2.group(2) ) data_html += wrap_string(new_line) continue m2 = re.match('^(\".*?\")(,?)$', m.group(3)) if m2 is not None: new_line = '{0}{1}: {2}{3}'.format( m.group(1), wrap_text(m.group(2)), wrap_text(m2.group(1)), m2.group(2) ) data_html += wrap_string(new_line) continue m = re.match('^(\s*)(\".*\")(,?)$', line) if m is not None: new_line = '{0}{1}{2}'.format(m.group(1), wrap_text(m.group(2)), m.group(3)) data_html += wrap_string(new_line) continue m = re.match('^(\s*)(\d.*?)(,?)$', line) if m is not None: new_line = '{0}{1}{2}'.format(m.group(1), wrap_number(m.group(2)), m.group(3)) data_html += wrap_string(new_line) continue data_html += wrap_string(line) return data_html class GetCoverage: def __init__(self, report, cov_arch_id, with_data): self.report = report if cov_arch_id is None: self.cov_arch = self.report.coverages.order_by('identifier').first() else: self.cov_arch = CoverageArchive.objects.get(id=cov_arch_id, report=report) self.coverage_archives = self.report.coverages.order_by('identifier').values_list('id', 'identifier') self.job = self.report.root.job self.parents = get_parents(self.report) self._statistic = CoverageStatistics(self.cov_arch) self.statistic_table = self._statistic.table_data if self._statistic.first_file: self.first_file = 
GetCoverageSrcHTML(self.cov_arch, self._statistic.first_file, with_data) if with_data: self.data_statistic = DataStatistic(self.cov_arch.id).table_html class GetCoverageSrcHTML: def __init__(self, cov_arch, filename, with_data): self._cov_arch = cov_arch self.filename = os.path.normpath(filename).replace('\\', '/') try: self._covfile = CoverageFile.objects.get(archive=self._cov_arch, name=self.filename) except ObjectDoesNotExist: self._covfile = None self._with_data = with_data self._content = self.__get_arch_content() self._max_cov_line, self._max_cov_func, self._line_coverage, self._func_coverage = self.__get_coverage() self._is_comment = False self._is_text = False self._text_quote = None self._total_lines = 1 self._lines_with_data = set() self.data_html = '' if self._with_data: self.data_html = self.__get_data() self.src_html = self.__get_source_html() self.legend = loader.get_template('reports/coverage/cov_legend.html').render({'legend': { 'lines': get_legend(self._max_cov_line, 'lines', 5, True), 'funcs': get_legend(self._max_cov_func, 'funcs', 5, False) }}) def __get_arch_content(self): with self._cov_arch.archive as fp: if os.path.splitext(fp.name)[-1] != '.zip': raise ValueError('Archive type is not supported') with zipfile.ZipFile(fp, 'r') as zfp: return zfp.read(self.filename).decode('utf8') def __get_coverage(self): if self._covfile is None: return 0, 0, {}, {} max_line_cov = 0 max_func_cov = 0 line_data = {} func_data = {} with self._covfile.file.file as fp: coverage = json.loads(fp.read().decode('utf8')) for linecov in coverage[0]: max_line_cov = max(max_line_cov, linecov[0]) for line in linecov[1]: if isinstance(line, int): line_data[line] = linecov[0] elif isinstance(line, list) and len(line) == 2: for i in range(*line): line_data[i] = linecov[0] line_data[line[1]] = linecov[0] for linecov in coverage[1]: max_func_cov = max(max_func_cov, linecov[0]) for line in linecov[1]: if isinstance(line, int): func_data[line] = linecov[0] elif isinstance(line, 
list) and len(line) == 2: for i in range(*line): func_data[i] = linecov[0] func_data[line[1]] = linecov[0] return max_line_cov, max_func_cov, line_data, func_data def __get_data(self): data_map = [] data_ids = set() last_i = -1 if self._covfile is not None: for data_id, dataname, line in CoverageData.objects.filter(covfile=self._covfile)\ .values_list('data_id', 'data__name', 'line').order_by('line', 'data__name'): self._lines_with_data.add(line) if last_i >= 0 and data_map[last_i]['line'] == line: data_map[last_i]['content'].append([dataname, data_id, False]) else: data_map.append({'line': line, 'content': [[dataname, data_id, True]]}) last_i += 1 data_ids.add(data_id) return loader.get_template('reports/coverage/coverageData.html').render({ 'data_map': data_map, 'data_values': CoverageDataValue.objects.filter(id__in=data_ids).values_list('id', 'value') }) def __get_source_html(self): data = [] cnt = 1 lines = self._content.split('\n') self._total_lines = len(str(len(lines))) for line in lines: line = line.replace('\t', ' ' * TAB_LENGTH).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;') data.append(self.__get_line_data(cnt, self.__parse_line(line))) cnt += 1 return loader.get_template('reports/coverage/coverageFile.html').render({'linedata': data}) def __get_line_data(self, line, code): lineclass = None line_num = { 'class': 'COVLine', 'static': True, 'data': [], 'content': (' ' * (self._total_lines - len(str(line))) + str(line)) } code = {'class': 'COVCode', 'content': code} if line in self._line_coverage: line_num['data'].append(('number', self._line_coverage[line])) code['color'] = coverage_color(self._line_coverage[line], self._max_cov_line) code['data'] = [('number', self._line_coverage[line])] if line in self._lines_with_data: line_num['data'].append(('line', line)) func_cov = {'class': 'COVIsFC', 'static': True, 'content': '<i class="ui mini icon"></i>'} if line in self._func_coverage: func_cov['data'] = [('number', self._func_coverage[line])] 
if self._func_coverage[line] == 0: lineclass = 'func-uncovered' func_cov['content'] = '<i class="ui mini red remove icon"></i>' else: lineclass = 'func-covered' func_cov['content'] = '<i class="ui mini blue checkmark icon"></i>' func_cov['color'] = coverage_color(self._func_coverage[line], self._max_cov_func, 40) linedata = [line_num] if self._with_data and line in self._lines_with_data: line_num['content'] = '<a class="COVLineLink">%s</a>' % line_num['content'] line_num['class'] += ' COVWithData' linedata.append(func_cov) linedata.append(code) return {'linedata': linedata, 'lineclass': lineclass} def __parse_line(self, line): if self._is_comment: m = re.match('(.*?)\*/(.*)', line) if m is None: return self.__wrap_line(line, 'comment') self._is_comment = False new_line = self.__wrap_line(m.group(1) + '*/', 'comment') return new_line + self.__parse_line(m.group(2)) if self._is_text: before, after = self.__parse_text(line) if after is None: return self.__wrap_line(before, 'text') self._is_text = False return self.__wrap_line(before, 'text') + self.__parse_line(after) m = re.match('(.*?)/\*(.*)', line) if m is not None and m.group(1).find('"') == -1 and m.group(1).find("'") == -1: new_line = self.__parse_line(m.group(1)) self._is_comment = True new_line += self.__parse_line('/*' + m.group(2)) return new_line m = re.match('(.*?)//(.*)', line) if m is not None and m.group(1).find('"') == -1 and m.group(1).find("'") == -1: new_line = self.__parse_line(m.group(1)) new_line += self.__wrap_line('//' + m.group(2), 'comment') return new_line m = re.match('(.*?)([\'\"])(.*)', line) if m is not None: new_line = self.__parse_line(m.group(1)) self._text_quote = m.group(2) before, after = self.__parse_text(m.group(3)) new_line += self.__wrap_line(self._text_quote + before, 'text') if after is None: self._is_text = True return new_line self._is_text = False return new_line + self.__parse_line(after) m = re.match("(.*\W)(\d+)(\W.*)", line) if m is not None: new_line = 
self.__parse_line(m.group(1)) new_line += self.__wrap_line(m.group(2), 'number') new_line += self.__parse_line(m.group(3)) return new_line words = re.split('([^a-zA-Z0-9-_#])', line) new_words = [] for word in words: if word in KEY1_WORDS: new_words.append(self.__wrap_line(word, 'key1')) elif word in KEY2_WORDS: new_words.append(self.__wrap_line(word, 'key2')) else: new_words.append(word) return ''.join(new_words) def __parse_text(self, text): escaped = False before = '' after = '' end_found = False for c in text: if end_found: after += c continue if not escaped and c == self._text_quote: end_found = True elif escaped: escaped = False elif c == '\\': escaped = True before += c if end_found: return before, after return before, None def __wrap_line(self, line, text_type, line_id=None): self.__is_not_used() if text_type not in SOURCE_CLASSES: return line if line_id is not None: return '<span id="%s" class="%s">%s</span>' % (line_id, SOURCE_CLASSES[text_type], line) return '<span class="%s">%s</span>' % (SOURCE_CLASSES[text_type], line) def __is_not_used(self): pass class CoverageStatistics: def __init__(self, cov_arch): self.cov_arch = cov_arch self.first_file = None self.table_data = self.__get_table_data() def __get_table_data(self): coverage = {} for c in CoverageFile.objects.filter(archive=self.cov_arch): coverage[c.name] = c hide_all = False if len(coverage) > 30: hide_all = True cnt = 0 parents = {} for fname in coverage: path = fname.split('/') for i in range(len(path)): cnt += 1 curr_path = '/'.join(path[:(i + 1)]) if curr_path not in parents: parent_id = parent = None if i > 0: parent = '/'.join(path[:i]) parent_id = parents[parent]['id'] parents[curr_path] = { 'id': cnt, 'title': path[i], 'parent': parent, 'parent_id': parent_id, 'display': False, 'is_dir': (i != len(path) - 1), 'path': curr_path, 'lines': {'covered': 0, 'total': 0, 'percent': '-'}, 'funcs': {'covered': 0, 'total': 0, 'percent': '-'} } for fname in coverage: display = False if not hide_all 
and any(fname.endswith(x) for x in ['.i', '.c', '.c.aux']): display = True covered_lines = coverage[fname].covered_lines total_lines = coverage[fname].total_lines covered_funcs = coverage[fname].covered_funcs total_funcs = coverage[fname].total_funcs parent = fname while parent is not None: parents[parent]['lines']['covered'] += covered_lines parents[parent]['lines']['total'] += total_lines parents[parent]['funcs']['covered'] += covered_funcs parents[parent]['funcs']['total'] += total_funcs if parents[parent]['is_dir'] and display or parents[parent]['parent'] is None and not hide_all: parents[parent]['display'] = True parent = parents[parent]['parent'] for fname in parents: if parents[fname]['lines']['total'] > 0: div = parents[fname]['lines']['covered'] / parents[fname]['lines']['total'] parents[fname]['lines']['percent'] = '%s%%' % int(100 * div) color_id = int(div * len(TABLE_STAT_COLOR)) if color_id >= len(TABLE_STAT_COLOR): color_id = len(TABLE_STAT_COLOR) - 1 elif color_id < 0: color_id = 0 parents[fname]['lines']['color'] = TABLE_STAT_COLOR[color_id] if parents[fname]['funcs']['total'] > 0: div = parents[fname]['funcs']['covered'] / parents[fname]['funcs']['total'] parents[fname]['funcs']['percent'] = '%s%%' % int(100 * div) color_id = int(div * len(TABLE_STAT_COLOR)) if color_id >= len(TABLE_STAT_COLOR): color_id = len(TABLE_STAT_COLOR) - 1 elif color_id < 0: color_id = 0 parents[fname]['funcs']['color'] = TABLE_STAT_COLOR[color_id] other_data = list(sorted(parents.values(), key=lambda x: (not x['is_dir'], x['title']))) def __get_all_children(file_info, depth): children = [] if not file_info['is_dir']: return children for fi in other_data: if fi['parent_id'] == file_info['id']: fi['indent'] = ' ' * depth children.append(fi) children.extend(__get_all_children(fi, depth + 1)) return children first_lvl = [] for root_name in ROOT_DIRS_ORDER: if root_name in parents: first_lvl.append(parents[root_name]) ordered_data = [] for fd in first_lvl: fd['display'] = True 
ordered_data.append(fd) ordered_data.extend(__get_all_children(fd, 1)) for fd in ordered_data: if hide_all: fd['display'] = True if not fd['is_dir'] and parents[fd['parent']]['display']: self.first_file = fd['path'] break return ordered_data def __is_not_used(self): pass class DataStatistic: def __init__(self, cov_arch_id): self.table_html = loader.get_template('reports/coverage/coverageDataStatistics.html')\ .render({'DataStatistics': self.__get_data_stat(cov_arch_id)}) def __get_data_stat(self, cov_arch_id): self.__is_not_used() data = [] active = True for stat in CoverageDataStatistics.objects.filter(archive_id=cov_arch_id).order_by('name'): with stat.data.file as fp: data.append({'tab': stat.name, 'active': active, 'content': fp.read().decode('utf8')}) active = False return data def __is_not_used(self): pass class CreateCoverageFiles: def __init__(self, cov_arch, coverage): self._cov_arch = cov_arch self._coverage = coverage self._line_coverage = {} self._func_coverage = {} self._coverage_stat = {} self.__get_coverage_data() self.__create_files() self.files = self.__get_saved_files() def __get_coverage_data(self): for data in self._coverage['line coverage']: for fname in data[1]: if fname not in self._line_coverage: self._line_coverage[fname] = [] self._coverage_stat[fname] = [0, 0, 0, 0] self._line_coverage[fname].append([data[0], data[1][fname]]) if data[0] > 0: self._coverage_stat[fname][0] += self.__num_of_lines(data[1][fname]) self._coverage_stat[fname][1] += self.__num_of_lines(data[1][fname]) for data in self._coverage['function coverage']['coverage']: for fname in data[1]: if fname not in self._func_coverage: self._func_coverage[fname] = [] if fname not in self._coverage_stat: self._coverage_stat[fname] = [0, 0, 0, 0] self._func_coverage[fname].append([data[0], data[1][fname]]) if data[0] > 0: self._coverage_stat[fname][2] += self.__num_of_lines(data[1][fname]) self._coverage_stat[fname][3] += self.__num_of_lines(data[1][fname]) @transaction.atomic def 
__create_files(self): for fname in set(self._line_coverage) | set(self._func_coverage): file_coverage = StringIO(json.dumps( [self._line_coverage.get(fname, []), self._func_coverage.get(fname, [])] )) covfile = CoverageFile( archive=self._cov_arch, name=fname, covered_lines=self._coverage_stat[fname][0], total_lines=self._coverage_stat[fname][1], covered_funcs=self._coverage_stat[fname][2], total_funcs=self._coverage_stat[fname][3] ) covfile.file.save('coverage.json', NewFile(file_coverage)) def __num_of_lines(self, lines): self.__is_not_used() num = 0 for l in lines: if isinstance(l, int): num += 1 elif isinstance(l, list) and len(l) == 2 and isinstance(l[0], int) \ and isinstance(l[1], int) and l[0] <= l[1]: num += l[1] - l[0] + 1 return num def __get_saved_files(self): files = {} for f_id, fname in CoverageFile.objects.filter(archive=self._cov_arch).values_list('id', 'name'): files[fname] = f_id return files def __is_not_used(self): pass class FillCoverageCache: def __init__(self, report): for cov_arch, data in self.__get_coverage_data(report): self._data = data self._cov_arch = cov_arch self._files = CreateCoverageFiles(self._cov_arch, self._data).files del self._data['line coverage'], self._data['function coverage'] self.__fill_data() def __get_coverage_data(self, report): self.__is_not_used() for cov_arch in report.coverages.all(): with cov_arch.archive as fp: with zipfile.ZipFile(fp, 'r') as zfp: yield cov_arch, json.loads(zfp.read(COVERAGE_FILE).decode('utf8')) def __fill_data(self): covdata = [] data_values = {} for vid, dataname, hashsum in CoverageDataValue.objects.values_list('id', 'name', 'hashsum'): data_values[(dataname, hashsum)] = vid for dataname in self._data: covdatastat = CoverageDataStatistics(archive=self._cov_arch, name=dataname) covdatastat.data.save('CoverageData.html', NewFile(StringIO( json_to_html(self._data[dataname]['statistics']) ))) for data in self._data[dataname]['values']: dataval = json_to_html(data[0]) hashsum = 
hashlib.md5(dataval.encode('utf8')).hexdigest() if (dataname, hashsum) not in data_values: data_values[(dataname, hashsum)] = CoverageDataValue.objects\ .create(hashsum=hashsum, name=dataname, value=dataval).id data_id = data_values[(dataname, hashsum)] for fname in data[1]: if fname not in self._files: self._files[fname] = CoverageFile.objects.create(archive=self._cov_arch, name=fname).id for line in data[1][fname]: if isinstance(line, int): covdata.append(CoverageData(covfile_id=self._files[fname], line=line, data_id=data_id)) elif isinstance(line, list) and len(line) == 2: for i in range(*line): covdata.append(CoverageData(covfile_id=self._files[fname], line=i, data_id=data_id)) covdata.append(CoverageData(covfile_id=self._files[fname], line=line[1], data_id=data_id)) CoverageData.objects.bulk_create(covdata) def __is_not_used(self): pass
bridge/reports/coverage.py
import os import re import json import hashlib import zipfile from io import StringIO from django.core.exceptions import ObjectDoesNotExist from django.core.files import File as NewFile from django.db import transaction from django.template import loader from bridge.vars import COVERAGE_FILE from reports.models import CoverageFile, CoverageData, CoverageDataValue, CoverageDataStatistics, CoverageArchive from reports.utils import get_parents from reports.etv import TAB_LENGTH, KEY1_WORDS, KEY2_WORDS SOURCE_CLASSES = { 'comment': "COVComment", 'number': "COVNumber", 'text': "COVText", 'key1': "COVKey1", 'key2': "COVKey2" } COLOR = { 'grey': '#bcbcbc', 'purple': '#a478e9', 'lightgrey': '#f4f7ff' } TABLE_STAT_COLOR = ['#f18fa6', '#f1c0b2', '#f9e19b', '#e4f495', '#acf1a8'] ROOT_DIRS_ORDER = ['source files', 'specifications', 'generated models'] def coverage_color(curr_cov, max_cov, delta=0): if curr_cov == 0: return 'rgb(200, 190, 255)' green = 140 + int(100 * (1 - curr_cov / max_cov)) blue = 140 + int(100 * (1 - curr_cov / max_cov)) - delta return 'rgb(255, %s, %s)' % (green, blue) def get_legend(max_cov, leg_type, number=5, with_zero=False): if max_cov == 0: return [] elif max_cov > 100: rounded_max = 100 * int(max_cov/100) else: rounded_max = max_cov delta = 0 if leg_type == 'funcs': delta = 40 colors = [] divisions = number - 1 for i in reversed(range(divisions)): curr_cov = int(i * rounded_max / divisions) if curr_cov == 0: curr_cov = 1 colors.append((curr_cov, coverage_color(curr_cov, max_cov, delta))) colors.insert(0, (rounded_max, coverage_color(rounded_max, max_cov, delta))) if with_zero: colors.append((0, coverage_color(0, max_cov, delta))) new_colors = [] for i in reversed(range(len(colors))): if colors[i] not in new_colors: new_colors.insert(0, colors[i]) return new_colors def json_to_html(data): data = json.dumps(data, indent=2, sort_keys=True, ensure_ascii=False) def wrap_text(text): return '<span class="COVJsonText">{0}</span>'.format(text) def 
wrap_number(number): return '<span class="COVJsonNum">{0}</span>'.format(number) def wrap_string(string): return '<span class="COVJsonLine">{0}</span><br>'.format(string) data_html = '' for line in data.split('\n'): line = line.replace('\t', ' ' * TAB_LENGTH).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;') m = re.match('^(\s*)(\".*?\"):\s(.*)$', line) if m is not None: if m.group(3) in {'{', '['}: new_line = '{0}{1}: {2}'.format(m.group(1), wrap_text(m.group(2)), m.group(3)) data_html += wrap_string(new_line) continue m2 = re.match('^(\d.*?)(,?)$', m.group(3)) if m2 is not None: new_line = '{0}{1}: {2}{3}'.format( m.group(1), wrap_text(m.group(2)), wrap_number(m2.group(1)), m2.group(2) ) data_html += wrap_string(new_line) continue m2 = re.match('^(\".*?\")(,?)$', m.group(3)) if m2 is not None: new_line = '{0}{1}: {2}{3}'.format( m.group(1), wrap_text(m.group(2)), wrap_text(m2.group(1)), m2.group(2) ) data_html += wrap_string(new_line) continue m = re.match('^(\s*)(\".*\")(,?)$', line) if m is not None: new_line = '{0}{1}{2}'.format(m.group(1), wrap_text(m.group(2)), m.group(3)) data_html += wrap_string(new_line) continue m = re.match('^(\s*)(\d.*?)(,?)$', line) if m is not None: new_line = '{0}{1}{2}'.format(m.group(1), wrap_number(m.group(2)), m.group(3)) data_html += wrap_string(new_line) continue data_html += wrap_string(line) return data_html class GetCoverage: def __init__(self, report, cov_arch_id, with_data): self.report = report if cov_arch_id is None: self.cov_arch = self.report.coverages.order_by('identifier').first() else: self.cov_arch = CoverageArchive.objects.get(id=cov_arch_id, report=report) self.coverage_archives = self.report.coverages.order_by('identifier').values_list('id', 'identifier') self.job = self.report.root.job self.parents = get_parents(self.report) self._statistic = CoverageStatistics(self.cov_arch) self.statistic_table = self._statistic.table_data if self._statistic.first_file: self.first_file = 
GetCoverageSrcHTML(self.cov_arch, self._statistic.first_file, with_data) if with_data: self.data_statistic = DataStatistic(self.cov_arch.id).table_html class GetCoverageSrcHTML: def __init__(self, cov_arch, filename, with_data): self._cov_arch = cov_arch self.filename = os.path.normpath(filename).replace('\\', '/') try: self._covfile = CoverageFile.objects.get(archive=self._cov_arch, name=self.filename) except ObjectDoesNotExist: self._covfile = None self._with_data = with_data self._content = self.__get_arch_content() self._max_cov_line, self._max_cov_func, self._line_coverage, self._func_coverage = self.__get_coverage() self._is_comment = False self._is_text = False self._text_quote = None self._total_lines = 1 self._lines_with_data = set() self.data_html = '' if self._with_data: self.data_html = self.__get_data() self.src_html = self.__get_source_html() self.legend = loader.get_template('reports/coverage/cov_legend.html').render({'legend': { 'lines': get_legend(self._max_cov_line, 'lines', 5, True), 'funcs': get_legend(self._max_cov_func, 'funcs', 5, False) }}) def __get_arch_content(self): with self._cov_arch.archive as fp: if os.path.splitext(fp.name)[-1] != '.zip': raise ValueError('Archive type is not supported') with zipfile.ZipFile(fp, 'r') as zfp: return zfp.read(self.filename).decode('utf8') def __get_coverage(self): if self._covfile is None: return 0, 0, {}, {} max_line_cov = 0 max_func_cov = 0 line_data = {} func_data = {} with self._covfile.file.file as fp: coverage = json.loads(fp.read().decode('utf8')) for linecov in coverage[0]: max_line_cov = max(max_line_cov, linecov[0]) for line in linecov[1]: if isinstance(line, int): line_data[line] = linecov[0] elif isinstance(line, list) and len(line) == 2: for i in range(*line): line_data[i] = linecov[0] line_data[line[1]] = linecov[0] for linecov in coverage[1]: max_func_cov = max(max_func_cov, linecov[0]) for line in linecov[1]: if isinstance(line, int): func_data[line] = linecov[0] elif isinstance(line, 
list) and len(line) == 2: for i in range(*line): func_data[i] = linecov[0] func_data[line[1]] = linecov[0] return max_line_cov, max_func_cov, line_data, func_data def __get_data(self): data_map = [] data_ids = set() last_i = -1 if self._covfile is not None: for data_id, dataname, line in CoverageData.objects.filter(covfile=self._covfile)\ .values_list('data_id', 'data__name', 'line').order_by('line', 'data__name'): self._lines_with_data.add(line) if last_i >= 0 and data_map[last_i]['line'] == line: data_map[last_i]['content'].append([dataname, data_id, False]) else: data_map.append({'line': line, 'content': [[dataname, data_id, True]]}) last_i += 1 data_ids.add(data_id) return loader.get_template('reports/coverage/coverageData.html').render({ 'data_map': data_map, 'data_values': CoverageDataValue.objects.filter(id__in=data_ids).values_list('id', 'value') }) def __get_source_html(self): data = [] cnt = 1 lines = self._content.split('\n') self._total_lines = len(str(len(lines))) for line in lines: line = line.replace('\t', ' ' * TAB_LENGTH).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;') data.append(self.__get_line_data(cnt, self.__parse_line(line))) cnt += 1 return loader.get_template('reports/coverage/coverageFile.html').render({'linedata': data}) def __get_line_data(self, line, code): lineclass = None line_num = { 'class': 'COVLine', 'static': True, 'data': [], 'content': (' ' * (self._total_lines - len(str(line))) + str(line)) } code = {'class': 'COVCode', 'content': code} if line in self._line_coverage: line_num['data'].append(('number', self._line_coverage[line])) code['color'] = coverage_color(self._line_coverage[line], self._max_cov_line) code['data'] = [('number', self._line_coverage[line])] if line in self._lines_with_data: line_num['data'].append(('line', line)) func_cov = {'class': 'COVIsFC', 'static': True, 'content': '<i class="ui mini icon"></i>'} if line in self._func_coverage: func_cov['data'] = [('number', self._func_coverage[line])] 
if self._func_coverage[line] == 0: lineclass = 'func-uncovered' func_cov['content'] = '<i class="ui mini red remove icon"></i>' else: lineclass = 'func-covered' func_cov['content'] = '<i class="ui mini blue checkmark icon"></i>' func_cov['color'] = coverage_color(self._func_coverage[line], self._max_cov_func, 40) linedata = [line_num] if self._with_data and line in self._lines_with_data: line_num['content'] = '<a class="COVLineLink">%s</a>' % line_num['content'] line_num['class'] += ' COVWithData' linedata.append(func_cov) linedata.append(code) return {'linedata': linedata, 'lineclass': lineclass} def __parse_line(self, line): if self._is_comment: m = re.match('(.*?)\*/(.*)', line) if m is None: return self.__wrap_line(line, 'comment') self._is_comment = False new_line = self.__wrap_line(m.group(1) + '*/', 'comment') return new_line + self.__parse_line(m.group(2)) if self._is_text: before, after = self.__parse_text(line) if after is None: return self.__wrap_line(before, 'text') self._is_text = False return self.__wrap_line(before, 'text') + self.__parse_line(after) m = re.match('(.*?)/\*(.*)', line) if m is not None and m.group(1).find('"') == -1 and m.group(1).find("'") == -1: new_line = self.__parse_line(m.group(1)) self._is_comment = True new_line += self.__parse_line('/*' + m.group(2)) return new_line m = re.match('(.*?)//(.*)', line) if m is not None and m.group(1).find('"') == -1 and m.group(1).find("'") == -1: new_line = self.__parse_line(m.group(1)) new_line += self.__wrap_line('//' + m.group(2), 'comment') return new_line m = re.match('(.*?)([\'\"])(.*)', line) if m is not None: new_line = self.__parse_line(m.group(1)) self._text_quote = m.group(2) before, after = self.__parse_text(m.group(3)) new_line += self.__wrap_line(self._text_quote + before, 'text') if after is None: self._is_text = True return new_line self._is_text = False return new_line + self.__parse_line(after) m = re.match("(.*\W)(\d+)(\W.*)", line) if m is not None: new_line = 
self.__parse_line(m.group(1)) new_line += self.__wrap_line(m.group(2), 'number') new_line += self.__parse_line(m.group(3)) return new_line words = re.split('([^a-zA-Z0-9-_#])', line) new_words = [] for word in words: if word in KEY1_WORDS: new_words.append(self.__wrap_line(word, 'key1')) elif word in KEY2_WORDS: new_words.append(self.__wrap_line(word, 'key2')) else: new_words.append(word) return ''.join(new_words) def __parse_text(self, text): escaped = False before = '' after = '' end_found = False for c in text: if end_found: after += c continue if not escaped and c == self._text_quote: end_found = True elif escaped: escaped = False elif c == '\\': escaped = True before += c if end_found: return before, after return before, None def __wrap_line(self, line, text_type, line_id=None): self.__is_not_used() if text_type not in SOURCE_CLASSES: return line if line_id is not None: return '<span id="%s" class="%s">%s</span>' % (line_id, SOURCE_CLASSES[text_type], line) return '<span class="%s">%s</span>' % (SOURCE_CLASSES[text_type], line) def __is_not_used(self): pass class CoverageStatistics: def __init__(self, cov_arch): self.cov_arch = cov_arch self.first_file = None self.table_data = self.__get_table_data() def __get_table_data(self): coverage = {} for c in CoverageFile.objects.filter(archive=self.cov_arch): coverage[c.name] = c hide_all = False if len(coverage) > 30: hide_all = True cnt = 0 parents = {} for fname in coverage: path = fname.split('/') for i in range(len(path)): cnt += 1 curr_path = '/'.join(path[:(i + 1)]) if curr_path not in parents: parent_id = parent = None if i > 0: parent = '/'.join(path[:i]) parent_id = parents[parent]['id'] parents[curr_path] = { 'id': cnt, 'title': path[i], 'parent': parent, 'parent_id': parent_id, 'display': False, 'is_dir': (i != len(path) - 1), 'path': curr_path, 'lines': {'covered': 0, 'total': 0, 'percent': '-'}, 'funcs': {'covered': 0, 'total': 0, 'percent': '-'} } for fname in coverage: display = False if not hide_all 
and any(fname.endswith(x) for x in ['.i', '.c', '.c.aux']): display = True covered_lines = coverage[fname].covered_lines total_lines = coverage[fname].total_lines covered_funcs = coverage[fname].covered_funcs total_funcs = coverage[fname].total_funcs parent = fname while parent is not None: parents[parent]['lines']['covered'] += covered_lines parents[parent]['lines']['total'] += total_lines parents[parent]['funcs']['covered'] += covered_funcs parents[parent]['funcs']['total'] += total_funcs if parents[parent]['is_dir'] and display or parents[parent]['parent'] is None and not hide_all: parents[parent]['display'] = True parent = parents[parent]['parent'] for fname in parents: if parents[fname]['lines']['total'] > 0: div = parents[fname]['lines']['covered'] / parents[fname]['lines']['total'] parents[fname]['lines']['percent'] = '%s%%' % int(100 * div) color_id = int(div * len(TABLE_STAT_COLOR)) if color_id >= len(TABLE_STAT_COLOR): color_id = len(TABLE_STAT_COLOR) - 1 elif color_id < 0: color_id = 0 parents[fname]['lines']['color'] = TABLE_STAT_COLOR[color_id] if parents[fname]['funcs']['total'] > 0: div = parents[fname]['funcs']['covered'] / parents[fname]['funcs']['total'] parents[fname]['funcs']['percent'] = '%s%%' % int(100 * div) color_id = int(div * len(TABLE_STAT_COLOR)) if color_id >= len(TABLE_STAT_COLOR): color_id = len(TABLE_STAT_COLOR) - 1 elif color_id < 0: color_id = 0 parents[fname]['funcs']['color'] = TABLE_STAT_COLOR[color_id] other_data = list(sorted(parents.values(), key=lambda x: (not x['is_dir'], x['title']))) def __get_all_children(file_info, depth): children = [] if not file_info['is_dir']: return children for fi in other_data: if fi['parent_id'] == file_info['id']: fi['indent'] = ' ' * depth children.append(fi) children.extend(__get_all_children(fi, depth + 1)) return children first_lvl = [] for root_name in ROOT_DIRS_ORDER: if root_name in parents: first_lvl.append(parents[root_name]) ordered_data = [] for fd in first_lvl: fd['display'] = True 
ordered_data.append(fd) ordered_data.extend(__get_all_children(fd, 1)) for fd in ordered_data: if hide_all: fd['display'] = True if not fd['is_dir'] and parents[fd['parent']]['display']: self.first_file = fd['path'] break return ordered_data def __is_not_used(self): pass class DataStatistic: def __init__(self, cov_arch_id): self.table_html = loader.get_template('reports/coverage/coverageDataStatistics.html')\ .render({'DataStatistics': self.__get_data_stat(cov_arch_id)}) def __get_data_stat(self, cov_arch_id): self.__is_not_used() data = [] active = True for stat in CoverageDataStatistics.objects.filter(archive_id=cov_arch_id).order_by('name'): with stat.data.file as fp: data.append({'tab': stat.name, 'active': active, 'content': fp.read().decode('utf8')}) active = False return data def __is_not_used(self): pass class CreateCoverageFiles: def __init__(self, cov_arch, coverage): self._cov_arch = cov_arch self._coverage = coverage self._line_coverage = {} self._func_coverage = {} self._coverage_stat = {} self.__get_coverage_data() self.__create_files() self.files = self.__get_saved_files() def __get_coverage_data(self): for data in self._coverage['line coverage']: for fname in data[1]: if fname not in self._line_coverage: self._line_coverage[fname] = [] self._coverage_stat[fname] = [0, 0, 0, 0] self._line_coverage[fname].append([data[0], data[1][fname]]) if data[0] > 0: self._coverage_stat[fname][0] += self.__num_of_lines(data[1][fname]) self._coverage_stat[fname][1] += self.__num_of_lines(data[1][fname]) for data in self._coverage['function coverage']['coverage']: for fname in data[1]: if fname not in self._func_coverage: self._func_coverage[fname] = [] if fname not in self._coverage_stat: self._coverage_stat[fname] = [0, 0, 0, 0] self._func_coverage[fname].append([data[0], data[1][fname]]) if data[0] > 0: self._coverage_stat[fname][2] += self.__num_of_lines(data[1][fname]) self._coverage_stat[fname][3] += self.__num_of_lines(data[1][fname]) @transaction.atomic def 
__create_files(self): for fname in set(self._line_coverage) | set(self._func_coverage): file_coverage = StringIO(json.dumps( [self._line_coverage.get(fname, []), self._func_coverage.get(fname, [])] )) covfile = CoverageFile( archive=self._cov_arch, name=fname, covered_lines=self._coverage_stat[fname][0], total_lines=self._coverage_stat[fname][1], covered_funcs=self._coverage_stat[fname][2], total_funcs=self._coverage_stat[fname][3] ) covfile.file.save('coverage.json', NewFile(file_coverage)) def __num_of_lines(self, lines): self.__is_not_used() num = 0 for l in lines: if isinstance(l, int): num += 1 elif isinstance(l, list) and len(l) == 2 and isinstance(l[0], int) \ and isinstance(l[1], int) and l[0] <= l[1]: num += l[1] - l[0] + 1 return num def __get_saved_files(self): files = {} for f_id, fname in CoverageFile.objects.filter(archive=self._cov_arch).values_list('id', 'name'): files[fname] = f_id return files def __is_not_used(self): pass class FillCoverageCache: def __init__(self, report): for cov_arch, data in self.__get_coverage_data(report): self._data = data self._cov_arch = cov_arch self._files = CreateCoverageFiles(self._cov_arch, self._data).files del self._data['line coverage'], self._data['function coverage'] self.__fill_data() def __get_coverage_data(self, report): self.__is_not_used() for cov_arch in report.coverages.all(): with cov_arch.archive as fp: with zipfile.ZipFile(fp, 'r') as zfp: yield cov_arch, json.loads(zfp.read(COVERAGE_FILE).decode('utf8')) def __fill_data(self): covdata = [] data_values = {} for vid, dataname, hashsum in CoverageDataValue.objects.values_list('id', 'name', 'hashsum'): data_values[(dataname, hashsum)] = vid for dataname in self._data: covdatastat = CoverageDataStatistics(archive=self._cov_arch, name=dataname) covdatastat.data.save('CoverageData.html', NewFile(StringIO( json_to_html(self._data[dataname]['statistics']) ))) for data in self._data[dataname]['values']: dataval = json_to_html(data[0]) hashsum = 
hashlib.md5(dataval.encode('utf8')).hexdigest() if (dataname, hashsum) not in data_values: data_values[(dataname, hashsum)] = CoverageDataValue.objects\ .create(hashsum=hashsum, name=dataname, value=dataval).id data_id = data_values[(dataname, hashsum)] for fname in data[1]: if fname not in self._files: self._files[fname] = CoverageFile.objects.create(archive=self._cov_arch, name=fname).id for line in data[1][fname]: if isinstance(line, int): covdata.append(CoverageData(covfile_id=self._files[fname], line=line, data_id=data_id)) elif isinstance(line, list) and len(line) == 2: for i in range(*line): covdata.append(CoverageData(covfile_id=self._files[fname], line=i, data_id=data_id)) covdata.append(CoverageData(covfile_id=self._files[fname], line=line[1], data_id=data_id)) CoverageData.objects.bulk_create(covdata) def __is_not_used(self): pass
0.380644
0.142739
from sklearn.model_selection import train_test_split from sklearn.metrics import plot_confusion_matrix from sklearn.naive_bayes import GaussianNB from matplotlib import pylab import pandas as pd import argparse from eolearn.core import LoadTask from plasticfinder.tasks.local_norm import LocalNormalization from plasticfinder.class_defs import catMap import matplotlib.pyplot as plt import joblib import math def train_model_and_test(x_train,y_train,x_test,y_test,model=None, model_name=None): ''' Will train a model with the specified data and save the results to the specified folder. Parameters: x_train: A DataFrame or np array containing the training features y_train: A DataFrame or np array containing the training labels x_test: A DataFrame or np array containing the test features y_test: A DataFrame or np array containing the test labels model: A scikit learn like model to use, defaults to GaussianNB model_name: A name used to save the model in the models folder. Returns: Nothing but will create a directiory in models/{model_name} which includes a joblib serialized version of the trained model and some plots that show the performance of the model. 
''' if(model_name): models_dir = Path('models') model_dir = models_dir / model_name model_dir.mkdir(exist_ok=True) if(model ==None): model = GaussianNB() model.fit(x_train, y_train) fig,axs = plt.subplots(ncols=2, nrows=2, figsize=(15,10)) axs = axs.flatten() plot_confusion_matrix(model,x_train, y_train,ax=axs[0], normalize='true') axs[0].set_xticklabels(list(cats.keys())) axs[0].set_yticklabels(list(cats.keys())) axs[0].xaxis.set_tick_params(rotation=45) plot_confusion_matrix(model,x_test, y_test,ax=axs[1], normalize='true') axs[1].set_xticklabels(list(cats.keys())) axs[1].set_yticklabels(list(cats.keys())) axs[1].xaxis.set_tick_params(rotation=45) plot_confusion_matrix(model,x_train, y_train,ax=axs[2], normalize=None) axs[2].set_xticklabels(list(cats.keys())) axs[2].set_yticklabels(list(cats.keys())) axs[2].xaxis.set_tick_params(rotation=45) plot_confusion_matrix(model,x_test, y_test,ax=axs[3], normalize=None) axs[3].set_xticklabels(list(cats.keys())) axs[3].set_yticklabels(list(cats.keys())) axs[3].xaxis.set_tick_params(rotation=45) plt.tight_layout() if(model_name): plt.savefig(model_dir / "confusion.png") plt.close(fig) joblib.dump(model,model_dir / "model.joblib") return model, model.predict(x_train), model.predict(x_test) def load_and_apply_local_norm(feature_index,method, window_size): '''A function to apply the local normalization step to each feature Parameters: feature (GeoSeries): A row from the GeoDataFrame produced by load_fetures_from_file feature_index (int): The integer used in saving the EOPatch to disk. method: One of 'min', 'median' or 'mean' indicating the type of averaging the window function should use. window_size: The extent in pixles that averaging should carried out over. 
Returns: EOPatch including the normalized data ''' load_task = LoadTask(path=f'data/Training/feature_{feature_index}/') local_norm = LocalNormalization() workflow = LinearWorkflow(load_task, local_norm) patch = workflow.execute({ local_norm: { 'method' : method, 'window_size': window_size } }) return patch def load_features(method,window_size): issue_files = [] train = pd.DataFrame() for feature_index in range(0, features.shape[0],1): try: feature = features.iloc[feature_index] patch =load_and_apply_local_norm(feature_index,method, window_size) data = patch['data'] bands_L1C = data['BANDS-S2-L1C'] bands_L2A = data['BANDS-S2-L2A'] center_x = math.floor(bands_L1C.shape[1]/2) center_y = math.floor(bands_L1C.shape[2]/2) ndvi = data['NDVI'][0,center_x,center_y,0] fdi = data['FDI'][0,center_x,center_y,0] normed_ndvi = data["NORM_NDVI"][0,center_x,center_y,0] normed_fdi = data["NORM_FDI"][0,center_x,center_y,0] spectra = data['BANDS-S2-L2A'][0,center_x,center_y,:] metrics = { 'ndvi': ndvi, 'label':feature.reduced_label, 'fdi': fdi, 'normed_ndvi' : normed_ndvi, 'normed_fdi': normed_fdi, 'Lat':feature.Lat, 'Lon': feature.Lon } band_cols_L1C = dict(zip( ['B01_L1C', 'B02_L1C', 'B03_L1C', 'B04_L1C', 'B05_L1C', 'B06_L1C', 'B07_L1C', 'B08_L1C', 'B08A_L1C', 'B09_L1C', 'B10_L1C', 'B11_L1C', 'B12_L1C'], bands_L1C[0,center_x,center_y,:] )) band_cols_L2A = dict(zip( ['B01_L2A', 'B02_L2A', 'B03_L2A', 'B04_L2A', 'B05_L2A', 'B06_L2A', 'B07_L2A', 'B08_L2A', 'B08A_L2A', 'B09_L2A', 'B10_L2A', 'B11_L2A', 'B12_L2A'], bands_L2A[0,center_x,center_y,:] )) train = train.append( pd.Series( {**metrics, **band_cols_L1C, **band_cols_L2A}, name =feature_index)) except Exception as e : print(e) issue_files.append(feature_index) return (train, issue_files) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Script to train a model using a specific noramization scheme') parser.add_argument('--method', type=str, help='Normalization method, one of median, mean or min') 
parser.add_argument('--window_size', type=int, help='Normalization window in meters') parser.add_argument('--name', type=str, help='Name for the model') args = parser.parse_args() window_size_px = math.floor(args.window_size/10.0) train, issue_files = load_features(args.method, window_size_px) train = train.assign(label_cat = train.label.apply(lambda x: catMap[x])) train = train.dropna(subset=['normed_ndvi']) X_train, X_test, Y_train, Y_test = train_test_split(train[['normed_ndvi','normed_fdi', 'B06_L1C', 'B07_L1C', 'B11_L1C']], train['label_cat'], stratify=train['label_cat']) model, prediction_train, prediction_test = train_model_and_test(X_train,Y_train, X_test,Y_test, model_name=args.name)
train_model.py
from sklearn.model_selection import train_test_split from sklearn.metrics import plot_confusion_matrix from sklearn.naive_bayes import GaussianNB from matplotlib import pylab import pandas as pd import argparse from eolearn.core import LoadTask from plasticfinder.tasks.local_norm import LocalNormalization from plasticfinder.class_defs import catMap import matplotlib.pyplot as plt import joblib import math def train_model_and_test(x_train,y_train,x_test,y_test,model=None, model_name=None): ''' Will train a model with the specified data and save the results to the specified folder. Parameters: x_train: A DataFrame or np array containing the training features y_train: A DataFrame or np array containing the training labels x_test: A DataFrame or np array containing the test features y_test: A DataFrame or np array containing the test labels model: A scikit learn like model to use, defaults to GaussianNB model_name: A name used to save the model in the models folder. Returns: Nothing but will create a directiory in models/{model_name} which includes a joblib serialized version of the trained model and some plots that show the performance of the model. 
''' if(model_name): models_dir = Path('models') model_dir = models_dir / model_name model_dir.mkdir(exist_ok=True) if(model ==None): model = GaussianNB() model.fit(x_train, y_train) fig,axs = plt.subplots(ncols=2, nrows=2, figsize=(15,10)) axs = axs.flatten() plot_confusion_matrix(model,x_train, y_train,ax=axs[0], normalize='true') axs[0].set_xticklabels(list(cats.keys())) axs[0].set_yticklabels(list(cats.keys())) axs[0].xaxis.set_tick_params(rotation=45) plot_confusion_matrix(model,x_test, y_test,ax=axs[1], normalize='true') axs[1].set_xticklabels(list(cats.keys())) axs[1].set_yticklabels(list(cats.keys())) axs[1].xaxis.set_tick_params(rotation=45) plot_confusion_matrix(model,x_train, y_train,ax=axs[2], normalize=None) axs[2].set_xticklabels(list(cats.keys())) axs[2].set_yticklabels(list(cats.keys())) axs[2].xaxis.set_tick_params(rotation=45) plot_confusion_matrix(model,x_test, y_test,ax=axs[3], normalize=None) axs[3].set_xticklabels(list(cats.keys())) axs[3].set_yticklabels(list(cats.keys())) axs[3].xaxis.set_tick_params(rotation=45) plt.tight_layout() if(model_name): plt.savefig(model_dir / "confusion.png") plt.close(fig) joblib.dump(model,model_dir / "model.joblib") return model, model.predict(x_train), model.predict(x_test) def load_and_apply_local_norm(feature_index,method, window_size): '''A function to apply the local normalization step to each feature Parameters: feature (GeoSeries): A row from the GeoDataFrame produced by load_fetures_from_file feature_index (int): The integer used in saving the EOPatch to disk. method: One of 'min', 'median' or 'mean' indicating the type of averaging the window function should use. window_size: The extent in pixles that averaging should carried out over. 
Returns: EOPatch including the normalized data ''' load_task = LoadTask(path=f'data/Training/feature_{feature_index}/') local_norm = LocalNormalization() workflow = LinearWorkflow(load_task, local_norm) patch = workflow.execute({ local_norm: { 'method' : method, 'window_size': window_size } }) return patch def load_features(method,window_size): issue_files = [] train = pd.DataFrame() for feature_index in range(0, features.shape[0],1): try: feature = features.iloc[feature_index] patch =load_and_apply_local_norm(feature_index,method, window_size) data = patch['data'] bands_L1C = data['BANDS-S2-L1C'] bands_L2A = data['BANDS-S2-L2A'] center_x = math.floor(bands_L1C.shape[1]/2) center_y = math.floor(bands_L1C.shape[2]/2) ndvi = data['NDVI'][0,center_x,center_y,0] fdi = data['FDI'][0,center_x,center_y,0] normed_ndvi = data["NORM_NDVI"][0,center_x,center_y,0] normed_fdi = data["NORM_FDI"][0,center_x,center_y,0] spectra = data['BANDS-S2-L2A'][0,center_x,center_y,:] metrics = { 'ndvi': ndvi, 'label':feature.reduced_label, 'fdi': fdi, 'normed_ndvi' : normed_ndvi, 'normed_fdi': normed_fdi, 'Lat':feature.Lat, 'Lon': feature.Lon } band_cols_L1C = dict(zip( ['B01_L1C', 'B02_L1C', 'B03_L1C', 'B04_L1C', 'B05_L1C', 'B06_L1C', 'B07_L1C', 'B08_L1C', 'B08A_L1C', 'B09_L1C', 'B10_L1C', 'B11_L1C', 'B12_L1C'], bands_L1C[0,center_x,center_y,:] )) band_cols_L2A = dict(zip( ['B01_L2A', 'B02_L2A', 'B03_L2A', 'B04_L2A', 'B05_L2A', 'B06_L2A', 'B07_L2A', 'B08_L2A', 'B08A_L2A', 'B09_L2A', 'B10_L2A', 'B11_L2A', 'B12_L2A'], bands_L2A[0,center_x,center_y,:] )) train = train.append( pd.Series( {**metrics, **band_cols_L1C, **band_cols_L2A}, name =feature_index)) except Exception as e : print(e) issue_files.append(feature_index) return (train, issue_files) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Script to train a model using a specific noramization scheme') parser.add_argument('--method', type=str, help='Normalization method, one of median, mean or min') 
parser.add_argument('--window_size', type=int, help='Normalization window in meters') parser.add_argument('--name', type=str, help='Name for the model') args = parser.parse_args() window_size_px = math.floor(args.window_size/10.0) train, issue_files = load_features(args.method, window_size_px) train = train.assign(label_cat = train.label.apply(lambda x: catMap[x])) train = train.dropna(subset=['normed_ndvi']) X_train, X_test, Y_train, Y_test = train_test_split(train[['normed_ndvi','normed_fdi', 'B06_L1C', 'B07_L1C', 'B11_L1C']], train['label_cat'], stratify=train['label_cat']) model, prediction_train, prediction_test = train_model_and_test(X_train,Y_train, X_test,Y_test, model_name=args.name)
0.792825
0.592902
import os import yaml from yaml import Loader from pyul.coreUtils import DotifyDict from battalion.api import * waybill_template = """ echo " - {command}" function {command}() {{ docker pull {docker_id} docker run -it --rm -v ~/.{command}:/.{command} "{docker_id}" {command} $@ }} """ def construct_yaml_map(self, node): # Override the default string handling function # to always return unicode objects return DotifyDict(self.construct_mapping(node)) Loader.add_constructor(u'tag:yaml.org,2002:map', construct_yaml_map) def which(program): def is_exe(fpath): return os.path.isfile(fpath) and (os.access(fpath, os.X_OK) or os.access(fpath, os.F_OK)) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None class waybill(CLI): class State: version = "0.0.1" def get_waybill_dir(self): return os.path.dirname(self.state.config_file) def get_waybills(self): waybill_dir = self.get_waybill_dir() if os.path.exists(waybill_dir): files = [os.path.join(waybill_dir, f) for f in os.listdir(waybill_dir)] for f in [f for f in files if os.path.isfile(f) and os.path.splitext(f)[-1] == '.waybill']: yield f @command def create(cli, command, docker_id): """Creates waybill shims from a given command name and docker image""" content = waybill_template.format(command=command, docker_id=docker_id) waybill_dir = cli.get_waybill_dir() waybill_filename = os.path.join(waybill_dir, command + '.waybill') with open(waybill_filename, 'wb') as filehandle: filehandle.write(content) cli.log.info('Created waybill {0}'.format(waybill_filename)) @command def load(cli, yaml_filename): """Creates waybill shims from a given yaml file definiations""" """Expected Definition: - name: NAME docker_id: IMAGE - name: NAME docker_id: IMAGE """ with open(yaml_filename, 'rb') as filehandle: for waybill in yaml.load(filehandle.read()): 
cli.create(waybill.name, waybill.docker_id) @command def list(cli): """Prints out the list of known waybills""" for waybill in cli.get_waybills(): cli.log.info(waybill) @command def clear(cli): for waybill in cli.get_waybills(): cli.log.info('Removing waybill {0}'.format(waybill)) os.remove(waybill) @command def shellinit(cli): """Implements the waybill shims in the active shell""" output = 'eval echo "Initializing Waybills"' if which('docker') is None: raise ValueError("Unable to find program 'docker'. Please make sure it is installed and setup properly") for waybill in cli.get_waybills(): output += ' && source {0}'.format(waybill) return output def main(): waybill.main() if __name__ == "__main__": main()
waybill/cli.py
import os import yaml from yaml import Loader from pyul.coreUtils import DotifyDict from battalion.api import * waybill_template = """ echo " - {command}" function {command}() {{ docker pull {docker_id} docker run -it --rm -v ~/.{command}:/.{command} "{docker_id}" {command} $@ }} """ def construct_yaml_map(self, node): # Override the default string handling function # to always return unicode objects return DotifyDict(self.construct_mapping(node)) Loader.add_constructor(u'tag:yaml.org,2002:map', construct_yaml_map) def which(program): def is_exe(fpath): return os.path.isfile(fpath) and (os.access(fpath, os.X_OK) or os.access(fpath, os.F_OK)) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None class waybill(CLI): class State: version = "0.0.1" def get_waybill_dir(self): return os.path.dirname(self.state.config_file) def get_waybills(self): waybill_dir = self.get_waybill_dir() if os.path.exists(waybill_dir): files = [os.path.join(waybill_dir, f) for f in os.listdir(waybill_dir)] for f in [f for f in files if os.path.isfile(f) and os.path.splitext(f)[-1] == '.waybill']: yield f @command def create(cli, command, docker_id): """Creates waybill shims from a given command name and docker image""" content = waybill_template.format(command=command, docker_id=docker_id) waybill_dir = cli.get_waybill_dir() waybill_filename = os.path.join(waybill_dir, command + '.waybill') with open(waybill_filename, 'wb') as filehandle: filehandle.write(content) cli.log.info('Created waybill {0}'.format(waybill_filename)) @command def load(cli, yaml_filename): """Creates waybill shims from a given yaml file definiations""" """Expected Definition: - name: NAME docker_id: IMAGE - name: NAME docker_id: IMAGE """ with open(yaml_filename, 'rb') as filehandle: for waybill in yaml.load(filehandle.read()): 
cli.create(waybill.name, waybill.docker_id) @command def list(cli): """Prints out the list of known waybills""" for waybill in cli.get_waybills(): cli.log.info(waybill) @command def clear(cli): for waybill in cli.get_waybills(): cli.log.info('Removing waybill {0}'.format(waybill)) os.remove(waybill) @command def shellinit(cli): """Implements the waybill shims in the active shell""" output = 'eval echo "Initializing Waybills"' if which('docker') is None: raise ValueError("Unable to find program 'docker'. Please make sure it is installed and setup properly") for waybill in cli.get_waybills(): output += ' && source {0}'.format(waybill) return output def main(): waybill.main() if __name__ == "__main__": main()
0.335024
0.09118
import csv import sys extra_keys_in_app = set([ 'English Government of Canada signature', # constant 'French Government of Canada signature', # constant 'Empty', # template_list.py '1 template', # template_list.py 'Not a valid phone number', # a validation liberary 'bad invitation link', # api 'invitation expired', # api 'password', # api 'Your service already uses ', # api 'Code not found', # api 'Code already sent, wait 10 seconds', # api 'You cannot delete a default email reply to address', # api 'Code has expired', # api 'Code already sent', # api 'Code has already been used', # api 'as an email reply-to address.', # api 'You cannot remove the only user for a service', # api 'Cannot send to international mobile numbers', # api ]) def csv_to_dict(filename): d = dict() with open(filename, newline='') as csvfile: reader = csv.DictReader(csvfile) for row in reader: d[row['source']] = row['target'] return d def printMissingKeys(name, keys): if keys: print('\n----- ' + name) # noqa: T001 for k in keys: print(k) # noqa: T001 app = csv_to_dict(sys.argv[1]) csv_en = csv_to_dict('app/translations/csv/en.csv') csv_fr = csv_to_dict('app/translations/csv/fr.csv') app_keys = set(app.keys()).union(extra_keys_in_app) csv_en_keys = set(csv_en.keys()) csv_fr_keys = set(csv_fr.keys()) in_one_csv_not_both = csv_en_keys.symmetric_difference(csv_fr_keys) in_app_not_in_en_csv = app_keys.difference(csv_en_keys) in_app_not_in_fr_csv = app_keys.difference(csv_fr_keys) missing_from_csvs = in_app_not_in_en_csv.union(in_app_not_in_fr_csv) in_en_csv_not_in_app = csv_en_keys.difference(app_keys) in_fr_csv_not_in_app = csv_fr_keys.difference(app_keys) unused_translations = in_en_csv_not_in_app.union(in_fr_csv_not_in_app) printMissingKeys('missing from csvs', missing_from_csvs) printMissingKeys('in one csv but not both', in_one_csv_not_both) printMissingKeys('unused translations (check api before deleting!)', unused_translations) print(' ') # noqa: T001
scripts/babel_test.py
import csv import sys extra_keys_in_app = set([ 'English Government of Canada signature', # constant 'French Government of Canada signature', # constant 'Empty', # template_list.py '1 template', # template_list.py 'Not a valid phone number', # a validation liberary 'bad invitation link', # api 'invitation expired', # api 'password', # api 'Your service already uses ', # api 'Code not found', # api 'Code already sent, wait 10 seconds', # api 'You cannot delete a default email reply to address', # api 'Code has expired', # api 'Code already sent', # api 'Code has already been used', # api 'as an email reply-to address.', # api 'You cannot remove the only user for a service', # api 'Cannot send to international mobile numbers', # api ]) def csv_to_dict(filename): d = dict() with open(filename, newline='') as csvfile: reader = csv.DictReader(csvfile) for row in reader: d[row['source']] = row['target'] return d def printMissingKeys(name, keys): if keys: print('\n----- ' + name) # noqa: T001 for k in keys: print(k) # noqa: T001 app = csv_to_dict(sys.argv[1]) csv_en = csv_to_dict('app/translations/csv/en.csv') csv_fr = csv_to_dict('app/translations/csv/fr.csv') app_keys = set(app.keys()).union(extra_keys_in_app) csv_en_keys = set(csv_en.keys()) csv_fr_keys = set(csv_fr.keys()) in_one_csv_not_both = csv_en_keys.symmetric_difference(csv_fr_keys) in_app_not_in_en_csv = app_keys.difference(csv_en_keys) in_app_not_in_fr_csv = app_keys.difference(csv_fr_keys) missing_from_csvs = in_app_not_in_en_csv.union(in_app_not_in_fr_csv) in_en_csv_not_in_app = csv_en_keys.difference(app_keys) in_fr_csv_not_in_app = csv_fr_keys.difference(app_keys) unused_translations = in_en_csv_not_in_app.union(in_fr_csv_not_in_app) printMissingKeys('missing from csvs', missing_from_csvs) printMissingKeys('in one csv but not both', in_one_csv_not_both) printMissingKeys('unused translations (check api before deleting!)', unused_translations) print(' ') # noqa: T001
0.245175
0.089773
import os import sys from PIL import Image from PIL.ExifTags import GPSTAGS, TAGS from modules.colour import bright as br def data_extracter_from_images(choose_options_1_or_2, forder_or_filename) -> str: def maps(gps_co_ordinate): latitude = decimal_degree(float(gps_co_ordinate["lat"][0]), float( gps_co_ordinate["lat"][1]), float(gps_co_ordinate["lat"][2]), gps_co_ordinate["lat_ref"]) logitude = decimal_degree(float(gps_co_ordinate["lon"][0]), float( gps_co_ordinate["lon"][1]), float(gps_co_ordinate["lon"][2]), gps_co_ordinate["lon_ref"]) return f"https://maps.google.com/?q={latitude},{logitude}" def decimal_degree(degree, minutes, seconds, direction): decimal_degree = degree + minutes / 60 + seconds / 3600 if direction == "S" or direction == "W": decimal_degree *= -1 return decimal_degree while True: input_1 = str(choose_options_1_or_2) try: var1 = str(input_1) if var1 == "1" or var1 == "01": sys.stdout = open("data.txt", "w") break elif var1 == "2" or var1 == "02": break else: print("None") except: print("OK") cwd = os.getcwd() os.chdir(os.path.join(cwd, str(forder_or_filename))) files = os.listdir() if len(files) == 0: print(f"{br.red_1}You have not file") sys.exit(0) for file in files: try: image = Image.open(file) gps_co_ordinate = {} if image._getexif() == None: print(f"{br.green_1}{file} ---> {br.red_1} no data found") else: for tag, value in image._getexif().items(): tag_name = TAGS.get(tag) if tag_name == "GPSInfo": for key, val in value.items(): print(f"{GPSTAGS.get(key)} - {val}") if GPSTAGS.get(key) == "GPSLatitude": gps_co_ordinate["lat"] == val elif GPSTAGS.get(key) == "GPSLongitude": gps_co_ordinate["lon"] == val elif GPSTAGS.get(key) == "GPSLatitudeRef": gps_co_ordinate["lat_ref"] = val elif GPSTAGS.get(key) == "GPSLongitudeRef": gps_co_ordinate["lon_ref"] = val else: print(f"{tag_name} - {value}") if gps_co_ordinate: print(maps(gps_co_ordinate)) except IOError: print(f"{br.yellow_1}File not Supported") if input_1 == "1": sys.stdout.close() 
os.chdir(cwd)
exploits/information_Gathering/image_data_extracter.py
import os import sys from PIL import Image from PIL.ExifTags import GPSTAGS, TAGS from modules.colour import bright as br def data_extracter_from_images(choose_options_1_or_2, forder_or_filename) -> str: def maps(gps_co_ordinate): latitude = decimal_degree(float(gps_co_ordinate["lat"][0]), float( gps_co_ordinate["lat"][1]), float(gps_co_ordinate["lat"][2]), gps_co_ordinate["lat_ref"]) logitude = decimal_degree(float(gps_co_ordinate["lon"][0]), float( gps_co_ordinate["lon"][1]), float(gps_co_ordinate["lon"][2]), gps_co_ordinate["lon_ref"]) return f"https://maps.google.com/?q={latitude},{logitude}" def decimal_degree(degree, minutes, seconds, direction): decimal_degree = degree + minutes / 60 + seconds / 3600 if direction == "S" or direction == "W": decimal_degree *= -1 return decimal_degree while True: input_1 = str(choose_options_1_or_2) try: var1 = str(input_1) if var1 == "1" or var1 == "01": sys.stdout = open("data.txt", "w") break elif var1 == "2" or var1 == "02": break else: print("None") except: print("OK") cwd = os.getcwd() os.chdir(os.path.join(cwd, str(forder_or_filename))) files = os.listdir() if len(files) == 0: print(f"{br.red_1}You have not file") sys.exit(0) for file in files: try: image = Image.open(file) gps_co_ordinate = {} if image._getexif() == None: print(f"{br.green_1}{file} ---> {br.red_1} no data found") else: for tag, value in image._getexif().items(): tag_name = TAGS.get(tag) if tag_name == "GPSInfo": for key, val in value.items(): print(f"{GPSTAGS.get(key)} - {val}") if GPSTAGS.get(key) == "GPSLatitude": gps_co_ordinate["lat"] == val elif GPSTAGS.get(key) == "GPSLongitude": gps_co_ordinate["lon"] == val elif GPSTAGS.get(key) == "GPSLatitudeRef": gps_co_ordinate["lat_ref"] = val elif GPSTAGS.get(key) == "GPSLongitudeRef": gps_co_ordinate["lon_ref"] = val else: print(f"{tag_name} - {value}") if gps_co_ordinate: print(maps(gps_co_ordinate)) except IOError: print(f"{br.yellow_1}File not Supported") if input_1 == "1": sys.stdout.close() 
os.chdir(cwd)
0.19163
0.250982
from os_parameters_define import * from utility_function import * from config import * ## Function : MESDC CMD Converter 0x26h cmd to str format def mesdc_26h_raw_to_str_py( ): netfun = 0x30 cmd = '0x26' # Setup 26h_raw mesdc_26h_raw = [cmd,'0x57','0x01','0x00','0x04','0x06','0x04', '0x00', '0xca', '0x01', '0x01'] return netfun, mesdc_26h_raw ## Function : MESDC CMD Converter 0x26h cmd to str format def mesdc_26h_mctp_statistic_raw_to_str_py( ): netfun = 0x30 cmd = '0x26' # Setup 26h_raw mesdc_26h_mctp_statistic_raw = [cmd,'0x57','0x01','0x00','0x04','0x06','0x04', '0x40', '0xf8', '0x00', '0x00'] return netfun, mesdc_26h_mctp_statistic_raw ## Function : MESDC CMD Converter 0x26h cmd to str format def mesdc_26h_read_power_data_raw_to_str_py( addr ): netfun = 0x30 cmd = '0x26' addr_low_byte = addr[0] addr_high_byte = addr[1] # Setup 26h_raw mesdc_26h_read_power_data_raw = [cmd,'0x57','0x01','0x00','0x04','0x06','0x04', '0x9F', '0xB0', addr_low_byte, addr_high_byte] return netfun, mesdc_26h_read_power_data_raw ## Function : MESDC CMD Converter 0x26h cmd to str format : SUSRAM file for NONCE status is ASCII CODE : fwnoncest => 66 77 6e 6f 6e 63 65 73 74 00 def mesdc_26h_susram_hmrfpo_nonce_raw_to_str_py( ): netfun = 0x30 cmd = '0x26' # Setup 26h_raw mesdc_26h_susram_hmrfpo_nonce_raw = [cmd,'0x57','0x01','0x00','0x04','0x06','0x0F', '0xD3', '0x76', '0x00', '0x00', '0x20', '0x66', '0x77', '0x6E', '0x6F', '0x6E', '0x63', '0x65', '0x73', '0x74', '0x00',] return netfun, mesdc_26h_susram_hmrfpo_nonce_raw ## Function : MESDC CMD Converter 0x26h cmd to str format def mesdc_26h_nm_ptu_launch_state_raw_to_str_py( ): netfun = 0x30 cmd = '0x26' # Setup 26h_raw mesdc_26h_nm_ptu_launch_state_raw = [cmd,'0x57','0x01','0x00','0x04','0x06','0x02', '0xf5', '0x1a'] return netfun, mesdc_26h_nm_ptu_launch_state_raw ## Function : Converter DFh cmd to str format ## command: 01h = Recovery but no default. 02h= Restore facture default. 03h = PTT initial state restore. 
def dfh_raw_to_str( command ): netfun = OEM cmd = ' 0xdf ' manufacture_id = INTEL_MANUFACTURE_ID # Setup byte4 :Command byte4 = int_to_hex_string(command) # Setup dfh_raw dfh_raw = netfun + cmd + manufacture_id + byte4 return dfh_raw ## Function : Converter 60h cmd to str format ## request: 00h = No Launch. 01h= Launch at next boot def ptu_launch_60h_raw_to_str_py( request ): netfun = 0x2e cmd = '0x60' # Setup byte4 :Command byte4 = int_to_hex_string(request) # Setup dfh_raw ptu_60h_raw = [cmd,'0x57','0x01','0x00',byte4] return netfun ,ptu_60h_raw ## Function : Converter 61h cmd to str format ## domain: 00h = platform . 01h= CPU , 02h = Memory def ptu_result_61h_raw_to_str_py( domain ): netfun = 0x2e cmd = '0x61' # Setup byte4 :Domain byte4 = int_to_hex_string(domain) # Setup dfh_raw ptu_61h_raw = [cmd,'0x57','0x01','0x00',byte4] return netfun ,ptu_61h_raw ## Function : Converter 65h cmd to str format ## domain: 01h = CUPS Index . 02h= Dynamic load factors def get_cups_data_65h_raw_to_str_py( parameter_selector ): netfun = 0x2e cmd = '0x65' # Setup byte4 :parameter_selector byte4 = int_to_hex_string(parameter_selector) # Setup 65h_raw cups_65h_raw = [cmd,'0x57','0x01','0x00',byte4] return netfun ,cups_65h_raw ## Function : Converter 81h cmd to str format def btg_80h_raw_to_str_py( ): netfun = 0x2e cmd = '0x80' # Setup 80h_raw btg_81h_raw = [cmd,'0x57','0x01','0x00'] return netfun ,btg_80h_raw ## Function : Converter 81h cmd to str format def btg_81h_raw_to_str_py( ): netfun = 0x2e cmd = '0x81' # Setup 81h_raw btg_81h_raw = [cmd,'0x57','0x01','0x00'] return netfun ,btg_81h_raw ## Function : Converter 82h cmd to str format def btg_82h_raw_to_str_py( ): netfun = 0x2e cmd = '0x82' # Setup 82h_raw btg_82h_raw = [cmd,'0x57','0x01','0x00'] return netfun ,btg_82h_raw ## Function : Converter 70h cmd to str format def ptt_70h_raw_to_str_py( ): netfun = 0x2e cmd = '0x70' # Setup 70h_raw ptt_70h_raw = [cmd,'0x57','0x01','0x00'] return netfun ,ptt_70h_raw ## Function : 
Converter 71h cmd to str format def ptt_71h_raw_to_str_py( ): netfun = 0x2e cmd = '0x71' # Setup 71h_raw ptt_71h_raw = [cmd,'0x57','0x01','0x00'] return netfun ,ptt_71h_raw ## Function : Converter DFh cmd to str format ## command: 01h = Recovery but no default. 02h= Restore facture default. 03h = PTT initial state restore. def dfh_raw_to_str_py( command ): netfun = 0x2e cmd = '0xdf' # Setup byte4 :Command byte4 = int_to_hex_string(command) # Setup dfh_raw dfh_raw = [cmd,'0x57','0x01','0x00',byte4] return netfun ,dfh_raw ## Function : Converter D4h cmd to str format ## control_knob = 0 , get p/t states number. = 1 get logical processors number def d4h_raw_to_str_py( control_knob ): netfun = 0x2e cmd = '0xd4' # Setup byte4 [5:4] :control knob control = bit_shift_left(control_knob , 4) byte4 = int_to_hex_string(control) # Setup d4h_raw d4h_raw = [cmd,'0x57','0x01','0x00',byte4] return netfun ,d4h_raw ## Function : Converter D3h cmd to str format ## control_knob = 0 , get maximum allowed p/t states number. = 1 get get maximum allowed logical processors number def d3h_raw_to_str_py( control_knob ): netfun = 0x2e cmd = '0xd3' # Setup byte4 [5:4] :control knob control = bit_shift_left(control_knob , 4) byte4 = int_to_hex_string(control) # Setup d4h_raw d3h_raw = [cmd,'0x57','0x01','0x00',byte4] return netfun ,d3h_raw ## Function : Converter D2h cmd to str format ## control_knob = 0 , set p/t states number. 
= 1 set logical processors number def d2h_raw_to_str_py( control_knob , max_p_states, max_t_states): netfun = 0x2e cmd = '0xd2' # Setup byte4 [5:4] :control knob control = bit_shift_left(control_knob , 4) byte4 = int_to_hex_string(control) # Setup byte5 : maximum allowed p states or logical cores # note when control_knob = 0, byte5 = max_p_states, when control_knob=1, bytes5 = #of logical cores low byte byte5 = int_to_hex_string(max_p_states) # Setup byte6 : maximum allowed t states or logical cores # note when control_knob = 0, byte6 = max_t_states, when control_knob=1, bytes6 = #of logical cores high byte = 0 byte6 = int_to_hex_string(max_t_states) # Setup d2h_raw d2h_raw = [cmd,'0x57','0x01','0x00',byte4 , byte5 , byte6 ] return netfun ,d2h_raw ## Function : Converter D0h cmd to str format def d0h_raw_to_str_py( domain , control , power_budget , component_id ): netfun = 0x2e cmd = '0xd0' # Setup byte4 # byte4 bit[7]: Per component control component_control = bit_shift_left(control , 7) byte4 = int_to_hex_string(domain | component_control) # Setup bytes[6:5] = Power Budget byte5 = power_budget[0] byte6 = power_budget[1] # Setup bytes7 = component_identifier byte7 = int_to_hex_string(component_id) # Setup d0h_raw d0h_raw = [cmd,'0x57','0x01','0x00',byte4 ,byte5, byte6, byte7 ] return netfun, d0h_raw ## Function : Converter C0h cmd to str format def c0h_raw_to_str_py( flags, domain, policy_id): netfun = 0x2e cmd = '0xc0' # Setup byte4 : Flags, bits[2:0] byte4 = int_to_hex_string(flags) # Conbine for byte5 byte5 = int_to_hex_string(domain) # Setup byte6 : Policy ID byte6 = int_to_hex_string(policy_id) # Setup c0h_raw c0h_raw = [cmd,'0x57','0x01','0x00',byte4,byte5,byte6] return netfun, c0h_raw ## Function : Converter C8h cmd to str format ## mode = 1 = power mode /2 Inlet temp /3 Global throttle status / ..... 
## domain = 0 platform domain / 1 CPU domain / 2 Memory domain / 3 HW Protection/ 4 HPIO def c8h_raw_to_str( mode, domain, power_domain, policy_id): netfun = OEM cmd = ' 0xc8 ' manufacture_id = INTEL_MANUFACTURE_ID # Setup byte4 : Mode, bits[4:0] byte4 = int_to_hex_string(mode) # Setup byte5 : Domain ID, bits[3:0] domain = domain # byte5 bit[4]: power domain power_domain = bit_shift_left(power_domain , 4) # Conbine for byte5 byte5 = int_to_hex_string(domain | power_domain) # Setup byte6 : Policy ID byte6 = int_to_hex_string(policy_id) # Setup c8h_raw c8h_raw = netfun + cmd + manufacture_id + byte4 + ' ' + byte5 + ' '+ byte6 return c8h_raw ## Function : Converter C8h cmd to str format ## mode = 1 = power mode /2 Inlet temp /3 Global throttle status / ..... ## domain = 0 platform domain / 1 CPU domain / 2 Memory domain / 3 HW Protection/ 4 HPIO def c8h_raw_to_str_py( mode, domain, power_domain, policy_id): netfun = 0x2e cmd = '0xc8' manufacture_id = INTEL_MANUFACTURE_ID # Setup byte4 : Mode, bits[4:0] byte4 = int_to_hex_string(mode) # Setup byte5 : Domain ID, bits[3:0] domain = domain # byte5 bit[4]: power domain power_domain = bit_shift_left(power_domain , 4) # Conbine for byte5 byte5 = int_to_hex_string(domain | power_domain) # Setup byte6 : Policy ID byte6 = int_to_hex_string(policy_id) # Setup c8h_raw c8h_raw = [cmd,'0x57','0x01','0x00',byte4,byte5,byte6] return netfun, c8h_raw ## Function : Converter C9h cmd to str format ## domain = 0 platform domain / 1 CPU domain / 2 Memory domain / 3 HW Protection/ 4 HPIO ## Total request 5bytes def c9h_raw_to_str( domain, policy_trigger_type, policy_type, power_domain): DEBUG('c9h_raw_str:') netfun = OEM cmd = ' 0xc9 ' manufacture_id = INTEL_MANUFACTURE_ID # Setup byte4 byte4 = int_to_hex_string(domain) # Setup byte5 , bits[3:0] policy_trigger_type = policy_trigger_type # bits[6:4] policy_type = bit_shift_left(policy_type , 4) # bit[7] power_domain = bit_shift_left(power_domain , 7) # Conbine for byte5 byte5 = 
int_to_hex_string(policy_trigger_type | policy_type | power_domain) # Setup c9h_raw c9h_raw = netfun + cmd + manufacture_id + byte4 + ' ' + byte5 DEBUG(c9h_raw) return c9h_raw ## Function : Converter C9h cmd to str format ## domain = 0 platform domain / 1 CPU domain / 2 Memory domain / 3 HW Protection/ 4 HPIO ## Total request 5bytes def c9h_raw_to_str_py( domain, policy_trigger_type, policy_type, power_domain): DEBUG('c9h_raw_str:') netfun = 0x2e cmd = ' 0xc9 ' # Setup byte4 byte4 = int_to_hex_string(domain) # Setup byte5 , bits[3:0] policy_trigger_type = policy_trigger_type # bits[6:4] policy_type = bit_shift_left(policy_type , 4) # bit[7] power_domain = bit_shift_left(power_domain , 7) # Conbine for byte5 byte5 = int_to_hex_string(policy_trigger_type | policy_type | power_domain) # Setup c9h_raw c9h_raw = [cmd,'0x57','0x01','0x00',byte4,byte5] return netfun, c9h_raw ## Function : Converter EAh cmd to str format ## domain = 0 platform domain def eah_raw_to_str_py( domain ): netfun = 0x2e cmd = ' 0xea ' manufacture_id = INTEL_MANUFACTURE_ID # Setup byte4 : Domain ID, bits[3:0] domain = domain # Conbine for byte4 byte4 = int_to_hex_string(domain) # Setup eah_raw eah_raw = [ cmd, '0x57','0x01','0x00', byte4 ] return netfun, eah_raw ## Function : Converter F4h cmd to str format ## index = device index (XML file => SDR => User Device ID in each sensors) def f4h_raw_to_str_py( index ): netfun = 0x2e cmd = ' 0xf4 ' manufacture_id = INTEL_MANUFACTURE_ID # Setup byte4 : Device Index, bits[4:0] index = index # Conbine for byte4 byte4 = int_to_hex_string(index) # Setup f4h_raw f4h_raw = [ cmd, '0x57','0x01','0x00', byte4 ] return netfun, f4h_raw ## Function : Converter F4h cmd to str format ## index = device index (XML file => SDR => User Device ID in each sensors) def f5h_raw_to_str_py( index , page, offset ): netfun = 0x2e cmd = ' 0xf5 ' manufacture_id = INTEL_MANUFACTURE_ID # Setup byte4 : Device Index, bits[4:0] index = index # Conbine for byte4 byte4 = 
int_to_hex_string(index) # Setup byte5 : page =bit[7:4], History=bit[3:0] = 0xF page = bit_shift_left(page , 4) history = bit_shift_left( 0x0f, 0) #conbine for byte5 byte5 = int_to_hex_string( page | history ) # Setup byte6 : offset byte6 = int_to_hex_string(offset) # Setup f4h_raw f5h_raw = [ cmd, '0x57','0x01','0x00', byte4 ,byte5, byte6 ] return netfun, f5h_raw ## Function : Converter 6Ah cmd to str format ## ## Total request 10bytes def hwpm_6ah_raw_to_str_py(control_scope, scope, min_ratio_bias, max_ratio, performance_preference): DEBUG('6ah_raw_str:') netfun = 0x2e cmd = ' 0x6a ' # Setup byte4 , bits[3:0] control_scope = control_scope # bits[7:6] scope = bit_shift_left(scope , 6) # Conbine for byte4 byte4 = int_to_hex_string(control_scope | scope ) #Setup byte5 = 0 byte5 = str(0) #Setup byte6 = 0 byte6 = str(0) #Setup byte7 byte7 = ' '.join(min_ratio_bias) #Setup byte8 byte8 = ' '.join(max_ratio) #Setup byte9 byte9 = ' '.join(performance_preference) #Setup byte10 byte10 = str(0) # Setup 6ah_raw hwpm_6ah_raw = [cmd,'0x57','0x01','0x00',byte4,byte5, byte6, byte7, byte8, byte9, byte10] return netfun, hwpm_6ah_raw ## Function : Convert 0xC1 cmd to str format def c1h_raw_to_str(domain, policy_enable, policy_id, policy_trigger_type, policy_add, aggressive, storage_mode, alert, shutdown, power_domain, limit, correction, trigger_limit, report_period ): netfun = OEM cmd = ' 0xc1 ' manufacture_id = INTEL_MANUFACTURE_ID # Setup byte4 : Domain ID, bits[3:0] domain = domain # byte4 bit[4]: Policy Enable policy_enable = bit_shift_left(policy_enable , 4) # Conbine for byte4 byte4 = int_to_hex_string(domain | policy_enable) # Setup byte5 : Policy ID byte5 = int_to_hex_string(policy_id) # Setup byte6 : Policy Type # byte6 bit[3:0] :Policy trigger type policy_trigger_type = policy_trigger_type # byte6 bit[4] :Policy config action policy_add = bit_shift_left(policy_add , 4) # byte6 bit[6:5] :aggressive mode aggressive = bit_shift_left(aggressive , 5) # byte6 bit[7] : 
storage_mode storage_mode = bit_shift_left(storage_mode , 7) # Conbine for byte6 byte6 = int_to_hex_string(policy_trigger_type | policy_add | aggressive | storage_mode | storage_mode) # Setup byte7 : Exception action # byte7 bit[0]: Send alert alert = alert # byte7 bit[1]: Shutdown shutdown = bit_shift_left(shutdown , 1) # byte7 bit[7]: power domain power_domain = bit_shift_left(power_domain , 7) # Conbine for byte6 byte7 = int_to_hex_string(power_domain | shutdown | alert) # Setup bytes[9:8] = Target Limit value byte8_to_byte9 = ' '.join(limit) # Setup bytes[13:10] = Correction time byte10_to_byte13 = ' '.join(correction) # Setup bytes[15:14] = Trigger Limit Point value byte14_to_byte15 = ' '.join(trigger_limit) # Setup bytes[17:16] = Statistic Report Period in second byte16_to_byte17 = ' '.join(report_period) c1h_raw = netfun + cmd + manufacture_id + byte4 + ' ' + byte5 + ' ' + byte6 + ' ' + byte7 + ' ' + byte8_to_byte9 + ' ' + byte10_to_byte13 + ' ' + byte14_to_byte15 + ' ' + byte16_to_byte17 return c1h_raw ## Function : Convert 0xCB cmd to str format def cbh_raw_to_str_py(domain, min_power_draw_range, max_power_draw_range ): netfun = 0x2e cmd = '0xcb' # Setup byte4 : Domain ID, bits[3:0] byte4 = int_to_hex_string(domain) # Setup bytes[6:5] = Minimum Power Draw Range byte5 = min_power_draw_range[0] byte6 = min_power_draw_range[1] # Setup bytes[8:7] = Minimum Power Draw Range byte7 = max_power_draw_range[0] byte8 = max_power_draw_range[1] cbh_raw = [cmd,'0x57','0x01','0x00', byte4, byte5, byte6, byte7 , byte8 ] return netfun, cbh_raw ## Function : Convert 0xC1 cmd to str format def c1h_raw_to_str_py(domain, policy_enable, policy_id, policy_trigger_type, policy_add, aggressive, storage_mode, alert, shutdown, power_domain, limit, correction, trigger_limit, report_period ): netfun = 0x2e cmd = ' 0xc1 ' # Setup byte4 : Domain ID, bits[3:0] domain = domain # byte4 bit[4]: Policy Enable policy_enable = bit_shift_left(policy_enable , 4) # Conbine for byte4 byte4 = 
int_to_hex_string(domain | policy_enable) # Setup byte5 : Policy ID byte5 = int_to_hex_string(policy_id) # Setup byte6 : Policy Type # byte6 bit[3:0] :Policy trigger type policy_trigger_type = policy_trigger_type # byte6 bit[4] :Policy config action policy_add = bit_shift_left(policy_add , 4) # byte6 bit[6:5] :aggressive mode aggressive = bit_shift_left(aggressive , 5) # byte6 bit[7] : storage_mode storage_mode = bit_shift_left(storage_mode , 7) # Conbine for byte6 byte6 = int_to_hex_string(policy_trigger_type | policy_add | aggressive | storage_mode | storage_mode) # Setup byte7 : Exception action # byte7 bit[0]: Send alert alert = alert # byte7 bit[1]: Shutdown shutdown = bit_shift_left(shutdown , 1) # byte7 bit[7]: power domain power_domain = bit_shift_left(power_domain , 7) # Conbine for byte6 byte7 = int_to_hex_string(power_domain | shutdown | alert) # Setup bytes[9:8] = Target Limit value byte8 = limit[0] byte9 = limit[1] # Setup bytes[13:10] = Correction time byte10 = correction[0] byte11 = correction[1] byte12 = correction[2] byte13 = correction[3] # Setup bytes[15:14] = Trigger Limit Point value byte14 = trigger_limit[0] byte15 = trigger_limit[1] # Setup bytes[17:16] = Statistic Report Period in second byte16 = report_period[0] byte17 = report_period[1] #c1h_raw = netfun + cmd + manufacture_id + byte4 + ' ' + byte5 + ' ' + byte6 + ' ' + byte7 + ' ' + byte8_to_byte9 + ' ' + byte10_to_byte13 + ' ' + byte14_to_byte15 + ' ' + byte16_to_byte17 c1h_raw = [cmd,'0x57','0x01','0x00', byte4, byte5, byte6, byte7 , byte8, byte9 , byte10 , byte11, byte12, byte13, byte14, byte15, byte16, byte17 ] return netfun, c1h_raw ## Function : Convert 0xF0 cmd to str format def f0h_set_k_coefficiency_raw_to_str_py( k_coefficiency ): netfun = 0x2e cmd = '0xf0' # Setup byte4 : Domain ID, bits[3:0] byte4 = k_coefficiency[0] f0h_raw = [cmd,'0x57','0x01','0x00', byte4 ] return netfun, f0h_raw ## Function : Convert 0xF2 cmd to str format def f2h_raw_to_str_py(domain): netfun = 0x2e cmd 
= '0xf2' # Setup byte4 : Domain ID, bits[3:0] byte4 = int_to_hex_string(domain) f2h_raw = [cmd,'0x57','0x01','0x00', byte4 ] return netfun, f2h_raw ## Function : Converter 40h cmd : Send RAW PECI cmd to str format ## PECI client address = 30h CPU0 / 31h CPU1 domain / 32h CPU2 / 33h CPU3 ## 70h~73h cmd via PECI over DMI ## B0h~B3h cmd via PECI wire def peci_40h_raw_to_str( client_addr, interface , write_length, read_length, raw ): DEBUG('40h_raw_str:') netfun = OEM cmd = ' 0x40 ' manufacture_id = INTEL_MANUFACTURE_ID # byte4 bit[7:6]: Interface select interface = bit_shift_left(interface , 6) # byte4 bit[5:0]: Client address client_addr = client_addr # Setup byte4 byte4 = int_to_hex_string(client_addr | interface) # Setup byte5 : Write length byte5 = int_to_hex_string(write_length) # Setup byte6 : Read length byte6 = int_to_hex_string(read_length) # Setup byte7-byteN : raw PECI data byte7_byteN = ' '.join(raw) # Setup 40h_raw peci_40h_raw = netfun + cmd + manufacture_id + byte4 + ' ' + byte5 + ' ' + byte6 + ' ' + byte7_byteN DEBUG(peci_40h_raw) return peci_40h_raw ## Function : Converter 40h cmd : Send RAW PECI cmd to str format ## PECI client address = 30h CPU0 / 31h CPU1 domain / 32h CPU2 / 33h CPU3 ## 70h~73h cmd via PECI over DMI ## B0h~B3h cmd via PECI wire def peci_40h_raw_to_str_py( client_addr, interface , write_length, read_length, raw ): DEBUG('40h_raw_str:') netfun = 0x2e cmd = '0x40' # byte4 bit[7:6]: Interface select interface = bit_shift_left(interface , 6) # byte4 bit[5:0]: Client address client_addr = client_addr # Setup byte4 byte4 = int_to_hex_string(client_addr | interface) # Setup byte5 : Write length byte5 = int_to_hex_string(write_length) # Setup byte6 : Read length byte6 = int_to_hex_string(read_length) # Setup 40h_raw peci_40h_raw = [cmd,'0x57','0x01','0x00', byte4, byte5, byte6 ] # Setup byte7-byteN : raw PECI data for loop in range(0 , len(raw)): peci_40h_raw.append(format(raw[loop])) DEBUG(peci_40h_raw) return netfun, peci_40h_raw ## 
## Function : Converter d9h cmd : Send RAW PMbus extended cmd to str format
def d9h_raw_to_str_py(msg_type, pec_report, pec_en, sensor_bus, target_addr,
                      mux_addr, mux_ch, mux_config, trans_protocol,
                      write_len, read_len, command):
    """Return (netfun, raw) for the D9h PMBus-extended read command."""
    DEBUG('d9h_raw_str:')
    netfun = 0x2e
    # byte4: msg type bits[3:1] | extended-addr format bits[5:4]
    #        | PEC report bit[6] | PEC enable bit[7]
    byte4 = int_to_hex_string(bit_shift_left(msg_type, 1)
                              | bit_shift_left(d9h_extended_device_addr, 4)
                              | bit_shift_left(pec_report, 6)
                              | bit_shift_left(pec_en, 7))
    pmbus_d9h_raw = ['0xd9', '0x57', '0x01', '0x00', byte4,
                     int_to_hex_string(sensor_bus),                    # byte5
                     int_to_hex_string(target_addr),                   # byte6
                     int_to_hex_string(mux_addr),                      # byte7
                     int_to_hex_string(mux_ch),                        # byte8
                     int_to_hex_string(mux_config),                    # byte9
                     int_to_hex_string(bit_shift_left(trans_protocol, 5)),  # byte10
                     int_to_hex_string(write_len),                     # byte11
                     int_to_hex_string(read_len),                      # byte12
                     int_to_hex_string(command)]                       # byte13
    return netfun, pmbus_d9h_raw


## Function : Converter d9h cmd : Send RAW PMbus extended cmd to str format
def d9h_set_raw_to_str_py(msg_type, pec_report, pec_en, sensor_bus, target_addr,
                          mux_addr, mux_ch, mux_config, trans_protocol,
                          write_len, read_len, command):
    """Return (netfun, raw) for the D9h PMBus-extended write command.

    *command* is a sequence of payload values appended after byte12.
    """
    DEBUG('d9h_raw_str:')
    netfun = 0x2e
    byte4 = int_to_hex_string(bit_shift_left(msg_type, 1)
                              | bit_shift_left(d9h_extended_device_addr, 4)
                              | bit_shift_left(pec_report, 6)
                              | bit_shift_left(pec_en, 7))
    pmbus_d9h_raw = ['0xd9', '0x57', '0x01', '0x00', byte4,
                     int_to_hex_string(sensor_bus),
                     int_to_hex_string(target_addr),
                     int_to_hex_string(mux_addr),
                     int_to_hex_string(mux_ch),
                     int_to_hex_string(mux_config),
                     int_to_hex_string(bit_shift_left(trans_protocol, 5)),
                     int_to_hex_string(write_len),
                     int_to_hex_string(read_len)]
    # NOTE(review): format(int) renders decimal, not '0x..' — preserved as-is.
    for item in command:
        pmbus_d9h_raw.append(format(item))
    DEBUG(pmbus_d9h_raw)
    return netfun, pmbus_d9h_raw


## Function : Converter d7h cmd : Send RAW PMbus extended cmd to str format
def d7h_set_raw_to_str_py(domain, psu1_addr, psu2_addr, psu3_addr, psu4_addr,
                          psu5_addr, psu6_addr, psu7_addr, psu8_addr):
    """Return (netfun, raw) for the D7h set-PSU-slave-address command."""
    netfun = 0x2e
    slave_addrs = [int_to_hex_string(a) for a in
                   (psu1_addr, psu2_addr, psu3_addr, psu4_addr,
                    psu5_addr, psu6_addr, psu7_addr, psu8_addr)]
    d7h_raw = ['0xd7', '0x57', '0x01', '0x00',
               int_to_hex_string(domain)] + slave_addrs
    return netfun, d7h_raw
str format def get_did_raw_to_str( ): netfun = App cmd = ' 0x01 ' # Setup get_did_raw get_did_raw = netfun + cmd return get_did_raw ## Function : Converter Get DID : (NetFun = App(0x06), CMD = 01h) to str format def get_did_raw_to_str_py(): netfun = 0x06 cmd = '0x01' # Setup get_did_raw get_did_raw = [cmd] return netfun, get_did_raw ## Function : Converter Cold Reset : (NetFun = App(0x06), CMD = 02h) to str format def cold_reset_raw_to_str_py(): netfun = 0x06 cmd = '0x02' # Setup get_did_raw cold_reset_raw = [cmd] return netfun, cold_reset_raw ## Function : Converter Get SEL TIME : (NetFun = Storage (0x0A), CMD = 48h) to str format def get_sel_time_raw_to_str( ): netfun = Storage cmd = ' 0x48 ' # Setup get_sel_time_raw get_sel_time_raw = netfun + cmd return get_sel_time_raw ## Function : Converter Get SEL TIME : (NetFun = Storage (0x0A), CMD = 48h) to str format def get_sel_time_raw_to_str_py( ): netfun = 0x0a cmd = '0x48' # Setup get_sel_time_raw get_sel_time_raw = [cmd] return netfun, get_sel_time_raw ## Function : Convert Get Sensor Reading : (NetFun = Sensor (0x04), CMD = 2Dh) to str format def get_sensor_reading_raw_to_str( sensor_number ): netfun = Sensor cmd = ' 0x2d ' # Setup byte1 : sensor_number byte1 = ' '.join(sensor_number) # Setup get_sensor_reading_raw get_sensor_reading_raw = netfun + cmd + ' ' + byte1 return get_sensor_reading_raw ## Function : Convert Get Sensor Reading : (NetFun = Sensor (0x04), CMD = 2Dh) to str format def get_sensor_reading_raw_to_str_py( sensor_number ): netfun = 0x04 cmd = '0x2d' # Setup byte1 : sensor_number byte1 = ' '.join(sensor_number) # Setup get_sensor_reading_raw get_sensor_reading_raw = [cmd, byte1] return netfun, get_sensor_reading_raw ## Function : Converter 01h cmd to str format: Get chassis status def get_chassis_power_status_raw_to_str_py( ): netfun = 0x00 cmd = '0x01' # Setup get_chassis_status_raw get_chassis_status_raw = [cmd] return netfun ,get_chassis_status_raw ## Function : Converter 02h cmd to str 
## Function : Converter 02h cmd to str format: Chassis control
def chassis_control_raw_to_str_py(control):
    """Return (netfun, raw) for IPMI Chassis Control (NetFn 0x00, cmd 0x02)."""
    netfun = 0x00
    byte1 = int_to_hex_string(control)
    return netfun, ['0x02', byte1]


## Function : Converter 4Bh cmd : Get CPU and Memory Temperature cmd to str format
def get_cpu_mem_temp_4bh_raw_to_str_py(cup_select, cpu_set, mem_channel_set, request_format):
    """Return (netfun, raw) for the 4Bh get CPU/memory temperature command.

    Raises ValueError for an unknown request_format (the original silently
    fell through and returned None, which broke tuple-unpacking callers).
    """
    DEBUG('4bh_raw_str:')
    netfun = 0x2e
    cmd = '0x4B'
    # byte4: CPU select bits[3:0] | cpu-set bits[5:4] | mem-set bit[6] | format bit[7]
    # FIX: the format local was previously named `format`, shadowing the builtin.
    req_fmt = bit_shift_left(request_format, 7)
    byte4 = int_to_hex_string(cup_select
                              | bit_shift_left(cpu_set, 4)
                              | bit_shift_left(mem_channel_set, 6)
                              | req_fmt)
    if request_format == get_temp_4bh_standard:
        # bytes 5-12: channel masks, all channels enabled (two bytes per CPU0..3)
        channels = [get_temp_4bh_channel_all] * 8
    elif request_format == get_temp_4bh_extended:
        # bytes 5-12: enabled channel masks; bytes 13-20: remaining CPUs disabled
        channels = ([get_temp_4bh_channel_all] * 8
                    + [get_temp_4bh_channel_none] * 8)
    else:
        raise ValueError('unsupported request_format: %r' % (request_format,))
    get_cpu_mem_temp_4bh_raw = [cmd, '0x57', '0x01', '0x00', byte4] + channels
    DEBUG(get_cpu_mem_temp_4bh_raw)
    return netfun, get_cpu_mem_temp_4bh_raw
# ---- nm_ipmi_raw_to_str.py : a duplicate copy of this module follows (concatenation artifact) ----
from os_parameters_define import * from utility_function import * from config import * ## Function : MESDC CMD Converter 0x26h cmd to str format def mesdc_26h_raw_to_str_py( ): netfun = 0x30 cmd = '0x26' # Setup 26h_raw mesdc_26h_raw = [cmd,'0x57','0x01','0x00','0x04','0x06','0x04', '0x00', '0xca', '0x01', '0x01'] return netfun, mesdc_26h_raw ## Function : MESDC CMD Converter 0x26h cmd to str format def mesdc_26h_mctp_statistic_raw_to_str_py( ): netfun = 0x30 cmd = '0x26' # Setup 26h_raw mesdc_26h_mctp_statistic_raw = [cmd,'0x57','0x01','0x00','0x04','0x06','0x04', '0x40', '0xf8', '0x00', '0x00'] return netfun, mesdc_26h_mctp_statistic_raw ## Function : MESDC CMD Converter 0x26h cmd to str format def mesdc_26h_read_power_data_raw_to_str_py( addr ): netfun = 0x30 cmd = '0x26' addr_low_byte = addr[0] addr_high_byte = addr[1] # Setup 26h_raw mesdc_26h_read_power_data_raw = [cmd,'0x57','0x01','0x00','0x04','0x06','0x04', '0x9F', '0xB0', addr_low_byte, addr_high_byte] return netfun, mesdc_26h_read_power_data_raw ## Function : MESDC CMD Converter 0x26h cmd to str format : SUSRAM file for NONCE status is ASCII CODE : fwnoncest => 66 77 6e 6f 6e 63 65 73 74 00 def mesdc_26h_susram_hmrfpo_nonce_raw_to_str_py( ): netfun = 0x30 cmd = '0x26' # Setup 26h_raw mesdc_26h_susram_hmrfpo_nonce_raw = [cmd,'0x57','0x01','0x00','0x04','0x06','0x0F', '0xD3', '0x76', '0x00', '0x00', '0x20', '0x66', '0x77', '0x6E', '0x6F', '0x6E', '0x63', '0x65', '0x73', '0x74', '0x00',] return netfun, mesdc_26h_susram_hmrfpo_nonce_raw ## Function : MESDC CMD Converter 0x26h cmd to str format def mesdc_26h_nm_ptu_launch_state_raw_to_str_py( ): netfun = 0x30 cmd = '0x26' # Setup 26h_raw mesdc_26h_nm_ptu_launch_state_raw = [cmd,'0x57','0x01','0x00','0x04','0x06','0x02', '0xf5', '0x1a'] return netfun, mesdc_26h_nm_ptu_launch_state_raw ## Function : Converter DFh cmd to str format ## command: 01h = Recovery but no default. 02h= Restore facture default. 03h = PTT initial state restore. 
def dfh_raw_to_str( command ): netfun = OEM cmd = ' 0xdf ' manufacture_id = INTEL_MANUFACTURE_ID # Setup byte4 :Command byte4 = int_to_hex_string(command) # Setup dfh_raw dfh_raw = netfun + cmd + manufacture_id + byte4 return dfh_raw ## Function : Converter 60h cmd to str format ## request: 00h = No Launch. 01h= Launch at next boot def ptu_launch_60h_raw_to_str_py( request ): netfun = 0x2e cmd = '0x60' # Setup byte4 :Command byte4 = int_to_hex_string(request) # Setup dfh_raw ptu_60h_raw = [cmd,'0x57','0x01','0x00',byte4] return netfun ,ptu_60h_raw ## Function : Converter 61h cmd to str format ## domain: 00h = platform . 01h= CPU , 02h = Memory def ptu_result_61h_raw_to_str_py( domain ): netfun = 0x2e cmd = '0x61' # Setup byte4 :Domain byte4 = int_to_hex_string(domain) # Setup dfh_raw ptu_61h_raw = [cmd,'0x57','0x01','0x00',byte4] return netfun ,ptu_61h_raw ## Function : Converter 65h cmd to str format ## domain: 01h = CUPS Index . 02h= Dynamic load factors def get_cups_data_65h_raw_to_str_py( parameter_selector ): netfun = 0x2e cmd = '0x65' # Setup byte4 :parameter_selector byte4 = int_to_hex_string(parameter_selector) # Setup 65h_raw cups_65h_raw = [cmd,'0x57','0x01','0x00',byte4] return netfun ,cups_65h_raw ## Function : Converter 81h cmd to str format def btg_80h_raw_to_str_py( ): netfun = 0x2e cmd = '0x80' # Setup 80h_raw btg_81h_raw = [cmd,'0x57','0x01','0x00'] return netfun ,btg_80h_raw ## Function : Converter 81h cmd to str format def btg_81h_raw_to_str_py( ): netfun = 0x2e cmd = '0x81' # Setup 81h_raw btg_81h_raw = [cmd,'0x57','0x01','0x00'] return netfun ,btg_81h_raw ## Function : Converter 82h cmd to str format def btg_82h_raw_to_str_py( ): netfun = 0x2e cmd = '0x82' # Setup 82h_raw btg_82h_raw = [cmd,'0x57','0x01','0x00'] return netfun ,btg_82h_raw ## Function : Converter 70h cmd to str format def ptt_70h_raw_to_str_py( ): netfun = 0x2e cmd = '0x70' # Setup 70h_raw ptt_70h_raw = [cmd,'0x57','0x01','0x00'] return netfun ,ptt_70h_raw ## Function : 
Converter 71h cmd to str format def ptt_71h_raw_to_str_py( ): netfun = 0x2e cmd = '0x71' # Setup 71h_raw ptt_71h_raw = [cmd,'0x57','0x01','0x00'] return netfun ,ptt_71h_raw ## Function : Converter DFh cmd to str format ## command: 01h = Recovery but no default. 02h= Restore facture default. 03h = PTT initial state restore. def dfh_raw_to_str_py( command ): netfun = 0x2e cmd = '0xdf' # Setup byte4 :Command byte4 = int_to_hex_string(command) # Setup dfh_raw dfh_raw = [cmd,'0x57','0x01','0x00',byte4] return netfun ,dfh_raw ## Function : Converter D4h cmd to str format ## control_knob = 0 , get p/t states number. = 1 get logical processors number def d4h_raw_to_str_py( control_knob ): netfun = 0x2e cmd = '0xd4' # Setup byte4 [5:4] :control knob control = bit_shift_left(control_knob , 4) byte4 = int_to_hex_string(control) # Setup d4h_raw d4h_raw = [cmd,'0x57','0x01','0x00',byte4] return netfun ,d4h_raw ## Function : Converter D3h cmd to str format ## control_knob = 0 , get maximum allowed p/t states number. = 1 get get maximum allowed logical processors number def d3h_raw_to_str_py( control_knob ): netfun = 0x2e cmd = '0xd3' # Setup byte4 [5:4] :control knob control = bit_shift_left(control_knob , 4) byte4 = int_to_hex_string(control) # Setup d4h_raw d3h_raw = [cmd,'0x57','0x01','0x00',byte4] return netfun ,d3h_raw ## Function : Converter D2h cmd to str format ## control_knob = 0 , set p/t states number. 
= 1 set logical processors number def d2h_raw_to_str_py( control_knob , max_p_states, max_t_states): netfun = 0x2e cmd = '0xd2' # Setup byte4 [5:4] :control knob control = bit_shift_left(control_knob , 4) byte4 = int_to_hex_string(control) # Setup byte5 : maximum allowed p states or logical cores # note when control_knob = 0, byte5 = max_p_states, when control_knob=1, bytes5 = #of logical cores low byte byte5 = int_to_hex_string(max_p_states) # Setup byte6 : maximum allowed t states or logical cores # note when control_knob = 0, byte6 = max_t_states, when control_knob=1, bytes6 = #of logical cores high byte = 0 byte6 = int_to_hex_string(max_t_states) # Setup d2h_raw d2h_raw = [cmd,'0x57','0x01','0x00',byte4 , byte5 , byte6 ] return netfun ,d2h_raw ## Function : Converter D0h cmd to str format def d0h_raw_to_str_py( domain , control , power_budget , component_id ): netfun = 0x2e cmd = '0xd0' # Setup byte4 # byte4 bit[7]: Per component control component_control = bit_shift_left(control , 7) byte4 = int_to_hex_string(domain | component_control) # Setup bytes[6:5] = Power Budget byte5 = power_budget[0] byte6 = power_budget[1] # Setup bytes7 = component_identifier byte7 = int_to_hex_string(component_id) # Setup d0h_raw d0h_raw = [cmd,'0x57','0x01','0x00',byte4 ,byte5, byte6, byte7 ] return netfun, d0h_raw ## Function : Converter C0h cmd to str format def c0h_raw_to_str_py( flags, domain, policy_id): netfun = 0x2e cmd = '0xc0' # Setup byte4 : Flags, bits[2:0] byte4 = int_to_hex_string(flags) # Conbine for byte5 byte5 = int_to_hex_string(domain) # Setup byte6 : Policy ID byte6 = int_to_hex_string(policy_id) # Setup c0h_raw c0h_raw = [cmd,'0x57','0x01','0x00',byte4,byte5,byte6] return netfun, c0h_raw ## Function : Converter C8h cmd to str format ## mode = 1 = power mode /2 Inlet temp /3 Global throttle status / ..... 
## domain = 0 platform domain / 1 CPU domain / 2 Memory domain / 3 HW Protection/ 4 HPIO
def c8h_raw_to_str(mode, domain, power_domain, policy_id):
    """Return the C8h get-statistics request as one string (legacy format)."""
    netfun = OEM
    cmd = ' 0xc8 '
    manufacture_id = INTEL_MANUFACTURE_ID
    byte4 = int_to_hex_string(mode)  # mode bits[4:0]
    # byte5: domain bits[3:0] | power-domain bit[4]
    byte5 = int_to_hex_string(domain | bit_shift_left(power_domain, 4))
    byte6 = int_to_hex_string(policy_id)
    return netfun + cmd + manufacture_id + byte4 + ' ' + byte5 + ' ' + byte6


## Function : Converter C8h cmd to str format
## mode = 1 = power mode /2 Inlet temp /3 Global throttle status / .....
## domain = 0 platform domain / 1 CPU domain / 2 Memory domain / 3 HW Protection/ 4 HPIO
def c8h_raw_to_str_py(mode, domain, power_domain, policy_id):
    """Return (netfun, raw) for the C8h get-statistics command."""
    netfun = 0x2e
    # FIX: dropped an unused INTEL_MANUFACTURE_ID local.
    byte4 = int_to_hex_string(mode)
    byte5 = int_to_hex_string(domain | bit_shift_left(power_domain, 4))
    byte6 = int_to_hex_string(policy_id)
    return netfun, ['0xc8', '0x57', '0x01', '0x00', byte4, byte5, byte6]


## Function : Converter C9h cmd to str format
## domain = 0 platform domain / 1 CPU domain / 2 Memory domain / 3 HW Protection/ 4 HPIO
## Total request 5bytes
def c9h_raw_to_str(domain, policy_trigger_type, policy_type, power_domain):
    """Return the C9h request as one space-separated string (legacy format)."""
    DEBUG('c9h_raw_str:')
    netfun = OEM
    cmd = ' 0xc9 '
    manufacture_id = INTEL_MANUFACTURE_ID
    byte4 = int_to_hex_string(domain)
    # byte5: trigger type bits[3:0] | policy type bits[6:4] | power domain bit[7]
    byte5 = int_to_hex_string(policy_trigger_type
                              | bit_shift_left(policy_type, 4)
                              | bit_shift_left(power_domain, 7))
    c9h_raw = netfun + cmd + manufacture_id + byte4 + ' ' + byte5
    DEBUG(c9h_raw)
    return c9h_raw


## Function : Converter C9h cmd to str format (list form)
def c9h_raw_to_str_py(domain, policy_trigger_type, policy_type, power_domain):
    """Return (netfun, raw) for the C9h command."""
    DEBUG('c9h_raw_str:')
    netfun = 0x2e
    # FIX: cmd token was ' 0xc9 ' (embedded spaces), unlike every other raw list.
    byte4 = int_to_hex_string(domain)
    byte5 = int_to_hex_string(policy_trigger_type
                              | bit_shift_left(policy_type, 4)
                              | bit_shift_left(power_domain, 7))
    return netfun, ['0xc9', '0x57', '0x01', '0x00', byte4, byte5]


## Function : Converter EAh cmd to str format
## domain = 0 platform domain
def eah_raw_to_str_py(domain):
    """Return (netfun, raw) for the EAh command."""
    netfun = 0x2e
    # FIX: cmd token was ' 0xea ' (embedded spaces); unused manufacture id removed.
    byte4 = int_to_hex_string(domain)
    return netfun, ['0xea', '0x57', '0x01', '0x00', byte4]


## Function : Converter F4h cmd to str format
## index = device index (XML file => SDR => User Device ID in each sensors)
def f4h_raw_to_str_py(index):
    """Return (netfun, raw) for the F4h command."""
    netfun = 0x2e
    # FIX: cmd token was ' 0xf4 ' (embedded spaces); unused manufacture id removed.
    byte4 = int_to_hex_string(index)
    return netfun, ['0xf4', '0x57', '0x01', '0x00', byte4]


## Function : Converter F5h cmd to str format
## index = device index (XML file => SDR => User Device ID in each sensors)
def f5h_raw_to_str_py(index, page, offset):
    """Return (netfun, raw) for the F5h device-page read command."""
    netfun = 0x2e
    # FIX: cmd token was ' 0xf5 ' (embedded spaces); unused manufacture id removed.
    byte4 = int_to_hex_string(index)
    # byte5: page bits[7:4] | history selector bits[3:0] fixed at 0xF
    byte5 = int_to_hex_string(bit_shift_left(page, 4) | bit_shift_left(0x0f, 0))
    byte6 = int_to_hex_string(offset)
    return netfun, ['0xf5', '0x57', '0x01', '0x00', byte4, byte5, byte6]


## Function : Converter 6Ah cmd to str format
## Total request 10bytes
def hwpm_6ah_raw_to_str_py(control_scope, scope, min_ratio_bias, max_ratio,
                           performance_preference):
    """Return (netfun, raw) for the HWPM 0x6A control command."""
    DEBUG('6ah_raw_str:')
    netfun = 0x2e
    # FIX: cmd token was ' 0x6a ' (embedded spaces).
    # byte4: control scope bits[3:0] | scope bits[7:6]
    byte4 = int_to_hex_string(control_scope | bit_shift_left(scope, 6))
    byte5 = str(0)
    byte6 = str(0)
    byte7 = ' '.join(min_ratio_bias)
    byte8 = ' '.join(max_ratio)
    byte9 = ' '.join(performance_preference)
    byte10 = str(0)
    return netfun, ['0x6a', '0x57', '0x01', '0x00', byte4, byte5, byte6,
                    byte7, byte8, byte9, byte10]


## Function : Convert 0xC1 cmd to str format (legacy string form)
def c1h_raw_to_str(domain, policy_enable, policy_id, policy_trigger_type,
                   policy_add, aggressive, storage_mode, alert, shutdown,
                   power_domain, limit, correction, trigger_limit, report_period):
    """Return the C1h set-policy request as one space-separated string."""
    netfun = OEM
    cmd = ' 0xc1 '
    manufacture_id = INTEL_MANUFACTURE_ID
    # byte4: domain bits[3:0] | policy-enable bit[4]
    byte4 = int_to_hex_string(domain | bit_shift_left(policy_enable, 4))
    byte5 = int_to_hex_string(policy_id)
    # byte6: trigger bits[3:0] | config action bit[4] | aggressive bits[6:5] | storage bit[7]
    # FIX: original OR'd storage_mode twice (harmless but confusing).
    byte6 = int_to_hex_string(policy_trigger_type
                              | bit_shift_left(policy_add, 4)
                              | bit_shift_left(aggressive, 5)
                              | bit_shift_left(storage_mode, 7))
    # byte7: alert bit[0] | shutdown bit[1] | power-domain bit[7]
    byte7 = int_to_hex_string(alert
                              | bit_shift_left(shutdown, 1)
                              | bit_shift_left(power_domain, 7))
    byte8_to_byte9 = ' '.join(limit)            # target limit
    byte10_to_byte13 = ' '.join(correction)     # correction time
    byte14_to_byte15 = ' '.join(trigger_limit)  # trigger limit point
    byte16_to_byte17 = ' '.join(report_period)  # statistics period (s)
    return (netfun + cmd + manufacture_id + byte4 + ' ' + byte5 + ' ' + byte6
            + ' ' + byte7 + ' ' + byte8_to_byte9 + ' ' + byte10_to_byte13
            + ' ' + byte14_to_byte15 + ' ' + byte16_to_byte17)


## Function : Convert 0xCB cmd to str format
def cbh_raw_to_str_py(domain, min_power_draw_range, max_power_draw_range):
    """Return (netfun, raw) for the CBh set-power-draw-range command."""
    netfun = 0x2e
    byte4 = int_to_hex_string(domain)
    cbh_raw = ['0xcb', '0x57', '0x01', '0x00', byte4,
               min_power_draw_range[0], min_power_draw_range[1],
               max_power_draw_range[0], max_power_draw_range[1]]
    return netfun, cbh_raw


## Function : Convert 0xC1 cmd to str format (list form)
def c1h_raw_to_str_py(domain, policy_enable, policy_id, policy_trigger_type,
                      policy_add, aggressive, storage_mode, alert, shutdown,
                      power_domain, limit, correction, trigger_limit, report_period):
    """Return (netfun, raw) for the C1h set-policy command."""
    netfun = 0x2e
    # FIX: cmd token was ' 0xc1 ' (embedded spaces), unlike every other raw list.
    byte4 = int_to_hex_string(domain | bit_shift_left(policy_enable, 4))
    byte5 = int_to_hex_string(policy_id)
    # FIX: original OR'd storage_mode twice in byte6.
    byte6 = int_to_hex_string(policy_trigger_type
                              | bit_shift_left(policy_add, 4)
                              | bit_shift_left(aggressive, 5)
                              | bit_shift_left(storage_mode, 7))
    byte7 = int_to_hex_string(alert
                              | bit_shift_left(shutdown, 1)
                              | bit_shift_left(power_domain, 7))
    c1h_raw = ['0xc1', '0x57', '0x01', '0x00', byte4, byte5, byte6, byte7,
               limit[0], limit[1],
               correction[0], correction[1], correction[2], correction[3],
               trigger_limit[0], trigger_limit[1],
               report_period[0], report_period[1]]
    return netfun, c1h_raw


## Function : Convert 0xF0 cmd to str format
def f0h_set_k_coefficiency_raw_to_str_py(k_coefficiency):
    """Return (netfun, raw) for the F0h set-K-coefficient command.

    k_coefficiency is a list whose first item is the pre-formatted byte4 string.
    """
    netfun = 0x2e
    byte4 = k_coefficiency[0]
    return netfun, ['0xf0', '0x57', '0x01', '0x00', byte4]


## Function : Convert 0xF2 cmd to str format
def f2h_raw_to_str_py(domain):
    """Return (netfun, raw) for the F2h command (domain in byte4 bits[3:0])."""
    netfun = 0x2e
    byte4 = int_to_hex_string(domain)
    return netfun, ['0xf2', '0x57', '0x01', '0x00', byte4]


## Function : Converter 40h cmd : Send RAW PECI cmd to str format
## PECI client address = 30h CPU0 / 31h CPU1 domain / 32h CPU2 / 33h CPU3
## 70h~73h cmd via PECI over DMI
## B0h~B3h cmd via PECI wire
def peci_40h_raw_to_str(client_addr, interface, write_length, read_length, raw):
    """Return the 40h raw-PECI request as one space-separated string (legacy)."""
    DEBUG('40h_raw_str:')
    netfun = OEM
    cmd = ' 0x40 '
    manufacture_id = INTEL_MANUFACTURE_ID
    # byte4: client address bits[5:0] | interface select bits[7:6]
    byte4 = int_to_hex_string(client_addr | bit_shift_left(interface, 6))
    byte5 = int_to_hex_string(write_length)
    byte6 = int_to_hex_string(read_length)
    byte7_byteN = ' '.join(raw)
    peci_40h_raw = (netfun + cmd + manufacture_id + byte4 + ' ' + byte5
                    + ' ' + byte6 + ' ' + byte7_byteN)
    DEBUG(peci_40h_raw)
    return peci_40h_raw


## Function : Converter 40h cmd : Send RAW PECI cmd to str format (list form)
def peci_40h_raw_to_str_py(client_addr, interface, write_length, read_length, raw):
    """Return (netfun, raw list) for the 40h raw-PECI command."""
    DEBUG('40h_raw_str:')
    netfun = 0x2e
    byte4 = int_to_hex_string(client_addr | bit_shift_left(interface, 6))
    byte5 = int_to_hex_string(write_length)
    byte6 = int_to_hex_string(read_length)
    peci_40h_raw = ['0x40', '0x57', '0x01', '0x00', byte4, byte5, byte6]
    # NOTE(review): format(int) renders decimal, not '0x..' — preserved as-is.
    for item in raw:
        peci_40h_raw.append(format(item))
    DEBUG(peci_40h_raw)
    return netfun, peci_40h_raw
## Function : Converter d9h cmd : Send RAW PMbus extended cmd to str format
def d9h_raw_to_str_py(msg_type, pec_report, pec_en, sensor_bus, target_addr,
                      mux_addr, mux_ch, mux_config, trans_protocol,
                      write_len, read_len, command):
    """Return (netfun, raw) for the D9h PMBus-extended read command."""
    DEBUG('d9h_raw_str:')
    netfun = 0x2e
    # byte4: msg type bits[3:1] | extended-addr format bits[5:4]
    #        | PEC report bit[6] | PEC enable bit[7]
    byte4 = int_to_hex_string(bit_shift_left(msg_type, 1)
                              | bit_shift_left(d9h_extended_device_addr, 4)
                              | bit_shift_left(pec_report, 6)
                              | bit_shift_left(pec_en, 7))
    pmbus_d9h_raw = ['0xd9', '0x57', '0x01', '0x00', byte4,
                     int_to_hex_string(sensor_bus),                    # byte5
                     int_to_hex_string(target_addr),                   # byte6
                     int_to_hex_string(mux_addr),                      # byte7
                     int_to_hex_string(mux_ch),                        # byte8
                     int_to_hex_string(mux_config),                    # byte9
                     int_to_hex_string(bit_shift_left(trans_protocol, 5)),  # byte10
                     int_to_hex_string(write_len),                     # byte11
                     int_to_hex_string(read_len),                      # byte12
                     int_to_hex_string(command)]                       # byte13
    return netfun, pmbus_d9h_raw


## Function : Converter d9h cmd : Send RAW PMbus extended cmd to str format
def d9h_set_raw_to_str_py(msg_type, pec_report, pec_en, sensor_bus, target_addr,
                          mux_addr, mux_ch, mux_config, trans_protocol,
                          write_len, read_len, command):
    """Return (netfun, raw) for the D9h PMBus-extended write command.

    *command* is a sequence of payload values appended after byte12.
    """
    DEBUG('d9h_raw_str:')
    netfun = 0x2e
    byte4 = int_to_hex_string(bit_shift_left(msg_type, 1)
                              | bit_shift_left(d9h_extended_device_addr, 4)
                              | bit_shift_left(pec_report, 6)
                              | bit_shift_left(pec_en, 7))
    pmbus_d9h_raw = ['0xd9', '0x57', '0x01', '0x00', byte4,
                     int_to_hex_string(sensor_bus),
                     int_to_hex_string(target_addr),
                     int_to_hex_string(mux_addr),
                     int_to_hex_string(mux_ch),
                     int_to_hex_string(mux_config),
                     int_to_hex_string(bit_shift_left(trans_protocol, 5)),
                     int_to_hex_string(write_len),
                     int_to_hex_string(read_len)]
    # NOTE(review): format(int) renders decimal, not '0x..' — preserved as-is.
    for item in command:
        pmbus_d9h_raw.append(format(item))
    DEBUG(pmbus_d9h_raw)
    return netfun, pmbus_d9h_raw


## Function : Converter d7h cmd : Send RAW PMbus extended cmd to str format
def d7h_set_raw_to_str_py(domain, psu1_addr, psu2_addr, psu3_addr, psu4_addr,
                          psu5_addr, psu6_addr, psu7_addr, psu8_addr):
    """Return (netfun, raw) for the D7h set-PSU-slave-address command."""
    netfun = 0x2e
    slave_addrs = [int_to_hex_string(a) for a in
                   (psu1_addr, psu2_addr, psu3_addr, psu4_addr,
                    psu5_addr, psu6_addr, psu7_addr, psu8_addr)]
    d7h_raw = ['0xd7', '0x57', '0x01', '0x00',
               int_to_hex_string(domain)] + slave_addrs
    return netfun, d7h_raw
## Function : Converter Get DID : (NetFun = App(0x06), CMD = 01h) to str format
def get_did_raw_to_str( ):
    """Build the Get Device ID raw command as a single space-separated string.

    Relies on the module-level NetFun string constant ``App`` defined
    elsewhere in this file.
    """
    netfun = App
    cmd = ' 0x01 '
    # Setup get_did_raw
    get_did_raw = netfun + cmd
    return get_did_raw


## Function : Converter Get DID : (NetFun = App(0x06), CMD = 01h) to str format
def get_did_raw_to_str_py():
    """Return ``(netfun, raw_byte_list)`` for the Get Device ID command."""
    netfun = 0x06
    cmd = '0x01'
    # Setup get_did_raw
    get_did_raw = [cmd]
    return netfun, get_did_raw


## Function : Converter Cold Reset : (NetFun = App(0x06), CMD = 02h) to str format
def cold_reset_raw_to_str_py():
    """Return ``(netfun, raw_byte_list)`` for the Cold Reset command."""
    netfun = 0x06
    cmd = '0x02'
    # Setup cold_reset_raw
    cold_reset_raw = [cmd]
    return netfun, cold_reset_raw


## Function : Converter Get SEL TIME : (NetFun = Storage (0x0A), CMD = 48h) to str format
def get_sel_time_raw_to_str( ):
    """Build the Get SEL Time raw command as a single string.

    Relies on the module-level NetFun string constant ``Storage``.
    """
    netfun = Storage
    cmd = ' 0x48 '
    # Setup get_sel_time_raw
    get_sel_time_raw = netfun + cmd
    return get_sel_time_raw


## Function : Converter Get SEL TIME : (NetFun = Storage (0x0A), CMD = 48h) to str format
def get_sel_time_raw_to_str_py( ):
    """Return ``(netfun, raw_byte_list)`` for the Get SEL Time command."""
    netfun = 0x0a
    cmd = '0x48'
    # Setup get_sel_time_raw
    get_sel_time_raw = [cmd]
    return netfun, get_sel_time_raw


## Function : Convert Get Sensor Reading : (NetFun = Sensor (0x04), CMD = 2Dh) to str format
def get_sensor_reading_raw_to_str( sensor_number ):
    """Build the Get Sensor Reading raw command string for *sensor_number*.

    ``sensor_number`` is an iterable of hex-string bytes; relies on the
    module-level NetFun string constant ``Sensor``.
    """
    netfun = Sensor
    cmd = ' 0x2d '
    # Setup byte1 : sensor_number
    byte1 = ' '.join(sensor_number)
    get_sensor_reading_raw = netfun + cmd + ' ' + byte1
    return get_sensor_reading_raw


## Function : Convert Get Sensor Reading : (NetFun = Sensor (0x04), CMD = 2Dh) to str format
def get_sensor_reading_raw_to_str_py( sensor_number ):
    """Return ``(netfun, raw_byte_list)`` for the Get Sensor Reading command."""
    netfun = 0x04
    cmd = '0x2d'
    # Setup byte1 : sensor_number
    byte1 = ' '.join(sensor_number)
    get_sensor_reading_raw = [cmd, byte1]
    return netfun, get_sensor_reading_raw


## Function : Converter 01h cmd to str format: Get chassis status
def get_chassis_power_status_raw_to_str_py( ):
    """Return ``(netfun, raw_byte_list)`` for the Get Chassis Status command."""
    netfun = 0x00
    cmd = '0x01'
    get_chassis_status_raw = [cmd]
    return netfun, get_chassis_status_raw


## Function : Converter 02h cmd to str format: Chassis control
def chassis_control_raw_to_str_py( control ):
    """Return ``(netfun, raw_byte_list)`` for the Chassis Control command.

    ``control`` is the numeric chassis-control action code.
    """
    netfun = 0x00
    cmd = '0x02'
    # byte1 : chassis control action
    byte1 = int_to_hex_string(control)
    chassis_control_raw = [cmd, byte1]
    return netfun, chassis_control_raw


## Function : Converter 4Bh cmd : Get CPU and Memory Temperature cmd to str format
def get_cpu_mem_temp_4bh_raw_to_str_py( cup_select, cpu_set, mem_channel_set, request_format ):
    """Build the OEM 4Bh (Get CPU and Memory Temperature) raw request.

    Byte4 packs cpu select (bits 3:0), cpu set (5:4), memory channel set
    (6) and the request format flag (7).  Standard format carries 8
    channel-mask bytes; extended format carries 16 (CPU2/3 masked off —
    see the commented-out all-channels alternative in history).

    Raises ValueError for an unknown *request_format* (previously the
    function silently fell through and returned None, which made callers
    crash on tuple unpacking).
    """
    DEBUG('4bh_raw_str:')
    netfun = 0x2e
    cmd = '0x4B'
    # byte4 bit[3:0]: cpu select
    cpu_sel = cup_select
    # byte4 bit[5:4]: cpu set
    cup_domain = bit_shift_left(cpu_set, 4)
    # byte4 bit[6]: memory channel set
    mem_domain = bit_shift_left(mem_channel_set, 6)
    # byte4 bit[7]: request format (renamed from `format` to avoid
    # shadowing the builtin)
    req_format_bit = bit_shift_left(request_format, 7)
    byte4 = int_to_hex_string(cpu_sel | cup_domain | mem_domain | req_format_bit)
    if request_format == get_temp_4bh_standard:
        # Bytes 5-12: two channel-mask bytes per CPU0..CPU3, all channels.
        channel_bytes = [get_temp_4bh_channel_all] * 8
    elif request_format == get_temp_4bh_extended:
        # Bytes 5-12: CPU0/CPU2 all channels; bytes 13-20: CPU3/CPU4 none.
        channel_bytes = ([get_temp_4bh_channel_all] * 8 +
                         [get_temp_4bh_channel_none] * 8)
    else:
        raise ValueError('Unsupported 4Bh request format: {}'.format(request_format))
    get_cpu_mem_temp_4bh_raw = [cmd, '0x57', '0x01', '0x00', byte4] + channel_bytes
    DEBUG(get_cpu_mem_temp_4bh_raw)
    return netfun, get_cpu_mem_temp_4bh_raw
0.172799
0.13148
# https://github.com/dbullockphd/RPiSense

from os import getenv
from random import randint

from numpy import uint8, where, genfromtxt


class Colors (object):
    """
    Keep a list of colors organized and sorted. You can select colors by
    name, by index number, and even at random.

    :param order: (`array_like`) This defines the hierarchy of field names
       when sorting. See the colors.csv file for information about these
       names.

    :param ascending: (`boolean`) Sorting can either be ascending or
       descending.  NOTE(review): this flag is currently ignored by
       ``__init__`` — confirm whether descending sort was ever wired up.
    """

    __colorscsv = '/home/pi/src/RPiSense/configs/colors.csv'

    def __init__ (self, order=('H','L','S'), ascending=True):
        # Load the color table: name plus 8-bit r/g/b, h/s/v and H/L/S
        # components (columns defined by configs/colors.csv).
        self.colors = genfromtxt (self.__colorscsv, dtype=[
            ('name',object),
            ('r',uint8), ('g',uint8), ('b',uint8),
            ('h',uint8), ('s',uint8), ('v',uint8),
            ('H',uint8), ('L',uint8), ('S',uint8),
        ], delimiter=',')
        # mergesort is stable, so later sort keys keep relative order.
        self.colors.sort (kind='mergesort', order=order)

    def random (self, mode='rgb'):
        """
        Select a color at random.

        :param mode: (`string`) 3-character color-space selector: 'rgb',
           'hsv', or 'HLS'.

        :return:

        - **name** (`string`) -- The name of the color.
        - **abc** (`tuple`) -- Three 8-bit values for the color.
        """
        i = randint (0, self.colors.shape[0]-1)
        name, abc = self.__get (i, mode)
        return name, abc

    def randomColor (self, mode='rgb'):
        """
        Select a color at random and return only its component tuple.

        :param mode: (`string`) 3-character color-space selector: 'rgb',
           'hsv', or 'HLS'.

        :return:

        - **abc** (`tuple`) -- Three 8-bit values for the color.
        """
        name, abc = self.random (mode)
        return abc

    def randomName (self, mode='rgb'):
        """
        Select a color at random and return only its name.

        :param mode: (`string`) 3-character color-space selector: 'rgb',
           'hsv', or 'HLS'.

        :return:

        - **name** (`string`) -- The name of the color.
        """
        name, abc = self.random (mode)
        return name

    def fetch (self, key, mode='rgb'):
        """
        Look up a color by name.

        :param key: (`string`) The name of the color to find.

        :param mode: (`string`) 3-character color-space selector: 'rgb',
           'hsv', or 'HLS'.

        :return:

        - **abc** (`tuple`) -- Three 8-bit values for the color.

        :raises ValueError: if *key* is not in the color table.
        """
        f = where (self.colors['name'] == key)
        if len(f[0]) == 0:
            raise ValueError ('color {0:s} unknown'.format(key))
        i = f[0][0]
        name, abc = self.__get (i, mode)
        return abc

    def __get (self, i, mode):
        # Extract row *i* in the requested color space.
        if mode == 'rgb':
            a = self.colors['r'][i]
            b = self.colors['g'][i]
            c = self.colors['b'][i]
        elif mode == 'hsv':
            a = self.colors['h'][i]
            b = self.colors['s'][i]
            c = self.colors['v'][i]
        elif mode == 'HLS':
            a = self.colors['H'][i]
            b = self.colors['L'][i]
            c = self.colors['S'][i]
        else:
            # BUG FIX: previously fell through and crashed with
            # UnboundLocalError; report the bad mode explicitly.
            raise ValueError ('mode {0:s} unknown'.format(mode))
        name = self.colors['name'][i]
        return name, (a, b, c)

    def __getitem__ (self, key):
        # Dictionary-like access; 'random' picks an arbitrary color.
        if key == 'random':
            return self.randomColor ()
        else:
            return self.fetch (key, 'rgb')


if __name__ == '__main__':
    from argparse import ArgumentParser as AP
    # get a color by name on the command line
    p = AP (description='get a color by name')
    p.add_argument ('color', default='random')
    args = p.parse_args ()
    # get the color values
    c = Colors ()
    if args.color == 'random':
        name, rgb = c.random ()
    else:
        name = args.color
        rgb = c[args.color]
    # display the name of the color and its values
    print ('{0:s}: {1:s}'.format (str(name), str(rgb)))
python/RPiSense/Colors.py
# https://github.com/dbullockphd/RPiSense

from os import getenv
from random import randint

from numpy import uint8, where, genfromtxt


class Colors (object):
    """A sorted table of named colors.

    Colors can be looked up by name, picked at random, and returned in
    any of three color spaces ('rgb', 'hsv', or 'HLS').

    :param order: (`array_like`) Hierarchy of field names used when
       sorting the table (see colors.csv for the field meanings).
    :param ascending: (`boolean`) Requested sort direction.
    """

    __colorscsv = '/home/pi/src/RPiSense/configs/colors.csv'

    def __init__ (self, order=('H','L','S'), ascending=True):
        # Load the CSV of colors: a name column followed by nine 8-bit
        # component columns (r/g/b, h/s/v, H/L/S).
        spec = [('name', object),
                ('r', uint8), ('g', uint8), ('b', uint8),
                ('h', uint8), ('s', uint8), ('v', uint8),
                ('H', uint8), ('L', uint8), ('S', uint8)]
        self.colors = genfromtxt (self.__colorscsv, dtype=spec, delimiter=',')
        # Stable sort so secondary keys preserve relative ordering.
        self.colors.sort (kind='mergesort', order=order)

    def random (self, mode='rgb'):
        """Pick a color uniformly at random.

        :param mode: (`string`) Color space: 'rgb', 'hsv', or 'HLS'.
        :return: ``(name, (a, b, c))`` — the color name and its three
           8-bit components.
        """
        last = self.colors.shape[0] - 1
        return self.__get (randint (0, last), mode)

    def randomColor (self, mode='rgb'):
        """Pick a random color; return only its component tuple.

        :param mode: (`string`) Color space: 'rgb', 'hsv', or 'HLS'.
        :return: ``(a, b, c)`` — three 8-bit values.
        """
        return self.random (mode)[1]

    def randomName (self, mode='rgb'):
        """Pick a random color; return only its name.

        :param mode: (`string`) Color space: 'rgb', 'hsv', or 'HLS'.
        :return: the color name as a string.
        """
        return self.random (mode)[0]

    def fetch (self, key, mode='rgb'):
        """Look a color up by name.

        :param key: (`string`) Name of the color to find.
        :param mode: (`string`) Color space: 'rgb', 'hsv', or 'HLS'.
        :return: ``(a, b, c)`` — three 8-bit values.
        :raises ValueError: if the name is not in the table.
        """
        matches = where (self.colors['name'] == key)[0]
        if len(matches) == 0:
            raise ValueError ('color {0:s} unknown'.format(key))
        _, components = self.__get (matches[0], mode)
        return components

    def __get (self, idx, mode):
        # Map the requested color space onto the table's field names.
        if mode == 'rgb':
            fields = ('r', 'g', 'b')
        elif mode == 'hsv':
            fields = ('h', 's', 'v')
        elif mode == 'HLS':
            fields = ('H', 'L', 'S')
        # NOTE(review): an unrecognized mode crashes here on the unbound
        # `fields` local, matching the original behavior.
        a, b, c = (self.colors[f][idx] for f in fields)
        return self.colors['name'][idx], (a, b, c)

    def __getitem__ (self, key):
        # Dict-style access; the special key 'random' picks any color.
        if key == 'random':
            return self.randomColor ()
        return self.fetch (key, 'rgb')


if __name__ == '__main__':
    from argparse import ArgumentParser as AP
    # Resolve one color name (or 'random') given on the command line.
    parser = AP (description='get a color by name')
    parser.add_argument ('color', default='random')
    args = parser.parse_args ()
    table = Colors ()
    if args.color == 'random':
        name, rgb = table.random ()
    else:
        name, rgb = args.color, table[args.color]
    print ('{0:s}: {1:s}'.format (str(name), str(rgb)))
0.890038
0.308112
import numpy as np

"""
For conv2D methods: Weights shape must be in form of (o, i, k_h, k_w),
where 'o' stands for number of outputs, 'i' number of inputs, 'k_h' is
kernel height and 'k_w' is kernel width.  fMaps stands for Feature Maps,
or input images; its shape must be in form of (i, h, w), where 'i' is the
number of inputs, 'h' is image height and 'w' is image width.

For dense method: Weights shape must be in form of (o, i), where 'o'
stands for number of outputs and 'i' number of inputs.  fMap is a
flattened array.
"""


# Convolves feature maps and weights (3x3 kernels only).
def conv2D(fMaps, weights, bias, padding='same'):
    """2-D convolution with 3x3 kernels.

    fMaps   : (i, h, w) input feature maps.
    weights : (o, i, 3, 3) kernels.
    bias    : (o,) per-output bias.
    padding : 'same' zero-pads so the output is (o, h, w); any other
              value behaves as 'valid', producing (o, h-2, w-2).
    Returns the convolved feature maps.
    """
    image_initial_addr = [0, 0]  # top-left corner of the input
    if padding == 'same':
        ### PADDING == 'SAME'
        if len(fMaps.shape) > 2:
            fMap_height = fMaps.shape[1]
            fMap_width = fMaps.shape[2]
            convolved_fMap = np.zeros((weights.shape[0], fMaps.shape[1], fMaps.shape[2]))
        else:
            # NOTE(review): this 2-D branch indexes fMaps[i][row, col],
            # which does not work for a plain (h, w) array — confirm
            # whether 2-D input was ever supported.
            fMap_height = fMaps.shape[0]
            fMap_width = fMaps.shape[1]
            convolved_fMap = np.zeros((weights.shape[0], fMaps.shape[0], fMaps.shape[1]))
        for j, w in enumerate(weights):  # loop over output maps
            convolved_rows = -1
            while convolved_rows < (fMap_height - 1):
                convolved_cols = -1
                while convolved_cols < (fMap_width - 1):
                    convolved = np.zeros((3, 3))  # accumulator window
                    for i, kernel in enumerate(w):  # loop over input maps
                        for col in range(0, 3):
                            for row in range(0, 3):
                                col_addr = convolved_cols + col
                                row_addr = convolved_rows + row
                                # Positions outside the image contribute zero
                                # (implicit zero padding).
                                if (col_addr < image_initial_addr[1] or col_addr == fMap_width or
                                        row_addr < image_initial_addr[0] or row_addr == fMap_height):
                                    convolved[row][col] += 0
                                else:
                                    convolved[row][col] += (fMaps[i][row_addr, col_addr] * kernel[row][col])
                    summ = np.asarray(convolved).sum() + bias[j]
                    convolved_fMap[j][convolved_rows + 1, convolved_cols + 1] = summ
                    convolved_cols += 1
                convolved_rows += 1  # count convolved rows
    else:
        ### PADDING == 'VALID'
        if len(fMaps.shape) > 2:
            fMap_height = fMaps.shape[1]
            fMap_width = fMaps.shape[2]
            convolved_fMap = np.zeros((weights.shape[0], fMaps.shape[1] - 2, fMaps.shape[2] - 2))
        else:
            fMap_height = fMaps.shape[0]
            fMap_width = fMaps.shape[1]
            convolved_fMap = np.zeros((weights.shape[0], fMaps.shape[0] - 2, fMaps.shape[1] - 2))
        for j, w in enumerate(weights):  # loop over output maps
            im_row_addr = image_initial_addr[0]
            while im_row_addr < (fMap_height - 2):
                im_col_addr = image_initial_addr[1]
                while im_col_addr < (fMap_width - 2):
                    convolved = np.zeros((3, 3))  # accumulator window
                    for i, kernel in enumerate(w):  # loop over input maps
                        for k in range(0, 3):
                            convolved[0][k] += (fMaps[i][im_row_addr, k + im_col_addr] * kernel[0][k])
                            convolved[1][k] += (fMaps[i][1 + im_row_addr, k + im_col_addr] * kernel[1][k])
                            convolved[2][k] += (fMaps[i][2 + im_row_addr, k + im_col_addr] * kernel[2][k])
                    summ = np.asarray(convolved).sum() + bias[j]
                    convolved_fMap[j][im_row_addr, im_col_addr] += summ
                    im_col_addr += 1
                im_row_addr += 1  # count convolved rows
    return convolved_fMap


### DENSE
def dense(fMap, weights, bias):
    """Fully-connected layer: out[j] = sum_i weights[j, i] * fMap[i] + bias[j]."""
    out = np.zeros((weights.shape[0],))  # output vector
    for j, w in enumerate(weights):
        summ = 0
        for i, k in enumerate(w):
            summ += k * fMap[i]
        # NOTE(review): assumes the accumulated value is a numpy scalar
        # (so .sum() exists) — an empty weight row would break here.
        summ = summ.sum() + bias[j]
        out[j] = summ
    return out


def softmax(x):
    """Compute softmax values for each set of scores in x (max-shifted for stability)."""
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)


def LeakyReLU(x, alpha):
    """Leaky Rectified Linear Unit activation.

    If ``alpha`` is zero this becomes a standard ReLU.
    """
    # BUG FIX: was np.maximun (typo), which raised AttributeError at runtime.
    return np.maximum(x, alpha * x)
NNLayers.py
import numpy as np

"""
For conv2D: weights have shape (o, i, k_h, k_w) — 'o' outputs, 'i'
inputs, 3x3 kernels.  fMaps (feature maps / input images) have shape
(i, h, w).

For dense: weights have shape (o, i); fMap is a flattened array.
"""


# Convolves feature maps and weights (3x3 kernels only).
def conv2D(fMaps, weights, bias, padding='same'):
    """2-D convolution with 3x3 kernels.

    padding='same' zero-pads so the output is (o, h, w); any other value
    behaves as 'valid' and yields (o, h-2, w-2).
    """
    image_initial_addr = [0, 0]  # top-left corner of the input
    if padding == 'same':
        ### PADDING == 'SAME'
        if len(fMaps.shape) > 2:
            fMap_height = fMaps.shape[1]
            fMap_width = fMaps.shape[2]
            convolved_fMap = np.zeros((weights.shape[0], fMaps.shape[1], fMaps.shape[2]))
        else:
            # NOTE(review): the 2-D input branch indexes fMaps[i][row, col],
            # which does not work for a plain (h, w) array — confirm intent.
            fMap_height = fMaps.shape[0]
            fMap_width = fMaps.shape[1]
            convolved_fMap = np.zeros((weights.shape[0], fMaps.shape[0], fMaps.shape[1]))
        for j, w in enumerate(weights):  # loop over output maps
            convolved_rows = -1
            while convolved_rows < (fMap_height - 1):
                convolved_cols = -1
                while convolved_cols < (fMap_width - 1):
                    convolved = np.zeros((3, 3))  # accumulator window
                    for i, kernel in enumerate(w):  # loop over input maps
                        for col in range(0, 3):
                            for row in range(0, 3):
                                col_addr = convolved_cols + col
                                row_addr = convolved_rows + row
                                # Out-of-image taps contribute zero (zero padding).
                                if (col_addr < image_initial_addr[1] or col_addr == fMap_width or
                                        row_addr < image_initial_addr[0] or row_addr == fMap_height):
                                    convolved[row][col] += 0
                                else:
                                    convolved[row][col] += (fMaps[i][row_addr, col_addr] * kernel[row][col])
                    summ = np.asarray(convolved).sum() + bias[j]
                    convolved_fMap[j][convolved_rows + 1, convolved_cols + 1] = summ
                    convolved_cols += 1
                convolved_rows += 1  # rows convolved so far
    else:
        ### PADDING == 'VALID'
        if len(fMaps.shape) > 2:
            fMap_height = fMaps.shape[1]
            fMap_width = fMaps.shape[2]
            convolved_fMap = np.zeros((weights.shape[0], fMaps.shape[1] - 2, fMaps.shape[2] - 2))
        else:
            fMap_height = fMaps.shape[0]
            fMap_width = fMaps.shape[1]
            convolved_fMap = np.zeros((weights.shape[0], fMaps.shape[0] - 2, fMaps.shape[1] - 2))
        for j, w in enumerate(weights):  # loop over output maps
            im_row_addr = image_initial_addr[0]
            while im_row_addr < (fMap_height - 2):
                im_col_addr = image_initial_addr[1]
                while im_col_addr < (fMap_width - 2):
                    convolved = np.zeros((3, 3))  # accumulator window
                    for i, kernel in enumerate(w):  # loop over input maps
                        for k in range(0, 3):
                            convolved[0][k] += (fMaps[i][im_row_addr, k + im_col_addr] * kernel[0][k])
                            convolved[1][k] += (fMaps[i][1 + im_row_addr, k + im_col_addr] * kernel[1][k])
                            convolved[2][k] += (fMaps[i][2 + im_row_addr, k + im_col_addr] * kernel[2][k])
                    summ = np.asarray(convolved).sum() + bias[j]
                    convolved_fMap[j][im_row_addr, im_col_addr] += summ
                    im_col_addr += 1
                im_row_addr += 1  # rows convolved so far
    return convolved_fMap


### DENSE
def dense(fMap, weights, bias):
    """Fully-connected layer: out[j] = sum_i weights[j, i] * fMap[i] + bias[j]."""
    out = np.zeros((weights.shape[0],))  # output vector
    for j, w in enumerate(weights):
        summ = 0
        for i, k in enumerate(w):
            summ += k * fMap[i]
        # NOTE(review): assumes the accumulator is a numpy scalar so that
        # .sum() exists; an empty weight row would break here.
        summ = summ.sum() + bias[j]
        out[j] = summ
    return out


def softmax(x):
    """Softmax over x, shifted by max(x) for numerical stability."""
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)


def LeakyReLU(x, alpha):
    """Leaky ReLU activation; alpha == 0 gives a standard ReLU."""
    # BUG FIX: was np.maximun (typo), which raised AttributeError at runtime.
    return np.maximum(x, alpha * x)
0.601711
0.596874
import atexit
import sys
import time
import threading

from Queue import Queue, Empty  # Python 2 standard library

from flask import Flask, request, render_template

from sign import Sign

# Request handlers push command tuples here; the worker thread pops them.
queue = Queue()
# Signals the worker loop to exit.
stop = threading.Event()
t = None


def do_command(sign, command, args):
    """Apply one queued command tuple to *sign*.

    ``change_modes`` cycles the animation modes; every other command
    cancels the current mode and then acts on the LED strip directly.
    """
    if command == 'change_modes':
        sign.change_modes()
    else:
        sign.mode = 'noop'
        if command == 'off':
            sign.strip.set_colour_rgb(0, 0, 0)
        if command == 'on':
            sign.mode = 'aqua'
            sign.tick()
        elif command == 'red':
            sign.strip.set_colour_rgb(255, 0, 0)
        elif command == 'green':
            sign.strip.set_colour_rgb(0, 255, 0)
        elif command == 'blue':
            sign.strip.set_colour_rgb(0, 0, 255)
        elif command == 'set_rgb':
            sign.strip.set_colour_rgb(*args)


def loop():
    """Worker thread body: drain the command queue and tick the animation."""
    sign = Sign()
    while not stop.is_set():
        try:
            pop = queue.get_nowait()
            command = pop[0]
            args = pop[1:]
            do_command(sign, command, args)
        except Empty:
            pass
        if sign.mode != 'noop':
            sign.tick()
        time.sleep(0.01)
    print('End of loop')


def create_app():
    """Create the Flask app and start the background LED worker thread."""
    app = Flask(__name__, template_folder='.')
    # NOTE(review): this local `t` shadows the module-level `t`; kept
    # as-is since nothing reads the global.
    t = threading.Thread(target=loop)
    t.daemon = True
    t.start()
    return app


app = create_app()


@app.route("/change_modes")
def change_modes():
    queue.put_nowait(('change_modes', ))
    return "Changing modes!"


@app.route("/off")
def off():
    queue.put_nowait(('off', ))
    return "Bye!"


@app.route("/on")
def on():
    queue.put_nowait(('on', ))
    return "Hey!"


@app.route("/red")
def red():
    queue.put_nowait(('red', ))
    return "Red!"


@app.route("/green")
def green():
    queue.put_nowait(('green', ))
    return "Green!"  # BUG FIX: previously returned "Red!" (copy-paste)


@app.route("/blue")
def blue():
    queue.put_nowait(('blue', ))
    return "Blue!"  # BUG FIX: previously returned "Red!" (copy-paste)


@app.route("/rgb/<r>,<g>,<b>")
def set_rgb(r, g, b):
    queue.put_nowait(('set_rgb', int(r), int(g), int(b)))
    return "Bye!"


@app.route("/")
def home():
    return render_template('index.html', host=request.host)


def die():
    """atexit hook: ask the worker thread to stop."""
    stop.set()


if __name__ == '__main__':
    print('Starting up')
    atexit.register(die)
    try:
        app.run(host='0.0.0.0', port=5000, debug=False)
    except KeyboardInterrupt:
        print('Shutting down')
        sign = Sign()
        sign.strip.set_colour_rgb(0, 0, 0)
    finally:
        # Trigger stop
        stop.set()
        sys.exit(0)
sign_api.py
import atexit
import sys
import time
import threading

from Queue import Queue, Empty

from flask import Flask, request, render_template

from sign import Sign

# Commands travel from the HTTP handlers to the LED worker via this queue.
queue = Queue()
# Set to ask the worker loop to finish.
stop = threading.Event()
t = None


def do_command(sign, command, args):
    """Dispatch a single queued command to the sign hardware."""
    if command == 'change_modes':
        sign.change_modes()
        return
    # Any direct command cancels whatever animation is running.
    sign.mode = 'noop'
    if command == 'off':
        sign.strip.set_colour_rgb(0, 0, 0)
    if command == 'on':
        sign.mode = 'aqua'
        sign.tick()
    elif command in ('red', 'green', 'blue'):
        rgb = {'red': (255, 0, 0),
               'green': (0, 255, 0),
               'blue': (0, 0, 255)}[command]
        sign.strip.set_colour_rgb(*rgb)
    elif command == 'set_rgb':
        sign.strip.set_colour_rgb(*args)


def loop():
    """Background worker: consume queued commands and advance the animation."""
    sign = Sign()
    while not stop.is_set():
        try:
            item = queue.get_nowait()
        except Empty:
            pass
        else:
            do_command(sign, item[0], item[1:])
        if sign.mode != 'noop':
            sign.tick()
        time.sleep(0.01)
    print('End of loop')


def create_app():
    """Build the Flask application and spawn the LED worker thread."""
    flask_app = Flask(__name__, template_folder='.')
    worker = threading.Thread(target=loop)
    worker.daemon = True
    worker.start()
    return flask_app


app = create_app()


@app.route("/change_modes")
def change_modes():
    queue.put_nowait(('change_modes', ))
    return "Changing modes!"


@app.route("/off")
def off():
    queue.put_nowait(('off', ))
    return "Bye!"


@app.route("/on")
def on():
    queue.put_nowait(('on', ))
    return "Hey!"


@app.route("/red")
def red():
    queue.put_nowait(('red', ))
    return "Red!"


@app.route("/green")
def green():
    queue.put_nowait(('green', ))
    return "Red!"


@app.route("/blue")
def blue():
    queue.put_nowait(('blue', ))
    return "Red!"


@app.route("/rgb/<r>,<g>,<b>")
def set_rgb(r, g, b):
    queue.put_nowait(('set_rgb', int(r), int(g), int(b)))
    return "Bye!"


@app.route("/")
def home():
    return render_template('index.html', host=request.host)


def die():
    """atexit hook: signal the worker loop to stop."""
    stop.set()


if __name__ == '__main__':
    print('Starting up')
    atexit.register(die)
    try:
        app.run(host='0.0.0.0', port=5000, debug=False)
    except KeyboardInterrupt:
        print('Shutting down')
        sign = Sign()
        sign.strip.set_colour_rgb(0, 0, 0)
    finally:
        # Trigger stop
        stop.set()
        sys.exit(0)
0.261708
0.120103
import csv
from pathlib import Path

from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components


# Read feature vectors from dataDir and write them to outDir after
# processing, one canopy at a time.
def processRexa(dataDir, outDir):
    """Process every canopy sub-directory of *dataDir*.

    Each canopy's ``pair_vecs.tsv`` has rows of the form
    ``mention1 <tab> mention2 <tab> label <tab> features... <tab> extra``
    (the trailing column is dropped).  Positive pairs (label "1") form
    edges; connected components over those edges recover the
    ground-truth clusters.  Writes ``gtClusters.tsv`` (point id ->
    cluster id) and ``pairFeatures.csv`` (ids, features, same-cluster
    label) under *outDir*/<canopy>.

    Raises ValueError on a pair label other than "0" or "1".
    """
    folderList = [str(f) for f in Path(dataDir).glob("*") if f.is_dir()]
    for folder in folderList:
        # Portable canopy name (was folder.split("/")[-1], which breaks
        # on Windows path separators).
        canopyId = Path(folder).name
        pairFeatures = {}
        mentToId = {}
        pidToCluster = {}
        rows, cols, data = [], [], []
        with open("{}/pair_vecs.tsv".format(folder), "r") as f:
            reader = csv.reader(f, delimiter="\t")
            for line in reader:
                m1, m2 = line[0], line[1]
                featureVec = line[3:-1]  # drop the trailing column
                pairFeatures[(m1, m2)] = featureVec
                # Record mention names (dict preserves first-seen order).
                mentToId[m1] = 1
                mentToId[m2] = 1
                if line[2] == "1":
                    # Positive pair: add a symmetric edge for the
                    # connected-components run below.
                    rows += [m1, m2]
                    cols += [m2, m1]
                    data += [1, 1]
                elif line[2] != "0":
                    # BUG FIX: the old message ("Invalid end token") was
                    # misleading; report the bad label itself.
                    raise ValueError("Invalid pair label: {}".format(line[2]))
        # Assign a unique integer id to each mention, in first-seen order.
        mentToId = {ment: ctr for ctr, ment in enumerate(mentToId)}
        # Recover ground-truth clusters via connected components.
        rows = [mentToId[ment] for ment in rows]
        cols = [mentToId[ment] for ment in cols]
        numPoints = len(mentToId)
        sparseMatrix = csr_matrix((data, (rows, cols)), shape=(numPoints, numPoints))
        connComp = connected_components(sparseMatrix)
        for pid in range(numPoints):
            pidToCluster[pid] = connComp[1][pid]
        Path("{}/{}".format(outDir, canopyId)).mkdir(parents=True, exist_ok=True)
        with open("{}/{}/gtClusters.tsv".format(outDir, canopyId), "w") as f:
            for pid in pidToCluster:
                f.write("{}\t{}\n".format(pid, pidToCluster[pid]))
        with open("{}/{}/pairFeatures.csv".format(outDir, canopyId), "w") as f:
            writer = csv.writer(f)
            for m1, m2 in pairFeatures:
                line = [mentToId[m1], mentToId[m2]] + pairFeatures[(m1, m2)]
                # Final column: 1 if the pair shares a ground-truth cluster.
                if pidToCluster[mentToId[m1]] == pidToCluster[mentToId[m2]]:
                    line.append(1)
                else:
                    line.append(0)
                writer.writerow(line)


if __name__ == "__main__":
    # dataDir = "../data/data/rexa/canopy"
    dataDir = "../data/data_rexa_all/nick-rexa/rexa/canopy"
    outDir = "../data/rexa_new"
    processRexa(dataDir=dataDir, outDir=outDir)
src/utils/processRexa.py
import csv
from pathlib import Path

from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components


# Read feature vectors from dataDir and write them to outDir after
# processing, one canopy at a time.
def processRexa(dataDir, outDir):
    """For each canopy folder, recover ground-truth clusters from the
    positive pairs in ``pair_vecs.tsv`` (via connected components) and
    write ``gtClusters.tsv`` plus ``pairFeatures.csv`` under *outDir*.
    """
    canopy_dirs = [str(d) for d in Path(dataDir).glob("*") if d.is_dir()]
    for ctr, folder in enumerate(canopy_dirs):
        canopyId = folder.split("/")[-1]
        pairFeatures = {}
        mentToId = {}
        pidToCluster = {}
        edge_src, edge_dst, edge_val = [], [], []
        with open("{}/pair_vecs.tsv".format(folder), "r") as fh:
            for record in csv.reader(fh, delimiter="\t"):
                left, right = record[0], record[1]
                # Features are everything after the label, minus the
                # trailing column.
                pairFeatures[(left, right)] = record[3:-1]
                mentToId[left] = 1
                mentToId[right] = 1
                label = record[2]
                if label == "1":
                    # Positive pair: symmetric edge for connected components.
                    edge_src.extend([left, right])
                    edge_dst.extend([right, left])
                    edge_val.extend([1, 1])
                elif label == "0":
                    pass
                else:
                    print(label)
                    raise Exception("Invalid end token")
        # Unique integer id per mention, in first-seen order.
        mentToId = {ment: idx for idx, ment in enumerate(mentToId)}
        src_ids = [mentToId[m] for m in edge_src]
        dst_ids = [mentToId[m] for m in edge_dst]
        numPoints = len(mentToId)
        adjacency = csr_matrix((edge_val, (src_ids, dst_ids)),
                               shape=(numPoints, numPoints))
        labels = connected_components(adjacency)[1]
        for pid in range(numPoints):
            pidToCluster[pid] = labels[pid]
        out_folder = "{}/{}".format(outDir, canopyId)
        Path(out_folder).mkdir(parents=True, exist_ok=True)
        with open("{}/gtClusters.tsv".format(out_folder), "w") as fh:
            for pid in pidToCluster:
                fh.write("{}\t{}\n".format(pid, pidToCluster[pid]))
        with open("{}/pairFeatures.csv".format(out_folder), "w") as fh:
            writer = csv.writer(fh)
            for left, right in pairFeatures:
                same = pidToCluster[mentToId[left]] == pidToCluster[mentToId[right]]
                row = [mentToId[left], mentToId[right]] + pairFeatures[(left, right)]
                row.append(1 if same else 0)
                writer.writerow(row)


if __name__ == "__main__":
    # dataDir = "../data/data/rexa/canopy"
    dataDir = "../data/data_rexa_all/nick-rexa/rexa/canopy"
    outDir = "../data/rexa_new"
    processRexa(dataDir=dataDir, outDir=outDir)
0.23231
0.363845
import logging

from cliff.lister import Lister

from os_capacity.data import metrics
from os_capacity import utils


class FlavorList(Lister):
    """List all the flavors."""

    log = logging.getLogger(__name__)

    def take_action(self, parsed_args):
        # Returns (column headers, row iterable) per the cliff Lister contract.
        flavors = utils.get_flavors(self.app)
        return (('UUID', 'Name', 'VCPUs', 'RAM MB', 'DISK GB', 'Extra Specs'),
                flavors)


class ListResourcesAll(Lister):
    """List all resource providers, with their resources and servers."""

    def take_action(self, parsed_args):
        inventories = utils.get_providers_with_resources_and_servers(self.app)
        # BUG FIX: header previously read 'Severs'.
        return (('Provider Name', 'Resources', 'Servers'), inventories)


class ListResourcesGroups(Lister):
    """Lists counts of resource providers with similar inventories."""

    def take_action(self, parsed_args):
        groups = utils.group_providers_by_type_with_capacity(self.app)
        groups = list(groups)  # convert iterator

        # Each group is (resources, total, used, free, flavors); forward
        # the totals to the monitoring service, one metric per counter.
        metrics_to_send = []
        for group in groups:
            flavors = group[4].replace(", ", "-")
            if not flavors:
                # Skip hosts with no matching flavors.
                continue
            resources = group[0]
            total = group[1]
            used = group[2]
            free = group[3]
            metrics_to_send.append(metrics.Metric(
                name="resources.total", value=total,
                value_meta={"flavor_resources": resources},
                dimensions={"flavor": flavors}))
            metrics_to_send.append(metrics.Metric(
                name="resources.used", value=used,
                value_meta={"flavor_resources": resources},
                dimensions={"flavor": flavors}))
            metrics_to_send.append(metrics.Metric(
                name="resources.free", value=free,
                value_meta={"flavor_resources": resources},
                dimensions={"flavor": flavors}))
        metrics.send_metrics(self.app.monitoring_client, metrics_to_send)

        return (
            ('Resource Class Groups', 'Total', 'Used', 'Free', 'Flavors'),
            groups)


class ListUsagesAll(Lister):
    """List all current resource usages."""

    def take_action(self, parsed_args):
        allocations = utils.get_allocations_with_server_info(self.app,
                                                            get_names=True)
        return (
            ('Provider Name', 'Server UUID', 'Resources', 'Flavor',
             'Days', 'Project', 'User'),
            allocations)


class ListUsagesGroup(Lister):
    """Group usage by specified key (by user or project).

    NOTE: The usage days is not complete as it only takes into account
    any currently active servers. Any previously deleted servers
    are not counted.
    """

    def get_parser(self, prog_name):
        parser = super(ListUsagesGroup, self).get_parser(prog_name)
        parser.add_argument('group_by', nargs='?', default='user',
                            help='Group by user_id or project_id or all',
                            choices=['user', 'project', 'all'])
        return parser

    def take_action(self, parsed_args):
        usages = utils.group_usage(self.app, parsed_args.group_by)
        sort_key_title = parsed_args.group_by.title()
        return ((sort_key_title, 'Current Usage', 'Usage Days'), usages)
os_capacity/commands/commands.py
import logging

from cliff.lister import Lister

from os_capacity.data import metrics
from os_capacity import utils


class FlavorList(Lister):
    """List all the flavors."""

    log = logging.getLogger(__name__)

    def take_action(self, parsed_args):
        # cliff Lister contract: return (column headers, row iterable).
        headers = ('UUID', 'Name', 'VCPUs', 'RAM MB', 'DISK GB', 'Extra Specs')
        return headers, utils.get_flavors(self.app)


class ListResourcesAll(Lister):
    """List all resource providers, with their resources and servers."""

    def take_action(self, parsed_args):
        rows = utils.get_providers_with_resources_and_servers(self.app)
        return ('Provider Name', 'Resources', 'Severs'), rows


class ListResourcesGroups(Lister):
    """Lists counts of resource providers with similar inventories."""

    def take_action(self, parsed_args):
        # Materialize the iterator: the rows are both reported and returned.
        groups = list(utils.group_providers_by_type_with_capacity(self.app))

        outgoing = []
        for resources, total, used, free, flavor_csv in groups:
            flavors = flavor_csv.replace(", ", "-")
            if not flavors:
                # Nothing schedulable here; don't emit metrics for it.
                continue
            for counter, value in (("total", total),
                                   ("used", used),
                                   ("free", free)):
                outgoing.append(metrics.Metric(
                    name="resources." + counter,
                    value=value,
                    value_meta={"flavor_resources": resources},
                    dimensions={"flavor": flavors}))
        metrics.send_metrics(self.app.monitoring_client, outgoing)

        headers = ('Resource Class Groups', 'Total', 'Used', 'Free', 'Flavors')
        return headers, groups


class ListUsagesAll(Lister):
    """List all current resource usages."""

    def take_action(self, parsed_args):
        rows = utils.get_allocations_with_server_info(self.app,
                                                      get_names=True)
        headers = ('Provider Name', 'Server UUID', 'Resources', 'Flavor',
                   'Days', 'Project', 'User')
        return headers, rows


class ListUsagesGroup(Lister):
    """Group usage by specified key (by user or project).

    NOTE: The usage days is not complete as it only takes into account
    any currently active servers. Any previously deleted servers
    are not counted.
    """

    def get_parser(self, prog_name):
        parser = super(ListUsagesGroup, self).get_parser(prog_name)
        parser.add_argument('group_by', nargs='?', default='user',
                            help='Group by user_id or project_id or all',
                            choices=['user', 'project', 'all'])
        return parser

    def take_action(self, parsed_args):
        rows = utils.group_usage(self.app, parsed_args.group_by)
        first_column = parsed_args.group_by.title()
        return (first_column, 'Current Usage', 'Usage Days'), rows
0.661376
0.203965
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from wordcloud import WordCloud

from ..utils import flatten


def radar(df, idx):
    """Draw a radar (spider) chart for row ``idx`` of ``df``.

    Values are plotted on a [0, 1] radial scale with percentage tick
    labels. Returns the ``matplotlib.pyplot`` module for consistency
    with the other plot helpers.
    """
    # Repeat the first value at the end so the circular outline closes.
    values = df.loc[idx].values.flatten().tolist()
    values += values[:1]

    # Angle of each axis in the plot (full circle / number of variables).
    angles = [n / float(df.shape[1]) * 2 * np.pi for n in range(df.shape[1])]
    angles += angles[:1]

    plt.figure(figsize=(6, 6))
    # Initialise the spider plot.
    ax = plt.subplot(111, polar=True)

    # Draw one axis per variable and add the column labels.
    plt.xticks(angles[:-1], df.columns, color='k', size=15)
    plt.yticks([0, .25, .5, .75, 1], ["0%", '25%', "50%", '75%', "100%"],
               color="grey", size=15)
    plt.ylim([0, 1])

    # Plot the data and fill the enclosed area.
    ax.plot(angles, values, linewidth=1, linestyle='solid')
    ax.fill(angles, values, 'b', alpha=0.1)
    return plt


def lollipop_h(df, idx):
    """Draw a horizontal lollipop chart for row ``idx`` of ``df``."""
    values = df.loc[idx].values.flatten().tolist()
    plt.figure(dpi=100)
    colors = list(sns.color_palette('deep'))
    # Stems from 0 to each value, one colour per column.
    plt.hlines(y=df.columns, xmin=0, xmax=values, colors=colors, linewidth=4)
    # Matching marker at the tip of each stem.
    for i, x, c in zip(range(len(values)), values, colors):
        plt.plot(x, i, 'o', color=c, markersize=10)
    plt.xlim([0, 1])
    sns.despine(left=False, bottom=True)
    return plt


def lollipop_v(df, idx):
    """Draw a vertical lollipop chart for row ``idx`` of ``df``."""
    values = df.loc[idx].values.flatten().tolist()
    plt.figure(dpi=100)
    colors = list(sns.color_palette('deep'))
    plt.vlines(x=df.columns, ymin=0, ymax=values, colors=colors, linewidth=4)
    for i, x, c in zip(range(len(values)), values, colors):
        plt.plot(i, x, 'o', color=c, markersize=10)
    plt.ylim([0, 1])
    plt.xticks(rotation=45)
    sns.despine(left=True, bottom=False)
    return plt


def lollipop(df, idx, orientation='vertical'):
    """Dispatch to the vertical or horizontal lollipop chart."""
    if orientation.lower() == 'vertical':
        return lollipop_v(df, idx)
    elif orientation.lower() == 'horizontal':
        return lollipop_h(df, idx)
    else:
        # Unknown orientation: report it; no figure is drawn.
        print('Orientation {} not understood'.format(orientation))


def wordcloud(df, idx):
    """Draw a circular word cloud whose word sizes scale with row values."""
    # Circular mask: disc of radius 130 centred in a 300x300 canvas.
    x, y = np.ogrid[:300, :300]
    mask = (x - 150)**2 + (y - 150)**2 > 130**2
    mask = 255 * mask.astype(int)

    values = df.loc[idx].values.flatten().tolist()
    # Repeat each column name in proportion to its value so WordCloud
    # sizes words by frequency; spaces become underscores so multi-word
    # columns stay a single token.
    text = []
    for c, v in zip(df.columns, values):
        text.append([c.replace(' ', '_')] * int(10 * v))
    text = ' '.join(flatten(text))

    wc = WordCloud(
        mask=mask,
        background_color='white',
        colormap='Paired',
        max_font_size=100,
        min_font_size=1,
        contour_width=1,
        contour_color='gray').generate(text)

    plt.figure()
    plt.imshow(wc, interpolation="bilinear")
    plt.axis("off")
    plt.margins(x=0, y=0)
    return plt
saa/plot/snapshot.py
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from wordcloud import WordCloud

from ..utils import flatten


def radar(df, idx):
    """Draw a radar (spider) chart for row ``idx`` of ``df``.

    Values are plotted on a [0, 1] radial scale with percentage tick
    labels. Returns the ``matplotlib.pyplot`` module for consistency
    with the other plot helpers.
    """
    # Repeat the first value at the end so the circular outline closes.
    values = df.loc[idx].values.flatten().tolist()
    values += values[:1]

    # Angle of each axis in the plot (full circle / number of variables).
    angles = [n / float(df.shape[1]) * 2 * np.pi for n in range(df.shape[1])]
    angles += angles[:1]

    plt.figure(figsize=(6, 6))
    # Initialise the spider plot.
    ax = plt.subplot(111, polar=True)

    # Draw one axis per variable and add the column labels.
    plt.xticks(angles[:-1], df.columns, color='k', size=15)
    plt.yticks([0, .25, .5, .75, 1], ["0%", '25%', "50%", '75%', "100%"],
               color="grey", size=15)
    plt.ylim([0, 1])

    # Plot the data and fill the enclosed area.
    ax.plot(angles, values, linewidth=1, linestyle='solid')
    ax.fill(angles, values, 'b', alpha=0.1)
    return plt


def lollipop_h(df, idx):
    """Draw a horizontal lollipop chart for row ``idx`` of ``df``."""
    values = df.loc[idx].values.flatten().tolist()
    plt.figure(dpi=100)
    colors = list(sns.color_palette('deep'))
    # Stems from 0 to each value, one colour per column.
    plt.hlines(y=df.columns, xmin=0, xmax=values, colors=colors, linewidth=4)
    # Matching marker at the tip of each stem.
    for i, x, c in zip(range(len(values)), values, colors):
        plt.plot(x, i, 'o', color=c, markersize=10)
    plt.xlim([0, 1])
    sns.despine(left=False, bottom=True)
    return plt


def lollipop_v(df, idx):
    """Draw a vertical lollipop chart for row ``idx`` of ``df``."""
    values = df.loc[idx].values.flatten().tolist()
    plt.figure(dpi=100)
    colors = list(sns.color_palette('deep'))
    plt.vlines(x=df.columns, ymin=0, ymax=values, colors=colors, linewidth=4)
    for i, x, c in zip(range(len(values)), values, colors):
        plt.plot(i, x, 'o', color=c, markersize=10)
    plt.ylim([0, 1])
    plt.xticks(rotation=45)
    sns.despine(left=True, bottom=False)
    return plt


def lollipop(df, idx, orientation='vertical'):
    """Dispatch to the vertical or horizontal lollipop chart."""
    if orientation.lower() == 'vertical':
        return lollipop_v(df, idx)
    elif orientation.lower() == 'horizontal':
        return lollipop_h(df, idx)
    else:
        # Unknown orientation: report it; no figure is drawn.
        print('Orientation {} not understood'.format(orientation))


def wordcloud(df, idx):
    """Draw a circular word cloud whose word sizes scale with row values."""
    # Circular mask: disc of radius 130 centred in a 300x300 canvas.
    x, y = np.ogrid[:300, :300]
    mask = (x - 150)**2 + (y - 150)**2 > 130**2
    mask = 255 * mask.astype(int)

    values = df.loc[idx].values.flatten().tolist()
    # Repeat each column name in proportion to its value so WordCloud
    # sizes words by frequency; spaces become underscores so multi-word
    # columns stay a single token.
    text = []
    for c, v in zip(df.columns, values):
        text.append([c.replace(' ', '_')] * int(10 * v))
    text = ' '.join(flatten(text))

    wc = WordCloud(
        mask=mask,
        background_color='white',
        colormap='Paired',
        max_font_size=100,
        min_font_size=1,
        contour_width=1,
        contour_color='gray').generate(text)

    plt.figure()
    plt.imshow(wc, interpolation="bilinear")
    plt.axis("off")
    plt.margins(x=0, y=0)
    return plt
0.768038
0.788746
from constants import *
from disc import Disc


class Board:
    """Board class that implements Othello game logic."""

    def __init__(self):
        """Initializes the board by setting all of the initial values."""
        self.discs = [[Disc(EMPTY) for x in range(SIZE)] for y in range(SIZE)]
        # Standard Othello opening: two discs of each colour crossed in
        # the centre of the board.
        self.discs[3][3].owner = WHITE
        self.discs[4][4].owner = WHITE
        self.discs[3][4].owner = BLACK
        self.discs[4][3].owner = BLACK
        self.scores = {WHITE: 2, BLACK: 2}
        self.depth = 0
        self.current_player = WHITE
        # _flips is a dict that caches move calculations.
        # get_discs_flipped and get_num_discs_flipped should be used
        # instead of direct access.
        self._flips = {}
        # move_history holds each move as a 2D array [[depth, player, pos], ...]
        # where pos is a tuple of (row, col)
        self.move_history = []

    def reset(self):
        """
        Resets the board by calling __init__ again.
        :return: None
        """
        self.__init__()

    def _update_scores(self):
        """
        Updates self.scores for each player using self.move_history to
        calculate the change in points. Should only be called by make_move.
        :return: dict self.scores {WHITE: score, BLACK: score} after updating
        """
        # The cache entry for the last move was created by is_legal_move
        # before self.depth was incremented, so its key matches the
        # [depth, player, pos] entry stored in move_history.
        change = len(self._flips[str(self.move_history[-1])])
        player = self.move_history[-1][1]
        # The mover gains the flipped discs plus the disc just placed;
        # the opponent loses the flipped discs.
        self.scores[player] += change + 1
        self.scores[player * -1] -= change
        return self.scores

    def get_discs_flipped(self, player, pos):
        """
        Given the player and the move position, returns an array of discs
        that would be flipped at this position.
        Answers are cached in dict self._flips under key
        str([self.depth + 1, player, pos]), where depth is the depth AFTER
        the move.
        :param player: The player color from constants.
        :param pos: The position to play from.
        :return: array of flipped positions [[row, col], ...]
        """
        cache_key = str([self.depth + 1, player, pos])
        if cache_key in self._flips:
            # Already calculated at this depth; return the cached answer.
            return self._flips[cache_key]

        discs_flipped = []
        for d in DIRECTIONS:
            # BUG FIX: temp_flip is reset for every direction. Previously a
            # direction that ran off the board edge left its candidates
            # behind, and a later direction could commit them incorrectly.
            temp_flip = []
            x = pos[0]
            y = pos[1]
            while 0 <= x < len(self.discs) and 0 <= y < len(self.discs[x]):
                x += d[0]
                y += d[1]
                if x < 0 or y < 0:
                    # BUG FIX: negative indices wrap to the far side of the
                    # board in Python (and the old `x is not -1` guard relied
                    # on CPython int identity); treat them as the edge.
                    break
                try:
                    # BUG FIX: compare against the `player` argument; the old
                    # code used self.current_player, giving wrong answers
                    # when querying moves for the non-current player.
                    if self.discs[x][y].owner == player * -1:
                        # Opponent disc: a flip candidate until we know how
                        # the run ends.
                        temp_flip.append([x, y])
                    else:
                        if self.discs[x][y].owner == player:
                            # Run is bracketed by one of our own discs, so
                            # every candidate in between really flips.
                            discs_flipped.extend(temp_flip)
                        # Our own disc or an empty cell ends this direction.
                        break
                except IndexError:
                    # We reached the edge of the board.
                    break

        # Cache the list of discs flipped under the depth/player/pos key.
        self._flips[cache_key] = discs_flipped
        return discs_flipped

    def get_num_discs_flipped(self, player, pos):
        """
        Given the player and the move position, returns how many discs
        would be flipped at this position.
        :param player: The player color from constants.
        :param pos: The position to play from.
        :return: int number of discs that would be flipped by a player at pos.
        """
        return len(self.get_discs_flipped(player, pos))

    def get_valid_moves(self, player):
        """
        Given the current player, returns an array of valid moves.
        :param player: A player as specified in constants.
        :return: an array of valid moves of the form [(row, col), ...]
        """
        valid_moves = []
        for r in range(SIZE):
            for c in range(SIZE):
                pos = (r, c)
                if self.is_legal_move(pos, player):
                    valid_moves.append(pos)
        return valid_moves

    def is_legal_move(self, pos, player=None):
        """
        Given the current player and position, returns whether or not the
        current move is legal.
        :param player: The player color from constants (defaults to the
            current player).
        :param pos: The position to play from.
        :return: boolean of whether or not the player can move here.
        """
        if not player:
            player = self.current_player
        # A move is legal when the cell is empty and at least one disc flips.
        return (self.discs[pos[0]][pos[1]].owner == EMPTY
                and self.get_num_discs_flipped(player, pos) > 0)

    def make_move(self, pos, player=None):
        """
        Given a player and position (row, col), attempts to move and
        returns True. If it is not a valid move, returns False.
        :param player: The player color from constants (defaults to the
            current player).
        :param pos: The position to play from as tuple (row, col)
        :return: Boolean of whether or not the move succeeded
        """
        if not player:
            player = self.current_player
        if not pos:
            raise Exception("pos not specified")
        if not self.is_legal_move(pos, player):
            # Illegal move was attempted.
            return False

        self.depth += 1
        self.discs[pos[0]][pos[1]].owner = player
        self.move_history.append([self.depth, player, pos])
        for flip_pos in self.get_discs_flipped(player, pos):
            self.discs[flip_pos[0]][flip_pos[1]].flip()

        # Success! Hand the turn to the opponent; if the opponent has no
        # legal move, the turn passes straight back.
        self.current_player = self.current_player * -1
        if len(self.get_valid_moves(self.current_player)) == 0:
            self.current_player = self.current_player * -1
        self._update_scores()
        return True

    def terminal_test(self):
        """Returns True when the current player has no legal moves."""
        return len(self.get_valid_moves(self.current_player)) == 0

    def board_string(self):
        """
        :return: A string showing B and W discs on the board.
        """
        board = ""
        for row in self.discs:
            board += "{}\n".format(row)
        return board

    def turn_string(self):
        """
        :return: A string stating whose turn it is.
        """
        if self.current_player == WHITE:
            p_string = "WHITE"
        else:
            p_string = "BLACK"
        return "It is {}'s turn".format(p_string)

    def score_string(self):
        """
        :return: A string comparing the current scores.
        """
        return "WHITE: {} | BLACK: {}".format(self.scores[WHITE],
                                              self.scores[BLACK])

    def __str__(self):
        """
        :return: Full string representation of the board including current
            player and score.
        """
        out_str = "Board with depth {}:\n".format(self.depth)
        out_str += self.board_string()
        out_str += self.score_string()
        out_str += self.turn_string()
        return out_str
board.py
from constants import *
from disc import Disc


class Board:
    """Board class that implements Othello game logic."""

    def __init__(self):
        """Initializes the board by setting all of the initial values."""
        self.discs = [[Disc(EMPTY) for x in range(SIZE)] for y in range(SIZE)]
        # Standard Othello opening: two discs of each colour crossed in
        # the centre of the board.
        self.discs[3][3].owner = WHITE
        self.discs[4][4].owner = WHITE
        self.discs[3][4].owner = BLACK
        self.discs[4][3].owner = BLACK
        self.scores = {WHITE: 2, BLACK: 2}
        self.depth = 0
        self.current_player = WHITE
        # _flips is a dict that caches move calculations.
        # get_discs_flipped and get_num_discs_flipped should be used
        # instead of direct access.
        self._flips = {}
        # move_history holds each move as a 2D array [[depth, player, pos], ...]
        # where pos is a tuple of (row, col)
        self.move_history = []

    def reset(self):
        """
        Resets the board by calling __init__ again.
        :return: None
        """
        self.__init__()

    def _update_scores(self):
        """
        Updates self.scores for each player using self.move_history to
        calculate the change in points. Should only be called by make_move.
        :return: dict self.scores {WHITE: score, BLACK: score} after updating
        """
        # The cache entry for the last move was created by is_legal_move
        # before self.depth was incremented, so its key matches the
        # [depth, player, pos] entry stored in move_history.
        change = len(self._flips[str(self.move_history[-1])])
        player = self.move_history[-1][1]
        # The mover gains the flipped discs plus the disc just placed;
        # the opponent loses the flipped discs.
        self.scores[player] += change + 1
        self.scores[player * -1] -= change
        return self.scores

    def get_discs_flipped(self, player, pos):
        """
        Given the player and the move position, returns an array of discs
        that would be flipped at this position.
        Answers are cached in dict self._flips under key
        str([self.depth + 1, player, pos]), where depth is the depth AFTER
        the move.
        :param player: The player color from constants.
        :param pos: The position to play from.
        :return: array of flipped positions [[row, col], ...]
        """
        cache_key = str([self.depth + 1, player, pos])
        if cache_key in self._flips:
            # Already calculated at this depth; return the cached answer.
            return self._flips[cache_key]

        discs_flipped = []
        for d in DIRECTIONS:
            # BUG FIX: temp_flip is reset for every direction. Previously a
            # direction that ran off the board edge left its candidates
            # behind, and a later direction could commit them incorrectly.
            temp_flip = []
            x = pos[0]
            y = pos[1]
            while 0 <= x < len(self.discs) and 0 <= y < len(self.discs[x]):
                x += d[0]
                y += d[1]
                if x < 0 or y < 0:
                    # BUG FIX: negative indices wrap to the far side of the
                    # board in Python (and the old `x is not -1` guard relied
                    # on CPython int identity); treat them as the edge.
                    break
                try:
                    # BUG FIX: compare against the `player` argument; the old
                    # code used self.current_player, giving wrong answers
                    # when querying moves for the non-current player.
                    if self.discs[x][y].owner == player * -1:
                        # Opponent disc: a flip candidate until we know how
                        # the run ends.
                        temp_flip.append([x, y])
                    else:
                        if self.discs[x][y].owner == player:
                            # Run is bracketed by one of our own discs, so
                            # every candidate in between really flips.
                            discs_flipped.extend(temp_flip)
                        # Our own disc or an empty cell ends this direction.
                        break
                except IndexError:
                    # We reached the edge of the board.
                    break

        # Cache the list of discs flipped under the depth/player/pos key.
        self._flips[cache_key] = discs_flipped
        return discs_flipped

    def get_num_discs_flipped(self, player, pos):
        """
        Given the player and the move position, returns how many discs
        would be flipped at this position.
        :param player: The player color from constants.
        :param pos: The position to play from.
        :return: int number of discs that would be flipped by a player at pos.
        """
        return len(self.get_discs_flipped(player, pos))

    def get_valid_moves(self, player):
        """
        Given the current player, returns an array of valid moves.
        :param player: A player as specified in constants.
        :return: an array of valid moves of the form [(row, col), ...]
        """
        valid_moves = []
        for r in range(SIZE):
            for c in range(SIZE):
                pos = (r, c)
                if self.is_legal_move(pos, player):
                    valid_moves.append(pos)
        return valid_moves

    def is_legal_move(self, pos, player=None):
        """
        Given the current player and position, returns whether or not the
        current move is legal.
        :param player: The player color from constants (defaults to the
            current player).
        :param pos: The position to play from.
        :return: boolean of whether or not the player can move here.
        """
        if not player:
            player = self.current_player
        # A move is legal when the cell is empty and at least one disc flips.
        return (self.discs[pos[0]][pos[1]].owner == EMPTY
                and self.get_num_discs_flipped(player, pos) > 0)

    def make_move(self, pos, player=None):
        """
        Given a player and position (row, col), attempts to move and
        returns True. If it is not a valid move, returns False.
        :param player: The player color from constants (defaults to the
            current player).
        :param pos: The position to play from as tuple (row, col)
        :return: Boolean of whether or not the move succeeded
        """
        if not player:
            player = self.current_player
        if not pos:
            raise Exception("pos not specified")
        if not self.is_legal_move(pos, player):
            # Illegal move was attempted.
            return False

        self.depth += 1
        self.discs[pos[0]][pos[1]].owner = player
        self.move_history.append([self.depth, player, pos])
        for flip_pos in self.get_discs_flipped(player, pos):
            self.discs[flip_pos[0]][flip_pos[1]].flip()

        # Success! Hand the turn to the opponent; if the opponent has no
        # legal move, the turn passes straight back.
        self.current_player = self.current_player * -1
        if len(self.get_valid_moves(self.current_player)) == 0:
            self.current_player = self.current_player * -1
        self._update_scores()
        return True

    def terminal_test(self):
        """Returns True when the current player has no legal moves."""
        return len(self.get_valid_moves(self.current_player)) == 0

    def board_string(self):
        """
        :return: A string showing B and W discs on the board.
        """
        board = ""
        for row in self.discs:
            board += "{}\n".format(row)
        return board

    def turn_string(self):
        """
        :return: A string stating whose turn it is.
        """
        if self.current_player == WHITE:
            p_string = "WHITE"
        else:
            p_string = "BLACK"
        return "It is {}'s turn".format(p_string)

    def score_string(self):
        """
        :return: A string comparing the current scores.
        """
        return "WHITE: {} | BLACK: {}".format(self.scores[WHITE],
                                              self.scores[BLACK])

    def __str__(self):
        """
        :return: Full string representation of the board including current
            player and score.
        """
        out_str = "Board with depth {}:\n".format(self.depth)
        out_str += self.board_string()
        out_str += self.score_string()
        out_str += self.turn_string()
        return out_str
0.663233
0.399694
import csv

import matplotlib.pyplot as plt
import numpy as np

from .Data import Data, CalibData
from ..Solver import reinitialize_solver, Solver
from ..util.utilities import get_config

REGULARIZATION_PARAM = get_config()["regularization_coeff"]
THRESHOLD_PERCENTAGE = 0.9


class DataPlotter:
    """
    Plotter for data.

    Attributes:
        solver: Solver object for inverse problems.
    """

    def __init__(self, solver: Solver = None):
        # Fall back to a freshly initialised solver when none is supplied.
        if solver is None:
            self.solver = reinitialize_solver(REGULARIZATION_PARAM)
        else:
            self.solver = solver

    def get_COP(self, data: Data):
        """
        Gets the center of position of the given data.

        Args:
            data: Data class defined in {@class Data.py}

        Returns:
            x and y coordinates of the center and mean value bigger than
            percentage threshold of the whole mesh

        Raises:
            ValueError: if delta_V has not been calculated yet, or if no
                mesh element exceeds the threshold.
        """
        if data.delta_V is None:
            # calc_delta_V must be called before a COP can be computed.
            raise ValueError("data.delta_V is None; call calc_delta_V first")
        capacitance = self.solver.solve(data.delta_V)
        # Threshold sits THRESHOLD_PERCENTAGE of the way from min to max.
        threshold = (np.max(capacitance) * THRESHOLD_PERCENTAGE
                     + np.min(capacitance) * (1 - THRESHOLD_PERCENTAGE))
        x_sum = 0.0
        y_sum = 0.0
        v_sum = 0.0
        count = 0
        for i, c in enumerate(capacitance):
            if c > threshold:
                idx = self.solver.mesh.detection_index[i]
                # elem_param columns 7 and 8 appear to hold element centre
                # coordinates -- TODO confirm against the mesh definition.
                x_sum += self.solver.mesh.elem_param[idx][7]
                y_sum += self.solver.mesh.elem_param[idx][8]
                v_sum += c
                count += 1
        if count == 0:
            # All values equal: nothing clears the strict ">" threshold.
            # Fail with a clear message instead of a ZeroDivisionError.
            raise ValueError("no mesh element exceeds the capacitance "
                             "threshold; cannot compute a COP")
        return x_sum / count, y_sum / count, v_sum / count

    def draw_COP(self, dataset, calibration, ax, title="COP"):
        """
        Draw a COP map given the dataset.

        Args:
            dataset: list of Data object
            calibration: CalibData Object for calibration
            ax: matplotlib.pyplot.axes object
            title: title of the graph

        Returns:
            values: mean value after threshold for every situation inside
                dataset list.
        """
        x_re = []
        y_re = []
        x_gt = []
        y_gt = []
        values = []
        for data in dataset:
            data.calc_delta_V(calibration)
            x, y, maxVal = self.get_COP(data)
            # Reconstruction coordinates are scaled by 2000 to match the
            # ground-truth units -- presumably mesh units to mm; verify.
            x_re.append(x * 2000)
            y_re.append(y * 2000)
            x_gt.append(data.x)
            y_gt.append(data.y)
            values.append(maxVal)
            # Faint line connecting each reconstruction to its ground truth.
            ax.plot([x * 2000, data.x], [y * 2000, data.y],
                    color=(0.1, 0.2, 0.5, 0.2))
        ax.set_xlim(-100, 100)
        ax.set_ylim(-100, 100)
        ax.scatter(x_re, y_re, label="reconstruction")
        ax.scatter(x_gt, y_gt, label="Ground Truth")
        ax.set_title(title)
        # ax.legend()
        ax.set_aspect('equal')
        ax.grid(True)
        return np.array(values)

    def draw_one_situation(self, data: Data, calibration: CalibData,
                           ax: plt.axes, vmax=None, vmin=None,
                           title_prefix=""):
        """
        Draw one situation with object on the graph.

        Args:
            data: Data object to be visualized
            calibration: CalibData object used for calibration
            ax: matplotlib.pyplot.axes object
            vmax: max value limit of the graph
            vmin: min value limit of the graph
            title_prefix: prefix for graph title

        Returns:
            im: matplotlib.image object
        """
        obj = data
        obj.calc_delta_V(calibration)
        print(obj.x, obj.y, obj.z)
        capacitance = self.solver.solve(obj.delta_V)
        e_x, e_y, _ = self.get_COP(obj)
        im = self.solver.plot_map_in_detection_range(ax, capacitance,
                                                     vmax=vmax, vmin=vmin)
        # Orange circle: ground truth; black circle: estimated COP.
        circle = plt.Circle((obj.x, obj.y), 13.5, color='#F55400', fill=False)
        circle_e = plt.Circle((e_x * 2000, e_y * 2000), 13.5, color='k',
                              fill=False)
        ax.set_xlim(-100, 100)
        ax.set_ylim(-100, 100)
        ax.add_artist(circle)
        ax.add_artist(circle_e)
        ax.set_title(title_prefix + "("
                     + ",".join((str(obj.x), str(obj.y), str(obj.z))) + ")")
        ax.set_aspect('equal')
        return im


class DataHandler(DataPlotter):
    """
    Data handler for handling recorded data on different positions.

    For the accurate folder structure, please check README.md or Dataset
    initializer.

    Attributes:
        folder_path: path to the data folder
    """

    def __init__(self, folder_path: str, solver: Solver = None):
        """
        Initialize the data handler

        Args:
            folder_path: folder path to the data files
        """
        super().__init__(solver)
        self.folder_path = folder_path

    def set_folder_path(self, folder_path: str):
        """Point the handler at a different data folder."""
        self.folder_path = folder_path

    def read_from_csv(self, filename):
        """Read a numeric CSV file under folder_path into a numpy array."""
        data = []
        with open(self.folder_path + "/" + filename) as f:
            csvFile = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
            for line in csvFile:
                data.append(line)
        return np.array(data)

    def read_one_calibration_file(self, filename: str,
                                  contain_excitation=False) -> CalibData:
        """
        Get one Calibration data object from file

        Args:
            filename: filename of the data
            contain_excitation: if the calibration contains excitation
                electrode data

        Returns:
            CalibData: calibration data
        """
        data = self.read_from_csv(filename)
        height_idx = get_height_idx_from_filename(filename)
        return CalibData(data, height_idx,
                         contain_excitation=contain_excitation)

    def read_one_data_file(self, filename: str,
                           contain_excitation=False) -> Data:
        """
        Get one reading data object from file

        Args:
            filename: filename of the data
            contain_excitation: if the data contains excitation electrode data

        Returns:
            Data: data object read
        """
        x, y, z = get_corr_from_filename(filename)
        data = self.read_from_csv(filename)
        return Data(data, x, y, z, contain_excitation=contain_excitation)


def get_corr_from_filename(filename: str):
    """Extract the (x, y, z) coordinates encoded in a data filename.

    Assumes names of the form ``a_b_c_X_Y_Z.ext`` where fields 3-5 of the
    underscore-separated stem are the integer coordinates -- TODO confirm
    against the dataset naming convention.
    """
    filename_raw = filename.split('.')[0]
    filename_arr = filename_raw.split('_')
    return int(filename_arr[3]), int(filename_arr[4]), int(filename_arr[5])


def get_height_idx_from_filename(filename: str):
    """Extract the trailing integer height index from a calibration filename."""
    filename_raw = filename.split('.')[0]
    return int(filename_raw.split('_')[-1])
CEIT/datahandler/DataHandler.py
import csv

import matplotlib.pyplot as plt
import numpy as np

from .Data import Data, CalibData
from ..Solver import reinitialize_solver, Solver
from ..util.utilities import get_config

REGULARIZATION_PARAM = get_config()["regularization_coeff"]
THRESHOLD_PERCENTAGE = 0.9


class DataPlotter:
    """
    Plotter for data.

    Attributes:
        solver: Solver object for inverse problems.
    """

    def __init__(self, solver: Solver = None):
        # Fall back to a freshly initialised solver when none is supplied.
        if solver is None:
            self.solver = reinitialize_solver(REGULARIZATION_PARAM)
        else:
            self.solver = solver

    def get_COP(self, data: Data):
        """
        Gets the center of position of the given data.

        Args:
            data: Data class defined in {@class Data.py}

        Returns:
            x and y coordinates of the center and mean value bigger than
            percentage threshold of the whole mesh

        Raises:
            ValueError: if delta_V has not been calculated yet, or if no
                mesh element exceeds the threshold.
        """
        if data.delta_V is None:
            # calc_delta_V must be called before a COP can be computed.
            raise ValueError("data.delta_V is None; call calc_delta_V first")
        capacitance = self.solver.solve(data.delta_V)
        # Threshold sits THRESHOLD_PERCENTAGE of the way from min to max.
        threshold = (np.max(capacitance) * THRESHOLD_PERCENTAGE
                     + np.min(capacitance) * (1 - THRESHOLD_PERCENTAGE))
        x_sum = 0.0
        y_sum = 0.0
        v_sum = 0.0
        count = 0
        for i, c in enumerate(capacitance):
            if c > threshold:
                idx = self.solver.mesh.detection_index[i]
                # elem_param columns 7 and 8 appear to hold element centre
                # coordinates -- TODO confirm against the mesh definition.
                x_sum += self.solver.mesh.elem_param[idx][7]
                y_sum += self.solver.mesh.elem_param[idx][8]
                v_sum += c
                count += 1
        if count == 0:
            # All values equal: nothing clears the strict ">" threshold.
            # Fail with a clear message instead of a ZeroDivisionError.
            raise ValueError("no mesh element exceeds the capacitance "
                             "threshold; cannot compute a COP")
        return x_sum / count, y_sum / count, v_sum / count

    def draw_COP(self, dataset, calibration, ax, title="COP"):
        """
        Draw a COP map given the dataset.

        Args:
            dataset: list of Data object
            calibration: CalibData Object for calibration
            ax: matplotlib.pyplot.axes object
            title: title of the graph

        Returns:
            values: mean value after threshold for every situation inside
                dataset list.
        """
        x_re = []
        y_re = []
        x_gt = []
        y_gt = []
        values = []
        for data in dataset:
            data.calc_delta_V(calibration)
            x, y, maxVal = self.get_COP(data)
            # Reconstruction coordinates are scaled by 2000 to match the
            # ground-truth units -- presumably mesh units to mm; verify.
            x_re.append(x * 2000)
            y_re.append(y * 2000)
            x_gt.append(data.x)
            y_gt.append(data.y)
            values.append(maxVal)
            # Faint line connecting each reconstruction to its ground truth.
            ax.plot([x * 2000, data.x], [y * 2000, data.y],
                    color=(0.1, 0.2, 0.5, 0.2))
        ax.set_xlim(-100, 100)
        ax.set_ylim(-100, 100)
        ax.scatter(x_re, y_re, label="reconstruction")
        ax.scatter(x_gt, y_gt, label="Ground Truth")
        ax.set_title(title)
        # ax.legend()
        ax.set_aspect('equal')
        ax.grid(True)
        return np.array(values)

    def draw_one_situation(self, data: Data, calibration: CalibData,
                           ax: plt.axes, vmax=None, vmin=None,
                           title_prefix=""):
        """
        Draw one situation with object on the graph.

        Args:
            data: Data object to be visualized
            calibration: CalibData object used for calibration
            ax: matplotlib.pyplot.axes object
            vmax: max value limit of the graph
            vmin: min value limit of the graph
            title_prefix: prefix for graph title

        Returns:
            im: matplotlib.image object
        """
        obj = data
        obj.calc_delta_V(calibration)
        print(obj.x, obj.y, obj.z)
        capacitance = self.solver.solve(obj.delta_V)
        e_x, e_y, _ = self.get_COP(obj)
        im = self.solver.plot_map_in_detection_range(ax, capacitance,
                                                     vmax=vmax, vmin=vmin)
        # Orange circle: ground truth; black circle: estimated COP.
        circle = plt.Circle((obj.x, obj.y), 13.5, color='#F55400', fill=False)
        circle_e = plt.Circle((e_x * 2000, e_y * 2000), 13.5, color='k',
                              fill=False)
        ax.set_xlim(-100, 100)
        ax.set_ylim(-100, 100)
        ax.add_artist(circle)
        ax.add_artist(circle_e)
        ax.set_title(title_prefix + "("
                     + ",".join((str(obj.x), str(obj.y), str(obj.z))) + ")")
        ax.set_aspect('equal')
        return im


class DataHandler(DataPlotter):
    """
    Data handler for handling recorded data on different positions.

    For the accurate folder structure, please check README.md or Dataset
    initializer.

    Attributes:
        folder_path: path to the data folder
    """

    def __init__(self, folder_path: str, solver: Solver = None):
        """
        Initialize the data handler

        Args:
            folder_path: folder path to the data files
        """
        super().__init__(solver)
        self.folder_path = folder_path

    def set_folder_path(self, folder_path: str):
        """Point the handler at a different data folder."""
        self.folder_path = folder_path

    def read_from_csv(self, filename):
        """Read a numeric CSV file under folder_path into a numpy array."""
        data = []
        with open(self.folder_path + "/" + filename) as f:
            csvFile = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
            for line in csvFile:
                data.append(line)
        return np.array(data)

    def read_one_calibration_file(self, filename: str,
                                  contain_excitation=False) -> CalibData:
        """
        Get one Calibration data object from file

        Args:
            filename: filename of the data
            contain_excitation: if the calibration contains excitation
                electrode data

        Returns:
            CalibData: calibration data
        """
        data = self.read_from_csv(filename)
        height_idx = get_height_idx_from_filename(filename)
        return CalibData(data, height_idx,
                         contain_excitation=contain_excitation)

    def read_one_data_file(self, filename: str,
                           contain_excitation=False) -> Data:
        """
        Get one reading data object from file

        Args:
            filename: filename of the data
            contain_excitation: if the data contains excitation electrode data

        Returns:
            Data: data object read
        """
        x, y, z = get_corr_from_filename(filename)
        data = self.read_from_csv(filename)
        return Data(data, x, y, z, contain_excitation=contain_excitation)


def get_corr_from_filename(filename: str):
    """Extract the (x, y, z) coordinates encoded in a data filename.

    Assumes names of the form ``a_b_c_X_Y_Z.ext`` where fields 3-5 of the
    underscore-separated stem are the integer coordinates -- TODO confirm
    against the dataset naming convention.
    """
    filename_raw = filename.split('.')[0]
    filename_arr = filename_raw.split('_')
    return int(filename_arr[3]), int(filename_arr[4]), int(filename_arr[5])


def get_height_idx_from_filename(filename: str):
    """Extract the trailing integer height index from a calibration filename."""
    filename_raw = filename.split('.')[0]
    return int(filename_raw.split('_')[-1])
0.789721
0.519765
from tensorflow import keras
from sklearned.tasks.skatertasks import cached_surrogate_data
from pprint import pprint
import numpy as np

# Best scores achieved so far (training loss displayed ~ 0.0022);
# used as the baseline when comparing new challenger models.
SLUGGISH_MOVING_AVERAGE_BEST = {'test_error': 0.02999787349626533,
                                'train_error': 0.022068321680294486,
                                'val_error': 0.025155224595028527}


def build_sluggish_moving_average_champion_model(n_inputs):
    """Champion network: 80 -> 16 -> 2 -> 1 dense layers, Adagrad optimizer.

    :param n_inputs: number of lagged inputs per sample.
    :return: a compiled keras.Sequential model (loss='mse').
    """
    model = keras.Sequential()
    kernel_initializer_0 = keras.initializers.RandomUniform(
        minval=0.01, maxval=0.02, seed=None)
    bias_initializer_0 = keras.initializers.RandomUniform(
        minval=0.01, maxval=0.21, seed=None)
    model.add(keras.layers.Dense(80, activation="linear",
                                 input_shape=(1, n_inputs),
                                 kernel_initializer=kernel_initializer_0,
                                 bias_initializer=bias_initializer_0))
    model.add(keras.layers.Dense(16, activation='softsign'))
    model.add(keras.layers.Dense(2, activation="tanh"))  # selu
    model.add(keras.layers.Dense(1, activation="linear"))
    optimizer = keras.optimizers.Adagrad(learning_rate=0.005)
    model.compile(loss='mse', optimizer=optimizer)
    return model


def build_sluggish_moving_average_challenger_model_overfitting(n_inputs):
    """Challenger network: 16 -> 8 -> 2 -> 1 dense layers, RMSprop optimizer.

    :param n_inputs: number of lagged inputs per sample.
    :return: a compiled keras.Sequential model (loss='mse').
    """
    model = keras.Sequential()
    kernel_initializer_0 = keras.initializers.RandomUniform(
        minval=0.1, maxval=0.11, seed=None)
    bias_initializer_0 = keras.initializers.RandomUniform(
        minval=-0.01, maxval=0, seed=None)
    model.add(keras.layers.Dense(16, activation="linear",
                                 input_shape=(1, n_inputs),
                                 kernel_initializer=kernel_initializer_0,
                                 bias_initializer=bias_initializer_0))
    model.add(keras.layers.Dense(8, activation='softsign'))
    model.add(keras.layers.Dense(2, activation="exponential"))
    model.add(keras.layers.Dense(1, activation="linear"))
    optimizer = keras.optimizers.RMSprop(learning_rate=0.001)
    model.compile(loss='mse', optimizer=optimizer)
    return model


def ktrain(d: dict, epochs=200):
    """Train the challenger model on surrogate data and summarise errors.

    :param d: surrogate data dict with x_/y_ train/val/test splits, the
        per-split y_*_typical scale factors and n_input.
    :param epochs: maximum training epochs (early stopping on loss,
        patience 25).
    :return: dict of normalized train/val/test errors.
    """
    callback = keras.callbacks.EarlyStopping(monitor='loss', patience=25)
    model = build_sluggish_moving_average_challenger_model_overfitting(
        n_inputs=d['n_input'])
    model.fit(x=d['x_train'], y=d['y_train'], epochs=epochs, verbose=1,
              callbacks=[callback])

    def _normalized_error(split):
        # MSE of predictions against targets, scaled by the split's
        # typical magnitude so the three errors are comparable.
        y_hat = model.predict(d['x_' + split])
        mse = float(keras.metrics.mean_squared_error(
            y_hat[:, 0, 0], d['y_' + split][:, 0]))
        return mse / d['y_' + split + '_typical']

    return {"train_error": _normalized_error('train'),
            "val_error": _normalized_error('val'),
            "test_error": _normalized_error('test')}


def compare_to_previous():
    """Train a fresh challenger and compare its test error to the best so far.

    :return: ktrain summary extended with 'test_error_ratio' (new / best).
    """
    d = cached_surrogate_data(fname='sluggish_moving_average', k=1, n_real=50,
                              n_samples=150, n_warm=100, n_input=80)
    summary = ktrain(d=d, epochs=1000)
    ratio = summary['test_error'] / SLUGGISH_MOVING_AVERAGE_BEST['test_error']
    summary['test_error_ratio'] = ratio
    return summary


if __name__ == '__main__':
    pprint(compare_to_previous())
experiments_on_skaters_old_style/keras_sluggish_moving_average.py
from tensorflow import keras from sklearned.tasks.skatertasks import cached_surrogate_data from pprint import pprint import numpy as np # loss displayed ~ 0.0022 SLUGGISH_MOVING_AVERAGE_BEST = {'test_error': 0.02999787349626533, 'train_error': 0.022068321680294486, 'val_error': 0.025155224595028527} def build_sluggish_moving_average_champion_model(n_inputs): model = keras.Sequential() kernel_initializer_0 = keras.initializers.RandomUniform(minval=0.01, maxval=0.02, seed=None) bias_initializer_0 = keras.initializers.RandomUniform(minval=0.01, maxval=0.21, seed=None) model.add(keras.layers.Dense(80, activation="linear",input_shape=(1, n_inputs), kernel_initializer=kernel_initializer_0, bias_initializer=bias_initializer_0)) model.add(keras.layers.Dense(16, activation='softsign')) model.add(keras.layers.Dense(2, activation="tanh")) # selu model.add(keras.layers.Dense(1, activation="linear")) optimizer = keras.optimizers.Adagrad(learning_rate=0.005) model.compile(loss='mse',optimizer=optimizer) return model def build_sluggish_moving_average_challenger_model_overfitting(n_inputs): model = keras.Sequential() kernel_initializer_0 = keras.initializers.RandomUniform(minval=0.1, maxval=0.11, seed=None) bias_initializer_0 = keras.initializers.RandomUniform(minval=-0.01, maxval=0, seed=None) model.add(keras.layers.Dense(16, activation="linear",input_shape=(1, n_inputs), kernel_initializer=kernel_initializer_0, bias_initializer=bias_initializer_0)) model.add(keras.layers.Dense(8, activation='softsign')) model.add(keras.layers.Dense(2, activation="exponential")) model.add(keras.layers.Dense(1, activation="linear")) optimizer = keras.optimizers.RMSprop(learning_rate=0.001) model.compile(loss='mse',optimizer=optimizer) return model def ktrain(d:dict, epochs=200): callback = keras.callbacks.EarlyStopping(monitor='loss', patience=25) model = build_sluggish_moving_average_challenger_model_overfitting(n_inputs=d['n_input']) model.fit(x=d['x_train'], y=d['y_train'], epochs=epochs, 
verbose=1, callbacks=[callback]) #model.fit(x=d['x_train'], y=d['y_train'], epochs=epochs, verbose=1) y_test_hat = model.predict(d['x_test']) test_error = float(keras.metrics.mean_squared_error(y_test_hat[:,0,0], d['y_test'][:,0])) y_val_hat = model.predict(d['x_val']) val_error = float(keras.metrics.mean_squared_error(y_val_hat[:,0,0], d['y_val'][:,0])) y_train_hat = model.predict(d['x_train']) train_error = float(keras.metrics.mean_squared_error(y_train_hat[:,0,0], d['y_train'][:,0])) summary = {"train_error": train_error/d['y_train_typical'], "val_error": val_error/d['y_val_typical'], "test_error": test_error/d['y_test_typical']} return summary def compare_to_previous(): d = cached_surrogate_data(fname='sluggish_moving_average', k=1, n_real=50, n_samples=150, n_warm = 100, n_input=80) summary = ktrain(d=d, epochs=1000) ratio = summary['test_error'] / SLUGGISH_MOVING_AVERAGE_BEST['test_error'] summary['test_error_ratio'] = ratio return summary if __name__=='__main__': pprint(compare_to_previous())
0.740174
0.41647
def value(registers, x): try: return int(x) except ValueError: return registers[x] def cpy(registers, src, dst): registers[dst] = value(registers, src) def inc(registers, reg): registers[reg] += 1 def dec(registers, reg): registers[reg] -= 1 def jnz(registers, val, tar): registers['pc'] += 0 if value(registers, val) == 0 else value(registers, tar) - 1 def tgl(registers, ins): mappings = {inc: dec, dec: inc, tgl: inc, jnz: cpy, cpy: jnz} target = registers['pc'] + value(registers, ins) registers['in'][target] = (mappings[registers['in'][target][0]], registers['in'][target][1]) def try_multiply(registers): try: pc = registers['pc'] chunk = registers['in'][pc:pc+6] instr, args = map(list, zip(*chunk)) if instr == [cpy, inc, dec, jnz, dec, jnz] \ and args[0][1] == args[2][0] == args[3][0] \ and args[4][0] == args[5][0] \ and args[1][0] != args[0][1] != args[4][0] != args[0][0] \ and args[3][1] == '-2' and args[5][1] == '-5': registers[args[1][0]] += value(registers, args[0][0]) * value(registers, args[4][0]) registers[args[2][0]] = 0 registers[args[4][0]] = 0 registers['pc'] += 5 return True except: pass return False def step(registers): if not try_multiply(registers): try: registers['in'][registers['pc']][0](registers, *registers['in'][registers['pc']][1]) except: pass def run(values): registers = values registers['pc'] = 0 registers['in'] = instr.copy() while registers['pc'] < len(instr): step(registers) registers['pc'] += 1 return registers['a'] with open('data/23.txt', 'r') as file: instr = map(str.split, map(str.strip, file.readlines())) instr = [(globals()[i[0]], i[1:]) for i in instr] print("Stage 1: %s" % run({'a': 7, 'b': 0, 'c': 0, 'd': 0})) print("Stage 2: %s" % run({'a': 12, 'b': 0, 'c': 0, 'd': 0}))
23.py
def value(registers, x): try: return int(x) except ValueError: return registers[x] def cpy(registers, src, dst): registers[dst] = value(registers, src) def inc(registers, reg): registers[reg] += 1 def dec(registers, reg): registers[reg] -= 1 def jnz(registers, val, tar): registers['pc'] += 0 if value(registers, val) == 0 else value(registers, tar) - 1 def tgl(registers, ins): mappings = {inc: dec, dec: inc, tgl: inc, jnz: cpy, cpy: jnz} target = registers['pc'] + value(registers, ins) registers['in'][target] = (mappings[registers['in'][target][0]], registers['in'][target][1]) def try_multiply(registers): try: pc = registers['pc'] chunk = registers['in'][pc:pc+6] instr, args = map(list, zip(*chunk)) if instr == [cpy, inc, dec, jnz, dec, jnz] \ and args[0][1] == args[2][0] == args[3][0] \ and args[4][0] == args[5][0] \ and args[1][0] != args[0][1] != args[4][0] != args[0][0] \ and args[3][1] == '-2' and args[5][1] == '-5': registers[args[1][0]] += value(registers, args[0][0]) * value(registers, args[4][0]) registers[args[2][0]] = 0 registers[args[4][0]] = 0 registers['pc'] += 5 return True except: pass return False def step(registers): if not try_multiply(registers): try: registers['in'][registers['pc']][0](registers, *registers['in'][registers['pc']][1]) except: pass def run(values): registers = values registers['pc'] = 0 registers['in'] = instr.copy() while registers['pc'] < len(instr): step(registers) registers['pc'] += 1 return registers['a'] with open('data/23.txt', 'r') as file: instr = map(str.split, map(str.strip, file.readlines())) instr = [(globals()[i[0]], i[1:]) for i in instr] print("Stage 1: %s" % run({'a': 7, 'b': 0, 'c': 0, 'd': 0})) print("Stage 2: %s" % run({'a': 12, 'b': 0, 'c': 0, 'd': 0}))
0.340047
0.619097
import pytest from pinakes.main.catalog.services.sanitize_parameters import ( SanitizeParameters, ) from pinakes.main.catalog.tests.factories import ( OrderItemFactory, PortfolioItemFactory, ServicePlanFactory, ) @pytest.mark.django_db def test_sanitize_parameters_from_service_plan_base(): fields = [ { "name": "Totally not a pass", "type": "password", "label": "Totally not a pass", "component": "text-field", "helperText": "", "isRequired": True, "initialValue": "", }, { "name": "most_important_var1", "label": "secret field 1", "component": "textarea-field", "helperText": "Has no effect on anything, ever.", "initialValue": "", }, { "name": "token idea", "label": "field 1", "component": "textarea-field", "helperText": "Don't look.", "initialValue": "", }, { "name": "name", "label": "field 1", "component": "textarea-field", "helperText": "That's not my name.", "initialValue": "{{product.artifacts.testk}}", "isSubstitution": True, }, ] base = {"schema": {"fields": fields}} service_parameters = { "name": "Joe", "Totally not a pass": "s3crete", "token idea": "my secret", } portfolio_item = PortfolioItemFactory() service_plan = ServicePlanFactory( portfolio_item=portfolio_item, base_schema=base ) order_item = OrderItemFactory( portfolio_item=portfolio_item, service_parameters=service_parameters, inventory_service_plan_ref=str(service_plan.id), ) svc = SanitizeParameters(order_item).process() assert svc.sanitized_parameters == { "name": "Joe", "Totally not a pass": "$protected$", "token idea": "$protected$", }
pinakes/main/catalog/tests/services/test_sanitize_parameters.py
import pytest from pinakes.main.catalog.services.sanitize_parameters import ( SanitizeParameters, ) from pinakes.main.catalog.tests.factories import ( OrderItemFactory, PortfolioItemFactory, ServicePlanFactory, ) @pytest.mark.django_db def test_sanitize_parameters_from_service_plan_base(): fields = [ { "name": "Totally not a pass", "type": "password", "label": "Totally not a pass", "component": "text-field", "helperText": "", "isRequired": True, "initialValue": "", }, { "name": "most_important_var1", "label": "secret field 1", "component": "textarea-field", "helperText": "Has no effect on anything, ever.", "initialValue": "", }, { "name": "token idea", "label": "field 1", "component": "textarea-field", "helperText": "Don't look.", "initialValue": "", }, { "name": "name", "label": "field 1", "component": "textarea-field", "helperText": "That's not my name.", "initialValue": "{{product.artifacts.testk}}", "isSubstitution": True, }, ] base = {"schema": {"fields": fields}} service_parameters = { "name": "Joe", "Totally not a pass": "s3crete", "token idea": "my secret", } portfolio_item = PortfolioItemFactory() service_plan = ServicePlanFactory( portfolio_item=portfolio_item, base_schema=base ) order_item = OrderItemFactory( portfolio_item=portfolio_item, service_parameters=service_parameters, inventory_service_plan_ref=str(service_plan.id), ) svc = SanitizeParameters(order_item).process() assert svc.sanitized_parameters == { "name": "Joe", "Totally not a pass": "$protected$", "token idea": "$protected$", }
0.469277
0.326943
import os,sys from time import process_time import argparse parser = argparse.ArgumentParser(description='This script works for rearrangement and indel analysis in fastq files for CRISPR-caused mutations in Rice. \ Default average length for fastq files is 150bp.\ Default length for sliding window in indel analysis is 25bp. \ Specify accordingly if changes are needed.') parser.add_argument('input_dir', type=str, help='Input dir name that contains fq files') parser.add_argument('-c', action='store_true',help='perform high coverage over fastq sequences for indel search in stage 2 analaysis') parser.add_argument('--output_dir', type=str, help='Output dir name for counts in csv files') parser.add_argument('--avg', default=150,type=int, help='enter the average length of fastq sequences for analysis (default is 150bp)') parser.add_argument('--l', default=25,type=int, help='enter the length of fragments for analysis (default is 25bp)') parser.add_argument('--m', default=1,type=int, help='which stage of analysis do you want to start (option to jump to stage 2 analaysis)') args = parser.parse_args() #print(args.input_dir) #print(args.l) #print(args.avg) #print(args.m) path_to_raw_data = os.getcwd()+'/'+args.input_dir+'/' if args.m == 1: print('start stage 1 analysis') #os.system('mkdir fqfiles') os.system('python -m analyze1.Section1 '+ path_to_raw_data) start_stage2 = input('start stage 2 indel analysis?[y/n]:') if start_stage2 == 'y': print('start indel analysis') if args.c: os.system('python -m analyze2.autoscan2 '+path_to_raw_data+' '+str(args.l)+' '+str(args.avg)) os.system('python -m analyze2.analysisindel2 '+os.getcwd()+' '+str(args.l)) else: os.system('python -m analyze2.autoscan '+path_to_raw_data+' '+str(args.l)+' '+str(args.avg)) os.system('python -m analyze2.analysisindel '+os.getcwd()+' '+str(args.l)) else: print('finished rearrangement analysis') elif args.m == 2: print('jump to stage 2 analysis') if args.c: os.system('python -m analyze2.autoscan2 
'+path_to_raw_data+' '+str(args.l)+' '+str(args.avg)) os.system('python -m analyze2.analysisindel2 '+os.getcwd()+' '+str(args.l)) else: os.system('python -m analyze2.autoscan '+path_to_raw_data+' '+str(args.l)+' '+str(args.avg)) os.system('python -m analyze2.analysisindel '+os.getcwd()+' '+str(args.l)) process_to_diversity_analysis = input('start diversity analysis?[y/n]:') if process_to_diversity_analysis == 'y': print('checking needed files...') os.system('python -m analyze2.pre_diversity_analysis_processing '+os.getcwd()) #add Peter's diversity analysis codes else: print('finish all analysis process')
master.py
import os,sys from time import process_time import argparse parser = argparse.ArgumentParser(description='This script works for rearrangement and indel analysis in fastq files for CRISPR-caused mutations in Rice. \ Default average length for fastq files is 150bp.\ Default length for sliding window in indel analysis is 25bp. \ Specify accordingly if changes are needed.') parser.add_argument('input_dir', type=str, help='Input dir name that contains fq files') parser.add_argument('-c', action='store_true',help='perform high coverage over fastq sequences for indel search in stage 2 analaysis') parser.add_argument('--output_dir', type=str, help='Output dir name for counts in csv files') parser.add_argument('--avg', default=150,type=int, help='enter the average length of fastq sequences for analysis (default is 150bp)') parser.add_argument('--l', default=25,type=int, help='enter the length of fragments for analysis (default is 25bp)') parser.add_argument('--m', default=1,type=int, help='which stage of analysis do you want to start (option to jump to stage 2 analaysis)') args = parser.parse_args() #print(args.input_dir) #print(args.l) #print(args.avg) #print(args.m) path_to_raw_data = os.getcwd()+'/'+args.input_dir+'/' if args.m == 1: print('start stage 1 analysis') #os.system('mkdir fqfiles') os.system('python -m analyze1.Section1 '+ path_to_raw_data) start_stage2 = input('start stage 2 indel analysis?[y/n]:') if start_stage2 == 'y': print('start indel analysis') if args.c: os.system('python -m analyze2.autoscan2 '+path_to_raw_data+' '+str(args.l)+' '+str(args.avg)) os.system('python -m analyze2.analysisindel2 '+os.getcwd()+' '+str(args.l)) else: os.system('python -m analyze2.autoscan '+path_to_raw_data+' '+str(args.l)+' '+str(args.avg)) os.system('python -m analyze2.analysisindel '+os.getcwd()+' '+str(args.l)) else: print('finished rearrangement analysis') elif args.m == 2: print('jump to stage 2 analysis') if args.c: os.system('python -m analyze2.autoscan2 
'+path_to_raw_data+' '+str(args.l)+' '+str(args.avg)) os.system('python -m analyze2.analysisindel2 '+os.getcwd()+' '+str(args.l)) else: os.system('python -m analyze2.autoscan '+path_to_raw_data+' '+str(args.l)+' '+str(args.avg)) os.system('python -m analyze2.analysisindel '+os.getcwd()+' '+str(args.l)) process_to_diversity_analysis = input('start diversity analysis?[y/n]:') if process_to_diversity_analysis == 'y': print('checking needed files...') os.system('python -m analyze2.pre_diversity_analysis_processing '+os.getcwd()) #add Peter's diversity analysis codes else: print('finish all analysis process')
0.060398
0.085251
import os import shutil import subprocess import tempfile import pytest import pysam from riboviz import h5 from riboviz import hisat2 from riboviz import sam_bam from riboviz import compare_files from riboviz import count_reads from riboviz import workflow_files from riboviz import workflow_r from riboviz.tools import prep_riboviz from riboviz import test @pytest.fixture(scope="module") def prep_riboviz_fixture(skip_workflow_fixture, config_fixture, nextflow_fixture): """ Run :py:mod:`riboviz.tools.prep_riboviz` if ``skip_workflow_fixture`` is not ``True``. :param skip_workflow_fixture: Should workflow not be run? :type skip_workflow_fixture: bool :param config_fixture: Configuration file :type config_fixture: str or unicode :param nextflow_fixture: Should Nextflow be run? :type nextflow_fixture: bool """ if not skip_workflow_fixture: if not nextflow_fixture: exit_code = prep_riboviz.prep_riboviz(config_fixture) else: cmd = ["nextflow", "run", test.NEXTFLOW_WORKFLOW, "-params-file", config_fixture, "-ansi-log", "false"] exit_code = subprocess.call(cmd) assert exit_code == 0, \ "prep_riboviz returned non-zero exit code %d" % exit_code @pytest.fixture(scope="function") def scratch_directory(): """ Create a scratch directory. :return: directory :rtype: str or unicode """ scratch_dir = tempfile.mkdtemp("tmp_scratch") yield scratch_dir shutil.rmtree(scratch_dir) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("index", list(range(1, test.NUM_INDICES))) def test_hisat2_build_index(expected_fixture, index_dir, index_prefix, index): """ Test ``hisat2-build`` index files for equality. See :py:func:`riboviz.compare_files.compare_files`. 
:param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param index_dir: Index files directory, from configuration file :type index_dir: str or unicode :param index_prefix: Index file name prefix :type index_prefix: str or unicode :param index: File name index :type index: int """ file_name = hisat2.HT2_FORMAT.format(index_prefix, index) index_dir_name = os.path.basename(os.path.normpath(index_dir)) compare_files.compare_files( os.path.join(expected_fixture, index_dir_name, file_name), os.path.join(index_dir, file_name)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_cutadapt_fq(expected_fixture, tmp_dir, sample): """ Test ``cutadapt`` FASTQ files for equality. See :py:func:`riboviz.compare_files.compare_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode """ tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, workflow_files.ADAPTER_TRIM_FQ), os.path.join(tmp_dir, sample, workflow_files.ADAPTER_TRIM_FQ)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_umitools_extract_fq(extract_umis, expected_fixture, tmp_dir, sample): """ Test ``umi_tools extract`` FASTQ files for equality. See :py:func:`riboviz.compare_files.compare_files`. If UMI extraction was not enabled in the configuration that produced the data then this test is skipped. :param extract_umi: Was UMI extraction configured? 
:type extract_umis: bool :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode """ if not extract_umis: pytest.skip('Skipped test applicable to UMI extraction') tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, workflow_files.UMI_EXTRACT_FQ), os.path.join(tmp_dir, sample, workflow_files.UMI_EXTRACT_FQ)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("file_name", [ workflow_files.NON_RRNA_FQ, workflow_files.UNALIGNED_FQ]) def test_hisat_fq(expected_fixture, tmp_dir, sample, file_name): """ Test ``hisat`` FASTQ files for equality. See :py:func:`riboviz.compare_files.compare_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode :param file_name: file name :type file_name: str or unicode """ tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, file_name), os.path.join(tmp_dir, sample, file_name)) def compare_sam_files(expected_directory, directory, scratch_directory, sample, file_name): """ Test SAM files for equality. The SAM files are sorted into temporary SAM files which are then compared. See :py:func:`riboviz.compare_files.compare_files`. 
:param expected_directory: Expected data directory :type expected_directory: str or unicode :param directory: Data directory :type directory: str or unicode :param scratch_directory: scratch files directory :type scratch_directory: str or unicode :param sample: sample name :type sample: str or unicode :param file_name: file name :type file_name: str or unicode """ dir_name = os.path.basename(os.path.normpath(directory)) expected_file = os.path.join( expected_directory, dir_name, sample, file_name) actual_file = os.path.join(directory, sample, file_name) expected_copy_dir = os.path.join(scratch_directory, "expected") os.mkdir(expected_copy_dir) actual_copy_dir = os.path.join(scratch_directory, "actual") os.mkdir(actual_copy_dir) expected_copy_file = os.path.join(expected_copy_dir, file_name) actual_copy_file = os.path.join(actual_copy_dir, file_name) pysam.sort("-o", expected_copy_file, expected_file) pysam.sort("-o", actual_copy_file, actual_file) compare_files.compare_files(expected_copy_file, actual_copy_file) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("file_name", [ workflow_files.ORF_MAP_SAM, workflow_files.RRNA_MAP_SAM]) def test_hisat2_sam(expected_fixture, tmp_dir, scratch_directory, sample, file_name): """ Test ``hisat`` SAM files for equality. The SAM files are sorted into temporary SAM files which are then compared. See :py:func:`compare_sam_files`. 
:param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param scratch_directory: scratch files directory :type scratch_directory: str or unicode :param sample: sample name :type sample: str or unicode :param file_name: file name :type file_name: str or unicode """ compare_sam_files(expected_fixture, tmp_dir, scratch_directory, sample, file_name) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_trim5p_mismatch_sam(expected_fixture, tmp_dir, scratch_directory, sample): """ Test :py:mod:`riboviz.tools.trim_5p_mismatch` SAM files for equality. The SAM files are sorted into temporary SAM files which are then compared. See :py:func:`compare_files.compare_sam_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param scratch_directory: scratch files directory :type scratch_directory: str or unicode :param sample: sample name :type sample: str or unicode """ compare_sam_files(expected_fixture, tmp_dir, scratch_directory, sample, workflow_files.ORF_MAP_CLEAN_SAM) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_trim5p_mismatch_tsv(expected_fixture, tmp_dir, sample): """ Test :py:mod:`riboviz.tools.trim_5p_mismatch` TSV files for equality. See :py:func:`riboviz.compare_files.compare_files`. 
:param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode """ tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, workflow_files.TRIM_5P_MISMATCH_TSV), os.path.join(tmp_dir, sample, workflow_files.TRIM_5P_MISMATCH_TSV)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_samtools_view_sort_index_pre_dedup_bam( dedup_umis, expected_fixture, tmp_dir, sample, nextflow_fixture): """ Test ``samtools view | samtools sort`` BAM and ``samtools index`` BAI files for equality. See :py:func:`riboviz.compare_files.compare_files`. If UMI deduplication was not enabled in the configuration that produced the data then this test is skipped. If Nextflow tests were requested then this test is skipped. :param dedup_umi: Was UMI deduplication configured? :type dedup_umis: bool :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode :param nextflow_fixture: Should Nextflow tests be run? 
:type nextflow_fixture: bool """ if not dedup_umis: pytest.skip('Skipped test applicable to UMI deduplication') if nextflow_fixture: pytest.skip('Skipped test not applicable to Nextflow') tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, workflow_files.PRE_DEDUP_BAM), os.path.join(tmp_dir, sample, workflow_files.PRE_DEDUP_BAM)) bai_file_name = sam_bam.BAI_FORMAT.format(workflow_files.PRE_DEDUP_BAM) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, bai_file_name), os.path.join(tmp_dir, sample, bai_file_name)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_samtools_view_sort_index_orf_map_clean_bam( expected_fixture, tmp_dir, sample, nextflow_fixture): """ Test ``samtools view | samtools sort`` BAM and ``samtools index`` BAI files for equality. See :py:func:`riboviz.compare_files.compare_files`. If Nextflow tests were requested then this test is run. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode :param nextflow_fixture: Should Nextflow tests be run? 
:type nextflow_fixture: bool """ if not nextflow_fixture: pytest.skip('Skipped test applicable to Nextflow only') tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, workflow_files.ORF_MAP_CLEAN_BAM), os.path.join(tmp_dir, sample, workflow_files.ORF_MAP_CLEAN_BAM)) bai_file_name = sam_bam.BAI_FORMAT.format(workflow_files.ORF_MAP_CLEAN_BAM) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, bai_file_name), os.path.join(tmp_dir, sample, bai_file_name)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_samtools_index_dedup_bam(dedup_umis, tmp_dir, sample, nextflow_fixture): """ Test ``samtools index`` BAI files for equality. See :py:func:`riboviz.compare_files.compare_files`. If UMI deduplication was not enabled in the configuration that produced the data then this test is skipped. If Nextflow tests were requested then this test is run. :param dedup_umi: Was UMI deduplication configured? :type dedup_umis: bool :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode :param nextflow_fixture: Should Nextflow tests be run? :type nextflow_fixture: bool """ if not dedup_umis: pytest.skip('Skipped test applicable to UMI deduplication') if not nextflow_fixture: pytest.skip('Skipped test applicable to Nextflow only') assert os.path.exists(os.path.join(tmp_dir, sample, workflow_files.DEDUP_BAM)) assert os.path.exists(os.path.join( tmp_dir, sample, sam_bam.BAI_FORMAT.format(workflow_files.DEDUP_BAM))) @pytest.mark.usefixtures("prep_riboviz_fixture") def test_samtools_view_sort_index(dedup_umis, expected_fixture, output_dir, sample): """ Test ``samtools view | samtools sort`` BAM and ``samtools index`` BAI files for equality. See :py:func:`riboviz.compare_files.compare_files`. 
If UMI deduplication was enabled in the configuration that produced the data then the only the existence of the files are checked as these files can differ between runs depending on which reads are removed by ``umi_tools dedup``. :param dedup_umi: Was UMI deduplication configured? :type dedup_umis: bool :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode :param sample: sample name :type sample: str or unicode """ file_name = sam_bam.BAM_FORMAT.format(sample) bai_file_name = sam_bam.BAI_FORMAT.format(file_name) output_dir_name = os.path.basename(os.path.normpath(output_dir)) expected_file = os.path.join( expected_fixture, output_dir_name, sample, file_name) expected_bai_file = os.path.join( expected_fixture, output_dir_name, sample, bai_file_name) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') actual_file = os.path.join(output_dir, sample, file_name) actual_bai_file = os.path.join(output_dir, sample, bai_file_name) assert os.path.exists(actual_file) assert os.path.exists(actual_bai_file) if dedup_umis: return compare_files.compare_files(expected_file, actual_file) compare_files.compare_files(expected_bai_file, actual_bai_file) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("stats_file", ["edit_distance.tsv", "per_umi_per_position.tsv", "per_umi.tsv"]) def test_umitools_dedup_stats_tsv( dedup_umis, dedup_stats, expected_fixture, tmp_dir, sample, stats_file): """ Test ``umi_tools dedup --output-stats`` TSV files exist. If UMI deduplication was not enabled in the configuration that produced the data then this test is skipped. If UMI deduplication statistics were not enabled in the configuration that produced the data then this test is skipped. 
As these files can differ between runs depending on which reads are removed by ``umi_tools dedup``, only the existence of the files is checked. :param dedup_umi: Was UMI deduplication configured? :type dedup_umis: bool :param dedup_stats: Were UMI deduplication statistics enabled? :type dedup_stats: bool :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode :param stats_file: statistics file name :type stats_file: str or unicode """ if not dedup_umis: pytest.skip('Skipped test applicable to UMI deduplication') if not dedup_stats: pytest.skip('Skipped test applicable to UMI deduplication statistics') file_name = os.path.join(sample, workflow_files.DEDUP_STATS_FORMAT.format( stats_file)) tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) expected_file = os.path.join(expected_fixture, tmp_dir_name, file_name) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') actual_file = os.path.join(tmp_dir, file_name) assert os.path.exists(actual_file) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_umitools_pre_dedup_group_tsv( group_umis, expected_fixture, tmp_dir, sample): """ Test ``umi_tools group`` TSV files for equality. See :py:func:`riboviz.compare_files.compare_files`. If UMI grouping was not enabled in the configuration that produced the data then this test is skipped. :param dedup_umi: Was UMI deduplication configured? 
:type dedup_umis: bool :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode """ if not group_umis: pytest.skip('Skipped test applicable to UMI groups') tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, workflow_files.PRE_DEDUP_GROUPS_TSV), os.path.join(tmp_dir, sample, workflow_files.PRE_DEDUP_GROUPS_TSV)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_umitools_post_dedup_group_tsv(group_umis, tmp_dir, sample): """ Test ``umi_tools group`` TSV files for equality. See :py:func:`riboviz.compare_files.compare_files`. As these files can differ between runs depending on which reads are removed by ``umi_tools dedup``, only the existence of the file is checked. If UMI grouping was not enabled in the configuration that produced the data then this test is skipped. :param dedup_umi: Was UMI deduplication configured? :type dedup_umis: bool :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode """ if not group_umis: pytest.skip('Skipped test applicable to UMI groups') assert os.path.exists( os.path.join(tmp_dir, sample, workflow_files.POST_DEDUP_GROUPS_TSV)) @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("file_name", [ workflow_files.MINUS_BEDGRAPH, workflow_files.PLUS_BEDGRAPH]) def test_bedtools_bedgraph(expected_fixture, output_dir, sample, file_name): """ Test ``bedtools genomecov`` bedgraph files for equality. See :py:func:`riboviz.compare_files.compare_files`. 
:param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode :param sample: sample name :type sample: str or unicode :param file_name: file name :type file_name: str or unicode """ output_dir_name = os.path.basename(os.path.normpath(output_dir)) expected_file = os.path.join(expected_fixture, output_dir_name, sample, file_name) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') compare_files.compare_files( expected_file, os.path.join(output_dir, sample, file_name)) @pytest.mark.usefixtures("prep_riboviz_fixture") def test_bam_to_h5_h5(expected_fixture, output_dir, sample): """ Test ``bam_to_h5.R`` H5 files for equality. See :py:func:`riboviz.compare_files.compare_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode :param sample: sample name :type sample: str or unicode """ file_name = h5.H5_FORMAT.format(sample) output_dir_name = os.path.basename(os.path.normpath(output_dir)) expected_file = os.path.join(expected_fixture, output_dir_name, sample, file_name) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') compare_files.compare_files( expected_file, os.path.join(output_dir, sample, file_name)) @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("file_name", [workflow_r.THREE_NT_PERIODICITY_TSV, workflow_r.CODON_RIBODENS_TSV, workflow_r.POS_SP_NT_FREQ_TSV, workflow_r.POS_SP_RPF_NORM_READS_TSV, workflow_r.READ_LENGTHS_TSV, workflow_r.THREE_NT_FRAME_BY_GENE_TSV, workflow_r.TPMS_TSV]) def test_generate_stats_figs_tsv(expected_fixture, output_dir, sample, file_name): """ Test ``generate_stats_figs.R`` TSV files for equality. See :py:func:`riboviz.compare_files.compare_files`. 
:param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode :param sample: sample name :type sample: str or unicode :param file_name: file name :type file_name: str or unicode """ output_dir_name = os.path.basename(os.path.normpath(output_dir)) expected_file = os.path.join(expected_fixture, output_dir_name, sample, file_name) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') compare_files.compare_files( expected_file, os.path.join(output_dir, sample, file_name)) @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("file_name", [workflow_r.THREE_NT_PERIODICITY_PDF, workflow_r.CODON_RIBODENS_PDF, workflow_r.FEATURES_PDF, workflow_r.POS_SP_RPF_NORM_READS_PDF, workflow_r.READ_LENGTHS_PDF, workflow_r.START_CODON_RIBOGRID_BAR_PDF, workflow_r.START_CODON_RIBOGRID_PDF, workflow_r.THREE_NT_FRAME_PROP_BY_GENE_PDF]) def test_generate_stats_figs_pdf(expected_fixture, output_dir, sample, file_name): """ Test ``generate_stats_figs.R`` PDF files for equality. See :py:func:`riboviz.compare_files.compare_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode :param sample: sample name :type sample: str or unicode :param file_name: file name :type file_name: str or unicode """ output_dir_name = os.path.basename(os.path.normpath(output_dir)) expected_file = os.path.join(expected_fixture, output_dir_name, sample, file_name) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') compare_files.compare_files( expected_file, os.path.join(output_dir, sample, file_name)) @pytest.mark.usefixtures("prep_riboviz_fixture") def test_collate_tpms_tsv(expected_fixture, output_dir): """ Test ``collate_tpms.R`` TSV files for equality. 
See :py:func:`riboviz.compare_files.compare_files`. Test non-sample-specific output TSV files for equality. See :py:func:`riboviz.compare_files.compare_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode """ output_dir_name = os.path.basename(os.path.normpath(output_dir)) expected_file = os.path.join(expected_fixture, output_dir_name, workflow_r.TPMS_COLLATED_TSV) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') compare_files.compare_files( expected_file, os.path.join(output_dir, workflow_r.TPMS_COLLATED_TSV)) @pytest.mark.usefixtures("prep_riboviz_fixture") def test_read_counts_tsv(expected_fixture, output_dir): """ Test :py:mod:`riboviz.tools.count_reads` TSV files for equality. See :py:func:`riboviz.compare_files.compare_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode """ output_dir_name = os.path.basename(os.path.normpath(output_dir)) count_reads.equal_read_counts( os.path.join(expected_fixture, output_dir_name, workflow_files.READ_COUNTS_FILE), os.path.join(output_dir, workflow_files.READ_COUNTS_FILE))
riboviz/test/regression/test_regression.py
import os import shutil import subprocess import tempfile import pytest import pysam from riboviz import h5 from riboviz import hisat2 from riboviz import sam_bam from riboviz import compare_files from riboviz import count_reads from riboviz import workflow_files from riboviz import workflow_r from riboviz.tools import prep_riboviz from riboviz import test @pytest.fixture(scope="module") def prep_riboviz_fixture(skip_workflow_fixture, config_fixture, nextflow_fixture): """ Run :py:mod:`riboviz.tools.prep_riboviz` if ``skip_workflow_fixture`` is not ``True``. :param skip_workflow_fixture: Should workflow not be run? :type skip_workflow_fixture: bool :param config_fixture: Configuration file :type config_fixture: str or unicode :param nextflow_fixture: Should Nextflow be run? :type nextflow_fixture: bool """ if not skip_workflow_fixture: if not nextflow_fixture: exit_code = prep_riboviz.prep_riboviz(config_fixture) else: cmd = ["nextflow", "run", test.NEXTFLOW_WORKFLOW, "-params-file", config_fixture, "-ansi-log", "false"] exit_code = subprocess.call(cmd) assert exit_code == 0, \ "prep_riboviz returned non-zero exit code %d" % exit_code @pytest.fixture(scope="function") def scratch_directory(): """ Create a scratch directory. :return: directory :rtype: str or unicode """ scratch_dir = tempfile.mkdtemp("tmp_scratch") yield scratch_dir shutil.rmtree(scratch_dir) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("index", list(range(1, test.NUM_INDICES))) def test_hisat2_build_index(expected_fixture, index_dir, index_prefix, index): """ Test ``hisat2-build`` index files for equality. See :py:func:`riboviz.compare_files.compare_files`. 
:param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param index_dir: Index files directory, from configuration file :type index_dir: str or unicode :param index_prefix: Index file name prefix :type index_prefix: str or unicode :param index: File name index :type index: int """ file_name = hisat2.HT2_FORMAT.format(index_prefix, index) index_dir_name = os.path.basename(os.path.normpath(index_dir)) compare_files.compare_files( os.path.join(expected_fixture, index_dir_name, file_name), os.path.join(index_dir, file_name)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_cutadapt_fq(expected_fixture, tmp_dir, sample): """ Test ``cutadapt`` FASTQ files for equality. See :py:func:`riboviz.compare_files.compare_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode """ tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, workflow_files.ADAPTER_TRIM_FQ), os.path.join(tmp_dir, sample, workflow_files.ADAPTER_TRIM_FQ)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_umitools_extract_fq(extract_umis, expected_fixture, tmp_dir, sample): """ Test ``umi_tools extract`` FASTQ files for equality. See :py:func:`riboviz.compare_files.compare_files`. If UMI extraction was not enabled in the configuration that produced the data then this test is skipped. :param extract_umi: Was UMI extraction configured? 
:type extract_umis: bool :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode """ if not extract_umis: pytest.skip('Skipped test applicable to UMI extraction') tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, workflow_files.UMI_EXTRACT_FQ), os.path.join(tmp_dir, sample, workflow_files.UMI_EXTRACT_FQ)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("file_name", [ workflow_files.NON_RRNA_FQ, workflow_files.UNALIGNED_FQ]) def test_hisat_fq(expected_fixture, tmp_dir, sample, file_name): """ Test ``hisat`` FASTQ files for equality. See :py:func:`riboviz.compare_files.compare_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode :param file_name: file name :type file_name: str or unicode """ tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, file_name), os.path.join(tmp_dir, sample, file_name)) def compare_sam_files(expected_directory, directory, scratch_directory, sample, file_name): """ Test SAM files for equality. The SAM files are sorted into temporary SAM files which are then compared. See :py:func:`riboviz.compare_files.compare_files`. 
:param expected_directory: Expected data directory :type expected_directory: str or unicode :param directory: Data directory :type directory: str or unicode :param scratch_directory: scratch files directory :type scratch_directory: str or unicode :param sample: sample name :type sample: str or unicode :param file_name: file name :type file_name: str or unicode """ dir_name = os.path.basename(os.path.normpath(directory)) expected_file = os.path.join( expected_directory, dir_name, sample, file_name) actual_file = os.path.join(directory, sample, file_name) expected_copy_dir = os.path.join(scratch_directory, "expected") os.mkdir(expected_copy_dir) actual_copy_dir = os.path.join(scratch_directory, "actual") os.mkdir(actual_copy_dir) expected_copy_file = os.path.join(expected_copy_dir, file_name) actual_copy_file = os.path.join(actual_copy_dir, file_name) pysam.sort("-o", expected_copy_file, expected_file) pysam.sort("-o", actual_copy_file, actual_file) compare_files.compare_files(expected_copy_file, actual_copy_file) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("file_name", [ workflow_files.ORF_MAP_SAM, workflow_files.RRNA_MAP_SAM]) def test_hisat2_sam(expected_fixture, tmp_dir, scratch_directory, sample, file_name): """ Test ``hisat`` SAM files for equality. The SAM files are sorted into temporary SAM files which are then compared. See :py:func:`compare_sam_files`. 
:param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param scratch_directory: scratch files directory :type scratch_directory: str or unicode :param sample: sample name :type sample: str or unicode :param file_name: file name :type file_name: str or unicode """ compare_sam_files(expected_fixture, tmp_dir, scratch_directory, sample, file_name) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_trim5p_mismatch_sam(expected_fixture, tmp_dir, scratch_directory, sample): """ Test :py:mod:`riboviz.tools.trim_5p_mismatch` SAM files for equality. The SAM files are sorted into temporary SAM files which are then compared. See :py:func:`compare_files.compare_sam_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param scratch_directory: scratch files directory :type scratch_directory: str or unicode :param sample: sample name :type sample: str or unicode """ compare_sam_files(expected_fixture, tmp_dir, scratch_directory, sample, workflow_files.ORF_MAP_CLEAN_SAM) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_trim5p_mismatch_tsv(expected_fixture, tmp_dir, sample): """ Test :py:mod:`riboviz.tools.trim_5p_mismatch` TSV files for equality. See :py:func:`riboviz.compare_files.compare_files`. 
:param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode """ tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, workflow_files.TRIM_5P_MISMATCH_TSV), os.path.join(tmp_dir, sample, workflow_files.TRIM_5P_MISMATCH_TSV)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_samtools_view_sort_index_pre_dedup_bam( dedup_umis, expected_fixture, tmp_dir, sample, nextflow_fixture): """ Test ``samtools view | samtools sort`` BAM and ``samtools index`` BAI files for equality. See :py:func:`riboviz.compare_files.compare_files`. If UMI deduplication was not enabled in the configuration that produced the data then this test is skipped. If Nextflow tests were requested then this test is skipped. :param dedup_umi: Was UMI deduplication configured? :type dedup_umis: bool :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode :param nextflow_fixture: Should Nextflow tests be run? 
:type nextflow_fixture: bool """ if not dedup_umis: pytest.skip('Skipped test applicable to UMI deduplication') if nextflow_fixture: pytest.skip('Skipped test not applicable to Nextflow') tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, workflow_files.PRE_DEDUP_BAM), os.path.join(tmp_dir, sample, workflow_files.PRE_DEDUP_BAM)) bai_file_name = sam_bam.BAI_FORMAT.format(workflow_files.PRE_DEDUP_BAM) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, bai_file_name), os.path.join(tmp_dir, sample, bai_file_name)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_samtools_view_sort_index_orf_map_clean_bam( expected_fixture, tmp_dir, sample, nextflow_fixture): """ Test ``samtools view | samtools sort`` BAM and ``samtools index`` BAI files for equality. See :py:func:`riboviz.compare_files.compare_files`. If Nextflow tests were requested then this test is run. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode :param nextflow_fixture: Should Nextflow tests be run? 
:type nextflow_fixture: bool """ if not nextflow_fixture: pytest.skip('Skipped test applicable to Nextflow only') tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, workflow_files.ORF_MAP_CLEAN_BAM), os.path.join(tmp_dir, sample, workflow_files.ORF_MAP_CLEAN_BAM)) bai_file_name = sam_bam.BAI_FORMAT.format(workflow_files.ORF_MAP_CLEAN_BAM) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, bai_file_name), os.path.join(tmp_dir, sample, bai_file_name)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_samtools_index_dedup_bam(dedup_umis, tmp_dir, sample, nextflow_fixture): """ Test ``samtools index`` BAI files for equality. See :py:func:`riboviz.compare_files.compare_files`. If UMI deduplication was not enabled in the configuration that produced the data then this test is skipped. If Nextflow tests were requested then this test is run. :param dedup_umi: Was UMI deduplication configured? :type dedup_umis: bool :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode :param nextflow_fixture: Should Nextflow tests be run? :type nextflow_fixture: bool """ if not dedup_umis: pytest.skip('Skipped test applicable to UMI deduplication') if not nextflow_fixture: pytest.skip('Skipped test applicable to Nextflow only') assert os.path.exists(os.path.join(tmp_dir, sample, workflow_files.DEDUP_BAM)) assert os.path.exists(os.path.join( tmp_dir, sample, sam_bam.BAI_FORMAT.format(workflow_files.DEDUP_BAM))) @pytest.mark.usefixtures("prep_riboviz_fixture") def test_samtools_view_sort_index(dedup_umis, expected_fixture, output_dir, sample): """ Test ``samtools view | samtools sort`` BAM and ``samtools index`` BAI files for equality. See :py:func:`riboviz.compare_files.compare_files`. 
If UMI deduplication was enabled in the configuration that produced the data then the only the existence of the files are checked as these files can differ between runs depending on which reads are removed by ``umi_tools dedup``. :param dedup_umi: Was UMI deduplication configured? :type dedup_umis: bool :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode :param sample: sample name :type sample: str or unicode """ file_name = sam_bam.BAM_FORMAT.format(sample) bai_file_name = sam_bam.BAI_FORMAT.format(file_name) output_dir_name = os.path.basename(os.path.normpath(output_dir)) expected_file = os.path.join( expected_fixture, output_dir_name, sample, file_name) expected_bai_file = os.path.join( expected_fixture, output_dir_name, sample, bai_file_name) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') actual_file = os.path.join(output_dir, sample, file_name) actual_bai_file = os.path.join(output_dir, sample, bai_file_name) assert os.path.exists(actual_file) assert os.path.exists(actual_bai_file) if dedup_umis: return compare_files.compare_files(expected_file, actual_file) compare_files.compare_files(expected_bai_file, actual_bai_file) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("stats_file", ["edit_distance.tsv", "per_umi_per_position.tsv", "per_umi.tsv"]) def test_umitools_dedup_stats_tsv( dedup_umis, dedup_stats, expected_fixture, tmp_dir, sample, stats_file): """ Test ``umi_tools dedup --output-stats`` TSV files exist. If UMI deduplication was not enabled in the configuration that produced the data then this test is skipped. If UMI deduplication statistics were not enabled in the configuration that produced the data then this test is skipped. 
As these files can differ between runs depending on which reads are removed by ``umi_tools dedup``, only the existence of the files is checked. :param dedup_umi: Was UMI deduplication configured? :type dedup_umis: bool :param dedup_stats: Were UMI deduplication statistics enabled? :type dedup_stats: bool :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode :param stats_file: statistics file name :type stats_file: str or unicode """ if not dedup_umis: pytest.skip('Skipped test applicable to UMI deduplication') if not dedup_stats: pytest.skip('Skipped test applicable to UMI deduplication statistics') file_name = os.path.join(sample, workflow_files.DEDUP_STATS_FORMAT.format( stats_file)) tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) expected_file = os.path.join(expected_fixture, tmp_dir_name, file_name) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') actual_file = os.path.join(tmp_dir, file_name) assert os.path.exists(actual_file) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_umitools_pre_dedup_group_tsv( group_umis, expected_fixture, tmp_dir, sample): """ Test ``umi_tools group`` TSV files for equality. See :py:func:`riboviz.compare_files.compare_files`. If UMI grouping was not enabled in the configuration that produced the data then this test is skipped. :param dedup_umi: Was UMI deduplication configured? 
:type dedup_umis: bool :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode """ if not group_umis: pytest.skip('Skipped test applicable to UMI groups') tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir)) compare_files.compare_files( os.path.join(expected_fixture, tmp_dir_name, sample, workflow_files.PRE_DEDUP_GROUPS_TSV), os.path.join(tmp_dir, sample, workflow_files.PRE_DEDUP_GROUPS_TSV)) @pytest.mark.usefixtures("skip_index_tmp_fixture") @pytest.mark.usefixtures("prep_riboviz_fixture") def test_umitools_post_dedup_group_tsv(group_umis, tmp_dir, sample): """ Test ``umi_tools group`` TSV files for equality. See :py:func:`riboviz.compare_files.compare_files`. As these files can differ between runs depending on which reads are removed by ``umi_tools dedup``, only the existence of the file is checked. If UMI grouping was not enabled in the configuration that produced the data then this test is skipped. :param dedup_umi: Was UMI deduplication configured? :type dedup_umis: bool :param tmp_dir: Temporary directory, from configuration file :type tmp_dir: str or unicode :param sample: sample name :type sample: str or unicode """ if not group_umis: pytest.skip('Skipped test applicable to UMI groups') assert os.path.exists( os.path.join(tmp_dir, sample, workflow_files.POST_DEDUP_GROUPS_TSV)) @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("file_name", [ workflow_files.MINUS_BEDGRAPH, workflow_files.PLUS_BEDGRAPH]) def test_bedtools_bedgraph(expected_fixture, output_dir, sample, file_name): """ Test ``bedtools genomecov`` bedgraph files for equality. See :py:func:`riboviz.compare_files.compare_files`. 
:param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode :param sample: sample name :type sample: str or unicode :param file_name: file name :type file_name: str or unicode """ output_dir_name = os.path.basename(os.path.normpath(output_dir)) expected_file = os.path.join(expected_fixture, output_dir_name, sample, file_name) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') compare_files.compare_files( expected_file, os.path.join(output_dir, sample, file_name)) @pytest.mark.usefixtures("prep_riboviz_fixture") def test_bam_to_h5_h5(expected_fixture, output_dir, sample): """ Test ``bam_to_h5.R`` H5 files for equality. See :py:func:`riboviz.compare_files.compare_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode :param sample: sample name :type sample: str or unicode """ file_name = h5.H5_FORMAT.format(sample) output_dir_name = os.path.basename(os.path.normpath(output_dir)) expected_file = os.path.join(expected_fixture, output_dir_name, sample, file_name) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') compare_files.compare_files( expected_file, os.path.join(output_dir, sample, file_name)) @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("file_name", [workflow_r.THREE_NT_PERIODICITY_TSV, workflow_r.CODON_RIBODENS_TSV, workflow_r.POS_SP_NT_FREQ_TSV, workflow_r.POS_SP_RPF_NORM_READS_TSV, workflow_r.READ_LENGTHS_TSV, workflow_r.THREE_NT_FRAME_BY_GENE_TSV, workflow_r.TPMS_TSV]) def test_generate_stats_figs_tsv(expected_fixture, output_dir, sample, file_name): """ Test ``generate_stats_figs.R`` TSV files for equality. See :py:func:`riboviz.compare_files.compare_files`. 
:param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode :param sample: sample name :type sample: str or unicode :param file_name: file name :type file_name: str or unicode """ output_dir_name = os.path.basename(os.path.normpath(output_dir)) expected_file = os.path.join(expected_fixture, output_dir_name, sample, file_name) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') compare_files.compare_files( expected_file, os.path.join(output_dir, sample, file_name)) @pytest.mark.usefixtures("prep_riboviz_fixture") @pytest.mark.parametrize("file_name", [workflow_r.THREE_NT_PERIODICITY_PDF, workflow_r.CODON_RIBODENS_PDF, workflow_r.FEATURES_PDF, workflow_r.POS_SP_RPF_NORM_READS_PDF, workflow_r.READ_LENGTHS_PDF, workflow_r.START_CODON_RIBOGRID_BAR_PDF, workflow_r.START_CODON_RIBOGRID_PDF, workflow_r.THREE_NT_FRAME_PROP_BY_GENE_PDF]) def test_generate_stats_figs_pdf(expected_fixture, output_dir, sample, file_name): """ Test ``generate_stats_figs.R`` PDF files for equality. See :py:func:`riboviz.compare_files.compare_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode :param sample: sample name :type sample: str or unicode :param file_name: file name :type file_name: str or unicode """ output_dir_name = os.path.basename(os.path.normpath(output_dir)) expected_file = os.path.join(expected_fixture, output_dir_name, sample, file_name) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') compare_files.compare_files( expected_file, os.path.join(output_dir, sample, file_name)) @pytest.mark.usefixtures("prep_riboviz_fixture") def test_collate_tpms_tsv(expected_fixture, output_dir): """ Test ``collate_tpms.R`` TSV files for equality. 
See :py:func:`riboviz.compare_files.compare_files`. Test non-sample-specific output TSV files for equality. See :py:func:`riboviz.compare_files.compare_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode """ output_dir_name = os.path.basename(os.path.normpath(output_dir)) expected_file = os.path.join(expected_fixture, output_dir_name, workflow_r.TPMS_COLLATED_TSV) if not os.path.exists(expected_file): pytest.skip('Skipped as expected file does not exist') compare_files.compare_files( expected_file, os.path.join(output_dir, workflow_r.TPMS_COLLATED_TSV)) @pytest.mark.usefixtures("prep_riboviz_fixture") def test_read_counts_tsv(expected_fixture, output_dir): """ Test :py:mod:`riboviz.tools.count_reads` TSV files for equality. See :py:func:`riboviz.compare_files.compare_files`. :param expected_fixture: Expected data directory :type expected_fixture: str or unicode :param output_dir: Output directory, from configuration file :type output_dir: str or unicode """ output_dir_name = os.path.basename(os.path.normpath(output_dir)) count_reads.equal_read_counts( os.path.join(expected_fixture, output_dir_name, workflow_files.READ_COUNTS_FILE), os.path.join(output_dir, workflow_files.READ_COUNTS_FILE))
0.548915
0.445891
import roslib import rospy from geometry_msgs.msg import PoseStamped from moveit_msgs.srv import * from moveit_msgs.msg import RobotState from std_msgs.msg import Float64MultiArray from std_msgs.msg import String from std_msgs.msg import Float32 from std_msgs.msg import MultiArrayDimension from sensor_msgs.msg import JointState from visualization_msgs.msg import ( Marker, InteractiveMarker, InteractiveMarkerControl, InteractiveMarkerFeedback) from interactive_markers.interactive_marker_server import InteractiveMarkerServer server = None def makeInteractiveMarker(name, description): global fixed_frame interactive_marker = InteractiveMarker() interactive_marker.header.frame_id = fixed_frame interactive_marker.name = name interactive_marker.description = description return interactive_marker def makeInteractiveMarkerControl(interactive_marker, mode): interactive_marker_control = InteractiveMarkerControl() interactive_marker_control.always_visible = True interactive_marker_control.interaction_mode = mode interactive_marker.controls.append(interactive_marker_control) return interactive_marker_control def setOrientation(w, x, y, z, marker): marker.orientation.w = w marker.orientation.x = x marker.orientation.y = y marker.orientation.z = z def setColor(red, green, blue, alpha, marker): marker.color.r = red marker.color.g = green marker.color.b = blue marker.color.a = alpha def callback(req): print req return GetPositionIKResponse def initial_callback(msg): global joint_states,joint_names, initial_joint_position, group_name group_name = msg.data joint_names = rospy.get_param("/link_group/" + group_name) initial_joint_position = [0] * len(joint_names) def im_size_callback(msg): global interactive_marker interactive_marker.scale = msg.data server.clear() server.insert(interactive_marker, feedback) server.applyChanges() def joint_state_callback(msg): global joint_states joint_states = msg def feedback(feedback): global pub, initial_joint_position, fixed_frame, joint_names, 
group_name server.setPose(feedback.marker_name, feedback.pose) server.applyChanges() if feedback.event_type == 0: return rospy.wait_for_service('compute_ik') try: service = rospy.ServiceProxy('compute_ik', GetPositionIK) request = GetPositionIKRequest() request.ik_request.group_name = group_name request.ik_request.timeout = rospy.Duration.from_sec(0.0001) # initial robot state robot_state = RobotState() robot_state.joint_state.header.frame_id = fixed_frame robot_state.joint_state.name = joint_names robot_state.joint_state.position = initial_joint_position robot_state.joint_state.velocity = [] request.ik_request.robot_state = robot_state # goal end pose pose_stamped = PoseStamped() pose_stamped.header.frame_id = fixed_frame pose_stamped.pose.position.x = feedback.pose.position.x pose_stamped.pose.position.y = feedback.pose.position.y pose_stamped.pose.position.z = feedback.pose.position.z pose_stamped.pose.orientation.x = feedback.pose.orientation.x pose_stamped.pose.orientation.y = feedback.pose.orientation.y pose_stamped.pose.orientation.z = feedback.pose.orientation.z pose_stamped.pose.orientation.w = feedback.pose.orientation.w request.ik_request.pose_stamped = pose_stamped response = service(request) print response if len(response.solution.joint_state.position) != 0: print "success" msg = Float64MultiArray() for i,joint_name in enumerate(response.solution.joint_state.name): for j, name in enumerate(joint_names): if joint_name == name: initial_joint_position[j] = response.solution.joint_state.position[i] dim = MultiArrayDimension() dim.label = name msg.layout.dim.append(dim) msg.data.append(response.solution.joint_state.position[i]) pub.publish(msg) except rospy.ServiceException, e: print "Service call failed: %s"%e if __name__ == '__main__': rospy.init_node("moveit_interactive_marker") global pub, fixed_frame, interactive_marker fixed_frame = rospy.get_param("/fixed_frame") prefix = rospy.get_param("~prefix") pub = rospy.Publisher('update_' + prefix + 
'_joint_position',Float64MultiArray) rospy.Subscriber('/' + prefix + '/initial_marker', String, initial_callback) rospy.Subscriber('/' + prefix + '_joint_states', JointState, joint_state_callback) rospy.Subscriber('/im_size/update', Float32, im_size_callback) server = InteractiveMarkerServer("/" + prefix + "/marker") interactive_marker = makeInteractiveMarker(prefix, "") interactive_marker.scale = 0.3 interactive_marker.pose.position.x = 0 interactive_marker.pose.position.y = 0 interactive_marker.pose.position.z = 1.0 control_slide_x = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.MOVE_AXIS) control_slide_y = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.MOVE_AXIS) control_slide_z = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.MOVE_AXIS) control_rotate_x = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.ROTATE_AXIS) control_rotate_y = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.ROTATE_AXIS) control_rotate_z = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.ROTATE_AXIS) control_sphere = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.MOVE_ROTATE_3D) marker = Marker() marker.color.r = 0.2 marker.color.g = 0.3 marker.color.b = 0.7 marker.color.a = 0.5 marker.type = Marker.SPHERE marker.scale.x = 0.2 marker.scale.y = 0.2 marker.scale.z = 0.2 control_sphere.markers.append(marker) setOrientation(1,1,0,0, control_slide_x) setOrientation(1,0,1,0, control_slide_y) setOrientation(1,0,0,1, control_slide_z) setOrientation(1,1,0,0, control_rotate_x) setOrientation(1,0,1,0, control_rotate_y) setOrientation(1,0,0,1, control_rotate_z) setOrientation(1,1,0,0, control_sphere) server.insert(interactive_marker, feedback) server.applyChanges() rospy.spin()
rwt_moveit/nodes/interactive_moveit.py
import roslib import rospy from geometry_msgs.msg import PoseStamped from moveit_msgs.srv import * from moveit_msgs.msg import RobotState from std_msgs.msg import Float64MultiArray from std_msgs.msg import String from std_msgs.msg import Float32 from std_msgs.msg import MultiArrayDimension from sensor_msgs.msg import JointState from visualization_msgs.msg import ( Marker, InteractiveMarker, InteractiveMarkerControl, InteractiveMarkerFeedback) from interactive_markers.interactive_marker_server import InteractiveMarkerServer server = None def makeInteractiveMarker(name, description): global fixed_frame interactive_marker = InteractiveMarker() interactive_marker.header.frame_id = fixed_frame interactive_marker.name = name interactive_marker.description = description return interactive_marker def makeInteractiveMarkerControl(interactive_marker, mode): interactive_marker_control = InteractiveMarkerControl() interactive_marker_control.always_visible = True interactive_marker_control.interaction_mode = mode interactive_marker.controls.append(interactive_marker_control) return interactive_marker_control def setOrientation(w, x, y, z, marker): marker.orientation.w = w marker.orientation.x = x marker.orientation.y = y marker.orientation.z = z def setColor(red, green, blue, alpha, marker): marker.color.r = red marker.color.g = green marker.color.b = blue marker.color.a = alpha def callback(req): print req return GetPositionIKResponse def initial_callback(msg): global joint_states,joint_names, initial_joint_position, group_name group_name = msg.data joint_names = rospy.get_param("/link_group/" + group_name) initial_joint_position = [0] * len(joint_names) def im_size_callback(msg): global interactive_marker interactive_marker.scale = msg.data server.clear() server.insert(interactive_marker, feedback) server.applyChanges() def joint_state_callback(msg): global joint_states joint_states = msg def feedback(feedback): global pub, initial_joint_position, fixed_frame, joint_names, 
group_name server.setPose(feedback.marker_name, feedback.pose) server.applyChanges() if feedback.event_type == 0: return rospy.wait_for_service('compute_ik') try: service = rospy.ServiceProxy('compute_ik', GetPositionIK) request = GetPositionIKRequest() request.ik_request.group_name = group_name request.ik_request.timeout = rospy.Duration.from_sec(0.0001) # initial robot state robot_state = RobotState() robot_state.joint_state.header.frame_id = fixed_frame robot_state.joint_state.name = joint_names robot_state.joint_state.position = initial_joint_position robot_state.joint_state.velocity = [] request.ik_request.robot_state = robot_state # goal end pose pose_stamped = PoseStamped() pose_stamped.header.frame_id = fixed_frame pose_stamped.pose.position.x = feedback.pose.position.x pose_stamped.pose.position.y = feedback.pose.position.y pose_stamped.pose.position.z = feedback.pose.position.z pose_stamped.pose.orientation.x = feedback.pose.orientation.x pose_stamped.pose.orientation.y = feedback.pose.orientation.y pose_stamped.pose.orientation.z = feedback.pose.orientation.z pose_stamped.pose.orientation.w = feedback.pose.orientation.w request.ik_request.pose_stamped = pose_stamped response = service(request) print response if len(response.solution.joint_state.position) != 0: print "success" msg = Float64MultiArray() for i,joint_name in enumerate(response.solution.joint_state.name): for j, name in enumerate(joint_names): if joint_name == name: initial_joint_position[j] = response.solution.joint_state.position[i] dim = MultiArrayDimension() dim.label = name msg.layout.dim.append(dim) msg.data.append(response.solution.joint_state.position[i]) pub.publish(msg) except rospy.ServiceException, e: print "Service call failed: %s"%e if __name__ == '__main__': rospy.init_node("moveit_interactive_marker") global pub, fixed_frame, interactive_marker fixed_frame = rospy.get_param("/fixed_frame") prefix = rospy.get_param("~prefix") pub = rospy.Publisher('update_' + prefix + 
'_joint_position',Float64MultiArray) rospy.Subscriber('/' + prefix + '/initial_marker', String, initial_callback) rospy.Subscriber('/' + prefix + '_joint_states', JointState, joint_state_callback) rospy.Subscriber('/im_size/update', Float32, im_size_callback) server = InteractiveMarkerServer("/" + prefix + "/marker") interactive_marker = makeInteractiveMarker(prefix, "") interactive_marker.scale = 0.3 interactive_marker.pose.position.x = 0 interactive_marker.pose.position.y = 0 interactive_marker.pose.position.z = 1.0 control_slide_x = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.MOVE_AXIS) control_slide_y = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.MOVE_AXIS) control_slide_z = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.MOVE_AXIS) control_rotate_x = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.ROTATE_AXIS) control_rotate_y = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.ROTATE_AXIS) control_rotate_z = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.ROTATE_AXIS) control_sphere = makeInteractiveMarkerControl( interactive_marker, InteractiveMarkerControl.MOVE_ROTATE_3D) marker = Marker() marker.color.r = 0.2 marker.color.g = 0.3 marker.color.b = 0.7 marker.color.a = 0.5 marker.type = Marker.SPHERE marker.scale.x = 0.2 marker.scale.y = 0.2 marker.scale.z = 0.2 control_sphere.markers.append(marker) setOrientation(1,1,0,0, control_slide_x) setOrientation(1,0,1,0, control_slide_y) setOrientation(1,0,0,1, control_slide_z) setOrientation(1,1,0,0, control_rotate_x) setOrientation(1,0,1,0, control_rotate_y) setOrientation(1,0,0,1, control_rotate_z) setOrientation(1,1,0,0, control_sphere) server.insert(interactive_marker, feedback) server.applyChanges() rospy.spin()
0.478285
0.125012
import argparse from smd.data.data_generator import DataGenerator from smd.data.dataset_loader import DatasetLoader from smd.models.model_loader import load_model, compile_model from smd.data.data_augmentation import random_loudness_spec, random_filter_spec, block_mixing_spec, pitch_time_deformation_spec from smd.data import preprocessing import smd.utils as utils import numpy as np from keras.callbacks import ModelCheckpoint, CSVLogger, EarlyStopping, ReduceLROnPlateau import keras.models import os def training_data_processing(spec_file, annotation_file, mean, std, spec_file2=None, annotation_file2=None): spec = np.load(spec_file) spec, stretching_rate = pitch_time_deformation_spec(spec) spec = random_filter_spec(spec) spec = random_loudness_spec(spec) label = preprocessing.get_label( annotation_file, spec.shape[1], stretching_rate=stretching_rate) if not(spec_file2 is None): spec2 = np.load(spec_file2) spec2, stretching_rate2 = pitch_time_deformation_spec(spec2) spec2 = random_filter_spec(spec2) spec2 = random_loudness_spec(spec2) label2 = preprocessing.get_label( annotation_file2, spec2.shape[1], stretching_rate=stretching_rate2) spec, label = block_mixing_spec(spec, spec2, label, label2) mels = preprocessing.get_scaled_mel_bands(spec) mels = preprocessing.normalize(mels, mean, std) return mels, label def validation_data_processing(spec_file, annotation_file, mean, std): spec = np.load(spec_file) mels = preprocessing.get_scaled_mel_bands(spec) mels = preprocessing.normalize(mels, mean, std) n_frame = mels.shape[1] label = preprocessing.get_label( annotation_file, n_frame, stretching_rate=1) return mels, label def train(train_set, val_set, cfg, config_name, resume, model_path): if not(model_path is None): if resume: print("Loading compiled model: " + model_path) model = keras.models.load_model(model_path, compile=True) else: print("Loading uncompiled model: " + model_path) model = keras.models.load_model(model_path, compile=False) model = compile_model(model, 
cfg["model"]) else: print("Loading the network..") model = load_model(cfg["model"]) csv_logger = CSVLogger('checkpoint/' + config_name + '-training.log', append=resume) save_ckpt = ModelCheckpoint("checkpoint/weights.{epoch:02d}-{val_loss:.2f}" + config_name + ".hdf5", monitor='val_loss', verbose=1, save_best_only=True, period=1) early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto') lr_schedule = ReduceLROnPlateau( monitor='val_loss', factor=0.1, patience=3, verbose=1, mode='auto', min_lr=10e-7) callback_list = [save_ckpt, early_stopping, lr_schedule, csv_logger] print("Start the training..") model.fit_generator(train_set, epochs=cfg["nb_epoch"], callbacks=callback_list, validation_data=val_set, workers=cfg["workers"], use_multiprocessing=cfg["use_multiprocessing"], shuffle=True ) if __name__ == "__main__": parser = argparse.ArgumentParser( description="Script to train a neural network for speech and music detection.") parser.add_argument('--config', type=str, default="test1", help='the configuration of the training') parser.add_argument('--data_location', type=str, default="/Users/quentin/Computer/DataSet/Music/speech_music_detection/", help='the location of the data') parser.add_argument('--resume', type=bool, default=False, help='set to true to restart a previous starning') parser.add_argument('--model', type=str, default=None, help='path of the model to load when the starting is resumed') args = parser.parse_args() experiments = utils.load_json('experiments.json') cfg = experiments[args.config] if not(os.path.isdir("checkpoint")): os.makedirs("checkpoint") print("Checkpoint folder created.") print("Creating the dataset..") datasets_config = utils.load_json("datasets.json") dataset = DatasetLoader( cfg["dataset"], args.data_location, datasets_config) print("Creating the data generator..") train_set = DataGenerator(dataset.get_train_set(), cfg["batch_size"], cfg["target_seq_length"], training_data_processing, 
dataset.get_training_mean(), dataset.get_training_std(), set_type="train") val_set = DataGenerator(dataset.get_val_set(), cfg["batch_size"], cfg["target_seq_length"], validation_data_processing, dataset.get_training_mean(), dataset.get_training_std(), set_type="val") train(train_set, val_set, cfg, args.config, args.resume, args.model)
train.py
import argparse from smd.data.data_generator import DataGenerator from smd.data.dataset_loader import DatasetLoader from smd.models.model_loader import load_model, compile_model from smd.data.data_augmentation import random_loudness_spec, random_filter_spec, block_mixing_spec, pitch_time_deformation_spec from smd.data import preprocessing import smd.utils as utils import numpy as np from keras.callbacks import ModelCheckpoint, CSVLogger, EarlyStopping, ReduceLROnPlateau import keras.models import os def training_data_processing(spec_file, annotation_file, mean, std, spec_file2=None, annotation_file2=None): spec = np.load(spec_file) spec, stretching_rate = pitch_time_deformation_spec(spec) spec = random_filter_spec(spec) spec = random_loudness_spec(spec) label = preprocessing.get_label( annotation_file, spec.shape[1], stretching_rate=stretching_rate) if not(spec_file2 is None): spec2 = np.load(spec_file2) spec2, stretching_rate2 = pitch_time_deformation_spec(spec2) spec2 = random_filter_spec(spec2) spec2 = random_loudness_spec(spec2) label2 = preprocessing.get_label( annotation_file2, spec2.shape[1], stretching_rate=stretching_rate2) spec, label = block_mixing_spec(spec, spec2, label, label2) mels = preprocessing.get_scaled_mel_bands(spec) mels = preprocessing.normalize(mels, mean, std) return mels, label def validation_data_processing(spec_file, annotation_file, mean, std): spec = np.load(spec_file) mels = preprocessing.get_scaled_mel_bands(spec) mels = preprocessing.normalize(mels, mean, std) n_frame = mels.shape[1] label = preprocessing.get_label( annotation_file, n_frame, stretching_rate=1) return mels, label def train(train_set, val_set, cfg, config_name, resume, model_path): if not(model_path is None): if resume: print("Loading compiled model: " + model_path) model = keras.models.load_model(model_path, compile=True) else: print("Loading uncompiled model: " + model_path) model = keras.models.load_model(model_path, compile=False) model = compile_model(model, 
cfg["model"]) else: print("Loading the network..") model = load_model(cfg["model"]) csv_logger = CSVLogger('checkpoint/' + config_name + '-training.log', append=resume) save_ckpt = ModelCheckpoint("checkpoint/weights.{epoch:02d}-{val_loss:.2f}" + config_name + ".hdf5", monitor='val_loss', verbose=1, save_best_only=True, period=1) early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto') lr_schedule = ReduceLROnPlateau( monitor='val_loss', factor=0.1, patience=3, verbose=1, mode='auto', min_lr=10e-7) callback_list = [save_ckpt, early_stopping, lr_schedule, csv_logger] print("Start the training..") model.fit_generator(train_set, epochs=cfg["nb_epoch"], callbacks=callback_list, validation_data=val_set, workers=cfg["workers"], use_multiprocessing=cfg["use_multiprocessing"], shuffle=True ) if __name__ == "__main__": parser = argparse.ArgumentParser( description="Script to train a neural network for speech and music detection.") parser.add_argument('--config', type=str, default="test1", help='the configuration of the training') parser.add_argument('--data_location', type=str, default="/Users/quentin/Computer/DataSet/Music/speech_music_detection/", help='the location of the data') parser.add_argument('--resume', type=bool, default=False, help='set to true to restart a previous starning') parser.add_argument('--model', type=str, default=None, help='path of the model to load when the starting is resumed') args = parser.parse_args() experiments = utils.load_json('experiments.json') cfg = experiments[args.config] if not(os.path.isdir("checkpoint")): os.makedirs("checkpoint") print("Checkpoint folder created.") print("Creating the dataset..") datasets_config = utils.load_json("datasets.json") dataset = DatasetLoader( cfg["dataset"], args.data_location, datasets_config) print("Creating the data generator..") train_set = DataGenerator(dataset.get_train_set(), cfg["batch_size"], cfg["target_seq_length"], training_data_processing, 
dataset.get_training_mean(), dataset.get_training_std(), set_type="train") val_set = DataGenerator(dataset.get_val_set(), cfg["batch_size"], cfg["target_seq_length"], validation_data_processing, dataset.get_training_mean(), dataset.get_training_std(), set_type="val") train(train_set, val_set, cfg, args.config, args.resume, args.model)
0.650134
0.283214
from __future__ import unicode_literals from django.contrib.auth.models import AbstractUser, UserManager from django.db import models from datetime import date # Create your models here. class Coach(models.Model): first_name = models.CharField( max_length=255, ) last_name = models.CharField( max_length=255, ) def as_dict(self): return { "first_name": self.first_name, "last_name": self.last_name } def __str__(self): return ' '.join([ self.first_name, self.last_name, ]) class Sport(models.Model): name = models.CharField( max_length=255, ) img_url = models.ImageField( null=True, upload_to='sports', max_length=1000, ) def as_dict(self): return { "id": self.id, "name": self.name, "img_url": str(self.img_url) } def __str__(self): return ' '.join([ self.name, ]) class Athlete(models.Model): img_url = models.ImageField( null=True, upload_to='athletes', max_length=1000, ) sport = models.ForeignKey(Sport) first_name = models.CharField( max_length=255, ) last_name = models.CharField( max_length=255, ) birth_place = models.CharField( max_length=400, ) birth_date = models.DateField() weight = models.FloatField() height = models.FloatField() coach = models.ForeignKey(Coach) def age(self): born = self.birth_date today = date.today() return today.year - born.year # - ((today.month, today.day) < (born.month, born.day)) def as_dict(self): return { "id": self.id, "sport": self.sport.as_dict(), "first_name": self.first_name, "last_name": self.last_name, "birth_place": self.birth_place, "birth_date": self.birth_place, "weight": self.weight, "height": self.height, "coach": self.coach.as_dict(), "img_url":str(self.img_url) } def __str__(self): return ' '.join([ self.first_name, self.last_name + ' - ', self.sport.name, ]) class SportEvent(models.Model): athletes = models.ManyToManyField(Athlete, blank=True) date = models.DateField() time = models.TimeField() sport_event = models.CharField( max_length=400, ) sport = models.ForeignKey(Sport) result = models.CharField( max_length=255, 
null=True, blank=True, ) video = models.FileField( null=True, blank=True, upload_to='events', max_length=1000, ) def __str__(self): return ' '.join([ '{:%m/%d/%Y}'.format(self.date) + ' ', '{:%H:%M:%S}'.format(self.time) + ' - ', self.sport.name + ' - ', self.sport_event, ]) def save(self, *args, **kwargs): if not self.result: self.result = None if not self.video: self.video = None super(SportEvent, self).save(*args, **kwargs) class Student (AbstractUser): uid=models.CharField(blank = True, max_length=500) @classmethod def authenticate(cls, token=None): try: student = Student.objects.get(uid=token) return student except Student.DoesNotExist: return None @classmethod def get_or_set_email(cls, student): email = "@facebook.com" try: email = student["email"] except KeyError: email = student["id"]+email finally: student["email"] = email # do something with the book
OlimpiColombiaApp/models.py
from __future__ import unicode_literals from django.contrib.auth.models import AbstractUser, UserManager from django.db import models from datetime import date # Create your models here. class Coach(models.Model): first_name = models.CharField( max_length=255, ) last_name = models.CharField( max_length=255, ) def as_dict(self): return { "first_name": self.first_name, "last_name": self.last_name } def __str__(self): return ' '.join([ self.first_name, self.last_name, ]) class Sport(models.Model): name = models.CharField( max_length=255, ) img_url = models.ImageField( null=True, upload_to='sports', max_length=1000, ) def as_dict(self): return { "id": self.id, "name": self.name, "img_url": str(self.img_url) } def __str__(self): return ' '.join([ self.name, ]) class Athlete(models.Model): img_url = models.ImageField( null=True, upload_to='athletes', max_length=1000, ) sport = models.ForeignKey(Sport) first_name = models.CharField( max_length=255, ) last_name = models.CharField( max_length=255, ) birth_place = models.CharField( max_length=400, ) birth_date = models.DateField() weight = models.FloatField() height = models.FloatField() coach = models.ForeignKey(Coach) def age(self): born = self.birth_date today = date.today() return today.year - born.year # - ((today.month, today.day) < (born.month, born.day)) def as_dict(self): return { "id": self.id, "sport": self.sport.as_dict(), "first_name": self.first_name, "last_name": self.last_name, "birth_place": self.birth_place, "birth_date": self.birth_place, "weight": self.weight, "height": self.height, "coach": self.coach.as_dict(), "img_url":str(self.img_url) } def __str__(self): return ' '.join([ self.first_name, self.last_name + ' - ', self.sport.name, ]) class SportEvent(models.Model): athletes = models.ManyToManyField(Athlete, blank=True) date = models.DateField() time = models.TimeField() sport_event = models.CharField( max_length=400, ) sport = models.ForeignKey(Sport) result = models.CharField( max_length=255, 
null=True, blank=True, ) video = models.FileField( null=True, blank=True, upload_to='events', max_length=1000, ) def __str__(self): return ' '.join([ '{:%m/%d/%Y}'.format(self.date) + ' ', '{:%H:%M:%S}'.format(self.time) + ' - ', self.sport.name + ' - ', self.sport_event, ]) def save(self, *args, **kwargs): if not self.result: self.result = None if not self.video: self.video = None super(SportEvent, self).save(*args, **kwargs) class Student (AbstractUser): uid=models.CharField(blank = True, max_length=500) @classmethod def authenticate(cls, token=None): try: student = Student.objects.get(uid=token) return student except Student.DoesNotExist: return None @classmethod def get_or_set_email(cls, student): email = "@facebook.com" try: email = student["email"] except KeyError: email = student["id"]+email finally: student["email"] = email # do something with the book
0.444324
0.183301
import os import sys if __name__ == '__main__': sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..')) import unittest from grit import util class UtilUnittest(unittest.TestCase): ''' Tests functions from util ''' def testNewClassInstance(self): # Test short class name with no fully qualified package name # Should fail, it is not supported by the function now (as documented) cls = util.NewClassInstance('grit.util.TestClassToLoad', TestBaseClassToLoad) self.failUnless(cls == None) # Test non existent class name cls = util.NewClassInstance('grit.util_unittest.NotExistingClass', TestBaseClassToLoad) self.failUnless(cls == None) # Test valid class name and valid base class cls = util.NewClassInstance('grit.util_unittest.TestClassToLoad', TestBaseClassToLoad) self.failUnless(isinstance(cls, TestBaseClassToLoad)) # Test valid class name with wrong hierarchy cls = util.NewClassInstance('grit.util_unittest.TestClassNoBase', TestBaseClassToLoad) self.failUnless(cls == None) def testCanonicalLanguage(self): self.failUnless(util.CanonicalLanguage('en') == 'en') self.failUnless(util.CanonicalLanguage('pt_br') == 'pt-BR') self.failUnless(util.CanonicalLanguage('pt-br') == 'pt-BR') self.failUnless(util.CanonicalLanguage('pt-BR') == 'pt-BR') self.failUnless(util.CanonicalLanguage('pt/br') == 'pt-BR') self.failUnless(util.CanonicalLanguage('pt/BR') == 'pt-BR') self.failUnless(util.CanonicalLanguage('no_no_bokmal') == 'no-NO-BOKMAL') def testUnescapeHtml(self): self.failUnless(util.UnescapeHtml('&#1010;') == unichr(1010)) self.failUnless(util.UnescapeHtml('&#xABcd;') == unichr(43981)) def testRelativePath(self): """ Verify that MakeRelativePath works in some tricky cases.""" def TestRelativePathCombinations(base_path, other_path, expected_result): """ Verify that the relative path function works for the given paths regardless of whether or not they end with a trailing slash.""" for path1 in [base_path, base_path + os.path.sep]: for path2 in [other_path, other_path + 
os.path.sep]: result = util.MakeRelativePath(path1, path2) self.failUnless(result == expected_result) # set-up variables root_dir = 'c:%sa' % os.path.sep result1 = '..%sabc' % os.path.sep path1 = root_dir + 'bc' result2 = 'bc' path2 = '%s%s%s' % (root_dir, os.path.sep, result2) # run the tests TestRelativePathCombinations(root_dir, path1, result1) TestRelativePathCombinations(root_dir, path2, result2) class TestBaseClassToLoad(object): pass class TestClassToLoad(TestBaseClassToLoad): pass class TestClassNoBase(object): pass if __name__ == '__main__': unittest.main()
tools/grit/grit/util_unittest.py
import os import sys if __name__ == '__main__': sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..')) import unittest from grit import util class UtilUnittest(unittest.TestCase): ''' Tests functions from util ''' def testNewClassInstance(self): # Test short class name with no fully qualified package name # Should fail, it is not supported by the function now (as documented) cls = util.NewClassInstance('grit.util.TestClassToLoad', TestBaseClassToLoad) self.failUnless(cls == None) # Test non existent class name cls = util.NewClassInstance('grit.util_unittest.NotExistingClass', TestBaseClassToLoad) self.failUnless(cls == None) # Test valid class name and valid base class cls = util.NewClassInstance('grit.util_unittest.TestClassToLoad', TestBaseClassToLoad) self.failUnless(isinstance(cls, TestBaseClassToLoad)) # Test valid class name with wrong hierarchy cls = util.NewClassInstance('grit.util_unittest.TestClassNoBase', TestBaseClassToLoad) self.failUnless(cls == None) def testCanonicalLanguage(self): self.failUnless(util.CanonicalLanguage('en') == 'en') self.failUnless(util.CanonicalLanguage('pt_br') == 'pt-BR') self.failUnless(util.CanonicalLanguage('pt-br') == 'pt-BR') self.failUnless(util.CanonicalLanguage('pt-BR') == 'pt-BR') self.failUnless(util.CanonicalLanguage('pt/br') == 'pt-BR') self.failUnless(util.CanonicalLanguage('pt/BR') == 'pt-BR') self.failUnless(util.CanonicalLanguage('no_no_bokmal') == 'no-NO-BOKMAL') def testUnescapeHtml(self): self.failUnless(util.UnescapeHtml('&#1010;') == unichr(1010)) self.failUnless(util.UnescapeHtml('&#xABcd;') == unichr(43981)) def testRelativePath(self): """ Verify that MakeRelativePath works in some tricky cases.""" def TestRelativePathCombinations(base_path, other_path, expected_result): """ Verify that the relative path function works for the given paths regardless of whether or not they end with a trailing slash.""" for path1 in [base_path, base_path + os.path.sep]: for path2 in [other_path, other_path + 
os.path.sep]: result = util.MakeRelativePath(path1, path2) self.failUnless(result == expected_result) # set-up variables root_dir = 'c:%sa' % os.path.sep result1 = '..%sabc' % os.path.sep path1 = root_dir + 'bc' result2 = 'bc' path2 = '%s%s%s' % (root_dir, os.path.sep, result2) # run the tests TestRelativePathCombinations(root_dir, path1, result1) TestRelativePathCombinations(root_dir, path2, result2) class TestBaseClassToLoad(object): pass class TestClassToLoad(TestBaseClassToLoad): pass class TestClassNoBase(object): pass if __name__ == '__main__': unittest.main()
0.363647
0.24638
import aiohttp import asyncio from Crypto.Hash import SHA import json class AuthError(Exception): pass class AuthAPIError(AuthError): def __init__(self, error, message, cause): self.error = error self.message = message if cause is not None: self.cause = cause class AuthMigratedError(AuthError): def __init__(self, error, message, cause): self.error = error self.message = message self.cause = cause class AuthInvalidCredsError(AuthError): def __init__(self, error, message): self.error = error self.message = message class AuthTooManyAttemptsError(AuthError): def __init__(self, error, message): self.error = error self.message = message class AuthInvalidTokenError(AuthError): def __init__(self, error, message): self.error = error self.message = message class AuthAPI(object): def __init__(self, proxy_generator=None): self.__default_agent = {"name": "Minecraft", "version": 1} self.__authurl = "https://authserver.mojang.com" self.__sessionurl = "https://sessionserver.mojang.com" self.__session = None def __enter__(self): return self def __exit__(self, type, value, traceback): if self.__session is not None: asyncio.ensure_future(self.__session.close()) async def __force_session(self): if self.__session is None: self.__session = aiohttp.ClientSession() async def make_auth_request(self, endpoint, payload): return await self.make_api_request(self.__authurl, endpoint, payload) async def make_session_request(self, endpoint, payload): return await self.make_api_request(self.__sessionurl, endpoint, payload) async def make_api_request(self, base, endpoint, payload): await self.__force_session() async with self.__session.post(base + endpoint, headers={"Content-Type": "application/json"}, data=json.dumps(payload)) as response: if response.status >= 400: response_json = await response.json() if "cause" in response_json: ex = (response_json["error"], response_json["errorMessage"], response_json["cause"]) else: ex = (response_json["error"], response_json["errorMessage"]) if 
response_json["error"] == "ForbiddenOperationException": if "cause" in response_json: if response_json["cause"] == "UserMigratedException": raise AuthMigratedError(*ex) elif response_json["errorMessage"] == "Invalid credentials.": raise AuthTooManyAttemptsError(*ex) elif response_json["errorMessage"] == "Invalide token.": raise AuthInvalidTokenError(*ex) else: raise AuthInvalidCredsError(*ex) raise AuthAPIError(*ex) return response async def authenticate(self, username, password, client_token=None, agent=None, user=True): if agent is None: agent = self.__default_agent request = { "agent": agent, "username": username, "password": password } if client_token is not None: request["clientToken"] = client_token if user is True: request["requestUser"] = True request = await self.make_auth_request("/authenticate", request) profile_data = await request.json() return Profile(self, username=username, **profile_data) async def signout(username, password): request = { "username": username, "password": password } await self.make_auth_request("/signout", request) async def has_joined(self, username, server_hash): await self.__force_session() async with self.__session.get(self.__sessionurl + "/session/minecraft/hasJoined?username={}&serverId={}" .format(username, server_hash)) as response: if response.status >= 400: return None return await response.json() def gen_server_id(self, server_id, secret, public_key): h = SHA.new() h.update(server_id.encode("ASCII")) h.update(secret) h.update(public_key) d = int(h.hexdigest(), 16) if d >> 39 * 4 & 0x8: d = "-{:x}".format((-d) & (2 ** (40 * 4) - 1)) else: d = "{:x}".format(d) return d class Profile(object): def __init__(self, api, **kwargs): self.online = True self.__api = api for kw in kwargs: setattr(self, kw, kwargs[kw]) def __create_token_payload(self): return { "accessToken": self.accessToken, "clientToken": self.clientToken } async def refresh(self, user=False): refresh_payload = self.__create_token_payload() if user is True: 
refresh_payload["requestUser"] = True response = await self.__api.make_auth_request("/refresh", refresh_payload) response_data = await response.json() for kw in response_data: setattr(self, kw, response_data[kw]) async def validate(self): validate_payload = self.__create_token_payload() await self.__api.make_auth_request("/validate", validate_payload) async def join(self, server_hash): join_payload = { "accessToken": self.accessToken, "selectedProfile": self.selectedProfile['id'], "serverId": server_hash } await self.__api.make_session_request("/session/minecraft/join", join_payload) class OfflineProfile(object): def __init__(self, username): self.username = username
mcpy/auth.py
import aiohttp import asyncio from Crypto.Hash import SHA import json class AuthError(Exception): pass class AuthAPIError(AuthError): def __init__(self, error, message, cause): self.error = error self.message = message if cause is not None: self.cause = cause class AuthMigratedError(AuthError): def __init__(self, error, message, cause): self.error = error self.message = message self.cause = cause class AuthInvalidCredsError(AuthError): def __init__(self, error, message): self.error = error self.message = message class AuthTooManyAttemptsError(AuthError): def __init__(self, error, message): self.error = error self.message = message class AuthInvalidTokenError(AuthError): def __init__(self, error, message): self.error = error self.message = message class AuthAPI(object): def __init__(self, proxy_generator=None): self.__default_agent = {"name": "Minecraft", "version": 1} self.__authurl = "https://authserver.mojang.com" self.__sessionurl = "https://sessionserver.mojang.com" self.__session = None def __enter__(self): return self def __exit__(self, type, value, traceback): if self.__session is not None: asyncio.ensure_future(self.__session.close()) async def __force_session(self): if self.__session is None: self.__session = aiohttp.ClientSession() async def make_auth_request(self, endpoint, payload): return await self.make_api_request(self.__authurl, endpoint, payload) async def make_session_request(self, endpoint, payload): return await self.make_api_request(self.__sessionurl, endpoint, payload) async def make_api_request(self, base, endpoint, payload): await self.__force_session() async with self.__session.post(base + endpoint, headers={"Content-Type": "application/json"}, data=json.dumps(payload)) as response: if response.status >= 400: response_json = await response.json() if "cause" in response_json: ex = (response_json["error"], response_json["errorMessage"], response_json["cause"]) else: ex = (response_json["error"], response_json["errorMessage"]) if 
response_json["error"] == "ForbiddenOperationException": if "cause" in response_json: if response_json["cause"] == "UserMigratedException": raise AuthMigratedError(*ex) elif response_json["errorMessage"] == "Invalid credentials.": raise AuthTooManyAttemptsError(*ex) elif response_json["errorMessage"] == "Invalide token.": raise AuthInvalidTokenError(*ex) else: raise AuthInvalidCredsError(*ex) raise AuthAPIError(*ex) return response async def authenticate(self, username, password, client_token=None, agent=None, user=True): if agent is None: agent = self.__default_agent request = { "agent": agent, "username": username, "password": password } if client_token is not None: request["clientToken"] = client_token if user is True: request["requestUser"] = True request = await self.make_auth_request("/authenticate", request) profile_data = await request.json() return Profile(self, username=username, **profile_data) async def signout(username, password): request = { "username": username, "password": password } await self.make_auth_request("/signout", request) async def has_joined(self, username, server_hash): await self.__force_session() async with self.__session.get(self.__sessionurl + "/session/minecraft/hasJoined?username={}&serverId={}" .format(username, server_hash)) as response: if response.status >= 400: return None return await response.json() def gen_server_id(self, server_id, secret, public_key): h = SHA.new() h.update(server_id.encode("ASCII")) h.update(secret) h.update(public_key) d = int(h.hexdigest(), 16) if d >> 39 * 4 & 0x8: d = "-{:x}".format((-d) & (2 ** (40 * 4) - 1)) else: d = "{:x}".format(d) return d class Profile(object): def __init__(self, api, **kwargs): self.online = True self.__api = api for kw in kwargs: setattr(self, kw, kwargs[kw]) def __create_token_payload(self): return { "accessToken": self.accessToken, "clientToken": self.clientToken } async def refresh(self, user=False): refresh_payload = self.__create_token_payload() if user is True: 
refresh_payload["requestUser"] = True response = await self.__api.make_auth_request("/refresh", refresh_payload) response_data = await response.json() for kw in response_data: setattr(self, kw, response_data[kw]) async def validate(self): validate_payload = self.__create_token_payload() await self.__api.make_auth_request("/validate", validate_payload) async def join(self, server_hash): join_payload = { "accessToken": self.accessToken, "selectedProfile": self.selectedProfile['id'], "serverId": server_hash } await self.__api.make_session_request("/session/minecraft/join", join_payload) class OfflineProfile(object): def __init__(self, username): self.username = username
0.415847
0.069352
# -*- coding:UTF-8 -*- # 识别一张手写数字的图片中的数字 # 首先对图片进行处理, 通过`findContours`确定包含单个数字的`轮廓` # 根据`轮廓`选取ROI, 通过之前训练的模型`predict`数字 from sklearn.externals import joblib from skimage import feature import numpy as np import mahotas import cv2 import datasets import mahotas args = {} args["model"] = r"model.pickle" args["image"] = r"o.jpg" model = joblib.load(args["model"]) image = cv2.imread(args["image"]) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) blurred = cv2.GaussianBlur(gray, (5, 5), 0) edged = cv2.Canny(blurred, 30, 150) ret, edged = cv2.threshold(blurred,127,255,cv2.THRESH_BINARY_INV) ## 腐蚀与膨胀 #kernel = np.uint8(np.zeros((3,3))) #gray = cv2.erode(gray, kernel) #gray = cv2.dilate(gray, kernel) cv2.imwrite("gray.jpg", edged) ## 试图使用轮廓加强一下 (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cv2.drawContours(edged,cnts,-1,(255,255,255),5) (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cv2.drawContours(edged,cnts,-1,(255,255,255),5) (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts = sorted([(c, cv2.boundingRect(c)[0]) for c in cnts], key = lambda x: x[1]) for cnt, _ in cnts: [x,y,w,h] = cv2.boundingRect(cnt) cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2) cv2.imwrite("x.jpg", edged) for (c, _) in cnts: (x, y, w, h) = cv2.boundingRect(c) if w >= 7 and h >= 20: roi = gray[y:y + h, x:x + w] thresh = roi.copy() T = mahotas.thresholding.otsu(roi) thresh[thresh > T] = 255 thresh = cv2.bitwise_not(thresh) thresh = datasets.deskew(thresh, 20) thresh = datasets.center_extent(thresh, (20, 20)) #cv2.imshow("thresh", roi) #cv2.waitKey(0) hist = feature.hog(thresh, orientations = 18, pixels_per_cell = (10, 10), cells_per_block = (1, 1), transform_sqrt = True) digit = model.predict([hist])[0] print("number is: {}".format(digit)) cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 1) cv2.putText(image, str(digit), (x - 10, y - 10), 
cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2) #cv2.imshow("image", image) #cv2.waitKey(0) cv2.imwrite("result.jpg", image) cv2.destroyAllWindows()
HandwritingDigits/classify.py
# -*- coding:UTF-8 -*- # 识别一张手写数字的图片中的数字 # 首先对图片进行处理, 通过`findContours`确定包含单个数字的`轮廓` # 根据`轮廓`选取ROI, 通过之前训练的模型`predict`数字 from sklearn.externals import joblib from skimage import feature import numpy as np import mahotas import cv2 import datasets import mahotas args = {} args["model"] = r"model.pickle" args["image"] = r"o.jpg" model = joblib.load(args["model"]) image = cv2.imread(args["image"]) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) blurred = cv2.GaussianBlur(gray, (5, 5), 0) edged = cv2.Canny(blurred, 30, 150) ret, edged = cv2.threshold(blurred,127,255,cv2.THRESH_BINARY_INV) ## 腐蚀与膨胀 #kernel = np.uint8(np.zeros((3,3))) #gray = cv2.erode(gray, kernel) #gray = cv2.dilate(gray, kernel) cv2.imwrite("gray.jpg", edged) ## 试图使用轮廓加强一下 (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cv2.drawContours(edged,cnts,-1,(255,255,255),5) (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cv2.drawContours(edged,cnts,-1,(255,255,255),5) (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts = sorted([(c, cv2.boundingRect(c)[0]) for c in cnts], key = lambda x: x[1]) for cnt, _ in cnts: [x,y,w,h] = cv2.boundingRect(cnt) cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2) cv2.imwrite("x.jpg", edged) for (c, _) in cnts: (x, y, w, h) = cv2.boundingRect(c) if w >= 7 and h >= 20: roi = gray[y:y + h, x:x + w] thresh = roi.copy() T = mahotas.thresholding.otsu(roi) thresh[thresh > T] = 255 thresh = cv2.bitwise_not(thresh) thresh = datasets.deskew(thresh, 20) thresh = datasets.center_extent(thresh, (20, 20)) #cv2.imshow("thresh", roi) #cv2.waitKey(0) hist = feature.hog(thresh, orientations = 18, pixels_per_cell = (10, 10), cells_per_block = (1, 1), transform_sqrt = True) digit = model.predict([hist])[0] print("number is: {}".format(digit)) cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 1) cv2.putText(image, str(digit), (x - 10, y - 10), 
cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2) #cv2.imshow("image", image) #cv2.waitKey(0) cv2.imwrite("result.jpg", image) cv2.destroyAllWindows()
0.402744
0.208682
import numpy as np from .fdistance import fl1_distance, fl2_distance from .fdistance import fp_distance_integer, fp_distance_double def l1_distance(A, B): """ Calculates the Manhattan distances, D, between two Numpy arrays of representations. :math:`D_{ij} = \\|A_i - B_j\\|_1` Where :math:`A_{i}` and :math:`B_{j}` are representation vectors. D is calculated using an OpenMP parallel Fortran routine. :param A: 2D array of descriptors - shape (N, representation size). :type A: numpy array :param B: 2D array of descriptors - shape (M, representation size). :type B: numpy array :return: The Manhattan-distance matrix. :rtype: numpy array """ if len(A.shape) != 2 or len(B.shape) != 2: raise ValueError('expected matrices of dimension=2') if B.shape[1] != A.shape[1]: raise ValueError('expected matrices containing vectors of same size') na = A.shape[0] nb = B.shape[0] D = np.empty((na, nb), order='F') fl1_distance(A.T, B.T, D) return D def l2_distance(A, B): """ Calculates the L2 distances, D, between two Numpy arrays of representations. :math:`D_{ij} = \\|A_i - B_j\\|_2` Where :math:`A_{i}` and :math:`B_{j}` are representation vectors. D is calculated using an OpenMP parallel Fortran routine. :param A: 2D array of descriptors - shape (N, representation size). :type A: numpy array :param B: 2D array of descriptors - shape (M, representation size). :type B: numpy array :return: The L2-distance matrix. :rtype: numpy array """ if len(A.shape) != 2 or len(B.shape) != 2: raise ValueError('expected matrices of dimension=2') if B.shape[1] != A.shape[1]: raise ValueError('expected matrices containing vectors of same size') na = A.shape[0] nb = B.shape[0] #D = np.empty((na, nb), order='F') D = fl2_distance(A.T, B.T) #, D) return D def p_distance(A, B, p = 2): """ Calculates the p-norm distances between two Numpy arrays of representations. The value of the keyword argument ``p =`` sets the norm order. E.g. 
``p = 1.0`` and ``p = 2.0`` with yield the Manhattan and L2 distances, respectively. .. math:: D_{ij} = \|A_i - B_j\|_p Where :math:`A_{i}` and :math:`B_{j}` are representation vectors. D is calculated using an OpenMP parallel Fortran routine. :param A: 2D array of descriptors - shape (N, representation size). :type A: numpy array :param B: 2D array of descriptors - shape (M, representation size). :type B: numpy array :param p: The norm order :type p: float :return: The distance matrix. :rtype: numpy array """ if len(A.shape) != 2 or len(B.shape) != 2: raise ValueError('expected matrices of dimension=2') if B.shape[1] != A.shape[1]: raise ValueError('expected matrices containing vectors of same size') na = A.shape[0] nb = B.shape[0] D = np.empty((na, nb), order='F') if (type(p) == type(1)): if (p == 2): fl2_distance(A, B, D) else: fp_distance_integer(A.T, B.T, D, p) elif (type(p) == type(1.0)): if p.is_integer(): p = int(p) if (p == 2): fl2_distance(A, B, D) else: fp_distance_integer(A.T, B.T, D, p) else: fp_distance_double(A.T, B.T, D, p) else: raise ValueError('expected exponent of integer or float type') return D
coreml/cml/distance.py
import numpy as np from .fdistance import fl1_distance, fl2_distance from .fdistance import fp_distance_integer, fp_distance_double def l1_distance(A, B): """ Calculates the Manhattan distances, D, between two Numpy arrays of representations. :math:`D_{ij} = \\|A_i - B_j\\|_1` Where :math:`A_{i}` and :math:`B_{j}` are representation vectors. D is calculated using an OpenMP parallel Fortran routine. :param A: 2D array of descriptors - shape (N, representation size). :type A: numpy array :param B: 2D array of descriptors - shape (M, representation size). :type B: numpy array :return: The Manhattan-distance matrix. :rtype: numpy array """ if len(A.shape) != 2 or len(B.shape) != 2: raise ValueError('expected matrices of dimension=2') if B.shape[1] != A.shape[1]: raise ValueError('expected matrices containing vectors of same size') na = A.shape[0] nb = B.shape[0] D = np.empty((na, nb), order='F') fl1_distance(A.T, B.T, D) return D def l2_distance(A, B): """ Calculates the L2 distances, D, between two Numpy arrays of representations. :math:`D_{ij} = \\|A_i - B_j\\|_2` Where :math:`A_{i}` and :math:`B_{j}` are representation vectors. D is calculated using an OpenMP parallel Fortran routine. :param A: 2D array of descriptors - shape (N, representation size). :type A: numpy array :param B: 2D array of descriptors - shape (M, representation size). :type B: numpy array :return: The L2-distance matrix. :rtype: numpy array """ if len(A.shape) != 2 or len(B.shape) != 2: raise ValueError('expected matrices of dimension=2') if B.shape[1] != A.shape[1]: raise ValueError('expected matrices containing vectors of same size') na = A.shape[0] nb = B.shape[0] #D = np.empty((na, nb), order='F') D = fl2_distance(A.T, B.T) #, D) return D def p_distance(A, B, p = 2): """ Calculates the p-norm distances between two Numpy arrays of representations. The value of the keyword argument ``p =`` sets the norm order. E.g. 
``p = 1.0`` and ``p = 2.0`` with yield the Manhattan and L2 distances, respectively. .. math:: D_{ij} = \|A_i - B_j\|_p Where :math:`A_{i}` and :math:`B_{j}` are representation vectors. D is calculated using an OpenMP parallel Fortran routine. :param A: 2D array of descriptors - shape (N, representation size). :type A: numpy array :param B: 2D array of descriptors - shape (M, representation size). :type B: numpy array :param p: The norm order :type p: float :return: The distance matrix. :rtype: numpy array """ if len(A.shape) != 2 or len(B.shape) != 2: raise ValueError('expected matrices of dimension=2') if B.shape[1] != A.shape[1]: raise ValueError('expected matrices containing vectors of same size') na = A.shape[0] nb = B.shape[0] D = np.empty((na, nb), order='F') if (type(p) == type(1)): if (p == 2): fl2_distance(A, B, D) else: fp_distance_integer(A.T, B.T, D, p) elif (type(p) == type(1.0)): if p.is_integer(): p = int(p) if (p == 2): fl2_distance(A, B, D) else: fp_distance_integer(A.T, B.T, D, p) else: fp_distance_double(A.T, B.T, D, p) else: raise ValueError('expected exponent of integer or float type') return D
0.885761
0.869493
import getopt import logging import sys from objects.InputParams import InputParams from services.AFWordsClustering import AFWordsClustering from services.ExecutionTime import LoggingTime from services.FileService import ProcessingFilesService def get_input_params(argv): """ Reads cmd provided parameters for ProcessingEntry """ input_file_path = 'console/example_dataset/example.txt' output_file_path = 'console/example_dataset/example_output.txt' compute_silhouette = True affinity_preference_factor = 'auto' try: opts, args = getopt.getopt(argv, "i:o:p:s:a:", ["ifile=", "ofile="]) except getopt.GetoptError as e: logging.error("Invalid params! Try: -i <inputfile> -o <outputfile> -s <True|False " "- compute silhouette coefficient> -a <affinity_preference_factor>") logging.error(e) sys.exit(2) for opt, arg in opts: if opt in ['-i', "--ifile"]: input_file_path = arg elif opt in ['-o', '--ofile']: output_file_path = arg elif opt in ['-s']: compute_silhouette = arg == "True" elif opt in ['-a']: affinity_preference_factor = arg return InputParams(input_file_path, output_file_path, compute_silhouette, affinity_preference_factor) def log_coefficient(coefficient): """ Logs Silhouette coefficient that was computed """ if coefficient: logging.info("Silhouette Coefficient: %0.3f" % coefficient) def run(): """ Runs AF clustering processing using AFWordsClustering and FileService as a cmd app """ logging.info("Started processing with following params" + str(sys.argv[1:])) input_params = get_input_params(sys.argv[1:]) file_service = ProcessingFilesService(input_params, AFWordsClustering.is_word_eligible()) input_data_dic = file_service.get_data() af_clustering = AFWordsClustering(input_data_dic, input_params.affinity_preference) results = af_clustering.process(input_params.compute_silhouette) file_service.save_results(results.processing_dictonary_results) log_coefficient(results.silhouette_coefficient) if __name__ == "__main__": with LoggingTime("Total run time: "): run()
console/af_cluster.py
import getopt import logging import sys from objects.InputParams import InputParams from services.AFWordsClustering import AFWordsClustering from services.ExecutionTime import LoggingTime from services.FileService import ProcessingFilesService def get_input_params(argv): """ Reads cmd provided parameters for ProcessingEntry """ input_file_path = 'console/example_dataset/example.txt' output_file_path = 'console/example_dataset/example_output.txt' compute_silhouette = True affinity_preference_factor = 'auto' try: opts, args = getopt.getopt(argv, "i:o:p:s:a:", ["ifile=", "ofile="]) except getopt.GetoptError as e: logging.error("Invalid params! Try: -i <inputfile> -o <outputfile> -s <True|False " "- compute silhouette coefficient> -a <affinity_preference_factor>") logging.error(e) sys.exit(2) for opt, arg in opts: if opt in ['-i', "--ifile"]: input_file_path = arg elif opt in ['-o', '--ofile']: output_file_path = arg elif opt in ['-s']: compute_silhouette = arg == "True" elif opt in ['-a']: affinity_preference_factor = arg return InputParams(input_file_path, output_file_path, compute_silhouette, affinity_preference_factor) def log_coefficient(coefficient): """ Logs Silhouette coefficient that was computed """ if coefficient: logging.info("Silhouette Coefficient: %0.3f" % coefficient) def run(): """ Runs AF clustering processing using AFWordsClustering and FileService as a cmd app """ logging.info("Started processing with following params" + str(sys.argv[1:])) input_params = get_input_params(sys.argv[1:]) file_service = ProcessingFilesService(input_params, AFWordsClustering.is_word_eligible()) input_data_dic = file_service.get_data() af_clustering = AFWordsClustering(input_data_dic, input_params.affinity_preference) results = af_clustering.process(input_params.compute_silhouette) file_service.save_results(results.processing_dictonary_results) log_coefficient(results.silhouette_coefficient) if __name__ == "__main__": with LoggingTime("Total run time: "): run()
0.296145
0.177223
import importlib import inspect import pkgutil import sys from pathlib import Path from typing import Iterator, Sequence import click from .stubgenc import generate_stub_for_c_module, is_c_module @click.command(context_settings={"help_option_names": ["-h", "--help"]}) @click.argument("output_dir") @click.argument("modules", nargs=-1) @click.option("-r", "--recursive", is_flag=True, help="Generate stubs for submodules as well.") @click.option("--ignore-errors", is_flag=True, help="Ignore errors during stub generation.") def gen_pybind11_stubs( output_dir: str, modules: Sequence[str], recursive: bool, ignore_errors: bool ) -> None: """Generate Python stubs for pybind11 modules MODULES and output them in OUTPUT_DIR.""" output_path = Path(output_dir).resolve() if not output_path.is_dir(): raise SystemExit(f"Cannot find directory: {output_dir}") # NOTE: add output path to PYTHONPATH so we can import the module. sys.path.insert(0, str(output_path)) for module in walk_packages(modules) if recursive else modules: try: target = generate_stub_for_c_module(module, output_path) print(f"Created stub: {target}") except Exception as e: if not ignore_errors: raise e else: print("Stub generation failed for: ", module, file=sys.stderr) def walk_packages(packages: Sequence[str]) -> Iterator[str]: """Iterates through all packages and sub-packages in the given list. Python packages have a __path__ attribute defined, which pkgutil uses to determine the package hierarchy. However, packages in C extensions do not have this attribute, so we have to roll out our own. """ for package_name in packages: package = importlib.import_module(package_name) yield package.__name__ # get the path of the object (needed by pkgutil) path = getattr(package, "__path__", None) if path is None: # object has no path; this means it's either a module inside a package # (and thus no sub-packages), or it could be a C extension package. 
if is_c_module(package): # This is a C extension module, now get the list of all sub-packages # using the inspect module subpackages = [ package.__name__ + "." + name for name, val in inspect.getmembers(package) if inspect.ismodule(val) ] # recursively iterate through the subpackages for submodule in walk_packages(subpackages): yield submodule # It's a module inside a package. There's nothing else to walk/yield. else: all_packages = pkgutil.walk_packages( path, prefix=package.__name__ + ".", onerror=lambda r: None ) for _, qualified_name, _ in all_packages: yield qualified_name if __name__ == "__main__": gen_pybind11_stubs()
src/pybind11_generics/stubgen/__main__.py
import importlib import inspect import pkgutil import sys from pathlib import Path from typing import Iterator, Sequence import click from .stubgenc import generate_stub_for_c_module, is_c_module @click.command(context_settings={"help_option_names": ["-h", "--help"]}) @click.argument("output_dir") @click.argument("modules", nargs=-1) @click.option("-r", "--recursive", is_flag=True, help="Generate stubs for submodules as well.") @click.option("--ignore-errors", is_flag=True, help="Ignore errors during stub generation.") def gen_pybind11_stubs( output_dir: str, modules: Sequence[str], recursive: bool, ignore_errors: bool ) -> None: """Generate Python stubs for pybind11 modules MODULES and output them in OUTPUT_DIR.""" output_path = Path(output_dir).resolve() if not output_path.is_dir(): raise SystemExit(f"Cannot find directory: {output_dir}") # NOTE: add output path to PYTHONPATH so we can import the module. sys.path.insert(0, str(output_path)) for module in walk_packages(modules) if recursive else modules: try: target = generate_stub_for_c_module(module, output_path) print(f"Created stub: {target}") except Exception as e: if not ignore_errors: raise e else: print("Stub generation failed for: ", module, file=sys.stderr) def walk_packages(packages: Sequence[str]) -> Iterator[str]: """Iterates through all packages and sub-packages in the given list. Python packages have a __path__ attribute defined, which pkgutil uses to determine the package hierarchy. However, packages in C extensions do not have this attribute, so we have to roll out our own. """ for package_name in packages: package = importlib.import_module(package_name) yield package.__name__ # get the path of the object (needed by pkgutil) path = getattr(package, "__path__", None) if path is None: # object has no path; this means it's either a module inside a package # (and thus no sub-packages), or it could be a C extension package. 
if is_c_module(package): # This is a C extension module, now get the list of all sub-packages # using the inspect module subpackages = [ package.__name__ + "." + name for name, val in inspect.getmembers(package) if inspect.ismodule(val) ] # recursively iterate through the subpackages for submodule in walk_packages(subpackages): yield submodule # It's a module inside a package. There's nothing else to walk/yield. else: all_packages = pkgutil.walk_packages( path, prefix=package.__name__ + ".", onerror=lambda r: None ) for _, qualified_name, _ in all_packages: yield qualified_name if __name__ == "__main__": gen_pybind11_stubs()
0.469034
0.123339
from privacy_evaluator.attacks.attack import Attack from privacy_evaluator.classifiers.classifier import Classifier import privacy_evaluator.utils.data_utils as data_utils from privacy_evaluator.utils.trainer import trainer from privacy_evaluator.models.tf.conv_net_meta_classifier import ConvNetMetaClassifier from privacy_evaluator.models.tf.cnn import ConvNet import numpy as np import torch import tensorflow as tf from sklearn.model_selection import train_test_split from typing import Tuple, Dict, List from art.estimators.classification import TensorFlowV2Classifier import string class PropertyInferenceAttack(Attack): def __init__( self, target_model: Classifier, dataset: Tuple[np.ndarray, np.ndarray] ): """ Initialize the Property Inference Attack Class. :param target_model: the target model to be attacked :param dataset: dataset for training of shadow classifiers, test_data from dataset with concatenation [test_features, test_labels] """ self.dataset = dataset # count of shadow training sets, must be eval self.amount_sets = 2 self.input_shape = self.dataset[0][0].shape # [32, 32, 3] for CIFAR10 super().__init__(target_model, None, None, None, None) def create_shadow_training_set( self, num_elements_per_class: Dict[int, int], ) -> List[Tuple[np.ndarray, np.ndarray]]: """ Create the shadow training sets with given ratio. The function works for the specific binary case that the ratio is a fixed distribution specified in the input. :param num_elements_per_class: number of elements per class :return: shadow training sets for given ratio """ training_sets = [] # Creation of shadow training sets with the size dictionaries # amount_sets divided by 2 because amount_sets describes the total amount of shadow training sets. # In this function however only all shadow training sets of one type (follow property OR negation of property) are created, hence amount_sets / 2. 
for _ in range(int(self.amount_sets / 2)): shadow_training_sets = data_utils.new_dataset_from_size_dict( self.dataset, num_elements_per_class ) training_sets.append(shadow_training_sets) return training_sets def train_shadow_classifiers( self, shadow_training_sets: List[Tuple[np.ndarray, np.ndarray]], num_elements_per_classes: Dict[int, int], ): """ Train shadow classifiers with each shadow training set (follows property or negation of property). :param shadow_training_sets: datasets fulfilling the a specific ratio to train shadow_classifiers :param num_elements_per_classes: specific class distribution :return: list of shadow classifiers, accuracies for the classifiers :rtype: Tuple[ List[:class:.art.estimators.estimator.BaseEstimator] """ shadow_classifiers = [] num_classes = len(num_elements_per_classes) for shadow_training_set in shadow_training_sets: shadow_training_X, shadow_training_y = shadow_training_set train_X, test_X, train_y, test_y = train_test_split( shadow_training_X, shadow_training_y, test_size=0.3 ) train_set = (train_X, train_y) test_set = (test_X, test_y) model = ConvNet(num_classes, self.input_shape) trainer(train_set, num_elements_per_classes, model) # change pytorch classifier to art classifier art_model = Classifier._to_art_classifier( model, num_classes, self.input_shape ) shadow_classifiers.append(art_model) return shadow_classifiers def create_shadow_classifier_from_training_set( self, num_elements_per_classes: Dict[int, int] ) -> list: # create training sets shadow_training_sets = self.create_shadow_training_set(num_elements_per_classes) # create classifiers with trained models based on given data set shadow_classifiers = self.train_shadow_classifiers( shadow_training_sets, num_elements_per_classes, ) return shadow_classifiers @staticmethod def feature_extraction(model): """ Extract the features of a given model. 
:param model: a model from which the features should be extracted :type model: :class:`.art.estimators.estimator.BaseEstimator` # BaseEstimator is very general and could be specified to art.classifier :return: feature extraction :rtype: np.ndarray """ # Filter out all trainable parameters (from every layer) # This works differently for PyTorch and TensorFlow. Raise TypeError if model is # neither of both. if isinstance(model.model, torch.nn.Module): model_parameters = list( filter(lambda p: p.requires_grad, model.model.parameters()) ) # Store the remaining parameters in a concatenated 1D numPy-array model_parameters = np.concatenate( [el.cpu().detach().numpy().flatten() for el in model_parameters] ).flatten() return model_parameters elif isinstance(model.model, tf.keras.Model): model_parameters = np.concatenate( [el.numpy().flatten() for el in model.model.trainable_variables] ).flatten() return model_parameters else: raise TypeError( "Expected model to be an instance of {} or {}, received {} instead.".format( str(torch.nn.Module), str(tf.keras.Model), str(type(model.model)) ) ) def create_meta_training_set( self, classifier_list_with_property, classifier_list_without_property ): """ Create meta training set out of shadow classifiers. 
:param classifier_list_with_property: list of all shadow classifiers that were trained on a dataset which fulfills the property :type classifier_list_with_property: iterable object of :class:`.art.estimators.estimator.BaseEstimator` :param classifier_list_without_property: list of all shadow classifiers that were trained on a dataset which does NOT fulfill the property :type classifier_list_without_property: iterable object of :class:`.art.estimators.estimator.BaseEstimator` :return: tuple (Meta-training set, label set) :rtype: tuple (np.ndarray, np.ndarray) """ # Apply self.feature_extraction on each shadow classifier and concatenate all features # into one array feature_list_with_property = np.array( [ self.feature_extraction(classifier) for classifier in classifier_list_with_property ] ) feature_list_without_property = np.array( [ self.feature_extraction(classifier) for classifier in classifier_list_without_property ] ) meta_features = np.concatenate( [feature_list_with_property, feature_list_without_property] ) # Create corresponding labels meta_labels = np.concatenate( [ np.ones(len(feature_list_with_property), dtype=int), np.zeros(len(feature_list_without_property), dtype=int), ] ) return meta_features, meta_labels @staticmethod def train_meta_classifier( meta_training_X: np.ndarray, meta_training_y: np.ndarray ) -> TensorFlowV2Classifier: """ Train meta-classifier with the meta-training set. :param meta_training_X: Set of feature representation of each shadow classifier. 
:param meta_training_y: Set of labels for each shadow classifier, according to whether property is fullfilled (1) or not (0) :return: Art Meta classifier """ # reshaping train data to fit models input meta_training_X = meta_training_X.reshape( (meta_training_X.shape[0], meta_training_X[0].shape[0], 1) ) meta_training_y = meta_training_y.reshape((meta_training_y.shape[0], 1)) meta_input_shape = meta_training_X[0].shape # currently there are just 2 classes nb_classes = 2 inputs = tf.keras.Input(shape=meta_input_shape) # create model according to model from https://arxiv.org/pdf/2002.05688.pdf cnmc = ConvNetMetaClassifier(inputs=inputs, num_classes=nb_classes) cnmc.model.compile( loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"], ) cnmc.model.fit( x=meta_training_X, y=meta_training_y, epochs=2, batch_size=128, # If enough shadow classifiers are available, one could split the training set # and create an additional validation set as input: # validation_data = (validation_X, validation_y), ) # model has .evaluate(test_X,test_y) function # convert model to ART classifier art_meta_classifier = Classifier._to_art_classifier( cnmc.model, nb_classes=nb_classes, input_shape=meta_input_shape ) return art_meta_classifier @staticmethod def perform_prediction( meta_classifier, feature_extraction_target_model ) -> np.ndarray: """ "Actual" attack: Meta classifier gets feature extraction of target model as input, outputs property prediction. 
:param meta_classifier: A classifier :type meta_classifier: "CLASSIFIER_TYPE" (to be found in .art.estimators) :param feature_extraction_target_model: extracted features of target model :type feature_extraction_target_model: np.ndarray :return: Prediction given as probability distribution vector whether property or negation of property is fulfilled for target data set :rtype: np.ndarray with shape (1, 2) """ feature_extraction_target_model = feature_extraction_target_model.reshape( (feature_extraction_target_model.shape[0], 1) ) assert meta_classifier.input_shape == tuple( feature_extraction_target_model.shape ) predictions = meta_classifier.predict(x=[feature_extraction_target_model]) return predictions @staticmethod def output_attack(predictions_ratios: Dict[float, np.ndarray]) -> string: """ Determination of prediction with highest probability. :param predictions_ratios: Prediction values from meta-classifier for different subattacks (different properties) :return: Output message for the attack """ # get key & value of ratio with highest property probability max_property = max(predictions_ratios.items(), key=lambda item: item[1][0][0]) # get average of neg property probabilities of 0.05, 0.95 (most unbalanced datasets --> highest probability for correctness of neg probability) average_unbalanced_cases_neg_property = ( predictions_ratios[0.95][0][1] + predictions_ratios[0.05][0][1] ) / 2 if max_property[1][0][0] > average_unbalanced_cases_neg_property: return "The property inference attack predicts that the target model is unbalanced with a ratio of {}.".format( max_property[0] ) elif max_property[1][0][0] < average_unbalanced_cases_neg_property: return "The property inference attack predicts that the target model is balanced." else: raise ValueError( "Wrong input. Property inference attack cannot predict balanced and unbalanced." 
) def prediction_on_specific_property( self, feature_extraction_target_model: np.ndarray, shadow_classifiers_neg_property: list, ratio: float, size_set: int, ) -> np.ndarray: """ Perform prediction for a subattack (specific property) :param feature_extraction_target_model: extracted features of target model :param shadow_classifiers_neg_property: balanced shadow classifiers negation property :param ratio: distribution for the property :param size_set: size of one class from data set :return: Prediction of meta-classifier for property and negation property """ # property of given ratio, only on class 0 and 1 at the moment property_num_elements_per_classes = { 0: int((1 - ratio) * size_set), 1: int(ratio * size_set), } # create shadow classifiers with trained models with unbalanced data set shadow_classifiers_property = self.create_shadow_classifier_from_training_set( property_num_elements_per_classes ) # create meta training set meta_features, meta_labels = self.create_meta_training_set( shadow_classifiers_property, shadow_classifiers_neg_property ) # create meta classifier meta_classifier = self.train_meta_classifier(meta_features, meta_labels) # get prediction prediction = self.perform_prediction( meta_classifier, feature_extraction_target_model ) return prediction def attack(self): """ Perform Property Inference attack. 
:param params: Example data to run through target model for feature extraction :type params: np.ndarray :return: prediction about property of target data set [[1, 0]]-> property; [[0, 1]]-> negation property :rtype: np.ndarray with shape (1, 2) """ # extract features of target model feature_extraction_target_model = self.feature_extraction(self.target_model) # set ratio and size for unbalanced data sets size_set = 1000 # balanced ratio num_elements = int(round(size_set / 2)) neg_property_num_elements_per_class = {0: num_elements, 1: num_elements} # create balanced shadow classifiers negation property shadow_classifiers_neg_property = self.create_shadow_classifier_from_training_set( neg_property_num_elements_per_class ) predictions = {} # iterate over unbalanced ratios in 0.05 steps (0.05-0.45, 0.55-0.95) # (e.g. 0.55 means: class 0: 0.45 of all samples, class 1: 0.55 of all samples) for ratio in np.arange(0.55, 1, 0.05): # goes through ratios 0.55 - 0.95 predictions[ratio] = self.prediction_on_specific_property( feature_extraction_target_model, shadow_classifiers_neg_property, ratio, size_set, ) # goes through ratios 0.05 - 0.45 (because of 1-ratio) predictions[(1 - ratio)] = self.prediction_on_specific_property( feature_extraction_target_model, shadow_classifiers_neg_property, (1 - ratio), size_set, ) return self.output_attack(predictions)
privacy_evaluator/attacks/property_inference_attack.py
from privacy_evaluator.attacks.attack import Attack from privacy_evaluator.classifiers.classifier import Classifier import privacy_evaluator.utils.data_utils as data_utils from privacy_evaluator.utils.trainer import trainer from privacy_evaluator.models.tf.conv_net_meta_classifier import ConvNetMetaClassifier from privacy_evaluator.models.tf.cnn import ConvNet import numpy as np import torch import tensorflow as tf from sklearn.model_selection import train_test_split from typing import Tuple, Dict, List from art.estimators.classification import TensorFlowV2Classifier import string class PropertyInferenceAttack(Attack): def __init__( self, target_model: Classifier, dataset: Tuple[np.ndarray, np.ndarray] ): """ Initialize the Property Inference Attack Class. :param target_model: the target model to be attacked :param dataset: dataset for training of shadow classifiers, test_data from dataset with concatenation [test_features, test_labels] """ self.dataset = dataset # count of shadow training sets, must be eval self.amount_sets = 2 self.input_shape = self.dataset[0][0].shape # [32, 32, 3] for CIFAR10 super().__init__(target_model, None, None, None, None) def create_shadow_training_set( self, num_elements_per_class: Dict[int, int], ) -> List[Tuple[np.ndarray, np.ndarray]]: """ Create the shadow training sets with given ratio. The function works for the specific binary case that the ratio is a fixed distribution specified in the input. :param num_elements_per_class: number of elements per class :return: shadow training sets for given ratio """ training_sets = [] # Creation of shadow training sets with the size dictionaries # amount_sets divided by 2 because amount_sets describes the total amount of shadow training sets. # In this function however only all shadow training sets of one type (follow property OR negation of property) are created, hence amount_sets / 2. 
for _ in range(int(self.amount_sets / 2)): shadow_training_sets = data_utils.new_dataset_from_size_dict( self.dataset, num_elements_per_class ) training_sets.append(shadow_training_sets) return training_sets def train_shadow_classifiers( self, shadow_training_sets: List[Tuple[np.ndarray, np.ndarray]], num_elements_per_classes: Dict[int, int], ): """ Train shadow classifiers with each shadow training set (follows property or negation of property). :param shadow_training_sets: datasets fulfilling the a specific ratio to train shadow_classifiers :param num_elements_per_classes: specific class distribution :return: list of shadow classifiers, accuracies for the classifiers :rtype: Tuple[ List[:class:.art.estimators.estimator.BaseEstimator] """ shadow_classifiers = [] num_classes = len(num_elements_per_classes) for shadow_training_set in shadow_training_sets: shadow_training_X, shadow_training_y = shadow_training_set train_X, test_X, train_y, test_y = train_test_split( shadow_training_X, shadow_training_y, test_size=0.3 ) train_set = (train_X, train_y) test_set = (test_X, test_y) model = ConvNet(num_classes, self.input_shape) trainer(train_set, num_elements_per_classes, model) # change pytorch classifier to art classifier art_model = Classifier._to_art_classifier( model, num_classes, self.input_shape ) shadow_classifiers.append(art_model) return shadow_classifiers def create_shadow_classifier_from_training_set( self, num_elements_per_classes: Dict[int, int] ) -> list: # create training sets shadow_training_sets = self.create_shadow_training_set(num_elements_per_classes) # create classifiers with trained models based on given data set shadow_classifiers = self.train_shadow_classifiers( shadow_training_sets, num_elements_per_classes, ) return shadow_classifiers @staticmethod def feature_extraction(model): """ Extract the features of a given model. 
:param model: a model from which the features should be extracted :type model: :class:`.art.estimators.estimator.BaseEstimator` # BaseEstimator is very general and could be specified to art.classifier :return: feature extraction :rtype: np.ndarray """ # Filter out all trainable parameters (from every layer) # This works differently for PyTorch and TensorFlow. Raise TypeError if model is # neither of both. if isinstance(model.model, torch.nn.Module): model_parameters = list( filter(lambda p: p.requires_grad, model.model.parameters()) ) # Store the remaining parameters in a concatenated 1D numPy-array model_parameters = np.concatenate( [el.cpu().detach().numpy().flatten() for el in model_parameters] ).flatten() return model_parameters elif isinstance(model.model, tf.keras.Model): model_parameters = np.concatenate( [el.numpy().flatten() for el in model.model.trainable_variables] ).flatten() return model_parameters else: raise TypeError( "Expected model to be an instance of {} or {}, received {} instead.".format( str(torch.nn.Module), str(tf.keras.Model), str(type(model.model)) ) ) def create_meta_training_set( self, classifier_list_with_property, classifier_list_without_property ): """ Create meta training set out of shadow classifiers. 
:param classifier_list_with_property: list of all shadow classifiers that were trained on a dataset which fulfills the property :type classifier_list_with_property: iterable object of :class:`.art.estimators.estimator.BaseEstimator` :param classifier_list_without_property: list of all shadow classifiers that were trained on a dataset which does NOT fulfill the property :type classifier_list_without_property: iterable object of :class:`.art.estimators.estimator.BaseEstimator` :return: tuple (Meta-training set, label set) :rtype: tuple (np.ndarray, np.ndarray) """ # Apply self.feature_extraction on each shadow classifier and concatenate all features # into one array feature_list_with_property = np.array( [ self.feature_extraction(classifier) for classifier in classifier_list_with_property ] ) feature_list_without_property = np.array( [ self.feature_extraction(classifier) for classifier in classifier_list_without_property ] ) meta_features = np.concatenate( [feature_list_with_property, feature_list_without_property] ) # Create corresponding labels meta_labels = np.concatenate( [ np.ones(len(feature_list_with_property), dtype=int), np.zeros(len(feature_list_without_property), dtype=int), ] ) return meta_features, meta_labels @staticmethod def train_meta_classifier( meta_training_X: np.ndarray, meta_training_y: np.ndarray ) -> TensorFlowV2Classifier: """ Train meta-classifier with the meta-training set. :param meta_training_X: Set of feature representation of each shadow classifier. 
:param meta_training_y: Set of labels for each shadow classifier, according to whether property is fullfilled (1) or not (0) :return: Art Meta classifier """ # reshaping train data to fit models input meta_training_X = meta_training_X.reshape( (meta_training_X.shape[0], meta_training_X[0].shape[0], 1) ) meta_training_y = meta_training_y.reshape((meta_training_y.shape[0], 1)) meta_input_shape = meta_training_X[0].shape # currently there are just 2 classes nb_classes = 2 inputs = tf.keras.Input(shape=meta_input_shape) # create model according to model from https://arxiv.org/pdf/2002.05688.pdf cnmc = ConvNetMetaClassifier(inputs=inputs, num_classes=nb_classes) cnmc.model.compile( loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"], ) cnmc.model.fit( x=meta_training_X, y=meta_training_y, epochs=2, batch_size=128, # If enough shadow classifiers are available, one could split the training set # and create an additional validation set as input: # validation_data = (validation_X, validation_y), ) # model has .evaluate(test_X,test_y) function # convert model to ART classifier art_meta_classifier = Classifier._to_art_classifier( cnmc.model, nb_classes=nb_classes, input_shape=meta_input_shape ) return art_meta_classifier @staticmethod def perform_prediction( meta_classifier, feature_extraction_target_model ) -> np.ndarray: """ "Actual" attack: Meta classifier gets feature extraction of target model as input, outputs property prediction. 
:param meta_classifier: A classifier :type meta_classifier: "CLASSIFIER_TYPE" (to be found in .art.estimators) :param feature_extraction_target_model: extracted features of target model :type feature_extraction_target_model: np.ndarray :return: Prediction given as probability distribution vector whether property or negation of property is fulfilled for target data set :rtype: np.ndarray with shape (1, 2) """ feature_extraction_target_model = feature_extraction_target_model.reshape( (feature_extraction_target_model.shape[0], 1) ) assert meta_classifier.input_shape == tuple( feature_extraction_target_model.shape ) predictions = meta_classifier.predict(x=[feature_extraction_target_model]) return predictions @staticmethod def output_attack(predictions_ratios: Dict[float, np.ndarray]) -> string: """ Determination of prediction with highest probability. :param predictions_ratios: Prediction values from meta-classifier for different subattacks (different properties) :return: Output message for the attack """ # get key & value of ratio with highest property probability max_property = max(predictions_ratios.items(), key=lambda item: item[1][0][0]) # get average of neg property probabilities of 0.05, 0.95 (most unbalanced datasets --> highest probability for correctness of neg probability) average_unbalanced_cases_neg_property = ( predictions_ratios[0.95][0][1] + predictions_ratios[0.05][0][1] ) / 2 if max_property[1][0][0] > average_unbalanced_cases_neg_property: return "The property inference attack predicts that the target model is unbalanced with a ratio of {}.".format( max_property[0] ) elif max_property[1][0][0] < average_unbalanced_cases_neg_property: return "The property inference attack predicts that the target model is balanced." else: raise ValueError( "Wrong input. Property inference attack cannot predict balanced and unbalanced." 
) def prediction_on_specific_property( self, feature_extraction_target_model: np.ndarray, shadow_classifiers_neg_property: list, ratio: float, size_set: int, ) -> np.ndarray: """ Perform prediction for a subattack (specific property) :param feature_extraction_target_model: extracted features of target model :param shadow_classifiers_neg_property: balanced shadow classifiers negation property :param ratio: distribution for the property :param size_set: size of one class from data set :return: Prediction of meta-classifier for property and negation property """ # property of given ratio, only on class 0 and 1 at the moment property_num_elements_per_classes = { 0: int((1 - ratio) * size_set), 1: int(ratio * size_set), } # create shadow classifiers with trained models with unbalanced data set shadow_classifiers_property = self.create_shadow_classifier_from_training_set( property_num_elements_per_classes ) # create meta training set meta_features, meta_labels = self.create_meta_training_set( shadow_classifiers_property, shadow_classifiers_neg_property ) # create meta classifier meta_classifier = self.train_meta_classifier(meta_features, meta_labels) # get prediction prediction = self.perform_prediction( meta_classifier, feature_extraction_target_model ) return prediction def attack(self): """ Perform Property Inference attack. 
:param params: Example data to run through target model for feature extraction :type params: np.ndarray :return: prediction about property of target data set [[1, 0]]-> property; [[0, 1]]-> negation property :rtype: np.ndarray with shape (1, 2) """ # extract features of target model feature_extraction_target_model = self.feature_extraction(self.target_model) # set ratio and size for unbalanced data sets size_set = 1000 # balanced ratio num_elements = int(round(size_set / 2)) neg_property_num_elements_per_class = {0: num_elements, 1: num_elements} # create balanced shadow classifiers negation property shadow_classifiers_neg_property = self.create_shadow_classifier_from_training_set( neg_property_num_elements_per_class ) predictions = {} # iterate over unbalanced ratios in 0.05 steps (0.05-0.45, 0.55-0.95) # (e.g. 0.55 means: class 0: 0.45 of all samples, class 1: 0.55 of all samples) for ratio in np.arange(0.55, 1, 0.05): # goes through ratios 0.55 - 0.95 predictions[ratio] = self.prediction_on_specific_property( feature_extraction_target_model, shadow_classifiers_neg_property, ratio, size_set, ) # goes through ratios 0.05 - 0.45 (because of 1-ratio) predictions[(1 - ratio)] = self.prediction_on_specific_property( feature_extraction_target_model, shadow_classifiers_neg_property, (1 - ratio), size_set, ) return self.output_attack(predictions)
0.939969
0.572454
import json from config import * from auth import * from flask import Flask, render_template, request from script import * #! Flask app init app = Flask(__name__) #! Index page @app.route("/", methods=["GET"]) def index(): return render_template('index.html', TRADE_SYMBOL=TRADE_SYMBOL) #! Webhook connection route @app.route("/webhook", methods=["GET", "POST"]) def webhook(): if request.method == "POST": quantityEntry = TRADE_QUANTITY # TODO-> User entry mechanism required! data = json.loads(request.data) if data['token'] != get_token(): return { "code": "error", "message": "Invalid token" } if data['cross_histogram'] > 0: # * Buy order structure print("**BUY ORDER**\n") buyOrder = marketBuyOrder(TRADE_SYMBOL, quantity=quantityEntry) if buyOrder: print("Buy Order Confirmed") telegram_bot_sendtext("***BUY ORDER CONFIRMED*** \n"+"`Symbol: {} Price: {}`".format(TRADE_SYMBOL, data['bar']['open'])) return { "code": "success", "message": "Buy Order Executed" } else: print("Order Failure") telegram_bot_sendtext("***BUY ORDER FAILED*** \n"+"`Symbol: {} Price: {}`".format(TRADE_SYMBOL, data['bar']['open'])) return{ "code": "error", "message": "Buy Order Failed" } else: # * Sell order structure print("**SELL ORDER**\n") sellOrder = marketSellOrder(TRADE_SYMBOL, quantity=quantityEntry) if sellOrder: print("Sell Order Confirmed") telegram_bot_sendtext("***SELL ORDER CONFIRMED*** \n"+"`Symbol: {} Price: {}`".format(TRADE_SYMBOL, data['bar']['open'])) return { "code": "success", "message": "Sell Order Executed" } else: print("Order Failure") telegram_bot_sendtext("***SELL ORDER FAILED!!*** \n"+"`Symbol: {} Price: {}`".format(TRADE_SYMBOL, data['bar']['open'])) return{ "code": "error", "message": "Sell Order Failed" } return "**Webhook Get**" if __name__ == '__main__': app.run(debug=True)
Project/app.py
import json from config import * from auth import * from flask import Flask, render_template, request from script import * #! Flask app init app = Flask(__name__) #! Index page @app.route("/", methods=["GET"]) def index(): return render_template('index.html', TRADE_SYMBOL=TRADE_SYMBOL) #! Webhook connection route @app.route("/webhook", methods=["GET", "POST"]) def webhook(): if request.method == "POST": quantityEntry = TRADE_QUANTITY # TODO-> User entry mechanism required! data = json.loads(request.data) if data['token'] != get_token(): return { "code": "error", "message": "Invalid token" } if data['cross_histogram'] > 0: # * Buy order structure print("**BUY ORDER**\n") buyOrder = marketBuyOrder(TRADE_SYMBOL, quantity=quantityEntry) if buyOrder: print("Buy Order Confirmed") telegram_bot_sendtext("***BUY ORDER CONFIRMED*** \n"+"`Symbol: {} Price: {}`".format(TRADE_SYMBOL, data['bar']['open'])) return { "code": "success", "message": "Buy Order Executed" } else: print("Order Failure") telegram_bot_sendtext("***BUY ORDER FAILED*** \n"+"`Symbol: {} Price: {}`".format(TRADE_SYMBOL, data['bar']['open'])) return{ "code": "error", "message": "Buy Order Failed" } else: # * Sell order structure print("**SELL ORDER**\n") sellOrder = marketSellOrder(TRADE_SYMBOL, quantity=quantityEntry) if sellOrder: print("Sell Order Confirmed") telegram_bot_sendtext("***SELL ORDER CONFIRMED*** \n"+"`Symbol: {} Price: {}`".format(TRADE_SYMBOL, data['bar']['open'])) return { "code": "success", "message": "Sell Order Executed" } else: print("Order Failure") telegram_bot_sendtext("***SELL ORDER FAILED!!*** \n"+"`Symbol: {} Price: {}`".format(TRADE_SYMBOL, data['bar']['open'])) return{ "code": "error", "message": "Sell Order Failed" } return "**Webhook Get**" if __name__ == '__main__': app.run(debug=True)
0.228673
0.081191
import os import re import cv2 import shelve from time import sleep from os import getlogin, mkdir from selenium import webdriver from send2trash import send2trash from webdriver_manager.chrome import ChromeDriverManager from .error import LoginError os.environ["WDM_LOG_LEVEL"] = "0" def get_qrcode(driver, timeout): for _ in range(timeout//5): qr_code = driver.find_element_by_css_selector(".landing-main") qr_code.screenshot("qrcode.png") img = cv2.imread("qrcode.png", 1) cv2.imshow("Scan the QRCode to login", img) cv2.waitKey(5000) cv2.destroyAllWindows() try: driver.find_element_by_xpath( '//*[@id="side"]/div[1]/div/label/div/div[2]' ) send2trash("qrcode.png") break except: pass def login(self, visible: bool = True, timeout: int = 60): """Logs in whatsapp and shows the QRCode if necessary Args: visible (bool, optional): Shows the process. Defaults to False. timeout (int, optional): Limit time to login in seconds. Defalts to 60 """ usr_path = ( f"C:\\Users\\{getlogin()}\\AppData\\Local\\Google\\Chrome\\User Data\\Default" ) try: self.mydata = shelve.open("data/data") except: mkdir("data") self.mydata = shelve.open("data/data") options = webdriver.ChromeOptions() options.add_argument("--hide-scrollbars") options.add_argument("--disable-gpu") options.add_argument("--log-level=OFF") options.add_experimental_option("excludeSwitches", ["enable-logging"]) driver = webdriver.Chrome(ChromeDriverManager().install(), options=options) self.mydata["user_agent"] = driver.execute_script( "return navigator.userAgent" ) driver.close() print(f'Logging as: {self.mydata["user_agent"]}') options = webdriver.ChromeOptions() options.add_argument(f"--user-data-dir={usr_path}") options.add_argument(f"--user-agent={self.mydata['user_agent']}") options.add_argument("--start-maximized") options.add_argument("--hide-scrollbars") options.add_argument("--disable-gpu") options.add_argument("--log-level=OFF") options.add_experimental_option("excludeSwitches", ["enable-logging"]) if not visible: 
options.add_argument("--headless") self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=options) self.driver.get("https://web.whatsapp.com") logged = False for _ in range(timeout): try: self.driver.find_element_by_xpath( '//*[@id="side"]/div[1]/div/label/div/div[2]' ) break except: if not visible: try: self.driver.find_element_by_css_selector(".landing-main") get_qrcode(self.driver, timeout) break except: sleep(1) else: sleep(1) self.driver.implicitly_wait(60) self.driver.find_element_by_xpath( '//*[@id="side"]/div[1]/div/label/div/div[2]' ) logged = True if logged or visible: print("Done") else: self.close() raise LoginError("Failed to log in to WhatsApp") sleep(2) def close(self): """Exit the whatsapp""" self.driver.close()
whatsappy/login.py
import os import re import cv2 import shelve from time import sleep from os import getlogin, mkdir from selenium import webdriver from send2trash import send2trash from webdriver_manager.chrome import ChromeDriverManager from .error import LoginError os.environ["WDM_LOG_LEVEL"] = "0" def get_qrcode(driver, timeout): for _ in range(timeout//5): qr_code = driver.find_element_by_css_selector(".landing-main") qr_code.screenshot("qrcode.png") img = cv2.imread("qrcode.png", 1) cv2.imshow("Scan the QRCode to login", img) cv2.waitKey(5000) cv2.destroyAllWindows() try: driver.find_element_by_xpath( '//*[@id="side"]/div[1]/div/label/div/div[2]' ) send2trash("qrcode.png") break except: pass def login(self, visible: bool = True, timeout: int = 60): """Logs in whatsapp and shows the QRCode if necessary Args: visible (bool, optional): Shows the process. Defaults to False. timeout (int, optional): Limit time to login in seconds. Defalts to 60 """ usr_path = ( f"C:\\Users\\{getlogin()}\\AppData\\Local\\Google\\Chrome\\User Data\\Default" ) try: self.mydata = shelve.open("data/data") except: mkdir("data") self.mydata = shelve.open("data/data") options = webdriver.ChromeOptions() options.add_argument("--hide-scrollbars") options.add_argument("--disable-gpu") options.add_argument("--log-level=OFF") options.add_experimental_option("excludeSwitches", ["enable-logging"]) driver = webdriver.Chrome(ChromeDriverManager().install(), options=options) self.mydata["user_agent"] = driver.execute_script( "return navigator.userAgent" ) driver.close() print(f'Logging as: {self.mydata["user_agent"]}') options = webdriver.ChromeOptions() options.add_argument(f"--user-data-dir={usr_path}") options.add_argument(f"--user-agent={self.mydata['user_agent']}") options.add_argument("--start-maximized") options.add_argument("--hide-scrollbars") options.add_argument("--disable-gpu") options.add_argument("--log-level=OFF") options.add_experimental_option("excludeSwitches", ["enable-logging"]) if not visible: 
options.add_argument("--headless") self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=options) self.driver.get("https://web.whatsapp.com") logged = False for _ in range(timeout): try: self.driver.find_element_by_xpath( '//*[@id="side"]/div[1]/div/label/div/div[2]' ) break except: if not visible: try: self.driver.find_element_by_css_selector(".landing-main") get_qrcode(self.driver, timeout) break except: sleep(1) else: sleep(1) self.driver.implicitly_wait(60) self.driver.find_element_by_xpath( '//*[@id="side"]/div[1]/div/label/div/div[2]' ) logged = True if logged or visible: print("Done") else: self.close() raise LoginError("Failed to log in to WhatsApp") sleep(2) def close(self): """Exit the whatsapp""" self.driver.close()
0.17252
0.049912
import sys import time import json import base64 import urllib import urllib2 import traceback import threading import ssl import weblab.configuration_doc as configuration_doc from weblab.experiment.experiment import Experiment from weblab.experiment.concurrent_experiment import ConcurrentExperiment import weblab.experiment.level as ExperimentApiLevel import weblab.core.coordinator.coordinator as Coordinator class GetInfoThread(threading.Thread): def __init__(self, experiment, coord_address, verbose): threading.Thread.__init__(self) self.setName("HttpExperiment::GetInfoThread::%s" % coord_address) self.setDaemon(True) self.experiment = experiment self.verbose = verbose def run(self): while True: try: if self.experiment.get_api_and_test(): break except: if self.verbose: print("Error in %s" % self.name) traceback.print_exc() time.sleep(10) class HttpExperiment(Experiment): def __init__(self, coord_address, locator, config, *args, **kwargs): super(Experiment, self).__init__(*args, **kwargs) self.base_url = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_URL) if self.base_url.endswith('/'): print "Warning: HTTP experiment address ends in /" self.username = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_USERNAME) self.password = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_PASSWORD) self.batch = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_BATCH) self.api = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_API) self.extension = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_EXTENSION) self.request_format = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_REQUEST_FORMAT) self.verbose = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_VERBOSE) if self.username and self.password: self.encoded = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\n', '') self.session_id = '' self._get_info_thread = GetInfoThread(self, coord_address, self.verbose) self._tested = False if self.api != '0': # If API 
is '0', the /api and /test methods don't even exist self._get_info_thread.start() def _build_url(self, path): return "%s/weblab/sessions/%s" % (self.base_url, path) def _request(self, path, data = None): ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE request = urllib2.Request(self._build_url(path)) request.add_header('Accept', 'application/json,text/html') if self.username and self.password: request.add_header("Authorization", "Basic %s" % self.encoded) if data is not None: if self.request_format == 'json': request.add_header('Content-Type', 'application/json') if data is None: return urllib2.urlopen(request, context=ctx).read() elif self.request_format == 'json': return urllib2.urlopen(request, json.dumps(data), context=ctx).read() elif self.request_format == 'form': return urllib2.urlopen(request, urllib.urlencode(data), context=ctx).read() else: raise Exception("Unsupported format: %r" % fmt) def _request_json_dict(self, url, what, data = None): try: response_str = self._request(url, data) except: if self.verbose: print("Error obtaining %s from %s" % (what, self._build_url(url))) traceback.print_exc() return False if response_str == 'ok' or response_str == '' or response_str == 'deleted': response_str = "{}" try: response = json.loads(response_str) except: if self.verbose: print("Error obtaining JSON from %s response:" % what) print(response_str) return False if not isinstance(response, dict): if self.verbose: print("Error obtaining JSON from %s response: it is not an object" % what) print(response_str) return False return response def get_api(self): if self.api: return self.api if self.extension: url = 'api%s' % self.extension else: url = 'api' response = self._request_json_dict(url, "API") if response == False: return False api_version = response.get('api_version') if not api_version: if self.verbose: print("Invalid api_version") print(api_version) return False self.api = api_version return self.api def 
test(self): if self._tested: # Previously tested return True if self.api is None: if self.verbose: print("API not yet defined; couldn't try the test method") return False if self.api == '0': # No test required self._tested = True return True if self.extension: url = 'test%s' % self.extension else: url = 'test' response = self._request_json_dict(url, "Test") if response == False: return False valid = response.get('valid', False) if valid: self._tested = True return True raise Exception("Error testing the server: %s" % response.get('error_messages', ["Error accesing server by unknown reason"])) def get_api_and_test(self): api = self.get_api() if api: if not self.test(): return False return api def do_start_experiment(self, serialized_client_initial_data, serialized_server_initial_data): """ Invoked by WebLab on the start experiment event. :param serialized_client_initial_data: Initial client configuration. As a JSON-parseable string. :type serialized_client_initial_data: str :param serialized_server_initial_data: Initial data provided by the server. As a JSON-parseable string. :type serialized_server_initial_data: str :return: JSON parseable string containing the initial_configuration dictionary, which includes an "url" and a "back" url. 
:rtype: str """ api = self.get_api_and_test() if api is None: raise Exception("Couldn't obtain the API for this experiment, so it can't be started!") try: back_url = json.loads(serialized_client_initial_data).get('back','') if self.extension: url = 'new%s' % self.extension else: url = '' data = { 'back' : back_url } if api == "0" or self.request_format == 'form': data['client_initial_data'] = serialized_client_initial_data data['server_initial_data'] = serialized_server_initial_data else: data['client_initial_data'] = json.loads(serialized_client_initial_data) data['server_initial_data'] = json.loads(serialized_server_initial_data) response_str = self._request(url, data) try: response = json.loads(response_str) except: print print "Got invalid JSON response from the HTTP server:" print "*" * 20 print response_str print "*" * 20 sys.stdout.flush() raise url = response.get('url','http://server.sent.invalid.address') config = { 'url' : url, } self.session_id = response.get('session_id','invalid_session_id') return json.dumps({ "initial_configuration" : json.dumps(config), "batch" : self.batch }) except: traceback.print_exc() raise def do_get_api(self): return ExperimentApiLevel.level_2 def do_should_finish(self): """ Should the experiment finish? If the experiment server should be able to say "I've finished", it will be asked every few time; if the experiment is completely interactive (so it's up to the user and the permissions of the user to say when the session should finish), it will never be asked. Therefore, this method will return a numeric result, being: - result > 0: it hasn't finished but ask within result seconds. - result == 0: completely interactive, don't ask again - result < 0: it has finished. 
""" if self.extension: url = 'status%s?session_id=%s' % (self.extension, self.session_id) else: url = '%s/status' % self.session_id try: response_str = self._request(url) response = json.loads(response_str) return response['should_finish'] except: traceback.print_exc() raise def do_dispose(self): if self.extension: url = 'action%s?session_id=%s' % (self.extension, self.session_id) else: url = '%s' % self.session_id try: response = self._request_json_dict(url, "dispose", { 'action': 'delete', }) wrapped_response = { Coordinator.FINISH_FINISHED_MESSAGE: True, Coordinator.FINISH_DATA_MESSAGE: "" } if response: finished = response.get(Coordinator.FINISH_FINISHED_MESSAGE) if finished is not None: wrapped_response[Coordinator.FINISH_FINISHED_MESSAGE] = finished data = response.get(Coordinator.FINISH_DATA_MESSAGE) if data is not None: wrapped_response[Coordinator.FINISH_FINISHED_MESSAGE] = data ask_again = response.get(Coordinator.FINISH_ASK_AGAIN_MESSAGE) if ask_again is not None: wrapped_response[Coordinator.FINISH_ASK_AGAIN_MESSAGE] = ask_again return json.dumps(wrapped_response) except: traceback.print_exc() raise class ConcurrentHttpExperiment(Experiment): def __init__(self, coord_address, locator, config, *args, **kwargs): super(Experiment, self).__init__(*args, **kwargs) self.base_url = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_URL) if self.base_url.endswith('/'): print "Warning: HTTP experiment address ends in /" self.username = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_USERNAME) self.password = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_PASSWORD) self.batch = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_BATCH) self.api = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_API) self.extension = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_EXTENSION) self.request_format = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_REQUEST_FORMAT) self.verbose = 
config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_VERBOSE) self.session_ids_lock = threading.Lock() self.session_ids = {} if self.username and self.password: self.encoded = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\n', '') self._get_info_thread = GetInfoThread(self, coord_address, self.verbose) self._tested = False if self.api != '0': # If API is '0', the /api and /test methods don't even exist self._get_info_thread.start() def _build_url(self, path): return "%s/weblab/sessions/%s" % (self.base_url, path) def _request(self, path, data = None): ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE request = urllib2.Request(self._build_url(path)) request.add_header('Accept', 'application/json,text/html') if self.username and self.password: request.add_header("Authorization", "Basic %s" % self.encoded) if data is not None: if self.request_format == 'json': request.add_header('Content-Type', 'application/json') if data is None: return urllib2.urlopen(request, context=ctx).read() elif self.request_format == 'json': return urllib2.urlopen(request, json.dumps(data), context=ctx).read() elif self.request_format == 'form': return urllib2.urlopen(request, urllib.urlencode(data), context=ctx).read() else: raise Exception("Unsupported format: %r" % fmt) def _request_json_dict(self, url, what, data = None): try: response_str = self._request(url, data) except: if self.verbose: print("Error obtaining %s from %s" % (what, self._build_url(url))) traceback.print_exc() return False if response_str == 'ok' or response_str == '' or response_str == 'deleted': response_str = "{}" try: response = json.loads(response_str) except: if self.verbose: print("Error obtaining JSON from %s response:" % what) print(response_str) return False if not isinstance(response, dict): if self.verbose: print("Error obtaining JSON from %s response: it is not an object" % what) print(response_str) return False return response def 
get_api(self): if self.api: return self.api if self.extension: url = 'api%s' % self.extension else: url = 'api' response = self._request_json_dict(url, "API") if response == False: return False api_version = response.get('api_version') if not api_version: if self.verbose: print("Invalid api_version") print(api_version) return False self.api = api_version return self.api def test(self): if self._tested: # Previously tested return True if self.api is None: if self.verbose: print("API not yet defined; couldn't try the test method") return False if self.api == '0': # No test required self._tested = True return True if self.extension: url = 'test%s' % self.extension else: url = 'test' response = self._request_json_dict(url, "Test") if response == False: return False valid = response.get('valid', False) if valid: self._tested = True return True raise Exception("Error testing the server: %s" % response.get('error_messages', ["Error accesing server by unknown reason"])) def get_api_and_test(self): api = self.get_api() if api: if not self.test(): return False return api def do_start_experiment(self, lab_session_id, serialized_client_initial_data, serialized_server_initial_data): """ Invoked by WebLab on the start experiment event. :param serialized_client_initial_data: Initial client configuration. As a JSON-parseable string. :type serialized_client_initial_data: str :param serialized_server_initial_data: Initial data provided by the server. As a JSON-parseable string. :type serialized_server_initial_data: str :return: JSON parseable string containing the initial_configuration dictionary, which includes an "url" and a "back" url. 
:rtype: str """ api = self.get_api_and_test() if api is None: raise Exception("Couldn't obtain the API for this experiment, so it can't be started!") try: back_url = json.loads(serialized_client_initial_data).get('back','') if self.extension: url = 'new%s' % self.extension else: url = '' data = { 'back' : back_url, } if api == "0" or self.request_format == 'form': data['client_initial_data'] = serialized_client_initial_data data['server_initial_data'] = serialized_server_initial_data else: data['client_initial_data'] = json.loads(serialized_client_initial_data) data['server_initial_data'] = json.loads(serialized_server_initial_data) response_str = self._request(url, data) try: response = json.loads(response_str) except: print print "Got invalid JSON response from the HTTP server:" print "*" * 20 print response_str print "*" * 20 sys.stdout.flush() raise url = response.get('url','http://server.sent.invalid.address') config = { 'url' : url, } with self.session_ids_lock: self.session_ids[lab_session_id] = response.get('session_id','invalid_session_id') return json.dumps({ "initial_configuration" : json.dumps(config), "batch" : self.batch }) except: traceback.print_exc() raise def do_get_api(self): return ExperimentApiLevel.level_2_concurrent def do_should_finish(self, lab_session_id): """ Should the experiment finish? If the experiment server should be able to say "I've finished", it will be asked every few time; if the experiment is completely interactive (so it's up to the user and the permissions of the user to say when the session should finish), it will never be asked. Therefore, this method will return a numeric result, being: - result > 0: it hasn't finished but ask within result seconds. - result == 0: completely interactive, don't ask again - result < 0: it has finished. 
""" with self.session_ids_lock: session_id = self.session_ids.get(lab_session_id, 'no-session-id') if self.extension: url = 'status%s?session_id=%s' % (self.extension, session_id) else: url = '%s/status' % session_id try: response_str = self._request(url) response = json.loads(response_str) return response['should_finish'] except: traceback.print_exc() raise def do_dispose(self, lab_session_id): with self.session_ids_lock: session_id = self.session_ids.get(lab_session_id, 'no-session-id') if self.extension: url = 'action%s?session_id=%s' % (self.extension, session_id) else: url = '%s' % session_id try: response = self._request_json_dict(url, "dispose", { 'action': 'delete', }) wrapped_response = { Coordinator.FINISH_FINISHED_MESSAGE: True, Coordinator.FINISH_DATA_MESSAGE: "" } if response: finished = response.get(Coordinator.FINISH_FINISHED_MESSAGE) if finished is not None: wrapped_response[Coordinator.FINISH_FINISHED_MESSAGE] = finished data = response.get(Coordinator.FINISH_DATA_MESSAGE) if data is not None: wrapped_response[Coordinator.FINISH_FINISHED_MESSAGE] = data ask_again = response.get(Coordinator.FINISH_ASK_AGAIN_MESSAGE) if ask_again is not None: wrapped_response[Coordinator.FINISH_ASK_AGAIN_MESSAGE] = ask_again return json.dumps(wrapped_response) except: traceback.print_exc() raise
server/src/experiments/http_experiment.py
import sys import time import json import base64 import urllib import urllib2 import traceback import threading import ssl import weblab.configuration_doc as configuration_doc from weblab.experiment.experiment import Experiment from weblab.experiment.concurrent_experiment import ConcurrentExperiment import weblab.experiment.level as ExperimentApiLevel import weblab.core.coordinator.coordinator as Coordinator class GetInfoThread(threading.Thread): def __init__(self, experiment, coord_address, verbose): threading.Thread.__init__(self) self.setName("HttpExperiment::GetInfoThread::%s" % coord_address) self.setDaemon(True) self.experiment = experiment self.verbose = verbose def run(self): while True: try: if self.experiment.get_api_and_test(): break except: if self.verbose: print("Error in %s" % self.name) traceback.print_exc() time.sleep(10) class HttpExperiment(Experiment): def __init__(self, coord_address, locator, config, *args, **kwargs): super(Experiment, self).__init__(*args, **kwargs) self.base_url = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_URL) if self.base_url.endswith('/'): print "Warning: HTTP experiment address ends in /" self.username = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_USERNAME) self.password = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_PASSWORD) self.batch = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_BATCH) self.api = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_API) self.extension = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_EXTENSION) self.request_format = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_REQUEST_FORMAT) self.verbose = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_VERBOSE) if self.username and self.password: self.encoded = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\n', '') self.session_id = '' self._get_info_thread = GetInfoThread(self, coord_address, self.verbose) self._tested = False if self.api != '0': # If API 
is '0', the /api and /test methods don't even exist self._get_info_thread.start() def _build_url(self, path): return "%s/weblab/sessions/%s" % (self.base_url, path) def _request(self, path, data = None): ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE request = urllib2.Request(self._build_url(path)) request.add_header('Accept', 'application/json,text/html') if self.username and self.password: request.add_header("Authorization", "Basic %s" % self.encoded) if data is not None: if self.request_format == 'json': request.add_header('Content-Type', 'application/json') if data is None: return urllib2.urlopen(request, context=ctx).read() elif self.request_format == 'json': return urllib2.urlopen(request, json.dumps(data), context=ctx).read() elif self.request_format == 'form': return urllib2.urlopen(request, urllib.urlencode(data), context=ctx).read() else: raise Exception("Unsupported format: %r" % fmt) def _request_json_dict(self, url, what, data = None): try: response_str = self._request(url, data) except: if self.verbose: print("Error obtaining %s from %s" % (what, self._build_url(url))) traceback.print_exc() return False if response_str == 'ok' or response_str == '' or response_str == 'deleted': response_str = "{}" try: response = json.loads(response_str) except: if self.verbose: print("Error obtaining JSON from %s response:" % what) print(response_str) return False if not isinstance(response, dict): if self.verbose: print("Error obtaining JSON from %s response: it is not an object" % what) print(response_str) return False return response def get_api(self): if self.api: return self.api if self.extension: url = 'api%s' % self.extension else: url = 'api' response = self._request_json_dict(url, "API") if response == False: return False api_version = response.get('api_version') if not api_version: if self.verbose: print("Invalid api_version") print(api_version) return False self.api = api_version return self.api def 
test(self): if self._tested: # Previously tested return True if self.api is None: if self.verbose: print("API not yet defined; couldn't try the test method") return False if self.api == '0': # No test required self._tested = True return True if self.extension: url = 'test%s' % self.extension else: url = 'test' response = self._request_json_dict(url, "Test") if response == False: return False valid = response.get('valid', False) if valid: self._tested = True return True raise Exception("Error testing the server: %s" % response.get('error_messages', ["Error accesing server by unknown reason"])) def get_api_and_test(self): api = self.get_api() if api: if not self.test(): return False return api def do_start_experiment(self, serialized_client_initial_data, serialized_server_initial_data): """ Invoked by WebLab on the start experiment event. :param serialized_client_initial_data: Initial client configuration. As a JSON-parseable string. :type serialized_client_initial_data: str :param serialized_server_initial_data: Initial data provided by the server. As a JSON-parseable string. :type serialized_server_initial_data: str :return: JSON parseable string containing the initial_configuration dictionary, which includes an "url" and a "back" url. 
:rtype: str """ api = self.get_api_and_test() if api is None: raise Exception("Couldn't obtain the API for this experiment, so it can't be started!") try: back_url = json.loads(serialized_client_initial_data).get('back','') if self.extension: url = 'new%s' % self.extension else: url = '' data = { 'back' : back_url } if api == "0" or self.request_format == 'form': data['client_initial_data'] = serialized_client_initial_data data['server_initial_data'] = serialized_server_initial_data else: data['client_initial_data'] = json.loads(serialized_client_initial_data) data['server_initial_data'] = json.loads(serialized_server_initial_data) response_str = self._request(url, data) try: response = json.loads(response_str) except: print print "Got invalid JSON response from the HTTP server:" print "*" * 20 print response_str print "*" * 20 sys.stdout.flush() raise url = response.get('url','http://server.sent.invalid.address') config = { 'url' : url, } self.session_id = response.get('session_id','invalid_session_id') return json.dumps({ "initial_configuration" : json.dumps(config), "batch" : self.batch }) except: traceback.print_exc() raise def do_get_api(self): return ExperimentApiLevel.level_2 def do_should_finish(self): """ Should the experiment finish? If the experiment server should be able to say "I've finished", it will be asked every few time; if the experiment is completely interactive (so it's up to the user and the permissions of the user to say when the session should finish), it will never be asked. Therefore, this method will return a numeric result, being: - result > 0: it hasn't finished but ask within result seconds. - result == 0: completely interactive, don't ask again - result < 0: it has finished. 
""" if self.extension: url = 'status%s?session_id=%s' % (self.extension, self.session_id) else: url = '%s/status' % self.session_id try: response_str = self._request(url) response = json.loads(response_str) return response['should_finish'] except: traceback.print_exc() raise def do_dispose(self): if self.extension: url = 'action%s?session_id=%s' % (self.extension, self.session_id) else: url = '%s' % self.session_id try: response = self._request_json_dict(url, "dispose", { 'action': 'delete', }) wrapped_response = { Coordinator.FINISH_FINISHED_MESSAGE: True, Coordinator.FINISH_DATA_MESSAGE: "" } if response: finished = response.get(Coordinator.FINISH_FINISHED_MESSAGE) if finished is not None: wrapped_response[Coordinator.FINISH_FINISHED_MESSAGE] = finished data = response.get(Coordinator.FINISH_DATA_MESSAGE) if data is not None: wrapped_response[Coordinator.FINISH_FINISHED_MESSAGE] = data ask_again = response.get(Coordinator.FINISH_ASK_AGAIN_MESSAGE) if ask_again is not None: wrapped_response[Coordinator.FINISH_ASK_AGAIN_MESSAGE] = ask_again return json.dumps(wrapped_response) except: traceback.print_exc() raise class ConcurrentHttpExperiment(Experiment): def __init__(self, coord_address, locator, config, *args, **kwargs): super(Experiment, self).__init__(*args, **kwargs) self.base_url = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_URL) if self.base_url.endswith('/'): print "Warning: HTTP experiment address ends in /" self.username = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_USERNAME) self.password = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_PASSWORD) self.batch = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_BATCH) self.api = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_API) self.extension = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_EXTENSION) self.request_format = config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_REQUEST_FORMAT) self.verbose = 
config.get_doc_value(configuration_doc.HTTP_EXPERIMENT_VERBOSE) self.session_ids_lock = threading.Lock() self.session_ids = {} if self.username and self.password: self.encoded = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\n', '') self._get_info_thread = GetInfoThread(self, coord_address, self.verbose) self._tested = False if self.api != '0': # If API is '0', the /api and /test methods don't even exist self._get_info_thread.start() def _build_url(self, path): return "%s/weblab/sessions/%s" % (self.base_url, path) def _request(self, path, data = None): ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE request = urllib2.Request(self._build_url(path)) request.add_header('Accept', 'application/json,text/html') if self.username and self.password: request.add_header("Authorization", "Basic %s" % self.encoded) if data is not None: if self.request_format == 'json': request.add_header('Content-Type', 'application/json') if data is None: return urllib2.urlopen(request, context=ctx).read() elif self.request_format == 'json': return urllib2.urlopen(request, json.dumps(data), context=ctx).read() elif self.request_format == 'form': return urllib2.urlopen(request, urllib.urlencode(data), context=ctx).read() else: raise Exception("Unsupported format: %r" % fmt) def _request_json_dict(self, url, what, data = None): try: response_str = self._request(url, data) except: if self.verbose: print("Error obtaining %s from %s" % (what, self._build_url(url))) traceback.print_exc() return False if response_str == 'ok' or response_str == '' or response_str == 'deleted': response_str = "{}" try: response = json.loads(response_str) except: if self.verbose: print("Error obtaining JSON from %s response:" % what) print(response_str) return False if not isinstance(response, dict): if self.verbose: print("Error obtaining JSON from %s response: it is not an object" % what) print(response_str) return False return response def 
get_api(self): if self.api: return self.api if self.extension: url = 'api%s' % self.extension else: url = 'api' response = self._request_json_dict(url, "API") if response == False: return False api_version = response.get('api_version') if not api_version: if self.verbose: print("Invalid api_version") print(api_version) return False self.api = api_version return self.api def test(self): if self._tested: # Previously tested return True if self.api is None: if self.verbose: print("API not yet defined; couldn't try the test method") return False if self.api == '0': # No test required self._tested = True return True if self.extension: url = 'test%s' % self.extension else: url = 'test' response = self._request_json_dict(url, "Test") if response == False: return False valid = response.get('valid', False) if valid: self._tested = True return True raise Exception("Error testing the server: %s" % response.get('error_messages', ["Error accesing server by unknown reason"])) def get_api_and_test(self): api = self.get_api() if api: if not self.test(): return False return api def do_start_experiment(self, lab_session_id, serialized_client_initial_data, serialized_server_initial_data): """ Invoked by WebLab on the start experiment event. :param serialized_client_initial_data: Initial client configuration. As a JSON-parseable string. :type serialized_client_initial_data: str :param serialized_server_initial_data: Initial data provided by the server. As a JSON-parseable string. :type serialized_server_initial_data: str :return: JSON parseable string containing the initial_configuration dictionary, which includes an "url" and a "back" url. 
:rtype: str """ api = self.get_api_and_test() if api is None: raise Exception("Couldn't obtain the API for this experiment, so it can't be started!") try: back_url = json.loads(serialized_client_initial_data).get('back','') if self.extension: url = 'new%s' % self.extension else: url = '' data = { 'back' : back_url, } if api == "0" or self.request_format == 'form': data['client_initial_data'] = serialized_client_initial_data data['server_initial_data'] = serialized_server_initial_data else: data['client_initial_data'] = json.loads(serialized_client_initial_data) data['server_initial_data'] = json.loads(serialized_server_initial_data) response_str = self._request(url, data) try: response = json.loads(response_str) except: print print "Got invalid JSON response from the HTTP server:" print "*" * 20 print response_str print "*" * 20 sys.stdout.flush() raise url = response.get('url','http://server.sent.invalid.address') config = { 'url' : url, } with self.session_ids_lock: self.session_ids[lab_session_id] = response.get('session_id','invalid_session_id') return json.dumps({ "initial_configuration" : json.dumps(config), "batch" : self.batch }) except: traceback.print_exc() raise def do_get_api(self): return ExperimentApiLevel.level_2_concurrent def do_should_finish(self, lab_session_id): """ Should the experiment finish? If the experiment server should be able to say "I've finished", it will be asked every few time; if the experiment is completely interactive (so it's up to the user and the permissions of the user to say when the session should finish), it will never be asked. Therefore, this method will return a numeric result, being: - result > 0: it hasn't finished but ask within result seconds. - result == 0: completely interactive, don't ask again - result < 0: it has finished. 
""" with self.session_ids_lock: session_id = self.session_ids.get(lab_session_id, 'no-session-id') if self.extension: url = 'status%s?session_id=%s' % (self.extension, session_id) else: url = '%s/status' % session_id try: response_str = self._request(url) response = json.loads(response_str) return response['should_finish'] except: traceback.print_exc() raise def do_dispose(self, lab_session_id): with self.session_ids_lock: session_id = self.session_ids.get(lab_session_id, 'no-session-id') if self.extension: url = 'action%s?session_id=%s' % (self.extension, session_id) else: url = '%s' % session_id try: response = self._request_json_dict(url, "dispose", { 'action': 'delete', }) wrapped_response = { Coordinator.FINISH_FINISHED_MESSAGE: True, Coordinator.FINISH_DATA_MESSAGE: "" } if response: finished = response.get(Coordinator.FINISH_FINISHED_MESSAGE) if finished is not None: wrapped_response[Coordinator.FINISH_FINISHED_MESSAGE] = finished data = response.get(Coordinator.FINISH_DATA_MESSAGE) if data is not None: wrapped_response[Coordinator.FINISH_FINISHED_MESSAGE] = data ask_again = response.get(Coordinator.FINISH_ASK_AGAIN_MESSAGE) if ask_again is not None: wrapped_response[Coordinator.FINISH_ASK_AGAIN_MESSAGE] = ask_again return json.dumps(wrapped_response) except: traceback.print_exc() raise
0.356783
0.07088
import json import copy from collections import defaultdict import ODWN_reader def convert(old_json): new_json = {"lus" : []} agent_methods = defaultdict(int) agents = defaultdict(int) methods = defaultdict(int) match_rels = defaultdict(int) for old_lu_info in old_json['lus']: new_lu_info = copy.deepcopy(old_lu_info) method = old_lu_info['provenance'] del new_lu_info['optional_lu_attrs'] agent_method = old_lu_info['optional_lu_attrs'].get('Method') if agent_method is None: continue # get agent and method if method == 'manual': agent = 'ThomasKlein' elif method == 'automatic': split = agent_method.split('_') agent = split[0] if len(split) == 2: method = split[1] else: raise Exception(f'method not known: {method}') new_lu_info['agent'] = agent new_lu_info['provenance'] = method # get rbn lu id and rel match optional_lu_attrs = old_lu_info['optional_lu_attrs'] rbn_sense_id = optional_lu_attrs.get('RBN_LU_ID') assert rbn_sense_id is not None, old_lu_info rel_match = optional_lu_attrs.get('RBN_matching_relation') if rel_match == 'near_equivalence': skos_rel = 'closeMatch' elif rel_match == 'equivalence': skos_rel = 'exactMatch' assert rel_match is not None, old_lu_info skos_predicate_to_external_references = { skos_rel: [ODWN_reader.senseid_to_uri[rbn_sense_id]] } new_lu_info['skos_predicate_to_external_references'] = skos_predicate_to_external_references agents[agent] += 1 agent_methods[(agent, method)] += 1 methods[method] += 1 match_rels[rel_match] += 1 new_json['lus'].append(new_lu_info) print() print(f'agent and methods: {agent_methods}') print(f'agents: {agents}') print(f'methods: {methods}') print(f'matching rels: {match_rels}') return new_json old_json_path = '../res/json/iterations_1_2.json' new_json_path = '../res/json/iterations_1_2_v2.json' old_json = json.load(open(old_json_path)) # convert new_json = convert(old_json=old_json) with open(new_json_path, 'w') as outfile: json.dump(new_json, outfile, indent=4, sort_keys=True)
src/convert_iterations_1_and_2_to_new_version.py
import json import copy from collections import defaultdict import ODWN_reader def convert(old_json): new_json = {"lus" : []} agent_methods = defaultdict(int) agents = defaultdict(int) methods = defaultdict(int) match_rels = defaultdict(int) for old_lu_info in old_json['lus']: new_lu_info = copy.deepcopy(old_lu_info) method = old_lu_info['provenance'] del new_lu_info['optional_lu_attrs'] agent_method = old_lu_info['optional_lu_attrs'].get('Method') if agent_method is None: continue # get agent and method if method == 'manual': agent = 'ThomasKlein' elif method == 'automatic': split = agent_method.split('_') agent = split[0] if len(split) == 2: method = split[1] else: raise Exception(f'method not known: {method}') new_lu_info['agent'] = agent new_lu_info['provenance'] = method # get rbn lu id and rel match optional_lu_attrs = old_lu_info['optional_lu_attrs'] rbn_sense_id = optional_lu_attrs.get('RBN_LU_ID') assert rbn_sense_id is not None, old_lu_info rel_match = optional_lu_attrs.get('RBN_matching_relation') if rel_match == 'near_equivalence': skos_rel = 'closeMatch' elif rel_match == 'equivalence': skos_rel = 'exactMatch' assert rel_match is not None, old_lu_info skos_predicate_to_external_references = { skos_rel: [ODWN_reader.senseid_to_uri[rbn_sense_id]] } new_lu_info['skos_predicate_to_external_references'] = skos_predicate_to_external_references agents[agent] += 1 agent_methods[(agent, method)] += 1 methods[method] += 1 match_rels[rel_match] += 1 new_json['lus'].append(new_lu_info) print() print(f'agent and methods: {agent_methods}') print(f'agents: {agents}') print(f'methods: {methods}') print(f'matching rels: {match_rels}') return new_json old_json_path = '../res/json/iterations_1_2.json' new_json_path = '../res/json/iterations_1_2_v2.json' old_json = json.load(open(old_json_path)) # convert new_json = convert(old_json=old_json) with open(new_json_path, 'w') as outfile: json.dump(new_json, outfile, indent=4, sort_keys=True)
0.172694
0.159774
import pytest import uuid from flask import json, url_for from tests.conftest import create_authorization_header from tests.db import create_article sample_articles = [ { "id": "1", "title": "Forty Years Fighting Racism and Intolerance", "author": "<NAME>", "content": """<h2>A century with no solidarity</h2>\r\n One of the worst plagues that the twentieth century has had to \r\n bear is racial discrimination.""", "entrydate": "2015-11-01" }, { "id": "2", "title": "Modern Mythology", "author": "<NAME>", "content": """Despite their universal existence in all civilizations and all \r\ntimes of history, myths have often been scoffed at and regarded as old wives\u2019 \r\ntales.""", "entrydate": "2016-01-30" }, ] class WhenGettingArticles: def it_returns_all_articles(self, client, sample_article, db_session): response = client.get( url_for('articles.get_articles'), headers=[create_authorization_header()] ) assert response.status_code == 200 data = json.loads(response.get_data(as_text=True)) assert len(data) == 1 assert data[0]['id'] == str(sample_article.id) def it_returns_all_articles_summary(self, client, sample_article, db_session): response = client.get( url_for('articles.get_articles_summary'), headers=[create_authorization_header()] ) assert response.status_code == 200 data = json.loads(response.get_data(as_text=True)) assert len(data) == 1 assert data[0]['id'] == str(sample_article.id) def it_returns_up_to_4_articles_summary(self, client, sample_article, db_session): create_article(title='test 1') create_article(title='test 2') create_article(title='test 3') create_article(title='test 4') create_article(title='test 5') response = client.get( url_for('articles.get_articles_summary'), headers=[create_authorization_header()] ) assert response.status_code == 200 data = json.loads(response.get_data(as_text=True)) assert len(data) == 5 def it_returns_selected_article_summary(self, client, sample_article, db_session): article_1 = create_article(title='test 1') 
create_article(title='test 2') article_ids = "{},{}".format(sample_article.id, article_1.id) response = client.get( url_for('articles.get_articles_summary', ids=article_ids), headers=[create_authorization_header()] ) assert response.status_code == 200 data = json.loads(response.get_data(as_text=True)) assert len(data) == 2 assert set([str(sample_article.id), str(article_1.id)]) == set(article_ids.split(',')) class WhenGettingArticleByID: def it_returns_correct_article(self, client, sample_article, db_session): response = client.get( url_for('article.get_article_by_id', article_id=str(sample_article.id)), headers=[create_authorization_header()] ) assert response.status_code == 200 json_resp = json.loads(response.get_data(as_text=True)) assert json_resp['id'] == str(sample_article.id) class WhenPostingImportArticles(object): def it_creates_articles_for_imported_articles(self, client, db_session): response = client.post( url_for('articles.import_articles'), data=json.dumps(sample_articles), headers=[('Content-Type', 'application/json'), create_authorization_header()] ) assert response.status_code == 201 json_articles = json.loads(response.get_data(as_text=True))['articles'] assert len(json_articles) == len(sample_articles) for i in range(0, len(sample_articles) - 1): assert json_articles[i]["old_id"] == int(sample_articles[i]["id"]) assert json_articles[i]["title"] == sample_articles[i]["title"] assert json_articles[i]["author"] == sample_articles[i]["author"] assert json_articles[i]["content"] == sample_articles[i]["content"] assert json_articles[i]["created_at"] == sample_articles[i]["entrydate"] def it_does_not_create_article_for_imported_articles_with_duplicates(self, client, db_session): duplicate_article = { "id": "1", "title": "Forty Years Fighting Racism and Intolerance", "author": "<NAME>", "content": """<h2>A century with no solidarity</h2>\r\n One of the worst plagues that the twentieth century has had to \r\n bear is racial discrimination.""", "entrydate": 
"2015-11-01" }, sample_articles.extend(duplicate_article) response = client.post( url_for('articles.import_articles'), data=json.dumps(sample_articles), headers=[('Content-Type', 'application/json'), create_authorization_header()] ) assert response.status_code == 201 json_articles = json.loads(response.get_data(as_text=True))['articles'] assert len(json_articles) == len(sample_articles) - 1 # don't add in duplicate article for i in range(0, len(sample_articles) - 1): assert json_articles[i]["old_id"] == int(sample_articles[i]["id"]) assert json_articles[i]["title"] == sample_articles[i]["title"] assert json_articles[i]["author"] == sample_articles[i]["author"] assert json_articles[i]["content"] == sample_articles[i]["content"] assert json_articles[i]["created_at"] == sample_articles[i]["entrydate"] class WhenPostingUpdateArticle: def it_updates_an_article(self, client, db_session, sample_article): data = { 'title': 'Updated', 'image_filename': 'new_filename.jpg' } response = client.post( url_for('article.update_article_by_old_id', old_id=sample_article.old_id), data=json.dumps(data), headers=[('Content-Type', 'application/json'), create_authorization_header()] ) assert response.status_code == 200 assert response.json['image_filename'] == data['image_filename']
tests/app/routes/articles/test_rest.py
import pytest import uuid from flask import json, url_for from tests.conftest import create_authorization_header from tests.db import create_article sample_articles = [ { "id": "1", "title": "Forty Years Fighting Racism and Intolerance", "author": "<NAME>", "content": """<h2>A century with no solidarity</h2>\r\n One of the worst plagues that the twentieth century has had to \r\n bear is racial discrimination.""", "entrydate": "2015-11-01" }, { "id": "2", "title": "Modern Mythology", "author": "<NAME>", "content": """Despite their universal existence in all civilizations and all \r\ntimes of history, myths have often been scoffed at and regarded as old wives\u2019 \r\ntales.""", "entrydate": "2016-01-30" }, ] class WhenGettingArticles: def it_returns_all_articles(self, client, sample_article, db_session): response = client.get( url_for('articles.get_articles'), headers=[create_authorization_header()] ) assert response.status_code == 200 data = json.loads(response.get_data(as_text=True)) assert len(data) == 1 assert data[0]['id'] == str(sample_article.id) def it_returns_all_articles_summary(self, client, sample_article, db_session): response = client.get( url_for('articles.get_articles_summary'), headers=[create_authorization_header()] ) assert response.status_code == 200 data = json.loads(response.get_data(as_text=True)) assert len(data) == 1 assert data[0]['id'] == str(sample_article.id) def it_returns_up_to_4_articles_summary(self, client, sample_article, db_session): create_article(title='test 1') create_article(title='test 2') create_article(title='test 3') create_article(title='test 4') create_article(title='test 5') response = client.get( url_for('articles.get_articles_summary'), headers=[create_authorization_header()] ) assert response.status_code == 200 data = json.loads(response.get_data(as_text=True)) assert len(data) == 5 def it_returns_selected_article_summary(self, client, sample_article, db_session): article_1 = create_article(title='test 1') 
create_article(title='test 2') article_ids = "{},{}".format(sample_article.id, article_1.id) response = client.get( url_for('articles.get_articles_summary', ids=article_ids), headers=[create_authorization_header()] ) assert response.status_code == 200 data = json.loads(response.get_data(as_text=True)) assert len(data) == 2 assert set([str(sample_article.id), str(article_1.id)]) == set(article_ids.split(',')) class WhenGettingArticleByID: def it_returns_correct_article(self, client, sample_article, db_session): response = client.get( url_for('article.get_article_by_id', article_id=str(sample_article.id)), headers=[create_authorization_header()] ) assert response.status_code == 200 json_resp = json.loads(response.get_data(as_text=True)) assert json_resp['id'] == str(sample_article.id) class WhenPostingImportArticles(object): def it_creates_articles_for_imported_articles(self, client, db_session): response = client.post( url_for('articles.import_articles'), data=json.dumps(sample_articles), headers=[('Content-Type', 'application/json'), create_authorization_header()] ) assert response.status_code == 201 json_articles = json.loads(response.get_data(as_text=True))['articles'] assert len(json_articles) == len(sample_articles) for i in range(0, len(sample_articles) - 1): assert json_articles[i]["old_id"] == int(sample_articles[i]["id"]) assert json_articles[i]["title"] == sample_articles[i]["title"] assert json_articles[i]["author"] == sample_articles[i]["author"] assert json_articles[i]["content"] == sample_articles[i]["content"] assert json_articles[i]["created_at"] == sample_articles[i]["entrydate"] def it_does_not_create_article_for_imported_articles_with_duplicates(self, client, db_session): duplicate_article = { "id": "1", "title": "Forty Years Fighting Racism and Intolerance", "author": "<NAME>", "content": """<h2>A century with no solidarity</h2>\r\n One of the worst plagues that the twentieth century has had to \r\n bear is racial discrimination.""", "entrydate": 
"2015-11-01" }, sample_articles.extend(duplicate_article) response = client.post( url_for('articles.import_articles'), data=json.dumps(sample_articles), headers=[('Content-Type', 'application/json'), create_authorization_header()] ) assert response.status_code == 201 json_articles = json.loads(response.get_data(as_text=True))['articles'] assert len(json_articles) == len(sample_articles) - 1 # don't add in duplicate article for i in range(0, len(sample_articles) - 1): assert json_articles[i]["old_id"] == int(sample_articles[i]["id"]) assert json_articles[i]["title"] == sample_articles[i]["title"] assert json_articles[i]["author"] == sample_articles[i]["author"] assert json_articles[i]["content"] == sample_articles[i]["content"] assert json_articles[i]["created_at"] == sample_articles[i]["entrydate"] class WhenPostingUpdateArticle: def it_updates_an_article(self, client, db_session, sample_article): data = { 'title': 'Updated', 'image_filename': 'new_filename.jpg' } response = client.post( url_for('article.update_article_by_old_id', old_id=sample_article.old_id), data=json.dumps(data), headers=[('Content-Type', 'application/json'), create_authorization_header()] ) assert response.status_code == 200 assert response.json['image_filename'] == data['image_filename']
0.498535
0.47859
# This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import logging from abc import ABC from typing import Any, Dict, Tuple, Type import torch import tqdm from mmf.common.meter import Meter from mmf.common.report import Report from mmf.common.sample import to_device from mmf.utils.distributed import is_master logger = logging.getLogger(__name__) class TrainerEvaluationLoopMixin(ABC): def evaluation_loop( self, loader, use_tqdm: bool = False, single_batch: bool = False ) -> Tuple[Dict[str, Any], Type[Meter]]: meter = Meter() with torch.no_grad(): self.model.eval() disable_tqdm = not use_tqdm or not is_master() combined_report = None for batch in tqdm.tqdm(loader, disable=disable_tqdm): report = self._forward(batch) self.update_meter(report, meter) # accumulate necessary params for metric calculation if combined_report is None: combined_report = report else: combined_report.accumulate_tensor_fields( report, self.metrics.required_params ) combined_report.batch_size += report.batch_size if single_batch is True: break combined_report.metrics = self.metrics(combined_report, combined_report) self.update_meter(combined_report, meter, eval_mode=True) # enable train mode again self.model.train() _print_psnr_ssim_perc_sim(meter) return combined_report, meter def prediction_loop(self, dataset_type: str) -> None: reporter = self.dataset_loader.get_test_reporter(dataset_type) with torch.no_grad(): self.model.eval() logger.info(f"Starting {dataset_type} inference predictions") while reporter.next_dataset(): dataloader = reporter.get_dataloader() for batch in tqdm.tqdm(dataloader): prepared_batch = reporter.prepare_batch(batch) prepared_batch = to_device(prepared_batch, torch.device("cuda")) model_output = self.model(prepared_batch) report = Report(prepared_batch, model_output) reporter.add_to_report(report, self.model) logger.info("Finished predicting") self.model.train() def 
_print_psnr_ssim_perc_sim(meter): meters = meter.meters keys_to_print = [ "PSNR", "PSNR_InVis", "PSNR_Vis", "SSIM", "SSIM_InVis", "SSIM_Vis", "PercSim", "PercSim_InVis", "PercSim_Vis", ] key_print_list = [] val_print_list = [] for k_print in keys_to_print: for k, v in meters.items(): if k.endswith(k_print): key_print_list.append(k) val_print_list.append(f"{v.global_avg:.4f}") if len(key_print_list) > 0: print('\n') print('-' * 80) print('copy-paste metrics:') print(','.join(key_print_list)) print(','.join(val_print_list)) print('-' * 80) print('\n')
mmf/trainers/core/evaluation_loop.py
# This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import logging from abc import ABC from typing import Any, Dict, Tuple, Type import torch import tqdm from mmf.common.meter import Meter from mmf.common.report import Report from mmf.common.sample import to_device from mmf.utils.distributed import is_master logger = logging.getLogger(__name__) class TrainerEvaluationLoopMixin(ABC): def evaluation_loop( self, loader, use_tqdm: bool = False, single_batch: bool = False ) -> Tuple[Dict[str, Any], Type[Meter]]: meter = Meter() with torch.no_grad(): self.model.eval() disable_tqdm = not use_tqdm or not is_master() combined_report = None for batch in tqdm.tqdm(loader, disable=disable_tqdm): report = self._forward(batch) self.update_meter(report, meter) # accumulate necessary params for metric calculation if combined_report is None: combined_report = report else: combined_report.accumulate_tensor_fields( report, self.metrics.required_params ) combined_report.batch_size += report.batch_size if single_batch is True: break combined_report.metrics = self.metrics(combined_report, combined_report) self.update_meter(combined_report, meter, eval_mode=True) # enable train mode again self.model.train() _print_psnr_ssim_perc_sim(meter) return combined_report, meter def prediction_loop(self, dataset_type: str) -> None: reporter = self.dataset_loader.get_test_reporter(dataset_type) with torch.no_grad(): self.model.eval() logger.info(f"Starting {dataset_type} inference predictions") while reporter.next_dataset(): dataloader = reporter.get_dataloader() for batch in tqdm.tqdm(dataloader): prepared_batch = reporter.prepare_batch(batch) prepared_batch = to_device(prepared_batch, torch.device("cuda")) model_output = self.model(prepared_batch) report = Report(prepared_batch, model_output) reporter.add_to_report(report, self.model) logger.info("Finished predicting") self.model.train() def 
_print_psnr_ssim_perc_sim(meter): meters = meter.meters keys_to_print = [ "PSNR", "PSNR_InVis", "PSNR_Vis", "SSIM", "SSIM_InVis", "SSIM_Vis", "PercSim", "PercSim_InVis", "PercSim_Vis", ] key_print_list = [] val_print_list = [] for k_print in keys_to_print: for k, v in meters.items(): if k.endswith(k_print): key_print_list.append(k) val_print_list.append(f"{v.global_avg:.4f}") if len(key_print_list) > 0: print('\n') print('-' * 80) print('copy-paste metrics:') print(','.join(key_print_list)) print(','.join(val_print_list)) print('-' * 80) print('\n')
0.857932
0.189784
def check_empty(value, default='', post=', '): if value is None: return default elif value == '': return default elif value == ' ': return default else: return "{}{}".format(value, post) status_lookup = {'AC': 'Active', 'CN': 'Cancelled', 'CO': 'Converted Out', 'CS': 'Consolidation', 'CV': 'Converted', 'D': 'Dissolved', 'DS': 'Unknown', 'ER': 'Expired Reservation', 'EX': 'Expired', 'FF': 'Forfeited', 'M': 'Merged', 'PC': 'Pending Conversion', 'PM': 'Pending Merger', 'RC': 'Reserved Cancel', 'RD': 'Redomesticated', 'RE': 'Recorded', 'RG': 'Registered', 'RN': 'Renunciated', 'RS': 'Reserved', 'RV': 'Revoked', 'W': 'W Second', 'WD': 'Withdrawn'} subtype_lookup = {' ': 'Unknown', 'B': 'Benefit Corporation', 'C': 'Corporation', 'D': 'Domestic Limited Partnership', 'F': 'Foreign Limited Partnership', 'G': 'Domestic Limited Liability Company', 'H': 'Foreign Limited Liability Company', 'I': 'Domestic Limited Liability Partnership', 'J': 'Foreign Limited Liability Partnership', 'K': 'General Partnership', 'L': 'Domestic Statutory Trust', 'M': 'Foreign Statutory Trust', 'O': 'Other', 'P': 'Domestic Stock Corporation', 'Q': 'Foreign Stock Corporation', 'R': 'Domestic Non-Stock Corporation', 'S': 'Foreign Non-Stock Corporation', 'T': 'All Entities', 'U': 'Domestic Credit Union Stock', 'V': 'Domestic Credit Union Non-Stock', 'W': 'Domestic Bank Stock', 'X': 'Domestic Bank Non-Stock', 'Y': 'Domestic Insurance Stock', 'Z': 'Domestic Insurance Non-Stock'} corp_type_lookup = { '': None, 'B': 'Benefit', 'S': 'Stock', 'N': 'Non-Stock' } origin_lookup = { 'R': 'Regular', 'S': 'Special Charter' } category_lookup = { "BK": "Bank", "CU": "Credit Union", "IN": "Insurance", "R": "Religious", "C": "Cemetery" }
web/sots/helpers.py
def check_empty(value, default='', post=', '): if value is None: return default elif value == '': return default elif value == ' ': return default else: return "{}{}".format(value, post) status_lookup = {'AC': 'Active', 'CN': 'Cancelled', 'CO': 'Converted Out', 'CS': 'Consolidation', 'CV': 'Converted', 'D': 'Dissolved', 'DS': 'Unknown', 'ER': 'Expired Reservation', 'EX': 'Expired', 'FF': 'Forfeited', 'M': 'Merged', 'PC': 'Pending Conversion', 'PM': 'Pending Merger', 'RC': 'Reserved Cancel', 'RD': 'Redomesticated', 'RE': 'Recorded', 'RG': 'Registered', 'RN': 'Renunciated', 'RS': 'Reserved', 'RV': 'Revoked', 'W': 'W Second', 'WD': 'Withdrawn'} subtype_lookup = {' ': 'Unknown', 'B': 'Benefit Corporation', 'C': 'Corporation', 'D': 'Domestic Limited Partnership', 'F': 'Foreign Limited Partnership', 'G': 'Domestic Limited Liability Company', 'H': 'Foreign Limited Liability Company', 'I': 'Domestic Limited Liability Partnership', 'J': 'Foreign Limited Liability Partnership', 'K': 'General Partnership', 'L': 'Domestic Statutory Trust', 'M': 'Foreign Statutory Trust', 'O': 'Other', 'P': 'Domestic Stock Corporation', 'Q': 'Foreign Stock Corporation', 'R': 'Domestic Non-Stock Corporation', 'S': 'Foreign Non-Stock Corporation', 'T': 'All Entities', 'U': 'Domestic Credit Union Stock', 'V': 'Domestic Credit Union Non-Stock', 'W': 'Domestic Bank Stock', 'X': 'Domestic Bank Non-Stock', 'Y': 'Domestic Insurance Stock', 'Z': 'Domestic Insurance Non-Stock'} corp_type_lookup = { '': None, 'B': 'Benefit', 'S': 'Stock', 'N': 'Non-Stock' } origin_lookup = { 'R': 'Regular', 'S': 'Special Charter' } category_lookup = { "BK": "Bank", "CU": "Credit Union", "IN": "Insurance", "R": "Religious", "C": "Cemetery" }
0.524151
0.246715
import numpy as np import cv2 import glob import tqdm import multiprocessing import random import orderedset class Encoder: """ A simple class for one hot encoding and decoding labels. """ def __init__(self, labels): """ :param labels: A complete list of labels to be used to define one-hot encoding. Position in list corresponds to position in encoding. :return: """ self.labels = labels def encode(self, label): """ Given a label, return a one-hot encoding for that label :param label: :return: a one-hot encoded vector """ encoding = np.zeros([len(self.labels), 1]) index = self.labels.index(label) encoding[index] = 1 return encoding def decode(self, encoding): """ Given encoding, return a label it represents :param encoding: one-hot encoded vector :return: label encoding represents """ index = np.argmax(encoding) return self.labels[index] def get_images(path): """ Given a path, return a list of grayscale images found at that path :param path: Path to images :return: A list of grayscale images """ images_paths = glob.glob(path + "/*.jpg") images = [cv2.imread(image_path) for image_path in images_paths] return [cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) for image in images] def get_data_dictionary(base_path, labels): """ Given a base path and a list of labels, return a data dictionary. Base path is assumed to contain subdirectories, one subdirectory for each label. Each subdirectory is named after the label and contains jpg images with data for that label. :param base_path: Parent directory for all labels data :param labels: A list of labels for which data should be read :return: A dictionary of form {label : images list} """ print("Loading data") with multiprocessing.Pool() as pool: # Get a list of futures queued on the pool results = [pool.apply_async(get_images, (base_path + label + "/",)) for label in labels] # Build a results dictionary using labels as key and results of futures, which are # evaluated to lists of data for that label, as values. 
Wrap iteration over labels in tqdm # to add a printed progress bar to terminal output data_dictionary = {label: result.get() for label, result in zip(tqdm.tqdm(labels), results)} return data_dictionary def get_training_test_data_split(data_dictionary, split_ratio): """ Given a data dictionary of structure {label : images list}, return a training list and a test list. Each list element is made up of (image, label) tuple. Split ratio determines ratio of data samples that are put into training set. Remaining items are put into test set. :param data_dictionary: :param split_ratio: :return: """ training_data = [] test_data = [] for label, images in data_dictionary.items(): random.shuffle(images) training_size = int(len(images) * split_ratio) data = [(image, label) for image in images] training_data.extend(data[:training_size]) test_data.extend(data[training_size:]) return training_data, test_data def get_data_batches(data, batch_size): """ Given a data in a list, return a list of batches, each batch of size batch_size. If len(data) doesn't divide evenly by batch_size, leftover items are not returned. :param data: list of data items :param batch_size: size or return batches :return: list of batches, each batch a list of data elements """ last_batch_start_index = int(len(data) / batch_size) * batch_size batched_data = [data[index: index + batch_size] for index in range(0, last_batch_start_index, batch_size)] return batched_data def data_tuples_to_matrices(data): """ Given a list of tuples, where each tuple is made up of numpy column vectors, convert them to matrices, such that corresponding elements from each tuple are laid out in matrices columns-wise. E.g. if data is a 5-elements list of 10x1 and 20x1 tuples, the result is a single tuple of 10x5 and 20x5 matrices. :param data: a list of tuples. Each tuple contains numpy column vectors :return: a list of 2D numpy arrays. 
""" data_length = len(data) matrices_count = len(data[0]) matrices = [] for matrix_index in range(matrices_count): # Our reference for matrix size and type vector_length = data[0][matrix_index].shape[0] vector_type = data[0][matrix_index].dtype # Allocate matrix matrix = np.zeros(shape=[vector_length, data_length]).astype(vector_type) for data_index in range(data_length): matrix[:, data_index] = data[data_index][matrix_index].reshape([vector_length]) matrices.append(matrix) return matrices def sigmoid(z): try: # Do a bit of acrobatics to ensure we don't compute values that lead to division # by infinity. For inputs that would lead to that, simply return 0 output = np.zeros(z.shape) indices = np.where(-50 < z) output[indices] = 1 / (1 + np.exp(-z[indices])) return output except RuntimeWarning as problem: print("Runtime warning occurred in sigmoid for input") print(problem) print(z) print("that gives output") print(output) exit(0) def sigmoid_prime(z): return sigmoid(z) * (1 - sigmoid(z)) def softmax(z): try: # Clip values to sensible range for numerical stability clipped = np.clip(z, -50, 50) return np.exp(clipped) / np.sum(np.exp(clipped), axis=0) except RuntimeWarning as problem: print("Runtime warning occurred in softmax") print(problem) print("For input") print(z) exit(0) def relu(z): return z * (z > 0) def relu_prime(z): return (z > 0).astype(np.float32) def remove_visually_identical_characters(characters): """ Some Japanese characters, especially some hiragana and katakana, look the same. 
So if our set contains both, remove one of them :param characters: :return: characters with visually identical doubles removed and ordered of characters preserver """ characters = list(orderedset.OrderedSet(characters)) # There characters have different unicodes, but visually they are identical identical_characters_list = [ ['ぺ', 'ペ'], ['ベ', 'べ'], ['ヘ', 'へ'], ] for identical_characters in identical_characters_list: try: first_index = characters.index(identical_characters[0]) second_index = characters.index(identical_characters[1]) index_to_pop = first_index if first_index > second_index else second_index characters.pop(index_to_pop) except ValueError: # It's okay if element doesn't exist in the list. # In fact it means we have no duplicates to remove pass return characters
net/utilities.py
import numpy as np
import cv2
import glob
import tqdm
import multiprocessing
import random
import orderedset


class Encoder:
    """
    A simple class for one hot encoding and decoding labels.
    """

    def __init__(self, labels):
        """
        :param labels: A complete list of labels to be used to define one-hot encoding.
         Position in list corresponds to position in encoding.
        :return:
        """
        self.labels = labels

    def encode(self, label):
        """
        Given a label, return a one-hot encoding for that label
        :param label:
        :return: a one-hot encoded column vector of shape [len(labels), 1]
        """
        encoding = np.zeros([len(self.labels), 1])
        # list.index raises ValueError for labels this encoder doesn't know about
        index = self.labels.index(label)
        encoding[index] = 1
        return encoding

    def decode(self, encoding):
        """
        Given encoding, return a label it represents
        :param encoding: one-hot encoded vector
        :return: label encoding represents
        """
        index = np.argmax(encoding)
        return self.labels[index]


def get_images(path):
    """
    Given a path, return a list of grayscale images found at that path
    :param path: Path to images
    :return: A list of grayscale images
    """
    images_paths = glob.glob(path + "/*.jpg")
    images = [cv2.imread(image_path) for image_path in images_paths]
    return [cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) for image in images]


def get_data_dictionary(base_path, labels):
    """
    Given a base path and a list of labels, return a data dictionary.
    Base path is assumed to contain subdirectories, one subdirectory for each label.
    Each subdirectory is named after the label and contains jpg images with data
    for that label.
    :param base_path: Parent directory for all labels data
    :param labels: A list of labels for which data should be read
    :return: A dictionary of form {label : images list}
    """
    print("Loading data")

    with multiprocessing.Pool() as pool:
        # Queue one asynchronous read per label on the pool
        results = [pool.apply_async(get_images, (base_path + label + "/",))
                   for label in labels]

        # Build a results dictionary using labels as keys and results of futures,
        # which are evaluated to lists of data for that label, as values.
        # Collecting inside the `with` block matters: exiting the pool context
        # terminates the workers, which would break still-pending futures.
        # Wrapping the iteration over labels in tqdm adds a printed progress bar
        # to terminal output.
        data_dictionary = {label: result.get()
                           for label, result in zip(tqdm.tqdm(labels), results)}

    return data_dictionary


def get_training_test_data_split(data_dictionary, split_ratio):
    """
    Given a data dictionary of structure {label : images list}, return a training
    list and a test list. Each list element is made up of an (image, label) tuple.
    Split ratio determines ratio of data samples that are put into training set.
    Remaining items are put into test set.
    :param data_dictionary: dictionary of form {label : images list}
    :param split_ratio: fraction of each label's images assigned to training data
    :return: (training_data, test_data) tuple, both lists of (image, label) tuples
    """
    training_data = []
    test_data = []

    for label, images in data_dictionary.items():
        # Shuffle so the split isn't biased by on-disk ordering
        random.shuffle(images)
        training_size = int(len(images) * split_ratio)

        data = [(image, label) for image in images]
        training_data.extend(data[:training_size])
        test_data.extend(data[training_size:])

    return training_data, test_data


def get_data_batches(data, batch_size):
    """
    Given data in a list, return a list of batches, each batch of size batch_size.
    If len(data) doesn't divide evenly by batch_size, leftover items are not
    returned.
    :param data: list of data items
    :param batch_size: size of returned batches
    :return: list of batches, each batch a list of data elements
    """
    # Integer floor division avoids the float round-trip of int(len/size)
    last_batch_start_index = (len(data) // batch_size) * batch_size

    batched_data = [data[index: index + batch_size]
                    for index in range(0, last_batch_start_index, batch_size)]
    return batched_data


def data_tuples_to_matrices(data):
    """
    Given a list of tuples, where each tuple is made up of numpy column vectors,
    convert them to matrices, such that corresponding elements from each tuple
    are laid out in matrices column-wise. E.g. if data is a 5-elements list of
    10x1 and 20x1 tuples, the result is a list of 10x5 and 20x5 matrices.
    :param data: a list of tuples. Each tuple contains numpy column vectors
    :return: a list of 2D numpy arrays
    """
    data_length = len(data)
    matrices_count = len(data[0])

    matrices = []

    for matrix_index in range(matrices_count):
        # Our reference for matrix size and type
        vector_length = data[0][matrix_index].shape[0]
        vector_type = data[0][matrix_index].dtype

        # Allocate matrix
        matrix = np.zeros(shape=[vector_length, data_length]).astype(vector_type)

        for data_index in range(data_length):
            matrix[:, data_index] = data[data_index][matrix_index].reshape([vector_length])

        matrices.append(matrix)

    return matrices


def sigmoid(z):
    """
    Numerically guarded logistic sigmoid. Outputs 0 for inputs <= -50, where
    exp(-z) would overflow and the true value is vanishingly small anyway.
    :param z: numpy array
    :return: numpy array of activations, same shape as z
    """
    try:
        # Do a bit of acrobatics to ensure we don't compute values that lead to
        # division by infinity. For inputs that would lead to that, simply return 0
        output = np.zeros(z.shape)
        indices = np.where(-50 < z)
        output[indices] = 1 / (1 + np.exp(-z[indices]))
        return output

    except RuntimeWarning as problem:
        # NOTE(review): numpy emits RuntimeWarning as a warning, not an
        # exception, unless np.seterr/warnings filters are configured to raise -
        # confirm the caller sets that up, otherwise this branch never runs.
        print("Runtime warning occurred in sigmoid for input")
        print(problem)
        print(z)
        print("that gives output")
        print(output)
        exit(0)


def sigmoid_prime(z):
    """
    Derivative of the sigmoid function.
    :param z: numpy array
    :return: numpy array of derivatives, same shape as z
    """
    return sigmoid(z) * (1 - sigmoid(z))


def softmax(z):
    """
    Numerically stable softmax over axis 0 (one column per sample).
    :param z: numpy array of pre-activations
    :return: numpy array of probabilities, same shape as z
    """
    try:
        # Clip values to sensible range for numerical stability
        clipped = np.clip(z, -50, 50)
        return np.exp(clipped) / np.sum(np.exp(clipped), axis=0)

    except RuntimeWarning as problem:
        # NOTE(review): as in sigmoid, this handler only fires if numpy is
        # configured to raise RuntimeWarning as an exception.
        print("Runtime warning occurred in softmax")
        print(problem)
        print("For input")
        print(z)
        exit(0)


def relu(z):
    """
    Rectified linear unit, element-wise max(0, z).
    :param z: numpy array
    :return: numpy array, same shape as z
    """
    return z * (z > 0)


def relu_prime(z):
    """
    Derivative of relu: 1 where z > 0, else 0.
    :param z: numpy array
    :return: float32 numpy array, same shape as z
    """
    return (z > 0).astype(np.float32)


def remove_visually_identical_characters(characters):
    """
    Some Japanese characters, especially some hiragana and katakana, look the
    same. So if our set contains both, remove one of them.
    :param characters: iterable of characters
    :return: characters list with visually identical doubles removed and order
     of remaining characters preserved
    """
    # OrderedSet removes exact duplicates while keeping first-seen order
    characters = list(orderedset.OrderedSet(characters))

    # These characters have different unicodes, but visually they are identical
    identical_characters_list = [
        ['ぺ', 'ペ'],
        ['ベ', 'べ'],
        ['ヘ', 'へ'],
    ]

    for identical_characters in identical_characters_list:
        try:
            first_index = characters.index(identical_characters[0])
            second_index = characters.index(identical_characters[1])

            # Drop whichever of the pair appears later, preserving earlier order
            index_to_pop = first_index if first_index > second_index else second_index
            characters.pop(index_to_pop)

        except ValueError:
            # It's okay if element doesn't exist in the list.
            # In fact it means we have no duplicates to remove
            pass

    return characters
0.783782
0.601974
import random
from typing import List, Optional

from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType


class SpeechDisfluencyPerturbation(SentenceOperation):
    """Perturbs a sentence by inserting spoken-language filler words
    ("um", "uh", ...) at randomly chosen positions between tokens."""

    tasks: List[TaskType] = [
        TaskType.TEXT_CLASSIFICATION,
        TaskType.TEXT_TO_TEXT_GENERATION,
    ]
    languages: List[str] = ["en"]
    keywords = [
        "lexical",
        "noise",
        "rule-based",
        "highly-meaning-preserving",
        "high-precision",
    ]
    filler_words: List[str] = ["um", "uh", "erm", "ah", "er"]
    insert_prob: float

    def __init__(
        self,
        seed: int = 0,
        max_outputs: int = 1,
        insert_prob: float = 0.2,
        filler_words: Optional[List[str]] = None,
    ) -> None:
        """
        :param seed: RNG seed applied at the start of every generate() call
        :param max_outputs: number of perturbed variants to produce
        :param insert_prob: per-position probability of inserting a filler
        :param filler_words: optional custom filler vocabulary; overrides the
         class-level default when given
        """
        super().__init__(seed, max_outputs=max_outputs)
        self.insert_prob = insert_prob
        if filler_words is not None:
            self.filler_words = filler_words

    def generate(self, sentence: str) -> List[str]:
        """Return max_outputs variants of sentence with fillers inserted;
        at least one filler is always added per variant."""
        random.seed(self.seed)
        perturbed_texts = []
        for _ in range(self.max_outputs):
            tokens = sentence.split()

            # Very short inputs get sentinel slots at both ends so fillers can
            # also land before the first / after the last real token.
            short_input = len(tokens) <= 1
            if short_input:
                tokens = [None, *tokens, None]

            # Keep sampling until at least one insertion point is drawn.
            positions: List[int] = []
            while not positions:
                positions = [
                    index
                    for index in range(1, len(tokens))
                    if random.random() < self.insert_prob
                ]

            # Insert back-to-front so earlier indices remain valid.
            for index in reversed(positions):
                tokens.insert(index, random.choice(self.filler_words))

            if short_input:
                tokens = tokens[1:-1]

            perturbed_texts.append(" ".join(tokens))

        return perturbed_texts


"""
# Sample code to demonstrate usage.
if __name__ == "__main__":
    import json

    from TestRunner import convert_to_snake_case

    tf = SpeechDisfluencyPerturbation(max_outputs=1)
    test_cases = []
    for sentence in [
        "Andrew finally returned the French book to Chris that I bought last week",
        "Sentences with gapping, such as Paul likes coffee and Mary tea, lack an overt predicate to indicate the relation between two or more arguments.",
        "Alice in Wonderland is a 2010 American live-action/animated dark fantasy adventure film",
        "Ujjal Dev Dosanjh served as 33rd Premier of British Columbia from 2000 to 2001",
        "Neuroplasticity is a continuous processing allowing short-term, medium-term, and long-term remodeling of the neuronosynaptic organization.",
        "Yes",
        "Of course",
        "",
    ]:
        test_cases.append(
            {
                "class": tf.name(),
                "inputs": {"sentence": sentence},
                "outputs": [{"sentence": o} for o in tf.generate(sentence)],
            }
        )
    for filler_words, sentence in [
        (["oof", "agh"], "Where did you learn how to drive again?"),
        (["eek"], "I'm deathly afraid of mice!"),
        (["hmph", "ahem", "wheeze"], "I've had a sore throat all week."),
    ]:
        tf2 = SpeechDisfluencyPerturbation(
            max_outputs=1, filler_words=filler_words
        )
        test_cases.append(
            {
                "class": tf2.name(),
                "args": {"filler_words": filler_words},
                "inputs": {"sentence": sentence},
                "outputs": [{"sentence": o} for o in tf2.generate(sentence)],
            }
        )
    json_file = {
        "type": convert_to_snake_case(tf.name()),
        "test_cases": test_cases,
    }
    print(json.dumps(json_file, indent=2))
"""
transformations/speech_disfluency_perturbation/transformation.py
import random
from typing import List, Optional

from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType


class SpeechDisfluencyPerturbation(SentenceOperation):
    """Perturbs a sentence by inserting spoken-language filler words
    ("um", "uh", ...) at randomly chosen positions between tokens."""

    tasks: List[TaskType] = [
        TaskType.TEXT_CLASSIFICATION,
        TaskType.TEXT_TO_TEXT_GENERATION,
    ]
    languages: List[str] = ["en"]
    keywords = [
        "lexical",
        "noise",
        "rule-based",
        "highly-meaning-preserving",
        "high-precision",
    ]
    filler_words: List[str] = ["um", "uh", "erm", "ah", "er"]
    insert_prob: float

    def __init__(
        self,
        seed: int = 0,
        max_outputs: int = 1,
        insert_prob: float = 0.2,
        filler_words: Optional[List[str]] = None,
    ) -> None:
        """
        :param seed: RNG seed applied at the start of every generate() call
        :param max_outputs: number of perturbed variants to produce
        :param insert_prob: per-position probability of inserting a filler
        :param filler_words: optional custom filler vocabulary; overrides the
         class-level default when given
        """
        super().__init__(seed, max_outputs=max_outputs)
        self.insert_prob = insert_prob
        if filler_words is not None:
            self.filler_words = filler_words

    def generate(self, sentence: str) -> List[str]:
        """Return max_outputs variants of sentence with fillers inserted;
        at least one filler is always added per variant."""
        random.seed(self.seed)
        perturbed_texts = []
        for _ in range(self.max_outputs):
            tokens = sentence.split()

            # Very short inputs get sentinel slots at both ends so fillers can
            # also land before the first / after the last real token.
            short_input = len(tokens) <= 1
            if short_input:
                tokens = [None, *tokens, None]

            # Keep sampling until at least one insertion point is drawn.
            positions: List[int] = []
            while not positions:
                positions = [
                    index
                    for index in range(1, len(tokens))
                    if random.random() < self.insert_prob
                ]

            # Insert back-to-front so earlier indices remain valid.
            for index in reversed(positions):
                tokens.insert(index, random.choice(self.filler_words))

            if short_input:
                tokens = tokens[1:-1]

            perturbed_texts.append(" ".join(tokens))

        return perturbed_texts


"""
# Sample code to demonstrate usage.
if __name__ == "__main__":
    import json

    from TestRunner import convert_to_snake_case

    tf = SpeechDisfluencyPerturbation(max_outputs=1)
    test_cases = []
    for sentence in [
        "Andrew finally returned the French book to Chris that I bought last week",
        "Sentences with gapping, such as Paul likes coffee and Mary tea, lack an overt predicate to indicate the relation between two or more arguments.",
        "Alice in Wonderland is a 2010 American live-action/animated dark fantasy adventure film",
        "Ujjal Dev Dosanjh served as 33rd Premier of British Columbia from 2000 to 2001",
        "Neuroplasticity is a continuous processing allowing short-term, medium-term, and long-term remodeling of the neuronosynaptic organization.",
        "Yes",
        "Of course",
        "",
    ]:
        test_cases.append(
            {
                "class": tf.name(),
                "inputs": {"sentence": sentence},
                "outputs": [{"sentence": o} for o in tf.generate(sentence)],
            }
        )
    for filler_words, sentence in [
        (["oof", "agh"], "Where did you learn how to drive again?"),
        (["eek"], "I'm deathly afraid of mice!"),
        (["hmph", "ahem", "wheeze"], "I've had a sore throat all week."),
    ]:
        tf2 = SpeechDisfluencyPerturbation(
            max_outputs=1, filler_words=filler_words
        )
        test_cases.append(
            {
                "class": tf2.name(),
                "args": {"filler_words": filler_words},
                "inputs": {"sentence": sentence},
                "outputs": [{"sentence": o} for o in tf2.generate(sentence)],
            }
        )
    json_file = {
        "type": convert_to_snake_case(tf.name()),
        "test_cases": test_cases,
    }
    print(json.dumps(json_file, indent=2))
"""
0.722918
0.371992
from View.ViewData import ViewData
from View.ViewGraph import ViewGraph
from View.ViewConsole import ViewConsole
from View.ViewCurvefit import ViewCurvefit
from View.ViewTitle import ViewTitle
from Model.Graphic import Graphic
from Model.Curvefit import Curvefit
from Model.DataExtractor import DataExtractor
from PyQt5.QtWidgets import QMainWindow, QTabWidget
from PyQt5 import uic
import os

# Locate the Qt Designer .ui file relative to this module and compile it.
MainWindowPath = os.path.dirname(os.path.realpath(__file__)) + '/ui{}MainWindow.ui'.format(os.sep)
Ui_MainWindow, QtBaseClass = uic.loadUiType(MainWindowPath)


class MainWindow(QMainWindow, Ui_MainWindow):
    """Top-level application window: owns the model objects and the tabbed views."""

    def __init__(self):
        super(MainWindow, self).__init__()
        self.setupUi(self)

        # Model instances shared between several views.
        self.modelGraphic = Graphic()
        self.modelData = DataExtractor()
        self.modelCurvefit = Curvefit()

        # Views must exist before they can be placed in tabs.
        self.createsComponentsAndPointers()
        self.setupWindowTabs()

    def setupWindowTabs(self):
        """Place every view inside a central QTabWidget; tuple order fixes tab order."""
        self.tabWidget = QTabWidget()
        self.setCentralWidget(self.tabWidget)
        for view, caption in ((self.dataView, "Data"),
                              (self.graphView, "Graph"),
                              (self.curvefitView, "Curvefit"),
                              (self.titleView, "Title"),
                              (self.consoleView, "Console")):
            self.tabWidget.addTab(view, caption)

    def createsComponentsAndPointers(self):
        """Instantiate the view components and wire their cross-references."""
        # Components
        self.graphView = ViewGraph(self.modelGraphic, self.modelData)
        self.consoleView = ViewConsole()
        self.dataView = ViewData(self.modelData)
        self.curvefitView = ViewCurvefit(self.modelGraphic, self.modelData, self.modelCurvefit)
        self.titleView = ViewTitle(self.modelGraphic, self.modelData)

        # Every view that reports messages gets a reference to the console.
        for view in (self.curvefitView, self.titleView, self.graphView, self.dataView):
            view.consoleView = self.consoleView

        # Remaining cross-view pointers.
        self.graphView.curvefitView = self.curvefitView
        self.graphView.titleView = self.titleView
        self.dataView.graphView = self.graphView
        self.dataView.curvefitView = self.curvefitView
View/mainWindow.py
from View.ViewData import ViewData
from View.ViewGraph import ViewGraph
from View.ViewConsole import ViewConsole
from View.ViewCurvefit import ViewCurvefit
from View.ViewTitle import ViewTitle
from Model.Graphic import Graphic
from Model.Curvefit import Curvefit
from Model.DataExtractor import DataExtractor
from PyQt5.QtWidgets import QMainWindow, QTabWidget
from PyQt5 import uic
import os

# Locate the Qt Designer .ui file relative to this module and compile it.
MainWindowPath = os.path.dirname(os.path.realpath(__file__)) + '/ui{}MainWindow.ui'.format(os.sep)
Ui_MainWindow, QtBaseClass = uic.loadUiType(MainWindowPath)


class MainWindow(QMainWindow, Ui_MainWindow):
    """Top-level application window: owns the model objects and the tabbed views."""

    def __init__(self):
        super(MainWindow, self).__init__()
        self.setupUi(self)

        # Model instances shared between several views.
        self.modelGraphic = Graphic()
        self.modelData = DataExtractor()
        self.modelCurvefit = Curvefit()

        # Views must exist before they can be placed in tabs.
        self.createsComponentsAndPointers()
        self.setupWindowTabs()

    def setupWindowTabs(self):
        """Place every view inside a central QTabWidget; tuple order fixes tab order."""
        self.tabWidget = QTabWidget()
        self.setCentralWidget(self.tabWidget)
        for view, caption in ((self.dataView, "Data"),
                              (self.graphView, "Graph"),
                              (self.curvefitView, "Curvefit"),
                              (self.titleView, "Title"),
                              (self.consoleView, "Console")):
            self.tabWidget.addTab(view, caption)

    def createsComponentsAndPointers(self):
        """Instantiate the view components and wire their cross-references."""
        # Components
        self.graphView = ViewGraph(self.modelGraphic, self.modelData)
        self.consoleView = ViewConsole()
        self.dataView = ViewData(self.modelData)
        self.curvefitView = ViewCurvefit(self.modelGraphic, self.modelData, self.modelCurvefit)
        self.titleView = ViewTitle(self.modelGraphic, self.modelData)

        # Every view that reports messages gets a reference to the console.
        for view in (self.curvefitView, self.titleView, self.graphView, self.dataView):
            view.consoleView = self.consoleView

        # Remaining cross-view pointers.
        self.graphView.curvefitView = self.curvefitView
        self.graphView.titleView = self.titleView
        self.dataView.graphView = self.graphView
        self.dataView.curvefitView = self.curvefitView
0.500244
0.097133
import mxnet as mx
import net_symbols as syms


def create_block(data, name, num_filter, kernel, pad=0, stride=1, dilate=1,
                 workspace=512, use_global_stats=True, lr_type="alex"):
    """
    Convolution (no bias) followed by batch normalization.
    Layer names follow the ResNet convention: "res" + name / "bn" + name.
    :param data: input symbol
    :param name: layer name suffix, e.g. "2a_branch2a"
    :param num_filter: number of convolution filters
    :param kernel: convolution kernel size
    :param pad: padding
    :param stride: stride
    :param dilate: dilation
    :param workspace: convolution workspace size
    :param use_global_stats: use global statistics in batch norm
    :param lr_type: learning-rate tag understood by net_symbols helpers
    :return: symbol after conv + bn (no activation applied)
    """
    res = syms.conv(data=data, name="res" + name, num_filter=num_filter, pad=pad,
                    kernel=kernel, stride=stride, dilate=dilate, no_bias=True,
                    workspace=workspace, lr_type=lr_type)
    bn = syms.bn(res, name="bn" + name, use_global_stats=use_global_stats,
                 lr_type=lr_type)
    return bn


def create_big_block(data, name, num_filter1, num_filter2, stride=1, dilate=1,
                     pad=1, identity_map=True, workspace=512,
                     use_global_stats=True, lr_type="alex"):
    """
    ResNet bottleneck unit: 1x1 reduce -> 3x3 -> 1x1 expand, plus shortcut.
    :param data: input symbol
    :param name: stage/unit suffix, e.g. "2a"
    :param num_filter1: channels of the two inner (reduce / 3x3) convolutions
    :param num_filter2: output channels of the expanding 1x1 convolution
    :param stride: stride applied in the first 1x1 branch (and projection)
    :param dilate: dilation of the 3x3 convolution
    :param pad: padding of the 3x3 convolution
    :param identity_map: True for identity shortcut; False adds a 1x1
     projection shortcut (needed when shape/stride changes)
    :return: symbol after residual addition and relu
    """
    blocka = create_block(data, name=name + "_branch2a", num_filter=num_filter1,
                          kernel=1, stride=stride, workspace=workspace,
                          use_global_stats=use_global_stats, lr_type=lr_type)
    relu1 = syms.relu(blocka)
    blockb = create_block(relu1, name=name + "_branch2b", num_filter=num_filter1,
                          kernel=3, dilate=dilate, pad=pad, workspace=workspace,
                          use_global_stats=use_global_stats, lr_type=lr_type)
    relu2 = syms.relu(blockb)
    blockc = create_block(relu2, name=name + "_branch2c", num_filter=num_filter2,
                          kernel=1, workspace=workspace,
                          use_global_stats=use_global_stats, lr_type=lr_type)
    if identity_map:
        return syms.relu(data + blockc)
    # Projection shortcut matches channel count / stride when shapes differ
    branch1 = create_block(data, name=name + "_branch1", num_filter=num_filter2,
                           kernel=1, stride=stride, workspace=workspace,
                           use_global_stats=use_global_stats, lr_type=lr_type)
    return syms.relu(branch1 + blockc)


def create_part1(data, workspace=512, use_global_stats=True, lr_type="alex"):
    """
    Stem and stages 2a-3b of ResNet-50.
    Fix: the original immediately shadowed the `data` argument with a fresh
    mx.symbol.Variable("data"); the argument is now actually used. This is
    graph-equivalent for the existing caller, which passes a variable named
    "data".
    :param data: input data symbol
    :return: symbol of the res3b feature map
    """
    conv1 = syms.conv(data, name="conv1", num_filter=64, pad=3, kernel=7,
                      stride=2, workspace=workspace, lr_type=lr_type)
    bn = syms.bn(conv1, name="bn_conv1", use_global_stats=use_global_stats,
                 lr_type=lr_type)
    relu = syms.relu(bn)
    pool1 = syms.maxpool(relu, kernel=3, stride=2, pad=1)
    res2a = create_big_block(pool1, "2a", 64, 256, identity_map=False,
                             workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res2b = create_big_block(res2a, "2b", 64, 256, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    # lr_type is now forwarded here too, consistently with the sibling calls
    # (the previous omission fell back to the identical default "alex")
    res2c = create_big_block(res2b, "2c", 64, 256, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res3a = create_big_block(res2c, "3a", 128, 512, stride=2,
                             identity_map=False, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res3b = create_big_block(res3a, "3b", 128, 512, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    return res3b


def create_part2(data, num_class, workspace=512, use_global_stats=True,
                 lr_type="alex"):
    """
    Stages 3c-5c of ResNet-50 plus a classification head
    (adapt conv -> CAMfc6 -> CAMfc7 -> CAMfc8 scores).
    :param data: feature symbol produced by create_part1
    :param num_class: number of output classes
    :return: symbol of unnormalized class scores (CAMfc8)
    """
    res3c = create_big_block(data, "3c", 128, 512, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res3d = create_big_block(res3c, "3d", 128, 512, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res4a = create_big_block(res3d, "4a", 256, 1024, stride=2,
                             identity_map=False, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res4b = create_big_block(res4a, "4b", 256, 1024, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res4c = create_big_block(res4b, "4c", 256, 1024, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res4d = create_big_block(res4c, "4d", 256, 1024, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res4e = create_big_block(res4d, "4e", 256, 1024, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res4f = create_big_block(res4e, "4f", 256, 1024, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res5a = create_big_block(res4f, "5a", 512, 2048, stride=2,
                             identity_map=False, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res5b = create_big_block(res5a, "5b", 512, 2048, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res5c = create_big_block(res5b, "5c", 512, 2048, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    # Head: trained from scratch, hence the larger "alex10" learning-rate tag
    adapt = syms.conv(res5c, "adapt", 512, kernel=1, lr_type="alex10",
                      workspace=workspace)
    adapt_relu = syms.relu(adapt)
    fl = mx.sym.flatten(adapt_relu)
    fc6 = syms.fullyconnected(fl, num_hidden=1024, name="CAMfc6",
                              lr_type="alex10")
    relu6 = syms.relu(fc6)
    # group7
    fc7 = syms.fullyconnected(relu6, num_hidden=1024, name="CAMfc7",
                              lr_type="alex10")
    relu7 = syms.relu(fc7)
    fc8 = syms.fullyconnected(relu7, num_hidden=num_class, name="CAMfc8",
                              lr_type="alex10")
    return fc8


def create_train(num_class, workspace=512):
    """
    Full training symbol: backbone + head + loss.
    NOTE(review): LogisticRegressionOutput applies a per-class sigmoid
    cross-entropy - presumably intentional for multi-label supervision
    rather than softmax classification; confirm against the label format.
    :param num_class: number of output classes
    :return: training output symbol
    """
    label = mx.sym.Variable("label")
    data = mx.sym.Variable("data")
    part1 = create_part1(data, workspace=workspace)
    part2 = create_part2(part1, num_class, workspace=workspace)
    score = mx.sym.LogisticRegressionOutput(part2, label=label)
    return score
cores/symbols/bg_cue_resnet50.py
import mxnet as mx
import net_symbols as syms


def create_block(data, name, num_filter, kernel, pad=0, stride=1, dilate=1,
                 workspace=512, use_global_stats=True, lr_type="alex"):
    """
    Convolution (no bias) followed by batch normalization.
    Layer names follow the ResNet convention: "res" + name / "bn" + name.
    :param data: input symbol
    :param name: layer name suffix, e.g. "2a_branch2a"
    :param num_filter: number of convolution filters
    :param kernel: convolution kernel size
    :param pad: padding
    :param stride: stride
    :param dilate: dilation
    :param workspace: convolution workspace size
    :param use_global_stats: use global statistics in batch norm
    :param lr_type: learning-rate tag understood by net_symbols helpers
    :return: symbol after conv + bn (no activation applied)
    """
    res = syms.conv(data=data, name="res" + name, num_filter=num_filter, pad=pad,
                    kernel=kernel, stride=stride, dilate=dilate, no_bias=True,
                    workspace=workspace, lr_type=lr_type)
    bn = syms.bn(res, name="bn" + name, use_global_stats=use_global_stats,
                 lr_type=lr_type)
    return bn


def create_big_block(data, name, num_filter1, num_filter2, stride=1, dilate=1,
                     pad=1, identity_map=True, workspace=512,
                     use_global_stats=True, lr_type="alex"):
    """
    ResNet bottleneck unit: 1x1 reduce -> 3x3 -> 1x1 expand, plus shortcut.
    :param data: input symbol
    :param name: stage/unit suffix, e.g. "2a"
    :param num_filter1: channels of the two inner (reduce / 3x3) convolutions
    :param num_filter2: output channels of the expanding 1x1 convolution
    :param stride: stride applied in the first 1x1 branch (and projection)
    :param dilate: dilation of the 3x3 convolution
    :param pad: padding of the 3x3 convolution
    :param identity_map: True for identity shortcut; False adds a 1x1
     projection shortcut (needed when shape/stride changes)
    :return: symbol after residual addition and relu
    """
    blocka = create_block(data, name=name + "_branch2a", num_filter=num_filter1,
                          kernel=1, stride=stride, workspace=workspace,
                          use_global_stats=use_global_stats, lr_type=lr_type)
    relu1 = syms.relu(blocka)
    blockb = create_block(relu1, name=name + "_branch2b", num_filter=num_filter1,
                          kernel=3, dilate=dilate, pad=pad, workspace=workspace,
                          use_global_stats=use_global_stats, lr_type=lr_type)
    relu2 = syms.relu(blockb)
    blockc = create_block(relu2, name=name + "_branch2c", num_filter=num_filter2,
                          kernel=1, workspace=workspace,
                          use_global_stats=use_global_stats, lr_type=lr_type)
    if identity_map:
        return syms.relu(data + blockc)
    # Projection shortcut matches channel count / stride when shapes differ
    branch1 = create_block(data, name=name + "_branch1", num_filter=num_filter2,
                           kernel=1, stride=stride, workspace=workspace,
                           use_global_stats=use_global_stats, lr_type=lr_type)
    return syms.relu(branch1 + blockc)


def create_part1(data, workspace=512, use_global_stats=True, lr_type="alex"):
    """
    Stem and stages 2a-3b of ResNet-50.
    Fix: the original immediately shadowed the `data` argument with a fresh
    mx.symbol.Variable("data"); the argument is now actually used. This is
    graph-equivalent for the existing caller, which passes a variable named
    "data".
    :param data: input data symbol
    :return: symbol of the res3b feature map
    """
    conv1 = syms.conv(data, name="conv1", num_filter=64, pad=3, kernel=7,
                      stride=2, workspace=workspace, lr_type=lr_type)
    bn = syms.bn(conv1, name="bn_conv1", use_global_stats=use_global_stats,
                 lr_type=lr_type)
    relu = syms.relu(bn)
    pool1 = syms.maxpool(relu, kernel=3, stride=2, pad=1)
    res2a = create_big_block(pool1, "2a", 64, 256, identity_map=False,
                             workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res2b = create_big_block(res2a, "2b", 64, 256, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    # lr_type is now forwarded here too, consistently with the sibling calls
    # (the previous omission fell back to the identical default "alex")
    res2c = create_big_block(res2b, "2c", 64, 256, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res3a = create_big_block(res2c, "3a", 128, 512, stride=2,
                             identity_map=False, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res3b = create_big_block(res3a, "3b", 128, 512, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    return res3b


def create_part2(data, num_class, workspace=512, use_global_stats=True,
                 lr_type="alex"):
    """
    Stages 3c-5c of ResNet-50 plus a classification head
    (adapt conv -> CAMfc6 -> CAMfc7 -> CAMfc8 scores).
    :param data: feature symbol produced by create_part1
    :param num_class: number of output classes
    :return: symbol of unnormalized class scores (CAMfc8)
    """
    res3c = create_big_block(data, "3c", 128, 512, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res3d = create_big_block(res3c, "3d", 128, 512, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res4a = create_big_block(res3d, "4a", 256, 1024, stride=2,
                             identity_map=False, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res4b = create_big_block(res4a, "4b", 256, 1024, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res4c = create_big_block(res4b, "4c", 256, 1024, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res4d = create_big_block(res4c, "4d", 256, 1024, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res4e = create_big_block(res4d, "4e", 256, 1024, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res4f = create_big_block(res4e, "4f", 256, 1024, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res5a = create_big_block(res4f, "5a", 512, 2048, stride=2,
                             identity_map=False, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res5b = create_big_block(res5a, "5b", 512, 2048, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    res5c = create_big_block(res5b, "5c", 512, 2048, workspace=workspace,
                             use_global_stats=use_global_stats, lr_type=lr_type)
    # Head: trained from scratch, hence the larger "alex10" learning-rate tag
    adapt = syms.conv(res5c, "adapt", 512, kernel=1, lr_type="alex10",
                      workspace=workspace)
    adapt_relu = syms.relu(adapt)
    fl = mx.sym.flatten(adapt_relu)
    fc6 = syms.fullyconnected(fl, num_hidden=1024, name="CAMfc6",
                              lr_type="alex10")
    relu6 = syms.relu(fc6)
    # group7
    fc7 = syms.fullyconnected(relu6, num_hidden=1024, name="CAMfc7",
                              lr_type="alex10")
    relu7 = syms.relu(fc7)
    fc8 = syms.fullyconnected(relu7, num_hidden=num_class, name="CAMfc8",
                              lr_type="alex10")
    return fc8


def create_train(num_class, workspace=512):
    """
    Full training symbol: backbone + head + loss.
    NOTE(review): LogisticRegressionOutput applies a per-class sigmoid
    cross-entropy - presumably intentional for multi-label supervision
    rather than softmax classification; confirm against the label format.
    :param num_class: number of output classes
    :return: training output symbol
    """
    label = mx.sym.Variable("label")
    data = mx.sym.Variable("data")
    part1 = create_part1(data, workspace=workspace)
    part2 = create_part2(part1, num_class, workspace=workspace)
    score = mx.sym.LogisticRegressionOutput(part2, label=label)
    return score
0.373304
0.234779