Columns: ngram (list of string n-grams); listlengths (roughly 0 – 67.8k).
[ "> temp: arr[index] = arr[index - 1] index -= 1 arr[index] = temp", "in range(l + 1, r + 1): temp = arr[i] index = i", "0 and arr[index - 1] > temp: arr[index] = arr[index - 1] index", "- 1] > temp: arr[index] = arr[index - 1] index -= 1 arr[index]", "and arr[index - 1] > temp: arr[index] = arr[index - 1] index -=", "arr[i] index = i while index > 0 and arr[index - 1] >", "= arr[i] index = i while index > 0 and arr[index - 1]", "index > 0 and arr[index - 1] > temp: arr[index] = arr[index -", "= i while index > 0 and arr[index - 1] > temp: arr[index]", "temp = arr[i] index = i while index > 0 and arr[index -", "+ 1): temp = arr[i] index = i while index > 0 and", "while index > 0 and arr[index - 1] > temp: arr[index] = arr[index", "1] > temp: arr[index] = arr[index - 1] index -= 1 arr[index] =", "l, r): for i in range(l + 1, r + 1): temp =", "r): for i in range(l + 1, r + 1): temp = arr[i]", "index = i while index > 0 and arr[index - 1] > temp:", "+ 1, r + 1): temp = arr[i] index = i while index", "for i in range(l + 1, r + 1): temp = arr[i] index", "range(l + 1, r + 1): temp = arr[i] index = i while", "insertion_sort(arr, l, r): for i in range(l + 1, r + 1): temp", "arr[index - 1] > temp: arr[index] = arr[index - 1] index -= 1", "1, r + 1): temp = arr[i] index = i while index >", "r + 1): temp = arr[i] index = i while index > 0", "1): temp = arr[i] index = i while index > 0 and arr[index", "i while index > 0 and arr[index - 1] > temp: arr[index] =", "> 0 and arr[index - 1] > temp: arr[index] = arr[index - 1]", "def insertion_sort(arr, l, r): for i in range(l + 1, r + 1):", "i in range(l + 1, r + 1): temp = arr[i] index =" ]
[ "of this ShowCertificateResponse. :type: str \"\"\" self._push_support = push_support @property def revoke_reason(self): \"\"\"Gets", "self._validation_method = None self._domain_type = None self._domain = None self._sans = None self._domain_count", "\"\"\" self._validity_period = validity_period @property def validation_method(self): \"\"\"Gets the validation_method of this ShowCertificateResponse.", "getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x,", "validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :return: The validation_method of this ShowCertificateResponse. :rtype: str", "str \"\"\" return self._status @status.setter def status(self, status): \"\"\"Sets the status of this", "not_before(self, not_before): \"\"\"Sets the not_before of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :param not_before: The not_before", "wildcard_count): \"\"\"Sets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :param wildcard_count: The wildcard_count of", "str \"\"\" self._status = status @property def order_id(self): \"\"\"Gets the order_id of this", "str \"\"\" return self._revoke_reason @revoke_reason.setter def revoke_reason(self, revoke_reason): \"\"\"Sets the revoke_reason of this", "name(self): \"\"\"Gets the name of this ShowCertificateResponse. 证书名称。 :return: The name of this", "证书绑定域名。 :param domain: The domain of this ShowCertificateResponse. :type: str \"\"\" self._domain =", "{} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list):", "@order_id.setter def order_id(self, order_id): \"\"\"Sets the order_id of this ShowCertificateResponse. 订单id。 :param order_id:", "(dict): The key is attribute name and the value is attribute type. attribute_map", "self._validity_period = validity_period @property def validation_method(self): \"\"\"Gets the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。", "\"\"\"For `print`\"\"\" return self.to_str() def __eq__(self, other): \"\"\"Returns true if both objects are", "ShowCertificateResponse. :type: str \"\"\" self._sans = sans @property def domain_count(self): \"\"\"Gets the domain_count", "if not_after is not None: self.not_after = not_after if validity_period is not None:", "not_after @property def validity_period(self): \"\"\"Gets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :return: The", "validity_period if validation_method is not None: self.validation_method = validation_method if domain_type is not", "this ShowCertificateResponse. :rtype: int \"\"\" return self._domain_count @domain_count.setter def domain_count(self, domain_count): \"\"\"Sets the", "'validation_method', 'domain_type': 'domain_type', 'domain': 'domain', 'sans': 'sans', 'domain_count': 'domain_count', 'wildcard_count': 'wildcard_count', 'authentification': 'authentification'", "'int', 'wildcard_count': 'int', 'authentification': 'list[Authentification]' } attribute_map = { 'id': 'id', 'status': 'status',", "class ShowCertificateResponse(SdkResponse): \"\"\" Attributes: openapi_types (dict): The key is attribute name and the", "the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :return: The wildcard_count of this ShowCertificateResponse. :rtype:", "The brand of this ShowCertificateResponse. 
:rtype: str \"\"\" return self._brand @brand.setter def brand(self,", "x.to_dict() if hasattr(x, \"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"): result[attr] =", "self.discriminator = None if id is not None: self.id = id if status", "and the value is json key in definition. \"\"\" sensitive_list = [] openapi_types", "@signature_algrithm.setter def signature_algrithm(self, signature_algrithm): \"\"\"Sets the signature_algrithm of this ShowCertificateResponse. 签名算法。 :param signature_algrithm:", "hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item:", "id of this ShowCertificateResponse. :rtype: str \"\"\" return self._id @id.setter def id(self, id):", "status(self, status): \"\"\"Sets the status of this ShowCertificateResponse. 证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。", "true if both objects are equal\"\"\" if not isinstance(other, ShowCertificateResponse): return False return", ":param not_after: The not_after of this ShowCertificateResponse. :type: str \"\"\" self._not_after = not_after", "\"\"\" return self._brand @brand.setter def brand(self, brand): \"\"\"Sets the brand of this ShowCertificateResponse.", "None: self.validation_method = validation_method if domain_type is not None: self.domain_type = domain_type if", "\"\"\"Gets the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。 :return: The domain_count of this ShowCertificateResponse.", "= sans @property def domain_count(self): \"\"\"Gets the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。 :return:", "UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :return: The status of this ShowCertificateResponse. :rtype: str", "the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :param wildcard_count: The wildcard_count of this ShowCertificateResponse.", "self._validity_period @validity_period.setter def validity_period(self, validity_period): \"\"\"Sets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :param", ":return: The signature_algrithm of this ShowCertificateResponse. :rtype: str \"\"\" return self._signature_algrithm @signature_algrithm.setter def", "\"\"\" self._validation_method = validation_method @property def domain_type(self): \"\"\"Gets the domain_type of this ShowCertificateResponse.", "ShowCertificateResponse. 订单id。 :param order_id: The order_id of this ShowCertificateResponse. :type: str \"\"\" self._order_id", "= name @property def type(self): \"\"\"Gets the type of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、", "not_before is not None: self.not_before = not_before if not_after is not None: self.not_after", "this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :return: The authentification of this ShowCertificateResponse. :rtype: list[Authentification] \"\"\" return", ":return: The type of this ShowCertificateResponse. :rtype: str \"\"\" return self._type @type.setter def", "\"\"\" self._authentification = authentification def to_dict(self): \"\"\"Returns the model properties as a dict\"\"\"", ":rtype: int \"\"\" return self._wildcard_count @wildcard_count.setter def wildcard_count(self, wildcard_count): \"\"\"Sets the wildcard_count of", "order_id of this ShowCertificateResponse. 
:rtype: str \"\"\" return self._order_id @order_id.setter def order_id(self, order_id):", "str \"\"\" self._domain_type = domain_type @property def domain(self): \"\"\"Gets the domain of this", "CANCELCHECKING:取消证书申请审核中。 - UNPASSED:证书申请未通过。 - EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。 - UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。", "ShowCertificateResponse. 签名算法。 :return: The signature_algrithm of this ShowCertificateResponse. :rtype: str \"\"\" return self._signature_algrithm", "not_after=None, validity_period=None, validation_method=None, domain_type=None, domain=None, sans=None, domain_count=None, wildcard_count=None, authentification=None): \"\"\"ShowCertificateResponse - a model", "coding: utf-8 import re import six from huaweicloudsdkcore.sdk_response import SdkResponse from huaweicloudsdkcore.utils.http_utils import", "id of this ShowCertificateResponse. 证书id。 :param id: The id of this ShowCertificateResponse. :type:", "order_id(self, order_id): \"\"\"Sets the order_id of this ShowCertificateResponse. 订单id。 :param order_id: The order_id", "list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value )) elif hasattr(value,", "证书id。 :return: The id of this ShowCertificateResponse. :rtype: str \"\"\" return self._id @id.setter", "def id(self, id): \"\"\"Sets the id of this ShowCertificateResponse. 证书id。 :param id: The", "sans(self, sans): \"\"\"Sets the sans of this ShowCertificateResponse. 证书绑定的附加域名信息。 :param sans: The sans", "wildcard_count(self): \"\"\"Gets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :return: The wildcard_count of this", "order_id: The order_id of this ShowCertificateResponse. :type: str \"\"\" self._order_id = order_id @property", "self.authentification = authentification @property def id(self): \"\"\"Gets the id of this ShowCertificateResponse. 证书id。", "validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :return: The validity_period of this ShowCertificateResponse. :rtype: int", "domain(self): \"\"\"Gets the domain of this ShowCertificateResponse. 证书绑定域名。 :return: The domain of this", "ISSUED:证书已签发。 - CHECKING:证书申请审核中。 - CANCELCHECKING:取消证书申请审核中。 - UNPASSED:证书申请未通过。 - EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。", "this ShowCertificateResponse. :type: str \"\"\" self._name = name @property def type(self): \"\"\"Gets the", "of the model\"\"\" import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding(\"utf-8\")", "self.validation_method = validation_method if domain_type is not None: self.domain_type = domain_type if domain", "the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :return: The validity_period of this ShowCertificateResponse. :rtype:", "- SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :return: The status of this ShowCertificateResponse. :rtype: str \"\"\"", "this ShowCertificateResponse. 证书id。 :return: The id of this ShowCertificateResponse. :rtype: str \"\"\" return", "None: self.authentification = authentification @property def id(self): \"\"\"Gets the id of this ShowCertificateResponse.", "None self._domain_type = None self._domain = None self._sans = None self._domain_count = None", "the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :param issue_time: The issue_time of this ShowCertificateResponse.", ":return: The sans of this ShowCertificateResponse. 
:rtype: str \"\"\" return self._sans @sans.setter def", "'revoke_reason': 'revoke_reason', 'signature_algrithm': 'signature_algrithm', 'issue_time': 'issue_time', 'not_before': 'not_before', 'not_after': 'not_after', 'validity_period': 'validity_period', 'validation_method':", "= wildcard_count if authentification is not None: self.authentification = authentification @property def id(self):", "= push_support if revoke_reason is not None: self.revoke_reason = revoke_reason if signature_algrithm is", "this ShowCertificateResponse. :type: str \"\"\" self._domain_type = domain_type @property def domain(self): \"\"\"Gets the", "of this ShowCertificateResponse. 证书id。 :return: The id of this ShowCertificateResponse. :rtype: str \"\"\"", "if push_support is not None: self.push_support = push_support if revoke_reason is not None:", "def to_str(self): \"\"\"Returns the string representation of the model\"\"\" import simplejson as json", ":rtype: str \"\"\" return self._revoke_reason @revoke_reason.setter def revoke_reason(self, revoke_reason): \"\"\"Sets the revoke_reason of", "The name of this ShowCertificateResponse. :type: str \"\"\" self._name = name @property def", "self._domain_type = domain_type @property def domain(self): \"\"\"Gets the domain of this ShowCertificateResponse. 证书绑定域名。", "domain of this ShowCertificateResponse. 证书绑定域名。 :param domain: The domain of this ShowCertificateResponse. :type:", "self.domain = domain if sans is not None: self.sans = sans if domain_count", "\"\"\"Gets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :return: The wildcard_count of this ShowCertificateResponse.", "self._status = None self._order_id = None self._name = None self._type = None self._brand", "status(self): \"\"\"Gets the status of this ShowCertificateResponse. 证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 -", "of this ShowCertificateResponse. :rtype: int \"\"\" return self._domain_count @domain_count.setter def domain_count(self, domain_count): \"\"\"Sets", "def wildcard_count(self): \"\"\"Gets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :return: The wildcard_count of", "'domain': 'domain', 'sans': 'sans', 'domain_count': 'domain_count', 'wildcard_count': 'wildcard_count', 'authentification': 'authentification' } def __init__(self,", "wildcard_count is not None: self.wildcard_count = wildcard_count if authentification is not None: self.authentification", "self).__init__() self._id = None self._status = None self._order_id = None self._name = None", "'int', 'authentification': 'list[Authentification]' } attribute_map = { 'id': 'id', 'status': 'status', 'order_id': 'order_id',", "EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。 - UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :return: The", "self._signature_algrithm @signature_algrithm.setter def signature_algrithm(self, signature_algrithm): \"\"\"Sets the signature_algrithm of this ShowCertificateResponse. 签名算法。 :param", "def brand(self): \"\"\"Gets the brand of this ShowCertificateResponse. 证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :return: The brand", "= None self._sans = None self._domain_count = None self._wildcard_count = None self._authentification =", "of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :return: The validation_method of this ShowCertificateResponse. :rtype: str \"\"\"", "ShowCertificateResponse. 
:type: str \"\"\" self._revoke_reason = revoke_reason @property def signature_algrithm(self): \"\"\"Gets the signature_algrithm", "self.not_after = not_after if validity_period is not None: self.validity_period = validity_period if validation_method", ":type: int \"\"\" self._validity_period = validity_period @property def validation_method(self): \"\"\"Gets the validation_method of", "'type': 'type', 'brand': 'brand', 'push_support': 'push_support', 'revoke_reason': 'revoke_reason', 'signature_algrithm': 'signature_algrithm', 'issue_time': 'issue_time', 'not_before':", "brand(self, brand): \"\"\"Sets the brand of this ShowCertificateResponse. 证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :param brand: The", "The push_support of this ShowCertificateResponse. :type: str \"\"\" self._push_support = push_support @property def", "\"\"\"Gets the brand of this ShowCertificateResponse. 证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :return: The brand of this", "= None self._revoke_reason = None self._signature_algrithm = None self._issue_time = None self._not_before =", "@domain_type.setter def domain_type(self, domain_type): \"\"\"Sets the domain_type of this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名", "validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :param validity_period: The validity_period of this ShowCertificateResponse. :type:", "return self._id @id.setter def id(self, id): \"\"\"Sets the id of this ShowCertificateResponse. 证书id。", "= not_before @property def not_after(self): \"\"\"Gets the not_after of this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :return:", "is not None: self.wildcard_count = wildcard_count if authentification is not None: self.authentification =", "not None: self.order_id = order_id if name is not None: self.name = name", "'str', 'sans': 'str', 'domain_count': 'int', 'wildcard_count': 'int', 'authentification': 'list[Authentification]' } attribute_map = {", "id of this ShowCertificateResponse. 证书id。 :return: The id of this ShowCertificateResponse. :rtype: str", "issue_time if not_before is not None: self.not_before = not_before if not_after is not", "revoke_reason): \"\"\"Sets the revoke_reason of this ShowCertificateResponse. 证书吊销原因。 :param revoke_reason: The revoke_reason of", "= type if brand is not None: self.brand = brand if push_support is", "\"\"\" return self._push_support @push_support.setter def push_support(self, push_support): \"\"\"Sets the push_support of this ShowCertificateResponse.", "this ShowCertificateResponse. :type: str \"\"\" self._domain = domain @property def sans(self): \"\"\"Gets the", "huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization class ShowCertificateResponse(SdkResponse): \"\"\" Attributes: openapi_types (dict): The key is attribute", "import sys reload(sys) sys.setdefaultencoding(\"utf-8\") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) def __repr__(self): \"\"\"For `print`\"\"\" return self.to_str()", "not None: self.type = type if brand is not None: self.brand = brand", "name and the value is json key in definition. \"\"\" sensitive_list = []", "self.sensitive_list: result[attr] = \"****\" else: result[attr] = value return result def to_str(self): \"\"\"Returns", "domain_type(self, domain_type): \"\"\"Sets the domain_type of this ShowCertificateResponse. 
域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符", "'brand', 'push_support': 'push_support', 'revoke_reason': 'revoke_reason', 'signature_algrithm': 'signature_algrithm', 'issue_time': 'issue_time', 'not_before': 'not_before', 'not_after': 'not_after',", "this ShowCertificateResponse. 证书绑定域名。 :param domain: The domain of this ShowCertificateResponse. :type: str \"\"\"", "\"\"\" return self._issue_time @issue_time.setter def issue_time(self, issue_time): \"\"\"Sets the issue_time of this ShowCertificateResponse.", "self.wildcard_count = wildcard_count if authentification is not None: self.authentification = authentification @property def", "self.type = type if brand is not None: self.brand = brand if push_support", "dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item,", ":return: The validity_period of this ShowCertificateResponse. :rtype: int \"\"\" return self._validity_period @validity_period.setter def", "attr in self.sensitive_list: result[attr] = \"****\" else: result[attr] = value return result def", "is attribute name and the value is attribute type. attribute_map (dict): The key", "of this ShowCertificateResponse. 证书id。 :param id: The id of this ShowCertificateResponse. :type: str", "not_before of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :param not_before: The not_before of this ShowCertificateResponse. :type:", "UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :param status: The status of this ShowCertificateResponse. :type:", "of this ShowCertificateResponse. :type: str \"\"\" self._not_after = not_after @property def validity_period(self): \"\"\"Gets", "attribute name and the value is json key in definition. \"\"\" sensitive_list =", "return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) def __repr__(self): \"\"\"For `print`\"\"\" return self.to_str() def __eq__(self, other): \"\"\"Returns", "域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :param domain_type: The domain_type of this", "this ShowCertificateResponse. :type: list[Authentification] \"\"\" self._authentification = authentification def to_dict(self): \"\"\"Returns the model", "the push_support of this ShowCertificateResponse. 证书是否支持推送。 :return: The push_support of this ShowCertificateResponse. :rtype:", "The domain_count of this ShowCertificateResponse. :type: int \"\"\" self._domain_count = domain_count @property def", "'authentification': 'list[Authentification]' } attribute_map = { 'id': 'id', 'status': 'status', 'order_id': 'order_id', 'name':", "this ShowCertificateResponse. 证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :param brand: The brand of this ShowCertificateResponse. :type: str", "not_before of this ShowCertificateResponse. :type: str \"\"\" self._not_before = not_before @property def not_after(self):", "self._not_before = None self._not_after = None self._validity_period = None self._validation_method = None self._domain_type", "validity_period @property def validation_method(self): \"\"\"Gets the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :return: The", "defined in huaweicloud sdk\"\"\" super(ShowCertificateResponse, self).__init__() self._id = None self._status = None self._order_id", "self._name @name.setter def name(self, name): \"\"\"Sets the name of this ShowCertificateResponse. 证书名称。 :param", "this ShowCertificateResponse. 证书名称。 :param name: The name of this ShowCertificateResponse. 
:type: str \"\"\"", "a dict\"\"\" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self,", "证书可绑定域名个数。 :param domain_count: The domain_count of this ShowCertificateResponse. :type: int \"\"\" self._domain_count =", "this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :return: The type of this ShowCertificateResponse. :rtype: str", "domain_count of this ShowCertificateResponse. 证书可绑定域名个数。 :return: The domain_count of this ShowCertificateResponse. :rtype: int", "None self._validity_period = None self._validation_method = None self._domain_type = None self._domain = None", "is not None: self.validity_period = validity_period if validation_method is not None: self.validation_method =", "def domain_count(self, domain_count): \"\"\"Sets the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。 :param domain_count: The", "this ShowCertificateResponse. 证书可绑定域名个数。 :param domain_count: The domain_count of this ShowCertificateResponse. :type: int \"\"\"", "are equal\"\"\" if not isinstance(other, ShowCertificateResponse): return False return self.__dict__ == other.__dict__ def", "\"\"\" self._revoke_reason = revoke_reason @property def signature_algrithm(self): \"\"\"Gets the signature_algrithm of this ShowCertificateResponse.", "'signature_algrithm': 'str', 'issue_time': 'str', 'not_before': 'str', 'not_after': 'str', 'validity_period': 'int', 'validation_method': 'str', 'domain_type':", "self._sans = sans @property def domain_count(self): \"\"\"Gets the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。", "this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :return: The issue_time of this ShowCertificateResponse. :rtype: str \"\"\" return", "of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :param not_before: The not_before of this ShowCertificateResponse. :type: str", "validation_method(self, validation_method): \"\"\"Sets the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :param validation_method: The validation_method", "authentification is not None: self.authentification = authentification @property def id(self): \"\"\"Gets the id", "of this ShowCertificateResponse. :rtype: str \"\"\" return self._type @type.setter def type(self, type): \"\"\"Sets", "= { 'id': 'str', 'status': 'str', 'order_id': 'str', 'name': 'str', 'type': 'str', 'brand':", "is not None: self.domain = domain if sans is not None: self.sans =", "The push_support of this ShowCertificateResponse. :rtype: str \"\"\" return self._push_support @push_support.setter def push_support(self,", "this ShowCertificateResponse. 证书id。 :param id: The id of this ShowCertificateResponse. :type: str \"\"\"", "(dict): The key is attribute name and the value is json key in", "self._domain_count = domain_count @property def wildcard_count(self): \"\"\"Gets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。", "not_after if validity_period is not None: self.validity_period = validity_period if validation_method is not", "ShowCertificateResponse. 证书有效期,按月为单位。 :return: The validity_period of this ShowCertificateResponse. 
:rtype: int \"\"\" return self._validity_period", "None: self.validity_period = validity_period if validation_method is not None: self.validation_method = validation_method if", "'validity_period': 'int', 'validation_method': 'str', 'domain_type': 'str', 'domain': 'str', 'sans': 'str', 'domain_count': 'int', 'wildcard_count':", "= domain @property def sans(self): \"\"\"Gets the sans of this ShowCertificateResponse. 证书绑定的附加域名信息。 :return:", "'authentification' } def __init__(self, id=None, status=None, order_id=None, name=None, type=None, brand=None, push_support=None, revoke_reason=None, signature_algrithm=None,", "ShowCertificateResponse. :rtype: str \"\"\" return self._order_id @order_id.setter def order_id(self, order_id): \"\"\"Sets the order_id", "self._not_after = None self._validity_period = None self._validation_method = None self._domain_type = None self._domain", "of this ShowCertificateResponse. 证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :param brand: The brand of this ShowCertificateResponse. :type:", "of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :param type: The type of this ShowCertificateResponse.", "= domain_count @property def wildcard_count(self): \"\"\"Gets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :return:", "if revoke_reason is not None: self.revoke_reason = revoke_reason if signature_algrithm is not None:", "sys reload(sys) sys.setdefaultencoding(\"utf-8\") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) def __repr__(self): \"\"\"For `print`\"\"\" return self.to_str() def", "this ShowCertificateResponse. :type: str \"\"\" self._push_support = push_support @property def revoke_reason(self): \"\"\"Gets the", "int \"\"\" self._domain_count = domain_count @property def wildcard_count(self): \"\"\"Gets the wildcard_count of this", "brand=None, push_support=None, revoke_reason=None, signature_algrithm=None, issue_time=None, not_before=None, not_after=None, validity_period=None, validation_method=None, domain_type=None, domain=None, sans=None, domain_count=None,", "ShowCertificateResponse. :rtype: str \"\"\" return self._push_support @push_support.setter def push_support(self, push_support): \"\"\"Sets the push_support", "this ShowCertificateResponse. 证书绑定的附加域名信息。 :return: The sans of this ShowCertificateResponse. :rtype: str \"\"\" return", "证书可绑定附加域名个数。 :return: The wildcard_count of this ShowCertificateResponse. :rtype: int \"\"\" return self._wildcard_count @wildcard_count.setter", "= status if order_id is not None: self.order_id = order_id if name is", "The signature_algrithm of this ShowCertificateResponse. :type: str \"\"\" self._signature_algrithm = signature_algrithm @property def", "domain_type of this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :return: The", "this ShowCertificateResponse. 证书吊销原因。 :param revoke_reason: The revoke_reason of this ShowCertificateResponse. :type: str \"\"\"", "'str', 'status': 'str', 'order_id': 'str', 'name': 'str', 'type': 'str', 'brand': 'str', 'push_support': 'str',", "证书签发时间,没有获取到有效值时为空。 :param issue_time: The issue_time of this ShowCertificateResponse. :type: str \"\"\" self._issue_time =", "of this ShowCertificateResponse. :type: str \"\"\" self._issue_time = issue_time @property def not_before(self): \"\"\"Gets", "def validity_period(self, validity_period): \"\"\"Sets the validity_period of this ShowCertificateResponse. 
证书有效期,按月为单位。 :param validity_period: The", "validation_method(self): \"\"\"Gets the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :return: The validation_method of this", "name of this ShowCertificateResponse. 证书名称。 :return: The name of this ShowCertificateResponse. :rtype: str", "of this ShowCertificateResponse. 证书可绑定附加域名个数。 :return: The wildcard_count of this ShowCertificateResponse. :rtype: int \"\"\"", "of this ShowCertificateResponse. :type: str \"\"\" self._validation_method = validation_method @property def domain_type(self): \"\"\"Gets", "if six.PY2: import sys reload(sys) sys.setdefaultencoding(\"utf-8\") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) def __repr__(self): \"\"\"For `print`\"\"\"", "def not_after(self, not_after): \"\"\"Sets the not_after of this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :param not_after: The", "id(self): \"\"\"Gets the id of this ShowCertificateResponse. 证书id。 :return: The id of this", "ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :return: The domain_type of this", "to_str(self): \"\"\"Returns the string representation of the model\"\"\" import simplejson as json if", "\"\"\"Sets the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。 :param domain_count: The domain_count of this", "'brand': 'brand', 'push_support': 'push_support', 'revoke_reason': 'revoke_reason', 'signature_algrithm': 'signature_algrithm', 'issue_time': 'issue_time', 'not_before': 'not_before', 'not_after':", "the id of this ShowCertificateResponse. 证书id。 :return: The id of this ShowCertificateResponse. :rtype:", "type of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :return: The type of this ShowCertificateResponse.", "of this ShowCertificateResponse. 证书名称。 :return: The name of this ShowCertificateResponse. :rtype: str \"\"\"", "- ISSUED:证书已签发。 - CHECKING:证书申请审核中。 - CANCELCHECKING:取消证书申请审核中。 - UNPASSED:证书申请未通过。 - EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 -", "\"\"\"Sets the not_after of this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :param not_after: The not_after of this", "issue_time: The issue_time of this ShowCertificateResponse. :type: str \"\"\" self._issue_time = issue_time @property", ")) else: if attr in self.sensitive_list: result[attr] = \"****\" else: result[attr] = value", "'str', 'revoke_reason': 'str', 'signature_algrithm': 'str', 'issue_time': 'str', 'not_before': 'str', 'not_after': 'str', 'validity_period': 'int',", "from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization class ShowCertificateResponse(SdkResponse): \"\"\" Attributes: openapi_types (dict): The key is", "ShowCertificateResponse. :type: str \"\"\" self._push_support = push_support @property def revoke_reason(self): \"\"\"Gets the revoke_reason", "self._id = None self._status = None self._order_id = None self._name = None self._type", "in definition. \"\"\" sensitive_list = [] openapi_types = { 'id': 'str', 'status': 'str',", "def authentification(self, authentification): \"\"\"Sets the authentification of this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :param authentification: The", "\"\"\"Sets the domain_type of this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名", "the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :return: The issue_time of this ShowCertificateResponse. 
:rtype:", "not_before): \"\"\"Sets the not_before of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :param not_before: The not_before of", "'str', 'signature_algrithm': 'str', 'issue_time': 'str', 'not_before': 'str', 'not_after': 'str', 'validity_period': 'int', 'validation_method': 'str',", "this ShowCertificateResponse. :type: str \"\"\" self._order_id = order_id @property def name(self): \"\"\"Gets the", "\"\"\" self._domain_count = domain_count @property def wildcard_count(self): \"\"\"Gets the wildcard_count of this ShowCertificateResponse.", "the signature_algrithm of this ShowCertificateResponse. 签名算法。 :param signature_algrithm: The signature_algrithm of this ShowCertificateResponse.", ":param domain: The domain of this ShowCertificateResponse. :type: str \"\"\" self._domain = domain", "this ShowCertificateResponse. :rtype: str \"\"\" return self._not_before @not_before.setter def not_before(self, not_before): \"\"\"Sets the", "None: self.signature_algrithm = signature_algrithm if issue_time is not None: self.issue_time = issue_time if", "__eq__(self, other): \"\"\"Returns true if both objects are equal\"\"\" if not isinstance(other, ShowCertificateResponse):", "The type of this ShowCertificateResponse. :rtype: str \"\"\" return self._type @type.setter def type(self,", "the not_before of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :return: The not_before of this ShowCertificateResponse. :rtype:", "'str', 'domain': 'str', 'sans': 'str', 'domain_count': 'int', 'wildcard_count': 'int', 'authentification': 'list[Authentification]' } attribute_map", "= None if id is not None: self.id = id if status is", "type if brand is not None: self.brand = brand if push_support is not", "not None: self.validity_period = validity_period if validation_method is not None: self.validation_method = validation_method", "attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\")", "not_before(self): \"\"\"Gets the not_before of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :return: The not_before of this", "this ShowCertificateResponse. :type: str \"\"\" self._revoke_reason = revoke_reason @property def signature_algrithm(self): \"\"\"Gets the", "The type of this ShowCertificateResponse. :type: str \"\"\" self._type = type @property def", "push_support of this ShowCertificateResponse. 证书是否支持推送。 :param push_support: The push_support of this ShowCertificateResponse. :type:", "this ShowCertificateResponse. :rtype: str \"\"\" return self._status @status.setter def status(self, status): \"\"\"Sets the", "of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :return: The not_before of this ShowCertificateResponse. :rtype: str \"\"\"", "REVOKED:证书已吊销。 - UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :return: The status of this ShowCertificateResponse.", "'str', 'type': 'str', 'brand': 'str', 'push_support': 'str', 'revoke_reason': 'str', 'signature_algrithm': 'str', 'issue_time': 'str',", "authentification of this ShowCertificateResponse. 
:type: list[Authentification] \"\"\" self._authentification = authentification def to_dict(self): \"\"\"Returns", "six from huaweicloudsdkcore.sdk_response import SdkResponse from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization class ShowCertificateResponse(SdkResponse): \"\"\" Attributes:", "REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。 - UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :param status: The status", "order_id @property def name(self): \"\"\"Gets the name of this ShowCertificateResponse. 证书名称。 :return: The", "self._not_before @not_before.setter def not_before(self, not_before): \"\"\"Sets the not_before of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :param", "\"\"\" return self._authentification @authentification.setter def authentification(self, authentification): \"\"\"Sets the authentification of this ShowCertificateResponse.", "sans @property def domain_count(self): \"\"\"Gets the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。 :return: The", "ShowCertificateResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\"Returns true if", "authentification @property def id(self): \"\"\"Gets the id of this ShowCertificateResponse. 证书id。 :return: The", "this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :param type: The type of this ShowCertificateResponse. :type:", "'wildcard_count': 'wildcard_count', 'authentification': 'authentification' } def __init__(self, id=None, status=None, order_id=None, name=None, type=None, brand=None,", "the value is attribute type. attribute_map (dict): The key is attribute name and", "= signature_algrithm if issue_time is not None: self.issue_time = issue_time if not_before is", "model properties as a dict\"\"\" result = {} for attr, _ in six.iteritems(self.openapi_types):", "this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :param domain_type: The domain_type", "id=None, status=None, order_id=None, name=None, type=None, brand=None, push_support=None, revoke_reason=None, signature_algrithm=None, issue_time=None, not_before=None, not_after=None, validity_period=None,", "signature_algrithm of this ShowCertificateResponse. :rtype: str \"\"\" return self._signature_algrithm @signature_algrithm.setter def signature_algrithm(self, signature_algrithm):", "= id if status is not None: self.status = status if order_id is", "'sans': 'str', 'domain_count': 'int', 'wildcard_count': 'int', 'authentification': 'list[Authentification]' } attribute_map = { 'id':", "push_support of this ShowCertificateResponse. 证书是否支持推送。 :return: The push_support of this ShowCertificateResponse. :rtype: str", "of this ShowCertificateResponse. 证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 - CHECKING:证书申请审核中。 - CANCELCHECKING:取消证书申请审核中。 -", "revoke_reason: The revoke_reason of this ShowCertificateResponse. :type: str \"\"\" self._revoke_reason = revoke_reason @property", "not_after(self, not_after): \"\"\"Sets the not_after of this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :param not_after: The not_after", "status of this ShowCertificateResponse. 证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 - CHECKING:证书申请审核中。 - CANCELCHECKING:取消证书申请审核中。", "authentification: The authentification of this ShowCertificateResponse. 
:type: list[Authentification] \"\"\" self._authentification = authentification def", "equal\"\"\" if not isinstance(other, ShowCertificateResponse): return False return self.__dict__ == other.__dict__ def __ne__(self,", "ShowCertificateResponse. 证书绑定的附加域名信息。 :param sans: The sans of this ShowCertificateResponse. :type: str \"\"\" self._sans", "hasattr(x, \"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif", "if validity_period is not None: self.validity_period = validity_period if validation_method is not None:", "\"\"\" return self._wildcard_count @wildcard_count.setter def wildcard_count(self, wildcard_count): \"\"\"Sets the wildcard_count of this ShowCertificateResponse.", "definition. \"\"\" sensitive_list = [] openapi_types = { 'id': 'str', 'status': 'str', 'order_id':", "None self._not_after = None self._validity_period = None self._validation_method = None self._domain_type = None", "of this ShowCertificateResponse. 证书是否支持推送。 :return: The push_support of this ShowCertificateResponse. :rtype: str \"\"\"", "ShowCertificateResponse. :type: str \"\"\" self._brand = brand @property def push_support(self): \"\"\"Gets the push_support", "not_before=None, not_after=None, validity_period=None, validation_method=None, domain_type=None, domain=None, sans=None, domain_count=None, wildcard_count=None, authentification=None): \"\"\"ShowCertificateResponse - a", "status): \"\"\"Sets the status of this ShowCertificateResponse. 证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 -", "else: if attr in self.sensitive_list: result[attr] = \"****\" else: result[attr] = value return", "elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\")", "json key in definition. \"\"\" sensitive_list = [] openapi_types = { 'id': 'str',", "name(self, name): \"\"\"Sets the name of this ShowCertificateResponse. 证书名称。 :param name: The name", "def validity_period(self): \"\"\"Gets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :return: The validity_period of", "= domain_type if domain is not None: self.domain = domain if sans is", "The not_after of this ShowCertificateResponse. :type: str \"\"\" self._not_after = not_after @property def", "of this ShowCertificateResponse. 签名算法。 :param signature_algrithm: The signature_algrithm of this ShowCertificateResponse. :type: str", "representation of the model\"\"\" import simplejson as json if six.PY2: import sys reload(sys)", "this ShowCertificateResponse. 证书可绑定域名个数。 :return: The domain_count of this ShowCertificateResponse. :rtype: int \"\"\" return", "is not None: self.order_id = order_id if name is not None: self.name =", "The status of this ShowCertificateResponse. :rtype: str \"\"\" return self._status @status.setter def status(self,", "this ShowCertificateResponse. 证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 - CHECKING:证书申请审核中。 - CANCELCHECKING:取消证书申请审核中。 - UNPASSED:证书申请未通过。", "the sans of this ShowCertificateResponse. 证书绑定的附加域名信息。 :param sans: The sans of this ShowCertificateResponse.", "签名算法。 :return: The signature_algrithm of this ShowCertificateResponse. :rtype: str \"\"\" return self._signature_algrithm @signature_algrithm.setter", "@property def not_before(self): \"\"\"Gets the not_before of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :return: The not_before", "ShowCertificateResponse. 证书是否支持推送。 :return: The push_support of this ShowCertificateResponse. 
:rtype: str \"\"\" return self._push_support", "'push_support': 'push_support', 'revoke_reason': 'revoke_reason', 'signature_algrithm': 'signature_algrithm', 'issue_time': 'issue_time', 'not_before': 'not_before', 'not_after': 'not_after', 'validity_period':", "if order_id is not None: self.order_id = order_id if name is not None:", "self._type = None self._brand = None self._push_support = None self._revoke_reason = None self._signature_algrithm", "of this ShowCertificateResponse. :type: list[Authentification] \"\"\" self._authentification = authentification def to_dict(self): \"\"\"Returns the", "None: self.issue_time = issue_time if not_before is not None: self.not_before = not_before if", "not_after(self): \"\"\"Gets the not_after of this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :return: The not_after of this", "= not_after if validity_period is not None: self.validity_period = validity_period if validation_method is", "result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items()", "ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :param validation_method: The validation_method of this ShowCertificateResponse. :type: str \"\"\" self._validation_method", "is not None: self.signature_algrithm = signature_algrithm if issue_time is not None: self.issue_time =", "not isinstance(other, ShowCertificateResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\"Returns", "self._domain_count @domain_count.setter def domain_count(self, domain_count): \"\"\"Sets the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。 :param", "this ShowCertificateResponse. :type: str \"\"\" self._issue_time = issue_time @property def not_before(self): \"\"\"Gets the", "sdk\"\"\" super(ShowCertificateResponse, self).__init__() self._id = None self._status = None self._order_id = None self._name", "'domain', 'sans': 'sans', 'domain_count': 'domain_count', 'wildcard_count': 'wildcard_count', 'authentification': 'authentification' } def __init__(self, id=None,", "of this ShowCertificateResponse. 证书绑定的附加域名信息。 :param sans: The sans of this ShowCertificateResponse. :type: str", "self._sans = None self._domain_count = None self._wildcard_count = None self._authentification = None self.discriminator", "def not_after(self): \"\"\"Gets the not_after of this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :return: The not_after of", ":type: str \"\"\" self._push_support = push_support @property def revoke_reason(self): \"\"\"Gets the revoke_reason of", "domain if sans is not None: self.sans = sans if domain_count is not", "of this ShowCertificateResponse. :rtype: str \"\"\" return self._sans @sans.setter def sans(self, sans): \"\"\"Sets", "of this ShowCertificateResponse. :rtype: str \"\"\" return self._domain_type @domain_type.setter def domain_type(self, domain_type): \"\"\"Sets", "'type', 'brand': 'brand', 'push_support': 'push_support', 'revoke_reason': 'revoke_reason', 'signature_algrithm': 'signature_algrithm', 'issue_time': 'issue_time', 'not_before': 'not_before',", "None self._authentification = None self.discriminator = None if id is not None: self.id", "not_after: The not_after of this ShowCertificateResponse. :type: str \"\"\" self._not_after = not_after @property", "authentification of this ShowCertificateResponse. 
:rtype: list[Authentification] \"\"\" return self._authentification @authentification.setter def authentification(self, authentification):", "str \"\"\" self._issue_time = issue_time @property def not_before(self): \"\"\"Gets the not_before of this", "of this ShowCertificateResponse. :rtype: str \"\"\" return self._domain @domain.setter def domain(self, domain): \"\"\"Sets", "'validation_method': 'str', 'domain_type': 'str', 'domain': 'str', 'sans': 'str', 'domain_count': 'int', 'wildcard_count': 'int', 'authentification':", "None self._order_id = None self._name = None self._type = None self._brand = None", "the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。 :param domain_count: The domain_count of this ShowCertificateResponse.", "of this ShowCertificateResponse. 证书绑定的附加域名信息。 :return: The sans of this ShowCertificateResponse. :rtype: str \"\"\"", "if domain_type is not None: self.domain_type = domain_type if domain is not None:", ":type: str \"\"\" self._status = status @property def order_id(self): \"\"\"Gets the order_id of", ":param sans: The sans of this ShowCertificateResponse. :type: str \"\"\" self._sans = sans", "brand: The brand of this ShowCertificateResponse. :type: str \"\"\" self._brand = brand @property", ":rtype: str \"\"\" return self._not_before @not_before.setter def not_before(self, not_before): \"\"\"Sets the not_before of", "str \"\"\" self._name = name @property def type(self): \"\"\"Gets the type of this", "ShowCertificateResponse. :type: int \"\"\" self._validity_period = validity_period @property def validation_method(self): \"\"\"Gets the validation_method", "type of this ShowCertificateResponse. :type: str \"\"\" self._type = type @property def brand(self):", "None: self.order_id = order_id if name is not None: self.name = name if", "= None self._status = None self._order_id = None self._name = None self._type =", "= None self._authentification = None self.discriminator = None if id is not None:", "\"\"\" return self._domain_count @domain_count.setter def domain_count(self, domain_count): \"\"\"Sets the domain_count of this ShowCertificateResponse.", "self._validation_method @validation_method.setter def validation_method(self, validation_method): \"\"\"Sets the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :param", "\"\"\"Sets the status of this ShowCertificateResponse. 证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 - CHECKING:证书申请审核中。", "import sanitize_for_serialization class ShowCertificateResponse(SdkResponse): \"\"\" Attributes: openapi_types (dict): The key is attribute name", "None self._type = None self._brand = None self._push_support = None self._revoke_reason = None", ":return: The not_before of this ShowCertificateResponse. :rtype: str \"\"\" return self._not_before @not_before.setter def", "brand(self): \"\"\"Gets the brand of this ShowCertificateResponse. 证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :return: The brand of", "order_id if name is not None: self.name = name if type is not", "this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :param not_before: The not_before of this ShowCertificateResponse. :type: str \"\"\"", "None self._push_support = None self._revoke_reason = None self._signature_algrithm = None self._issue_time = None", ":rtype: str \"\"\" return self._status @status.setter def status(self, status): \"\"\"Sets the status of", "\"\"\"Gets the domain of this ShowCertificateResponse. 
证书绑定域名。 :return: The domain of this ShowCertificateResponse.", "def id(self): \"\"\"Gets the id of this ShowCertificateResponse. 证书id。 :return: The id of", "The not_before of this ShowCertificateResponse. :rtype: str \"\"\" return self._not_before @not_before.setter def not_before(self,", "None self.discriminator = None if id is not None: self.id = id if", "id is not None: self.id = id if status is not None: self.status", "of this ShowCertificateResponse. :type: str \"\"\" self._order_id = order_id @property def name(self): \"\"\"Gets", "self._domain_count = None self._wildcard_count = None self._authentification = None self.discriminator = None if", "def type(self, type): \"\"\"Sets the type of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :param", "ShowCertificateResponse. :rtype: str \"\"\" return self._domain @domain.setter def domain(self, domain): \"\"\"Sets the domain", "'domain_type': 'domain_type', 'domain': 'domain', 'sans': 'sans', 'domain_count': 'domain_count', 'wildcard_count': 'wildcard_count', 'authentification': 'authentification' }", "issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :param issue_time: The issue_time of this ShowCertificateResponse. :type:", "push_support: The push_support of this ShowCertificateResponse. :type: str \"\"\" self._push_support = push_support @property", "domain_count(self, domain_count): \"\"\"Sets the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。 :param domain_count: The domain_count", "is not None: self.domain_type = domain_type if domain is not None: self.domain =", "not None: self.issue_time = issue_time if not_before is not None: self.not_before = not_before", "@authentification.setter def authentification(self, authentification): \"\"\"Sets the authentification of this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :param authentification:", "self._validity_period = None self._validation_method = None self._domain_type = None self._domain = None self._sans", "The validity_period of this ShowCertificateResponse. :rtype: int \"\"\" return self._validity_period @validity_period.setter def validity_period(self,", "domain_count of this ShowCertificateResponse. :type: int \"\"\" self._domain_count = domain_count @property def wildcard_count(self):", "issue_time=None, not_before=None, not_after=None, validity_period=None, validation_method=None, domain_type=None, domain=None, sans=None, domain_count=None, wildcard_count=None, authentification=None): \"\"\"ShowCertificateResponse -", "type: The type of this ShowCertificateResponse. :type: str \"\"\" self._type = type @property", "证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 - CHECKING:证书申请审核中。 - CANCELCHECKING:取消证书申请审核中。 - UNPASSED:证书申请未通过。 - EXPIRED:证书已过期。", "ShowCertificateResponse. :rtype: int \"\"\" return self._wildcard_count @wildcard_count.setter def wildcard_count(self, wildcard_count): \"\"\"Sets the wildcard_count", "of this ShowCertificateResponse. 证书绑定域名。 :param domain: The domain of this ShowCertificateResponse. :type: str", "status of this ShowCertificateResponse. :type: str \"\"\" self._status = status @property def order_id(self):", "of this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :return: The not_after of this ShowCertificateResponse. :rtype: str \"\"\"", "None: self.revoke_reason = revoke_reason if signature_algrithm is not None: self.signature_algrithm = signature_algrithm if", "validation_method of this ShowCertificateResponse. 
:rtype: str \"\"\" return self._validation_method @validation_method.setter def validation_method(self, validation_method):", "not None: self.id = id if status is not None: self.status = status", "return self._wildcard_count @wildcard_count.setter def wildcard_count(self, wildcard_count): \"\"\"Sets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。", "validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :param validation_method: The validation_method of this ShowCertificateResponse. :type:", ":type: str \"\"\" self._domain_type = domain_type @property def domain(self): \"\"\"Gets the domain of", "is attribute name and the value is json key in definition. \"\"\" sensitive_list", "not_before of this ShowCertificateResponse. :rtype: str \"\"\" return self._not_before @not_before.setter def not_before(self, not_before):", "'domain_type', 'domain': 'domain', 'sans': 'sans', 'domain_count': 'domain_count', 'wildcard_count': 'wildcard_count', 'authentification': 'authentification' } def", "the domain of this ShowCertificateResponse. 证书绑定域名。 :param domain: The domain of this ShowCertificateResponse.", "The not_before of this ShowCertificateResponse. :type: str \"\"\" self._not_before = not_before @property def", "if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else", "\"\"\"Sets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :param validity_period: The validity_period of this", "The key is attribute name and the value is json key in definition.", ":type: str \"\"\" self._brand = brand @property def push_support(self): \"\"\"Gets the push_support of", "None self._brand = None self._push_support = None self._revoke_reason = None self._signature_algrithm = None", "The wildcard_count of this ShowCertificateResponse. :type: int \"\"\" self._wildcard_count = wildcard_count @property def", ":type: str \"\"\" self._revoke_reason = revoke_reason @property def signature_algrithm(self): \"\"\"Gets the signature_algrithm of", ":param issue_time: The issue_time of this ShowCertificateResponse. :type: str \"\"\" self._issue_time = issue_time", "ShowCertificateResponse. :type: str \"\"\" self._not_before = not_before @property def not_after(self): \"\"\"Gets the not_after", "= None self._name = None self._type = None self._brand = None self._push_support =", "name is not None: self.name = name if type is not None: self.type", "\"\"\"Gets the domain_type of this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名", "\"\"\" return self._name @name.setter def name(self, name): \"\"\"Sets the name of this ShowCertificateResponse.", "of this ShowCertificateResponse. :rtype: str \"\"\" return self._id @id.setter def id(self, id): \"\"\"Sets", "id of this ShowCertificateResponse. :type: str \"\"\" self._id = id @property def status(self):", "model\"\"\" import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding(\"utf-8\") return json.dumps(sanitize_for_serialization(self),", ":return: The revoke_reason of this ShowCertificateResponse. :rtype: str \"\"\" return self._revoke_reason @revoke_reason.setter def", "The domain_type of this ShowCertificateResponse. 
:type: str \"\"\" self._domain_type = domain_type @property def", "'not_after', 'validity_period': 'validity_period', 'validation_method': 'validation_method', 'domain_type': 'domain_type', 'domain': 'domain', 'sans': 'sans', 'domain_count': 'domain_count',", "'str', 'push_support': 'str', 'revoke_reason': 'str', 'signature_algrithm': 'str', 'issue_time': 'str', 'not_before': 'str', 'not_after': 'str',", "= brand if push_support is not None: self.push_support = push_support if revoke_reason is", "ShowCertificateResponse. 证书id。 :param id: The id of this ShowCertificateResponse. :type: str \"\"\" self._id", "the push_support of this ShowCertificateResponse. 证书是否支持推送。 :param push_support: The push_support of this ShowCertificateResponse.", "self._order_id = order_id @property def name(self): \"\"\"Gets the name of this ShowCertificateResponse. 证书名称。", "revoke_reason @property def signature_algrithm(self): \"\"\"Gets the signature_algrithm of this ShowCertificateResponse. 签名算法。 :return: The", "\"\"\"Sets the order_id of this ShowCertificateResponse. 订单id。 :param order_id: The order_id of this", "The sans of this ShowCertificateResponse. :rtype: str \"\"\" return self._sans @sans.setter def sans(self,", "x: x.to_dict() if hasattr(x, \"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"): result[attr]", "issue_time): \"\"\"Sets the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :param issue_time: The issue_time of", "'issue_time', 'not_before': 'not_before', 'not_after': 'not_after', 'validity_period': 'validity_period', 'validation_method': 'validation_method', 'domain_type': 'domain_type', 'domain': 'domain',", "'brand': 'str', 'push_support': 'str', 'revoke_reason': 'str', 'signature_algrithm': 'str', 'issue_time': 'str', 'not_before': 'str', 'not_after':", "return self._type @type.setter def type(self, type): \"\"\"Sets the type of this ShowCertificateResponse. 证书类型。取值如下:", "of this ShowCertificateResponse. :type: str \"\"\" self._revoke_reason = revoke_reason @property def signature_algrithm(self): \"\"\"Gets", "\"\"\" self._domain_type = domain_type @property def domain(self): \"\"\"Gets the domain of this ShowCertificateResponse.", "properties as a dict\"\"\" result = {} for attr, _ in six.iteritems(self.openapi_types): value", "is not None: self.sans = sans if domain_count is not None: self.domain_count =", "@property def authentification(self): \"\"\"Gets the authentification of this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :return: The authentification", "REVOKED:证书已吊销。 - UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :param status: The status of this", "sensitive_list = [] openapi_types = { 'id': 'str', 'status': 'str', 'order_id': 'str', 'name':", "证书吊销原因。 :param revoke_reason: The revoke_reason of this ShowCertificateResponse. :type: str \"\"\" self._revoke_reason =", "= getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if", "\"\"\"Returns the string representation of the model\"\"\" import simplejson as json if six.PY2:", "None: self.type = type if brand is not None: self.brand = brand if", "证书是否支持推送。 :param push_support: The push_support of this ShowCertificateResponse. :type: str \"\"\" self._push_support =", "ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :param domain_type: The domain_type of", "type(self, type): \"\"\"Sets the type of this ShowCertificateResponse. 
        Certificate type. Valid values: DV_SSL_CERT, DV_SSL_CERT_BASIC, EV_SSL_CERT, EV_SSL_CERT_PRO, OV_SSL_CERT, OV_SSL_CERT_PRO.

        :param type:
:rtype: str", "str \"\"\" self._signature_algrithm = signature_algrithm @property def issue_time(self): \"\"\"Gets the issue_time of this", "the domain_type of this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :return:", "the not_after of this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :return: The not_after of this ShowCertificateResponse. :rtype:", "= value return result def to_str(self): \"\"\"Returns the string representation of the model\"\"\"", "str \"\"\" return self._domain @domain.setter def domain(self, domain): \"\"\"Sets the domain of this", "证书名称。 :return: The name of this ShowCertificateResponse. :rtype: str \"\"\" return self._name @name.setter", "of this ShowCertificateResponse. 证书可绑定域名个数。 :return: The domain_count of this ShowCertificateResponse. :rtype: int \"\"\"", "else x, value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict):", "'not_after': 'str', 'validity_period': 'int', 'validation_method': 'str', 'domain_type': 'str', 'domain': 'str', 'sans': 'str', 'domain_count':", "self._revoke_reason = None self._signature_algrithm = None self._issue_time = None self._not_before = None self._not_after", "\"\"\"Sets the domain of this ShowCertificateResponse. 证书绑定域名。 :param domain: The domain of this", ":return: The name of this ShowCertificateResponse. :rtype: str \"\"\" return self._name @name.setter def", "of this ShowCertificateResponse. 证书绑定域名。 :return: The domain of this ShowCertificateResponse. :rtype: str \"\"\"", ":rtype: str \"\"\" return self._type @type.setter def type(self, type): \"\"\"Sets the type of", "\"\"\"Gets the signature_algrithm of this ShowCertificateResponse. 签名算法。 :return: The signature_algrithm of this ShowCertificateResponse.", "\"\"\"Sets the authentification of this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :param authentification: The authentification of this", "domain_count if wildcard_count is not None: self.wildcard_count = wildcard_count if authentification is not", "\"\"\" return self._id @id.setter def id(self, id): \"\"\"Sets the id of this ShowCertificateResponse.", "self._id @id.setter def id(self, id): \"\"\"Sets the id of this ShowCertificateResponse. 证书id。 :param", "issue_time(self, issue_time): \"\"\"Sets the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :param issue_time: The issue_time", "this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :return: The not_after of this ShowCertificateResponse. :rtype: str \"\"\" return", "status=None, order_id=None, name=None, type=None, brand=None, push_support=None, revoke_reason=None, signature_algrithm=None, issue_time=None, not_before=None, not_after=None, validity_period=None, validation_method=None,", "- MULTI_DOMAIN:多域名 :return: The domain_type of this ShowCertificateResponse. :rtype: str \"\"\" return self._domain_type", "type of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :param type: The type of this", "@property def validation_method(self): \"\"\"Gets the validation_method of this ShowCertificateResponse. 
        Domain validation method. Valid values: DNS, FILE, EMAIL.

        :return: The validation_method
        :type: str
        """
        self._domain_type = domain_type

    @property
    def domain(self):
        """Gets the domain
        :rtype: str
        """
        return self._sans

    @sans.setter
    def sans(self, sans):
        """Sets the
        Certificate status. Valid values:
        - PAID: the certificate has been paid for and is awaiting application.
        - ISSUED: the certificate has been issued.
        Domain type. Valid values:
        -
        :rtype: str
        """
        return self._order_id

    @order_id.setter
    def order_id(self, order_id):
        """Sets
        :type: list[Authentification]
        """
        self._authentification = authentification

    def to_dict(self):
        """Returns the model properties
        Whether the certificate supports push.

        :return: The push_support
        Certificate type. Valid values: DV_SSL_CERT, DV_SSL_CERT_BASIC, EV_SSL_CERT, EV_SSL_CERT_PRO, OV_SSL_CERT, OV_SSL_CERT_PRO.
        :type: str
        """
        self._id =
        :type: str
        """
        self._not_after = not_after

    @property
    def validity_period(self):
        :rtype: list[Authentification]
        """
        return self._authentification

    @authentification.setter
    def authentification(self, authentification):
        """Sets the
attribute_map (dict): The key is attribute name", "- a model defined in huaweicloud sdk\"\"\" super(ShowCertificateResponse, self).__init__() self._id = None self._status", "SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :param status: The status of this ShowCertificateResponse. :type: str \"\"\"", "of this ShowCertificateResponse. :rtype: str \"\"\" return self._signature_algrithm @signature_algrithm.setter def signature_algrithm(self, signature_algrithm): \"\"\"Sets", "'str', 'brand': 'str', 'push_support': 'str', 'revoke_reason': 'str', 'signature_algrithm': 'str', 'issue_time': 'str', 'not_before': 'str',", "@property def brand(self): \"\"\"Gets the brand of this ShowCertificateResponse. 证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :return: The", "domain_count @property def wildcard_count(self): \"\"\"Gets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :return: The", "= push_support @property def revoke_reason(self): \"\"\"Gets the revoke_reason of this ShowCertificateResponse. 证书吊销原因。 :return:", ":param revoke_reason: The revoke_reason of this ShowCertificateResponse. :type: str \"\"\" self._revoke_reason = revoke_reason", "not None: self.authentification = authentification @property def id(self): \"\"\"Gets the id of this", "def order_id(self, order_id): \"\"\"Sets the order_id of this ShowCertificateResponse. 订单id。 :param order_id: The", "str \"\"\" self._push_support = push_support @property def revoke_reason(self): \"\"\"Gets the revoke_reason of this", "type): \"\"\"Sets the type of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :param type: The", "this ShowCertificateResponse. :type: str \"\"\" self._sans = sans @property def domain_count(self): \"\"\"Gets the", "item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() )) else: if attr in self.sensitive_list:", "self.order_id = order_id if name is not None: self.name = name if type", "domain of this ShowCertificateResponse. :type: str \"\"\" self._domain = domain @property def sans(self):", "push_support(self): \"\"\"Gets the push_support of this ShowCertificateResponse. 证书是否支持推送。 :return: The push_support of this", "None self._name = None self._type = None self._brand = None self._push_support = None", "this ShowCertificateResponse. :rtype: str \"\"\" return self._domain @domain.setter def domain(self, domain): \"\"\"Sets the", "ShowCertificateResponse. :rtype: str \"\"\" return self._type @type.setter def type(self, type): \"\"\"Sets the type", "this ShowCertificateResponse. :type: int \"\"\" self._domain_count = domain_count @property def wildcard_count(self): \"\"\"Gets the", "authentification=None): \"\"\"ShowCertificateResponse - a model defined in huaweicloud sdk\"\"\" super(ShowCertificateResponse, self).__init__() self._id =", "@property def not_after(self): \"\"\"Gets the not_after of this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :return: The not_after", "not_after is not None: self.not_after = not_after if validity_period is not None: self.validity_period", "of this ShowCertificateResponse. :type: str \"\"\" self._sans = sans @property def domain_count(self): \"\"\"Gets", ":rtype: str \"\"\" return self._domain_type @domain_type.setter def domain_type(self, domain_type): \"\"\"Sets the domain_type of", "of this ShowCertificateResponse. 订单id。 :param order_id: The order_id of this ShowCertificateResponse. 
:type: str", "\"\"\" self._push_support = push_support @property def revoke_reason(self): \"\"\"Gets the revoke_reason of this ShowCertificateResponse.", ":param validity_period: The validity_period of this ShowCertificateResponse. :type: int \"\"\" self._validity_period = validity_period", "return self._issue_time @issue_time.setter def issue_time(self, issue_time): \"\"\"Sets the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。", "\"\"\"Sets the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :param issue_time: The issue_time of this", "order_id of this ShowCertificateResponse. :type: str \"\"\" self._order_id = order_id @property def name(self):", "self._domain @domain.setter def domain(self, domain): \"\"\"Sets the domain of this ShowCertificateResponse. 证书绑定域名。 :param", "self._order_id = None self._name = None self._type = None self._brand = None self._push_support", "'sans': 'sans', 'domain_count': 'domain_count', 'wildcard_count': 'wildcard_count', 'authentification': 'authentification' } def __init__(self, id=None, status=None,", "the authentification of this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :param authentification: The authentification of this ShowCertificateResponse.", "self._signature_algrithm = signature_algrithm @property def issue_time(self): \"\"\"Gets the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。", "证书可绑定域名个数。 :return: The domain_count of this ShowCertificateResponse. :rtype: int \"\"\" return self._domain_count @domain_count.setter", "six.PY2: import sys reload(sys) sys.setdefaultencoding(\"utf-8\") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) def __repr__(self): \"\"\"For `print`\"\"\" return", "\"\"\"Sets the signature_algrithm of this ShowCertificateResponse. 签名算法。 :param signature_algrithm: The signature_algrithm of this", "not None: self.signature_algrithm = signature_algrithm if issue_time is not None: self.issue_time = issue_time", ":return: The brand of this ShowCertificateResponse. :rtype: str \"\"\" return self._brand @brand.setter def", "this ShowCertificateResponse. :rtype: str \"\"\" return self._issue_time @issue_time.setter def issue_time(self, issue_time): \"\"\"Sets the", "the type of this ShowCertificateResponse. 
        Certificate type. Valid values: DV_SSL_CERT, DV_SSL_CERT_BASIC, EV_SSL_CERT, EV_SSL_CERT_PRO, OV_SSL_CERT, OV_SSL_CERT_PRO.

        :return: The type of this
证书吊销原因。 :param revoke_reason: The revoke_reason of this", "self.validity_period = validity_period if validation_method is not None: self.validation_method = validation_method if domain_type", "'not_before': 'str', 'not_after': 'str', 'validity_period': 'int', 'validation_method': 'str', 'domain_type': 'str', 'domain': 'str', 'sans':", "name): \"\"\"Sets the name of this ShowCertificateResponse. 证书名称。 :param name: The name of", ":rtype: str \"\"\" return self._brand @brand.setter def brand(self, brand): \"\"\"Sets the brand of", "str \"\"\" return self._id @id.setter def id(self, id): \"\"\"Sets the id of this", "str \"\"\" self._id = id @property def status(self): \"\"\"Gets the status of this", "证书可绑定附加域名个数。 :param wildcard_count: The wildcard_count of this ShowCertificateResponse. :type: int \"\"\" self._wildcard_count =", ":return: The id of this ShowCertificateResponse. :rtype: str \"\"\" return self._id @id.setter def", "as json if six.PY2: import sys reload(sys) sys.setdefaultencoding(\"utf-8\") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) def __repr__(self):", "the type of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :param type: The type of", "@issue_time.setter def issue_time(self, issue_time): \"\"\"Sets the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :param issue_time:", ":type: int \"\"\" self._wildcard_count = wildcard_count @property def authentification(self): \"\"\"Gets the authentification of", "if authentification is not None: self.authentification = authentification @property def id(self): \"\"\"Gets the", "None self._sans = None self._domain_count = None self._wildcard_count = None self._authentification = None", "self._domain_type = None self._domain = None self._sans = None self._domain_count = None self._wildcard_count", "simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding(\"utf-8\") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) def", "DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :param type: The type of this ShowCertificateResponse. :type: str \"\"\" self._type", "of this ShowCertificateResponse. :rtype: int \"\"\" return self._wildcard_count @wildcard_count.setter def wildcard_count(self, wildcard_count): \"\"\"Sets", "= not_before if not_after is not None: self.not_after = not_after if validity_period is", "The authentification of this ShowCertificateResponse. :type: list[Authentification] \"\"\" self._authentification = authentification def to_dict(self):", "def authentification(self): \"\"\"Gets the authentification of this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :return: The authentification of", "@validation_method.setter def validation_method(self, validation_method): \"\"\"Sets the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :param validation_method:", "= None self._not_after = None self._validity_period = None self._validation_method = None self._domain_type =", "of this ShowCertificateResponse. 
域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :param domain_type: The", "'push_support': 'str', 'revoke_reason': 'str', 'signature_algrithm': 'str', 'issue_time': 'str', 'not_before': 'str', 'not_after': 'str', 'validity_period':", "'signature_algrithm': 'signature_algrithm', 'issue_time': 'issue_time', 'not_before': 'not_before', 'not_after': 'not_after', 'validity_period': 'validity_period', 'validation_method': 'validation_method', 'domain_type':", "a model defined in huaweicloud sdk\"\"\" super(ShowCertificateResponse, self).__init__() self._id = None self._status =", "push_support of this ShowCertificateResponse. :rtype: str \"\"\" return self._push_support @push_support.setter def push_support(self, push_support):", "'id', 'status': 'status', 'order_id': 'order_id', 'name': 'name', 'type': 'type', 'brand': 'brand', 'push_support': 'push_support',", "\"\"\"Gets the authentification of this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :return: The authentification of this ShowCertificateResponse.", "domain: The domain of this ShowCertificateResponse. :type: str \"\"\" self._domain = domain @property", "domain_count=None, wildcard_count=None, authentification=None): \"\"\"ShowCertificateResponse - a model defined in huaweicloud sdk\"\"\" super(ShowCertificateResponse, self).__init__()", "None self._validation_method = None self._domain_type = None self._domain = None self._sans = None", "ShowCertificateResponse. :rtype: str \"\"\" return self._issue_time @issue_time.setter def issue_time(self, issue_time): \"\"\"Sets the issue_time", "def domain_type(self): \"\"\"Gets the domain_type of this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符", "domain(self, domain): \"\"\"Sets the domain of this ShowCertificateResponse. 证书绑定域名。 :param domain: The domain", "is not None: self.type = type if brand is not None: self.brand =", "return result def to_str(self): \"\"\"Returns the string representation of the model\"\"\" import simplejson", "self._not_after = not_after @property def validity_period(self): \"\"\"Gets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。", "= issue_time if not_before is not None: self.not_before = not_before if not_after is", ":return: The domain of this ShowCertificateResponse. :rtype: str \"\"\" return self._domain @domain.setter def", "'wildcard_count': 'int', 'authentification': 'list[Authentification]' } attribute_map = { 'id': 'id', 'status': 'status', 'order_id':", "objects are equal\"\"\" if not isinstance(other, ShowCertificateResponse): return False return self.__dict__ == other.__dict__", "None: self.push_support = push_support if revoke_reason is not None: self.revoke_reason = revoke_reason if", "this ShowCertificateResponse. 证书是否支持推送。 :param push_support: The push_support of this ShowCertificateResponse. :type: str \"\"\"", "def __eq__(self, other): \"\"\"Returns true if both objects are equal\"\"\" if not isinstance(other,", "= None self._brand = None self._push_support = None self._revoke_reason = None self._signature_algrithm =", "return self._domain_count @domain_count.setter def domain_count(self, domain_count): \"\"\"Sets the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。", "ShowCertificateResponse. 证书吊销原因。 :param revoke_reason: The revoke_reason of this ShowCertificateResponse. 
:type: str \"\"\" self._revoke_reason", ":type: str \"\"\" self._signature_algrithm = signature_algrithm @property def issue_time(self): \"\"\"Gets the issue_time of", "not_before of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :return: The not_before of this ShowCertificateResponse. :rtype: str", "def __ne__(self, other): \"\"\"Returns true if both objects are not equal\"\"\" return not", ":rtype: str \"\"\" return self._sans @sans.setter def sans(self, sans): \"\"\"Sets the sans of", "\"\"\" self._brand = brand @property def push_support(self): \"\"\"Gets the push_support of this ShowCertificateResponse.", "name of this ShowCertificateResponse. 证书名称。 :param name: The name of this ShowCertificateResponse. :type:", "ShowCertificateResponse. 签名算法。 :param signature_algrithm: The signature_algrithm of this ShowCertificateResponse. :type: str \"\"\" self._signature_algrithm", "None: self.name = name if type is not None: self.type = type if", "@property def validity_period(self): \"\"\"Gets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :return: The validity_period", "ShowCertificateResponse. :rtype: str \"\"\" return self._validation_method @validation_method.setter def validation_method(self, validation_method): \"\"\"Sets the validation_method", ":type: int \"\"\" self._domain_count = domain_count @property def wildcard_count(self): \"\"\"Gets the wildcard_count of", "ShowCertificateResponse. 证书是否支持推送。 :param push_support: The push_support of this ShowCertificateResponse. :type: str \"\"\" self._push_support", "revoke_reason of this ShowCertificateResponse. :rtype: str \"\"\" return self._revoke_reason @revoke_reason.setter def revoke_reason(self, revoke_reason):", "- EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。 - UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :return:", "Attributes: openapi_types (dict): The key is attribute name and the value is attribute", "this ShowCertificateResponse. 证书可绑定附加域名个数。 :return: The wildcard_count of this ShowCertificateResponse. :rtype: int \"\"\" return", "\"\"\" self._not_after = not_after @property def validity_period(self): \"\"\"Gets the validity_period of this ShowCertificateResponse.", "the sans of this ShowCertificateResponse. 证书绑定的附加域名信息。 :return: The sans of this ShowCertificateResponse. :rtype:", "ShowCertificateResponse. 证书绑定域名。 :return: The domain of this ShowCertificateResponse. :rtype: str \"\"\" return self._domain", "of this ShowCertificateResponse. :rtype: str \"\"\" return self._status @status.setter def status(self, status): \"\"\"Sets", "if hasattr(item[1], \"to_dict\") else item, value.items() )) else: if attr in self.sensitive_list: result[attr]", "self._id = id @property def status(self): \"\"\"Gets the status of this ShowCertificateResponse. 证书状态。取值如下:", "ShowCertificateResponse. 证书有效期,按月为单位。 :param validity_period: The validity_period of this ShowCertificateResponse. :type: int \"\"\" self._validity_period", "in self.sensitive_list: result[attr] = \"****\" else: result[attr] = value return result def to_str(self):", "domain_count is not None: self.domain_count = domain_count if wildcard_count is not None: self.wildcard_count", "== other.__dict__ def __ne__(self, other): \"\"\"Returns true if both objects are not equal\"\"\"", "ShowCertificateResponse. 证书可绑定附加域名个数。 :param wildcard_count: The wildcard_count of this ShowCertificateResponse. 
:type: int \"\"\" self._wildcard_count", "EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :return: The type of this ShowCertificateResponse. :rtype: str \"\"\" return self._type @type.setter", "validity_period=None, validation_method=None, domain_type=None, domain=None, sans=None, domain_count=None, wildcard_count=None, authentification=None): \"\"\"ShowCertificateResponse - a model defined", ":param signature_algrithm: The signature_algrithm of this ShowCertificateResponse. :type: str \"\"\" self._signature_algrithm = signature_algrithm", ":type: str \"\"\" self._issue_time = issue_time @property def not_before(self): \"\"\"Gets the not_before of", "signature_algrithm @property def issue_time(self): \"\"\"Gets the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :return: The", "sans is not None: self.sans = sans if domain_count is not None: self.domain_count", "= name if type is not None: self.type = type if brand is", "self.issue_time = issue_time if not_before is not None: self.not_before = not_before if not_after", "[] openapi_types = { 'id': 'str', 'status': 'str', 'order_id': 'str', 'name': 'str', 'type':", "def type(self): \"\"\"Gets the type of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :return: The", "the revoke_reason of this ShowCertificateResponse. 证书吊销原因。 :return: The revoke_reason of this ShowCertificateResponse. :rtype:", "@status.setter def status(self, status): \"\"\"Sets the status of this ShowCertificateResponse. 证书状态。取值如下: - PAID:证书已支付,待申请证书。", "of this ShowCertificateResponse. :type: str \"\"\" self._type = type @property def brand(self): \"\"\"Gets", "str \"\"\" return self._brand @brand.setter def brand(self, brand): \"\"\"Sets the brand of this", "this ShowCertificateResponse. :rtype: str \"\"\" return self._signature_algrithm @signature_algrithm.setter def signature_algrithm(self, signature_algrithm): \"\"\"Sets the", ":rtype: str \"\"\" return self._validation_method @validation_method.setter def validation_method(self, validation_method): \"\"\"Sets the validation_method of", "\"\"\"Gets the not_after of this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :return: The not_after of this ShowCertificateResponse.", "push_support if revoke_reason is not None: self.revoke_reason = revoke_reason if signature_algrithm is not", "ShowCertificateResponse. 证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 - CHECKING:证书申请审核中。 - CANCELCHECKING:取消证书申请审核中。 - UNPASSED:证书申请未通过。 -", ":param authentification: The authentification of this ShowCertificateResponse. :type: list[Authentification] \"\"\" self._authentification = authentification", "\"\"\" self._type = type @property def brand(self): \"\"\"Gets the brand of this ShowCertificateResponse.", "not None: self.brand = brand if push_support is not None: self.push_support = push_support", "ShowCertificateResponse. 证书绑定的附加域名信息。 :return: The sans of this ShowCertificateResponse. :rtype: str \"\"\" return self._sans", ":rtype: int \"\"\" return self._validity_period @validity_period.setter def validity_period(self, validity_period): \"\"\"Sets the validity_period of", "validation_method: The validation_method of this ShowCertificateResponse. 
:type: str \"\"\" self._validation_method = validation_method @property", "id if status is not None: self.status = status if order_id is not", "None: self.not_before = not_before if not_after is not None: self.not_after = not_after if", "domain_type(self): \"\"\"Gets the domain_type of this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 -", "push_support of this ShowCertificateResponse. :type: str \"\"\" self._push_support = push_support @property def revoke_reason(self):", "ShowCertificateResponse. :type: int \"\"\" self._domain_count = domain_count @property def wildcard_count(self): \"\"\"Gets the wildcard_count", "'wildcard_count', 'authentification': 'authentification' } def __init__(self, id=None, status=None, order_id=None, name=None, type=None, brand=None, push_support=None,", "The issue_time of this ShowCertificateResponse. :rtype: str \"\"\" return self._issue_time @issue_time.setter def issue_time(self,", "status if order_id is not None: self.order_id = order_id if name is not", "this ShowCertificateResponse. :type: int \"\"\" self._validity_period = validity_period @property def validation_method(self): \"\"\"Gets the", "ShowCertificateResponse. :type: str \"\"\" self._domain = domain @property def sans(self): \"\"\"Gets the sans", "- CHECKING:证书申请审核中。 - CANCELCHECKING:取消证书申请审核中。 - UNPASSED:证书申请未通过。 - EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。 -", "return self._not_before @not_before.setter def not_before(self, not_before): \"\"\"Sets the not_before of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。", "def name(self, name): \"\"\"Sets the name of this ShowCertificateResponse. 证书名称。 :param name: The", "this ShowCertificateResponse. :rtype: str \"\"\" return self._push_support @push_support.setter def push_support(self, push_support): \"\"\"Sets the", "def domain(self): \"\"\"Gets the domain of this ShowCertificateResponse. 证书绑定域名。 :return: The domain of", "str \"\"\" return self._order_id @order_id.setter def order_id(self, order_id): \"\"\"Sets the order_id of this", "push_support(self, push_support): \"\"\"Sets the push_support of this ShowCertificateResponse. 证书是否支持推送。 :param push_support: The push_support", "self._push_support = push_support @property def revoke_reason(self): \"\"\"Gets the revoke_reason of this ShowCertificateResponse. 证书吊销原因。", "of this ShowCertificateResponse. 证书可绑定附加域名个数。 :param wildcard_count: The wildcard_count of this ShowCertificateResponse. :type: int", ":type: str \"\"\" self._domain = domain @property def sans(self): \"\"\"Gets the sans of", "@brand.setter def brand(self, brand): \"\"\"Sets the brand of this ShowCertificateResponse. 证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :param", "other): \"\"\"Returns true if both objects are not equal\"\"\" return not self ==", "revoke_reason is not None: self.revoke_reason = revoke_reason if signature_algrithm is not None: self.signature_algrithm", "The validation_method of this ShowCertificateResponse. :type: str \"\"\" self._validation_method = validation_method @property def", "if type is not None: self.type = type if brand is not None:", "id @property def status(self): \"\"\"Gets the status of this ShowCertificateResponse. 证书状态。取值如下: - PAID:证书已支付,待申请证书。", "not_before @property def not_after(self): \"\"\"Gets the not_after of this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :return: The", "ShowCertificateResponse. 
:type: str \"\"\" self._issue_time = issue_time @property def not_before(self): \"\"\"Gets the not_before", "type is not None: self.type = type if brand is not None: self.brand", "self._revoke_reason = revoke_reason @property def signature_algrithm(self): \"\"\"Gets the signature_algrithm of this ShowCertificateResponse. 签名算法。", "return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\"Returns true if both objects are", "result[attr] = \"****\" else: result[attr] = value return result def to_str(self): \"\"\"Returns the", "PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 - CHECKING:证书申请审核中。 - CANCELCHECKING:取消证书申请审核中。 - UNPASSED:证书申请未通过。 - EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。", "'id': 'str', 'status': 'str', 'order_id': 'str', 'name': 'str', 'type': 'str', 'brand': 'str', 'push_support':", "None: self.domain_count = domain_count if wildcard_count is not None: self.wildcard_count = wildcard_count if", "The wildcard_count of this ShowCertificateResponse. :rtype: int \"\"\" return self._wildcard_count @wildcard_count.setter def wildcard_count(self,", "- UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :param status: The status of this ShowCertificateResponse.", "self.revoke_reason = revoke_reason if signature_algrithm is not None: self.signature_algrithm = signature_algrithm if issue_time", "\"\"\"Sets the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :param validation_method: The validation_method of this", "def __init__(self, id=None, status=None, order_id=None, name=None, type=None, brand=None, push_support=None, revoke_reason=None, signature_algrithm=None, issue_time=None, not_before=None,", "\"\"\" return self._sans @sans.setter def sans(self, sans): \"\"\"Sets the sans of this ShowCertificateResponse.", "authentification of this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :param authentification: The authentification of this ShowCertificateResponse. :type:", "item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() )) else: if attr", "'str', 'name': 'str', 'type': 'str', 'brand': 'str', 'push_support': 'str', 'revoke_reason': 'str', 'signature_algrithm': 'str',", "= authentification @property def id(self): \"\"\"Gets the id of this ShowCertificateResponse. 证书id。 :return:", "@property def type(self): \"\"\"Gets the type of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :return:", "of this ShowCertificateResponse. 证书有效期,按月为单位。 :param validity_period: The validity_period of this ShowCertificateResponse. :type: int", "the domain_type of this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :param", "domain_count: The domain_count of this ShowCertificateResponse. :type: int \"\"\" self._domain_count = domain_count @property", "The sans of this ShowCertificateResponse. :type: str \"\"\" self._sans = sans @property def", "@domain_count.setter def domain_count(self, domain_count): \"\"\"Sets the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。 :param domain_count:", "this ShowCertificateResponse. :rtype: str \"\"\" return self._not_after @not_after.setter def not_after(self, not_after): \"\"\"Sets the", "int \"\"\" self._wildcard_count = wildcard_count @property def authentification(self): \"\"\"Gets the authentification of this", "= issue_time @property def not_before(self): \"\"\"Gets the not_before of this ShowCertificateResponse. 
证书生效时间,没有获取到有效值时为空。 :return:", "'list[Authentification]' } attribute_map = { 'id': 'id', 'status': 'status', 'order_id': 'order_id', 'name': 'name',", "push_support @property def revoke_reason(self): \"\"\"Gets the revoke_reason of this ShowCertificateResponse. 证书吊销原因。 :return: The", "= None self.discriminator = None if id is not None: self.id = id", "The id of this ShowCertificateResponse. :rtype: str \"\"\" return self._id @id.setter def id(self,", "def signature_algrithm(self): \"\"\"Gets the signature_algrithm of this ShowCertificateResponse. 签名算法。 :return: The signature_algrithm of", "wildcard_count: The wildcard_count of this ShowCertificateResponse. :type: int \"\"\" self._wildcard_count = wildcard_count @property", "def domain(self, domain): \"\"\"Sets the domain of this ShowCertificateResponse. 证书绑定域名。 :param domain: The", "dict\"\"\" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr)", "ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :param not_before: The not_before of this ShowCertificateResponse. :type: str \"\"\" self._not_before", "the order_id of this ShowCertificateResponse. 订单id。 :return: The order_id of this ShowCertificateResponse. :rtype:", "'domain': 'str', 'sans': 'str', 'domain_count': 'int', 'wildcard_count': 'int', 'authentification': 'list[Authentification]' } attribute_map =", "The validity_period of this ShowCertificateResponse. :type: int \"\"\" self._validity_period = validity_period @property def", "if name is not None: self.name = name if type is not None:", "if both objects are equal\"\"\" if not isinstance(other, ShowCertificateResponse): return False return self.__dict__", "signature_algrithm=None, issue_time=None, not_before=None, not_after=None, validity_period=None, validation_method=None, domain_type=None, domain=None, sans=None, domain_count=None, wildcard_count=None, authentification=None): \"\"\"ShowCertificateResponse", "@name.setter def name(self, name): \"\"\"Sets the name of this ShowCertificateResponse. 证书名称。 :param name:", "domain_type=None, domain=None, sans=None, domain_count=None, wildcard_count=None, authentification=None): \"\"\"ShowCertificateResponse - a model defined in huaweicloud", "ShowCertificateResponse. :rtype: str \"\"\" return self._sans @sans.setter def sans(self, sans): \"\"\"Sets the sans", ":type: str \"\"\" self._type = type @property def brand(self): \"\"\"Gets the brand of", "- SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :param domain_type: The domain_type of this ShowCertificateResponse.", "= validation_method if domain_type is not None: self.domain_type = domain_type if domain is", "of this ShowCertificateResponse. :type: str \"\"\" self._domain_type = domain_type @property def domain(self): \"\"\"Gets", "the model properties as a dict\"\"\" result = {} for attr, _ in", "wildcard_count @property def authentification(self): \"\"\"Gets the authentification of this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :return: The", "self._wildcard_count = None self._authentification = None self.discriminator = None if id is not", "this ShowCertificateResponse. :type: str \"\"\" self._status = status @property def order_id(self): \"\"\"Gets the", "order_id of this ShowCertificateResponse. 订单id。 :param order_id: The order_id of this ShowCertificateResponse. :type:", "'domain_type': 'str', 'domain': 'str', 'sans': 'str', 'domain_count': 'int', 'wildcard_count': 'int', 'authentification': 'list[Authentification]' }", "brand of this ShowCertificateResponse. 
证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :param brand: The brand of this ShowCertificateResponse.", "证书是否支持推送。 :return: The push_support of this ShowCertificateResponse. :rtype: str \"\"\" return self._push_support @push_support.setter", "ShowCertificateResponse. 订单id。 :return: The order_id of this ShowCertificateResponse. :rtype: str \"\"\" return self._order_id", "ShowCertificateResponse. :rtype: str \"\"\" return self._not_after @not_after.setter def not_after(self, not_after): \"\"\"Sets the not_after", "for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr]", ":param push_support: The push_support of this ShowCertificateResponse. :type: str \"\"\" self._push_support = push_support", "type=None, brand=None, push_support=None, revoke_reason=None, signature_algrithm=None, issue_time=None, not_before=None, not_after=None, validity_period=None, validation_method=None, domain_type=None, domain=None, sans=None,", "domain_count of this ShowCertificateResponse. :rtype: int \"\"\" return self._domain_count @domain_count.setter def domain_count(self, domain_count):", "@property def id(self): \"\"\"Gets the id of this ShowCertificateResponse. 证书id。 :return: The id", "the revoke_reason of this ShowCertificateResponse. 证书吊销原因。 :param revoke_reason: The revoke_reason of this ShowCertificateResponse.", "the model\"\"\" import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding(\"utf-8\") return", "MULTI_DOMAIN:多域名 :return: The domain_type of this ShowCertificateResponse. :rtype: str \"\"\" return self._domain_type @domain_type.setter", "\"\"\"Gets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :return: The validity_period of this ShowCertificateResponse.", "@sans.setter def sans(self, sans): \"\"\"Sets the sans of this ShowCertificateResponse. 证书绑定的附加域名信息。 :param sans:", "证书绑定的附加域名信息。 :return: The sans of this ShowCertificateResponse. :rtype: str \"\"\" return self._sans @sans.setter", "order_id of this ShowCertificateResponse. 订单id。 :return: The order_id of this ShowCertificateResponse. :rtype: str", "is not None: self.issue_time = issue_time if not_before is not None: self.not_before =", ":return: The order_id of this ShowCertificateResponse. :rtype: str \"\"\" return self._order_id @order_id.setter def", "\"\"\"Gets the sans of this ShowCertificateResponse. 证书绑定的附加域名信息。 :return: The sans of this ShowCertificateResponse.", "validity_period of this ShowCertificateResponse. :type: int \"\"\" self._validity_period = validity_period @property def validation_method(self):", "list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value", "domain_type if domain is not None: self.domain = domain if sans is not", "this ShowCertificateResponse. :rtype: str \"\"\" return self._domain_type @domain_type.setter def domain_type(self, domain_type): \"\"\"Sets the", "this ShowCertificateResponse. :rtype: str \"\"\" return self._validation_method @validation_method.setter def validation_method(self, validation_method): \"\"\"Sets the", "the name of this ShowCertificateResponse. 证书名称。 :return: The name of this ShowCertificateResponse. :rtype:", "attribute name and the value is attribute type. 
attribute_map (dict): The key is", "'not_after': 'not_after', 'validity_period': 'validity_period', 'validation_method': 'validation_method', 'domain_type': 'domain_type', 'domain': 'domain', 'sans': 'sans', 'domain_count':", "def brand(self, brand): \"\"\"Sets the brand of this ShowCertificateResponse. 证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :param brand:", "return self._brand @brand.setter def brand(self, brand): \"\"\"Sets the brand of this ShowCertificateResponse. 证书品牌。取值如下:", "issue_time of this ShowCertificateResponse. :type: str \"\"\" self._issue_time = issue_time @property def not_before(self):", "'status', 'order_id': 'order_id', 'name': 'name', 'type': 'type', 'brand': 'brand', 'push_support': 'push_support', 'revoke_reason': 'revoke_reason',", "@property def domain_count(self): \"\"\"Gets the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。 :return: The domain_count", "if validation_method is not None: self.validation_method = validation_method if domain_type is not None:", "not None: self.not_after = not_after if validity_period is not None: self.validity_period = validity_period", "'str', 'not_before': 'str', 'not_after': 'str', 'validity_period': 'int', 'validation_method': 'str', 'domain_type': 'str', 'domain': 'str',", "= revoke_reason if signature_algrithm is not None: self.signature_algrithm = signature_algrithm if issue_time is", "name @property def type(self): \"\"\"Gets the type of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。", "\"\"\" return self._revoke_reason @revoke_reason.setter def revoke_reason(self, revoke_reason): \"\"\"Sets the revoke_reason of this ShowCertificateResponse.", "sans if domain_count is not None: self.domain_count = domain_count if wildcard_count is not", "this ShowCertificateResponse. :type: str \"\"\" self._not_after = not_after @property def validity_period(self): \"\"\"Gets the", "of this ShowCertificateResponse. 证书名称。 :param name: The name of this ShowCertificateResponse. :type: str", "\"\"\"ShowCertificateResponse - a model defined in huaweicloud sdk\"\"\" super(ShowCertificateResponse, self).__init__() self._id = None", "- SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :param status: The status of this ShowCertificateResponse. :type: str", "\"\"\" self._status = status @property def order_id(self): \"\"\"Gets the order_id of this ShowCertificateResponse.", "= {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value,", "lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"):", "of this ShowCertificateResponse. 证书吊销原因。 :param revoke_reason: The revoke_reason of this ShowCertificateResponse. :type: str", "- CANCELCHECKING:取消证书申请审核中。 - UNPASSED:证书申请未通过。 - EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。 - UPLOAD:证书托管中。 -", "= validity_period @property def validation_method(self): \"\"\"Gets the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :return:", "signature_algrithm(self, signature_algrithm): \"\"\"Sets the signature_algrithm of this ShowCertificateResponse. 签名算法。 :param signature_algrithm: The signature_algrithm", "\"\"\"Gets the status of this ShowCertificateResponse. 
证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 - CHECKING:证书申请审核中。", "'issue_time': 'str', 'not_before': 'str', 'not_after': 'str', 'validity_period': 'int', 'validation_method': 'str', 'domain_type': 'str', 'domain':", "= id @property def status(self): \"\"\"Gets the status of this ShowCertificateResponse. 证书状态。取值如下: -", "\"\"\"Sets the push_support of this ShowCertificateResponse. 证书是否支持推送。 :param push_support: The push_support of this", "domain of this ShowCertificateResponse. 证书绑定域名。 :return: The domain of this ShowCertificateResponse. :rtype: str", "'str', 'domain_count': 'int', 'wildcard_count': 'int', 'authentification': 'list[Authentification]' } attribute_map = { 'id': 'id',", "not None: self.revoke_reason = revoke_reason if signature_algrithm is not None: self.signature_algrithm = signature_algrithm", "this ShowCertificateResponse. 证书名称。 :return: The name of this ShowCertificateResponse. :rtype: str \"\"\" return", "def validation_method(self, validation_method): \"\"\"Sets the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :param validation_method: The", "of this ShowCertificateResponse. :type: str \"\"\" self._name = name @property def type(self): \"\"\"Gets", "\"\"\" self._issue_time = issue_time @property def not_before(self): \"\"\"Gets the not_before of this ShowCertificateResponse.", "ShowCertificateResponse. 证书绑定域名。 :param domain: The domain of this ShowCertificateResponse. :type: str \"\"\" self._domain", "def push_support(self, push_support): \"\"\"Sets the push_support of this ShowCertificateResponse. 证书是否支持推送。 :param push_support: The", "The order_id of this ShowCertificateResponse. :rtype: str \"\"\" return self._order_id @order_id.setter def order_id(self,", "\"\"\" self._id = id @property def status(self): \"\"\"Gets the status of this ShowCertificateResponse.", "证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :return: The brand of this ShowCertificateResponse. :rtype: str \"\"\" return self._brand", "of this ShowCertificateResponse. :type: int \"\"\" self._domain_count = domain_count @property def wildcard_count(self): \"\"\"Gets", "self.brand = brand if push_support is not None: self.push_support = push_support if revoke_reason", "of this ShowCertificateResponse. 证书吊销原因。 :return: The revoke_reason of this ShowCertificateResponse. :rtype: str \"\"\"", "ShowCertificateResponse. 证书名称。 :param name: The name of this ShowCertificateResponse. :type: str \"\"\" self._name", "ShowCertificateResponse. :rtype: list[Authentification] \"\"\" return self._authentification @authentification.setter def authentification(self, authentification): \"\"\"Sets the authentification", "this ShowCertificateResponse. :rtype: int \"\"\" return self._validity_period @validity_period.setter def validity_period(self, validity_period): \"\"\"Sets the", ":return: The authentification of this ShowCertificateResponse. :rtype: list[Authentification] \"\"\" return self._authentification @authentification.setter def", "of this ShowCertificateResponse. :rtype: str \"\"\" return self._not_before @not_before.setter def not_before(self, not_before): \"\"\"Sets", "= wildcard_count @property def authentification(self): \"\"\"Gets the authentification of this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :return:", "self._type = type @property def brand(self): \"\"\"Gets the brand of this ShowCertificateResponse. 证书品牌。取值如下:", "this ShowCertificateResponse. 
:type: str \"\"\" self._type = type @property def brand(self): \"\"\"Gets the", "revoke_reason(self, revoke_reason): \"\"\"Sets the revoke_reason of this ShowCertificateResponse. 证书吊销原因。 :param revoke_reason: The revoke_reason", "@validity_period.setter def validity_period(self, validity_period): \"\"\"Sets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :param validity_period:", "ShowCertificateResponse. :type: str \"\"\" self._name = name @property def type(self): \"\"\"Gets the type", "validity_period(self, validity_period): \"\"\"Sets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :param validity_period: The validity_period", "elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda", "'validity_period', 'validation_method': 'validation_method', 'domain_type': 'domain_type', 'domain': 'domain', 'sans': 'sans', 'domain_count': 'domain_count', 'wildcard_count': 'wildcard_count',", "sys.setdefaultencoding(\"utf-8\") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) def __repr__(self): \"\"\"For `print`\"\"\" return self.to_str() def __eq__(self, other):", "result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value ))", "is not None: self.authentification = authentification @property def id(self): \"\"\"Gets the id of", "@property def status(self): \"\"\"Gets the status of this ShowCertificateResponse. 证书状态。取值如下: - PAID:证书已支付,待申请证书。 -", "from huaweicloudsdkcore.sdk_response import SdkResponse from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization class ShowCertificateResponse(SdkResponse): \"\"\" Attributes: openapi_types", "if domain_count is not None: self.domain_count = domain_count if wildcard_count is not None:", "'type': 'str', 'brand': 'str', 'push_support': 'str', 'revoke_reason': 'str', 'signature_algrithm': 'str', 'issue_time': 'str', 'not_before':", "ShowCertificateResponse. :type: str \"\"\" self._type = type @property def brand(self): \"\"\"Gets the brand", "self.sans = sans if domain_count is not None: self.domain_count = domain_count if wildcard_count", ":return: The domain_count of this ShowCertificateResponse. :rtype: int \"\"\" return self._domain_count @domain_count.setter def", "wildcard_count if authentification is not None: self.authentification = authentification @property def id(self): \"\"\"Gets", "is not None: self.domain_count = domain_count if wildcard_count is not None: self.wildcard_count =", "this ShowCertificateResponse. :type: str \"\"\" self._signature_algrithm = signature_algrithm @property def issue_time(self): \"\"\"Gets the", "status @property def order_id(self): \"\"\"Gets the order_id of this ShowCertificateResponse. 订单id。 :return: The", "to_dict(self): \"\"\"Returns the model properties as a dict\"\"\" result = {} for attr,", "def validation_method(self): \"\"\"Gets the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :return: The validation_method of", "this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :param issue_time: The issue_time of this ShowCertificateResponse. :type: str \"\"\"", "self._authentification = None self.discriminator = None if id is not None: self.id =", "this ShowCertificateResponse. :rtype: str \"\"\" return self._type @type.setter def type(self, type): \"\"\"Sets the", "\"\"\"Gets the id of this ShowCertificateResponse. 
证书id。 :return: The id of this ShowCertificateResponse.", "# coding: utf-8 import re import six from huaweicloudsdkcore.sdk_response import SdkResponse from huaweicloudsdkcore.utils.http_utils", "= \"****\" else: result[attr] = value return result def to_str(self): \"\"\"Returns the string", "\"\"\" return self._signature_algrithm @signature_algrithm.setter def signature_algrithm(self, signature_algrithm): \"\"\"Sets the signature_algrithm of this ShowCertificateResponse.", "'name': 'name', 'type': 'type', 'brand': 'brand', 'push_support': 'push_support', 'revoke_reason': 'revoke_reason', 'signature_algrithm': 'signature_algrithm', 'issue_time':", "ensure_ascii=False) def __repr__(self): \"\"\"For `print`\"\"\" return self.to_str() def __eq__(self, other): \"\"\"Returns true if", "ShowCertificateResponse. :rtype: str \"\"\" return self._domain_type @domain_type.setter def domain_type(self, domain_type): \"\"\"Sets the domain_type", "{ 'id': 'str', 'status': 'str', 'order_id': 'str', 'name': 'str', 'type': 'str', 'brand': 'str',", "ShowCertificateResponse. :rtype: int \"\"\" return self._domain_count @domain_count.setter def domain_count(self, domain_count): \"\"\"Sets the domain_count", "= signature_algrithm @property def issue_time(self): \"\"\"Gets the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :return:", "validity_period(self): \"\"\"Gets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :return: The validity_period of this", "证书失效时间,没有获取到有效值时为空。 :param not_after: The not_after of this ShowCertificateResponse. :type: str \"\"\" self._not_after =", "signature_algrithm(self): \"\"\"Gets the signature_algrithm of this ShowCertificateResponse. 签名算法。 :return: The signature_algrithm of this", "\"\"\"Sets the name of this ShowCertificateResponse. 证书名称。 :param name: The name of this", "The domain_type of this ShowCertificateResponse. :rtype: str \"\"\" return self._domain_type @domain_type.setter def domain_type(self,", "self._name = None self._type = None self._brand = None self._push_support = None self._revoke_reason", "type of this ShowCertificateResponse. :rtype: str \"\"\" return self._type @type.setter def type(self, type):", "this ShowCertificateResponse. :type: str \"\"\" self._brand = brand @property def push_support(self): \"\"\"Gets the", "this ShowCertificateResponse. 证书是否支持推送。 :return: The push_support of this ShowCertificateResponse. :rtype: str \"\"\" return", "is not None: self.not_before = not_before if not_after is not None: self.not_after =", "ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :return: The authentification of this ShowCertificateResponse. :rtype: list[Authentification] \"\"\" return self._authentification", "DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :return: The type of this ShowCertificateResponse. :rtype: str \"\"\" return self._type", "this ShowCertificateResponse. :type: str \"\"\" self._validation_method = validation_method @property def domain_type(self): \"\"\"Gets the", "is not None: self.not_after = not_after if validity_period is not None: self.validity_period =", "@not_after.setter def not_after(self, not_after): \"\"\"Sets the not_after of this ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :param not_after:", "ShowCertificateResponse. 
:type: str \"\"\" self._order_id = order_id @property def name(self): \"\"\"Gets the name", "super(ShowCertificateResponse, self).__init__() self._id = None self._status = None self._order_id = None self._name =", "key is attribute name and the value is attribute type. attribute_map (dict): The", "'str', 'issue_time': 'str', 'not_before': 'str', 'not_after': 'str', 'validity_period': 'int', 'validation_method': 'str', 'domain_type': 'str',", "ShowCertificateResponse. 证书id。 :return: The id of this ShowCertificateResponse. :rtype: str \"\"\" return self._id", "of this ShowCertificateResponse. :type: int \"\"\" self._validity_period = validity_period @property def validation_method(self): \"\"\"Gets", "sans): \"\"\"Sets the sans of this ShowCertificateResponse. 证书绑定的附加域名信息。 :param sans: The sans of", "result[attr] = value return result def to_str(self): \"\"\"Returns the string representation of the", "证书签发时间,没有获取到有效值时为空。 :return: The issue_time of this ShowCertificateResponse. :rtype: str \"\"\" return self._issue_time @issue_time.setter", "return self.to_str() def __eq__(self, other): \"\"\"Returns true if both objects are equal\"\"\" if", "of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :param validation_method: The validation_method of this ShowCertificateResponse. :type: str", "CHECKING:证书申请审核中。 - CANCELCHECKING:取消证书申请审核中。 - UNPASSED:证书申请未通过。 - EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。 - UPLOAD:证书托管中。", ":return: The not_after of this ShowCertificateResponse. :rtype: str \"\"\" return self._not_after @not_after.setter def", "openapi_types (dict): The key is attribute name and the value is attribute type.", "ShowCertificateResponse. 证书可绑定域名个数。 :param domain_count: The domain_count of this ShowCertificateResponse. :type: int \"\"\" self._domain_count", "not None: self.status = status if order_id is not None: self.order_id = order_id", "\"\"\" self._order_id = order_id @property def name(self): \"\"\"Gets the name of this ShowCertificateResponse.", "域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :return: The authentification of this ShowCertificateResponse. :rtype: list[Authentification] \"\"\" return self._authentification @authentification.setter", "domain is not None: self.domain = domain if sans is not None: self.sans", "utf-8 import re import six from huaweicloudsdkcore.sdk_response import SdkResponse from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization", "of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :return: The issue_time of this ShowCertificateResponse. :rtype: str \"\"\"", "id): \"\"\"Sets the id of this ShowCertificateResponse. 证书id。 :param id: The id of", "return False return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\"Returns true if both", "signature_algrithm: The signature_algrithm of this ShowCertificateResponse. :type: str \"\"\" self._signature_algrithm = signature_algrithm @property", "证书有效期,按月为单位。 :return: The validity_period of this ShowCertificateResponse. :rtype: int \"\"\" return self._validity_period @validity_period.setter", "str \"\"\" self._domain = domain @property def sans(self): \"\"\"Gets the sans of this", "'signature_algrithm', 'issue_time': 'issue_time', 'not_before': 'not_before', 'not_after': 'not_after', 'validity_period': 'validity_period', 'validation_method': 'validation_method', 'domain_type': 'domain_type',", ":param name: The name of this ShowCertificateResponse. 
:type: str \"\"\" self._name = name", "'authentification': 'authentification' } def __init__(self, id=None, status=None, order_id=None, name=None, type=None, brand=None, push_support=None, revoke_reason=None,", "openapi_types = { 'id': 'str', 'status': 'str', 'order_id': 'str', 'name': 'str', 'type': 'str',", "@push_support.setter def push_support(self, push_support): \"\"\"Sets the push_support of this ShowCertificateResponse. 证书是否支持推送。 :param push_support:", "ShowCertificateResponse. :type: str \"\"\" self._not_after = not_after @property def validity_period(self): \"\"\"Gets the validity_period", ":return: The wildcard_count of this ShowCertificateResponse. :rtype: int \"\"\" return self._wildcard_count @wildcard_count.setter def", "\"\"\" return self._status @status.setter def status(self, status): \"\"\"Sets the status of this ShowCertificateResponse.", "@wildcard_count.setter def wildcard_count(self, wildcard_count): \"\"\"Sets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :param wildcard_count:", "\"\"\"Sets the brand of this ShowCertificateResponse. 证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :param brand: The brand of", "@property def order_id(self): \"\"\"Gets the order_id of this ShowCertificateResponse. 订单id。 :return: The order_id", "sans=None, domain_count=None, wildcard_count=None, authentification=None): \"\"\"ShowCertificateResponse - a model defined in huaweicloud sdk\"\"\" super(ShowCertificateResponse,", "= not_after @property def validity_period(self): \"\"\"Gets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :return:", "return self._order_id @order_id.setter def order_id(self, order_id): \"\"\"Sets the order_id of this ShowCertificateResponse. 订单id。", "@property def wildcard_count(self): \"\"\"Gets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :return: The wildcard_count", "ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :param issue_time: The issue_time of this ShowCertificateResponse. :type: str \"\"\" self._issue_time", "def issue_time(self): \"\"\"Gets the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :return: The issue_time of", "signature_algrithm of this ShowCertificateResponse. :type: str \"\"\" self._signature_algrithm = signature_algrithm @property def issue_time(self):", "= None self._domain_type = None self._domain = None self._sans = None self._domain_count =", "\"\"\"Gets the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :return: The validation_method of this ShowCertificateResponse.", "证书有效期,按月为单位。 :param validity_period: The validity_period of this ShowCertificateResponse. :type: int \"\"\" self._validity_period =", "the string representation of the model\"\"\" import simplejson as json if six.PY2: import", "status: The status of this ShowCertificateResponse. :type: str \"\"\" self._status = status @property", "域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :return: The domain_type of this ShowCertificateResponse.", "def domain_type(self, domain_type): \"\"\"Sets the domain_type of this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 -", "list[Authentification] \"\"\" return self._authentification @authentification.setter def authentification(self, authentification): \"\"\"Sets the authentification of this", "if status is not None: self.status = status if order_id is not None:", "name of this ShowCertificateResponse. 
:type: str \"\"\" self._name = name @property def type(self):", "return self._name @name.setter def name(self, name): \"\"\"Sets the name of this ShowCertificateResponse. 证书名称。", "ShowCertificateResponse. 证书吊销原因。 :return: The revoke_reason of this ShowCertificateResponse. :rtype: str \"\"\" return self._revoke_reason", "key in definition. \"\"\" sensitive_list = [] openapi_types = { 'id': 'str', 'status':", "'str', 'domain_type': 'str', 'domain': 'str', 'sans': 'str', 'domain_count': 'int', 'wildcard_count': 'int', 'authentification': 'list[Authentification]'", "the value is json key in definition. \"\"\" sensitive_list = [] openapi_types =", ")) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map(", "return self._sans @sans.setter def sans(self, sans): \"\"\"Sets the sans of this ShowCertificateResponse. 证书绑定的附加域名信息。", "self._signature_algrithm = None self._issue_time = None self._not_before = None self._not_after = None self._validity_period", ":param type: The type of this ShowCertificateResponse. :type: str \"\"\" self._type = type", "attribute_map = { 'id': 'id', 'status': 'status', 'order_id': 'order_id', 'name': 'name', 'type': 'type',", "证书绑定域名。 :return: The domain of this ShowCertificateResponse. :rtype: str \"\"\" return self._domain @domain.setter", "this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :return: The domain_type of", "this ShowCertificateResponse. 证书有效期,按月为单位。 :param validity_period: The validity_period of this ShowCertificateResponse. :type: int \"\"\"", "None: self.brand = brand if push_support is not None: self.push_support = push_support if", "of this ShowCertificateResponse. :type: str \"\"\" self._signature_algrithm = signature_algrithm @property def issue_time(self): \"\"\"Gets", "this ShowCertificateResponse. 签名算法。 :return: The signature_algrithm of this ShowCertificateResponse. :rtype: str \"\"\" return", "ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :param type: The type of this ShowCertificateResponse. :type: str", "if sans is not None: self.sans = sans if domain_count is not None:", "ShowCertificateResponse. 证书失效时间,没有获取到有效值时为空。 :param not_after: The not_after of this ShowCertificateResponse. :type: str \"\"\" self._not_after", "return self._validation_method @validation_method.setter def validation_method(self, validation_method): \"\"\"Sets the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。", "= order_id @property def name(self): \"\"\"Gets the name of this ShowCertificateResponse. 证书名称。 :return:", "= None self._push_support = None self._revoke_reason = None self._signature_algrithm = None self._issue_time =", "re import six from huaweicloudsdkcore.sdk_response import SdkResponse from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization class ShowCertificateResponse(SdkResponse):", "brand if push_support is not None: self.push_support = push_support if revoke_reason is not", "of this ShowCertificateResponse. 
:type: int \"\"\" self._wildcard_count = wildcard_count @property def authentification(self): \"\"\"Gets", ":rtype: str \"\"\" return self._id @id.setter def id(self, id): \"\"\"Sets the id of", "str \"\"\" return self._signature_algrithm @signature_algrithm.setter def signature_algrithm(self, signature_algrithm): \"\"\"Sets the signature_algrithm of this", "self._brand = None self._push_support = None self._revoke_reason = None self._signature_algrithm = None self._issue_time", "name and the value is attribute type. attribute_map (dict): The key is attribute", "None: self.id = id if status is not None: self.status = status if", "\"\"\"Gets the type of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :return: The type of", "'not_before': 'not_before', 'not_after': 'not_after', 'validity_period': 'validity_period', 'validation_method': 'validation_method', 'domain_type': 'domain_type', 'domain': 'domain', 'sans':", "self._type @type.setter def type(self, type): \"\"\"Sets the type of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、", "The name of this ShowCertificateResponse. :rtype: str \"\"\" return self._name @name.setter def name(self,", "is not None: self.status = status if order_id is not None: self.order_id =", "name: The name of this ShowCertificateResponse. :type: str \"\"\" self._name = name @property", ":return: The status of this ShowCertificateResponse. :rtype: str \"\"\" return self._status @status.setter def", "self._status = status @property def order_id(self): \"\"\"Gets the order_id of this ShowCertificateResponse. 订单id。", "The authentification of this ShowCertificateResponse. :rtype: list[Authentification] \"\"\" return self._authentification @authentification.setter def authentification(self,", "if wildcard_count is not None: self.wildcard_count = wildcard_count if authentification is not None:", "is not None: self.brand = brand if push_support is not None: self.push_support =", "- SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :return: The domain_type of this ShowCertificateResponse. :rtype:", "of this ShowCertificateResponse. 证书是否支持推送。 :param push_support: The push_support of this ShowCertificateResponse. :type: str", "'domain_count': 'int', 'wildcard_count': 'int', 'authentification': 'list[Authentification]' } attribute_map = { 'id': 'id', 'status':", "return self._status @status.setter def status(self, status): \"\"\"Sets the status of this ShowCertificateResponse. 证书状态。取值如下:", "self.domain_count = domain_count if wildcard_count is not None: self.wildcard_count = wildcard_count if authentification", "dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() )) else:", "this ShowCertificateResponse. 证书绑定域名。 :return: The domain of this ShowCertificateResponse. :rtype: str \"\"\" return", "signature_algrithm of this ShowCertificateResponse. 签名算法。 :return: The signature_algrithm of this ShowCertificateResponse. :rtype: str", "@property def domain(self): \"\"\"Gets the domain of this ShowCertificateResponse. 证书绑定域名。 :return: The domain", "push_support is not None: self.push_support = push_support if revoke_reason is not None: self.revoke_reason", "type @property def brand(self): \"\"\"Gets the brand of this ShowCertificateResponse. 证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :return:", "of this ShowCertificateResponse. 
:type: str \"\"\" self._id = id @property def status(self): \"\"\"Gets", "validation_method @property def domain_type(self): \"\"\"Gets the domain_type of this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名", "sans of this ShowCertificateResponse. 证书绑定的附加域名信息。 :return: The sans of this ShowCertificateResponse. :rtype: str", "authentification of this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :return: The authentification of this ShowCertificateResponse. :rtype: list[Authentification]", "- WILDCARD:通配符 - MULTI_DOMAIN:多域名 :return: The domain_type of this ShowCertificateResponse. :rtype: str \"\"\"", "domain_type of this ShowCertificateResponse. :type: str \"\"\" self._domain_type = domain_type @property def domain(self):", "domain_type of this ShowCertificateResponse. 域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名 :param domain_type:", "= authentification def to_dict(self): \"\"\"Returns the model properties as a dict\"\"\" result =", "= [] openapi_types = { 'id': 'str', 'status': 'str', 'order_id': 'str', 'name': 'str',", "isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else", "not_before: The not_before of this ShowCertificateResponse. :type: str \"\"\" self._not_before = not_before @property", "wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :param wildcard_count: The wildcard_count of this ShowCertificateResponse. :type:", "The status of this ShowCertificateResponse. :type: str \"\"\" self._status = status @property def", "self._name = name @property def type(self): \"\"\"Gets the type of this ShowCertificateResponse. 证书类型。取值如下:", "\"\"\" return self._order_id @order_id.setter def order_id(self, order_id): \"\"\"Sets the order_id of this ShowCertificateResponse.", "self.id = id if status is not None: self.status = status if order_id", "\"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value,", "authentification(self, authentification): \"\"\"Sets the authentification of this ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :param authentification: The authentification", ":type: str \"\"\" self._sans = sans @property def domain_count(self): \"\"\"Gets the domain_count of", "self._sans @sans.setter def sans(self, sans): \"\"\"Sets the sans of this ShowCertificateResponse. 证书绑定的附加域名信息。 :param", "= order_id if name is not None: self.name = name if type is", "The not_after of this ShowCertificateResponse. :rtype: str \"\"\" return self._not_after @not_after.setter def not_after(self,", "id: The id of this ShowCertificateResponse. :type: str \"\"\" self._id = id @property", "str \"\"\" return self._type @type.setter def type(self, type): \"\"\"Sets the type of this", "domain_type of this ShowCertificateResponse. :rtype: str \"\"\" return self._domain_type @domain_type.setter def domain_type(self, domain_type):", "SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :return: The status of this ShowCertificateResponse. :rtype: str \"\"\" return", "the status of this ShowCertificateResponse. 证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 - CHECKING:证书申请审核中。 -", "else: result[attr] = value return result def to_str(self): \"\"\"Returns the string representation of", "model defined in huaweicloud sdk\"\"\" super(ShowCertificateResponse, self).__init__() self._id = None self._status = None", "\"\"\"Gets the order_id of this ShowCertificateResponse. 
订单id。 :return: The order_id of this ShowCertificateResponse.", "self.status = status if order_id is not None: self.order_id = order_id if name", "str \"\"\" return self._sans @sans.setter def sans(self, sans): \"\"\"Sets the sans of this", "self.signature_algrithm = signature_algrithm if issue_time is not None: self.issue_time = issue_time if not_before", "'str', 'order_id': 'str', 'name': 'str', 'type': 'str', 'brand': 'str', 'push_support': 'str', 'revoke_reason': 'str',", "str \"\"\" self._order_id = order_id @property def name(self): \"\"\"Gets the name of this", "证书名称。 :param name: The name of this ShowCertificateResponse. :type: str \"\"\" self._name =", "return self._domain @domain.setter def domain(self, domain): \"\"\"Sets the domain of this ShowCertificateResponse. 证书绑定域名。", "brand of this ShowCertificateResponse. 证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :return: The brand of this ShowCertificateResponse. :rtype:", ":type: str \"\"\" self._validation_method = validation_method @property def domain_type(self): \"\"\"Gets the domain_type of", "of this ShowCertificateResponse. :rtype: str \"\"\" return self._not_after @not_after.setter def not_after(self, not_after): \"\"\"Sets", "str \"\"\" self._revoke_reason = revoke_reason @property def signature_algrithm(self): \"\"\"Gets the signature_algrithm of this", "value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] =", "return self._push_support @push_support.setter def push_support(self, push_support): \"\"\"Sets the push_support of this ShowCertificateResponse. 证书是否支持推送。", "- REVOKED:证书已吊销。 - UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :param status: The status of", "self._issue_time @issue_time.setter def issue_time(self, issue_time): \"\"\"Sets the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :param", "'sans', 'domain_count': 'domain_count', 'wildcard_count': 'wildcard_count', 'authentification': 'authentification' } def __init__(self, id=None, status=None, order_id=None,", "= domain_type @property def domain(self): \"\"\"Gets the domain of this ShowCertificateResponse. 证书绑定域名。 :return:", "= None self._issue_time = None self._not_before = None self._not_after = None self._validity_period =", "'int', 'validation_method': 'str', 'domain_type': 'str', 'domain': 'str', 'sans': 'str', 'domain_count': 'int', 'wildcard_count': 'int',", "\"\"\" return self._validity_period @validity_period.setter def validity_period(self, validity_period): \"\"\"Sets the validity_period of this ShowCertificateResponse.", "False return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\"Returns true if both objects", "if id is not None: self.id = id if status is not None:", "of this ShowCertificateResponse. :rtype: str \"\"\" return self._revoke_reason @revoke_reason.setter def revoke_reason(self, revoke_reason): \"\"\"Sets", "sans: The sans of this ShowCertificateResponse. :type: str \"\"\" self._sans = sans @property", ":param not_before: The not_before of this ShowCertificateResponse. 
:type: str \"\"\" self._not_before = not_before", "\"\"\" return self._not_after @not_after.setter def not_after(self, not_after): \"\"\"Sets the not_after of this ShowCertificateResponse.", ":rtype: int \"\"\" return self._domain_count @domain_count.setter def domain_count(self, domain_count): \"\"\"Sets the domain_count of", "\"\"\" return self._domain @domain.setter def domain(self, domain): \"\"\"Sets the domain of this ShowCertificateResponse.", "ShowCertificateResponse. :rtype: str \"\"\" return self._signature_algrithm @signature_algrithm.setter def signature_algrithm(self, signature_algrithm): \"\"\"Sets the signature_algrithm", "\"\"\"Gets the revoke_reason of this ShowCertificateResponse. 证书吊销原因。 :return: The revoke_reason of this ShowCertificateResponse.", ":param id: The id of this ShowCertificateResponse. :type: str \"\"\" self._id = id", "the validation_method of this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :return: The validation_method of this ShowCertificateResponse. :rtype:", "None: self.not_after = not_after if validity_period is not None: self.validity_period = validity_period if", "def issue_time(self, issue_time): \"\"\"Sets the issue_time of this ShowCertificateResponse. 证书签发时间,没有获取到有效值时为空。 :param issue_time: The", "self._wildcard_count @wildcard_count.setter def wildcard_count(self, wildcard_count): \"\"\"Sets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :param", "brand @property def push_support(self): \"\"\"Gets the push_support of this ShowCertificateResponse. 证书是否支持推送。 :return: The", "def wildcard_count(self, wildcard_count): \"\"\"Sets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :param wildcard_count: The", "huaweicloud sdk\"\"\" super(ShowCertificateResponse, self).__init__() self._id = None self._status = None self._order_id = None", "import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding(\"utf-8\") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "UNPASSED:证书申请未通过。 - EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。 - UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。", "brand of this ShowCertificateResponse. :type: str \"\"\" self._brand = brand @property def push_support(self):", "of this ShowCertificateResponse. 证书可绑定域名个数。 :param domain_count: The domain_count of this ShowCertificateResponse. :type: int", "SdkResponse from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization class ShowCertificateResponse(SdkResponse): \"\"\" Attributes: openapi_types (dict): The key", "= brand @property def push_support(self): \"\"\"Gets the push_support of this ShowCertificateResponse. 证书是否支持推送。 :return:", "WILDCARD:通配符 - MULTI_DOMAIN:多域名 :return: The domain_type of this ShowCertificateResponse. :rtype: str \"\"\" return", "this ShowCertificateResponse. :type: str \"\"\" self._not_before = not_before @property def not_after(self): \"\"\"Gets the", "str \"\"\" self._validation_method = validation_method @property def domain_type(self): \"\"\"Gets the domain_type of this", "sans of this ShowCertificateResponse. 证书绑定的附加域名信息。 :param sans: The sans of this ShowCertificateResponse. :type:", "'issue_time': 'issue_time', 'not_before': 'not_before', 'not_after': 'not_after', 'validity_period': 'validity_period', 'validation_method': 'validation_method', 'domain_type': 'domain_type', 'domain':", "of this ShowCertificateResponse. 
证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :return: The brand of this ShowCertificateResponse. :rtype: str", "attribute type. attribute_map (dict): The key is attribute name and the value is", "= None self._order_id = None self._name = None self._type = None self._brand =", "brand is not None: self.brand = brand if push_support is not None: self.push_support", "None self._domain = None self._sans = None self._domain_count = None self._wildcard_count = None", "- UNPASSED:证书申请未通过。 - EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。 - UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 -", "the domain_count of this ShowCertificateResponse. 证书可绑定域名个数。 :return: The domain_count of this ShowCertificateResponse. :rtype:", "__ne__(self, other): \"\"\"Returns true if both objects are not equal\"\"\" return not self", "value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1],", "\"\"\" sensitive_list = [] openapi_types = { 'id': 'str', 'status': 'str', 'order_id': 'str',", "of this ShowCertificateResponse. :rtype: str \"\"\" return self._push_support @push_support.setter def push_support(self, push_support): \"\"\"Sets", "validation_method of this ShowCertificateResponse. :type: str \"\"\" self._validation_method = validation_method @property def domain_type(self):", "if not isinstance(other, ShowCertificateResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other):", "as a dict\"\"\" result = {} for attr, _ in six.iteritems(self.openapi_types): value =", "@domain.setter def domain(self, domain): \"\"\"Sets the domain of this ShowCertificateResponse. 证书绑定域名。 :param domain:", "type(self): \"\"\"Gets the type of this ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :return: The type", "revoke_reason of this ShowCertificateResponse. :type: str \"\"\" self._revoke_reason = revoke_reason @property def signature_algrithm(self):", "证书生效时间,没有获取到有效值时为空。 :param not_before: The not_before of this ShowCertificateResponse. :type: str \"\"\" self._not_before =", "domain=None, sans=None, domain_count=None, wildcard_count=None, authentification=None): \"\"\"ShowCertificateResponse - a model defined in huaweicloud sdk\"\"\"", "'domain_count', 'wildcard_count': 'wildcard_count', 'authentification': 'authentification' } def __init__(self, id=None, status=None, order_id=None, name=None, type=None,", "not None: self.wildcard_count = wildcard_count if authentification is not None: self.authentification = authentification", "self._push_support = None self._revoke_reason = None self._signature_algrithm = None self._issue_time = None self._not_before", "EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。 - UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。 :param status:", ":param brand: The brand of this ShowCertificateResponse. :type: str \"\"\" self._brand = brand", "validity_period): \"\"\"Sets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。 :param validity_period: The validity_period of", "the id of this ShowCertificateResponse. 证书id。 :param id: The id of this ShowCertificateResponse.", "证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :return: The type of this ShowCertificateResponse. 
:rtype: str \"\"\" return", "int \"\"\" return self._wildcard_count @wildcard_count.setter def wildcard_count(self, wildcard_count): \"\"\"Sets the wildcard_count of this", ":type: str \"\"\" self._order_id = order_id @property def name(self): \"\"\"Gets the name of", "签名算法。 :param signature_algrithm: The signature_algrithm of this ShowCertificateResponse. :type: str \"\"\" self._signature_algrithm =", "The domain of this ShowCertificateResponse. :type: str \"\"\" self._domain = domain @property def", "'revoke_reason': 'str', 'signature_algrithm': 'str', 'issue_time': 'str', 'not_before': 'str', 'not_after': 'str', 'validity_period': 'int', 'validation_method':", "CANCELSUPPLEMENTING:取消新增附加域名审核中。 :param status: The status of this ShowCertificateResponse. :type: str \"\"\" self._status =", ":type: str \"\"\" self._id = id @property def status(self): \"\"\"Gets the status of", "\"\"\"Sets the not_before of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :param not_before: The not_before of this", "\"\"\"Gets the name of this ShowCertificateResponse. 证书名称。 :return: The name of this ShowCertificateResponse.", "str \"\"\" return self._not_after @not_after.setter def not_after(self, not_after): \"\"\"Sets the not_after of this", "this ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :param validation_method: The validation_method of this ShowCertificateResponse. :type: str \"\"\"", "ShowCertificateResponse. 证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :return: The type of this ShowCertificateResponse. :rtype: str \"\"\"", "= None self._domain = None self._sans = None self._domain_count = None self._wildcard_count =", "None self._status = None self._order_id = None self._name = None self._type = None", "ShowCertificateResponse. 域名认证方式,取值如下:DNS、FILE、EMAIL。 :return: The validation_method of this ShowCertificateResponse. :rtype: str \"\"\" return self._validation_method", "证书吊销原因。 :return: The revoke_reason of this ShowCertificateResponse. :rtype: str \"\"\" return self._revoke_reason @revoke_reason.setter", "\"\"\" return self._validation_method @validation_method.setter def validation_method(self, validation_method): \"\"\"Sets the validation_method of this ShowCertificateResponse.", "sans of this ShowCertificateResponse. :type: str \"\"\" self._sans = sans @property def domain_count(self):", "证书失效时间,没有获取到有效值时为空。 :return: The not_after of this ShowCertificateResponse. :rtype: str \"\"\" return self._not_after @not_after.setter", "list[Authentification] \"\"\" self._authentification = authentification def to_dict(self): \"\"\"Returns the model properties as a", "@property def sans(self): \"\"\"Gets the sans of this ShowCertificateResponse. 证书绑定的附加域名信息。 :return: The sans", "value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict()", "= sans if domain_count is not None: self.domain_count = domain_count if wildcard_count is", "return self._validity_period @validity_period.setter def validity_period(self, validity_period): \"\"\"Sets the validity_period of this ShowCertificateResponse. 证书有效期,按月为单位。", ":return: The push_support of this ShowCertificateResponse. :rtype: str \"\"\" return self._push_support @push_support.setter def", "证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。 :param brand: The brand of this ShowCertificateResponse. :type: str \"\"\" self._brand", ":param domain_count: The domain_count of this ShowCertificateResponse. 
:type: int \"\"\" self._domain_count = domain_count", "EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。 :param type: The type of this ShowCertificateResponse. :type: str \"\"\" self._type =", "def revoke_reason(self): \"\"\"Gets the revoke_reason of this ShowCertificateResponse. 证书吊销原因。 :return: The revoke_reason of", "__repr__(self): \"\"\"For `print`\"\"\" return self.to_str() def __eq__(self, other): \"\"\"Returns true if both objects", "sans(self): \"\"\"Gets the sans of this ShowCertificateResponse. 证书绑定的附加域名信息。 :return: The sans of this", "issue_time of this ShowCertificateResponse. :rtype: str \"\"\" return self._issue_time @issue_time.setter def issue_time(self, issue_time):", "ShowCertificateResponse. :rtype: str \"\"\" return self._id @id.setter def id(self, id): \"\"\"Sets the id", "= list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value )) elif", "if attr in self.sensitive_list: result[attr] = \"****\" else: result[attr] = value return result", "'order_id': 'str', 'name': 'str', 'type': 'str', 'brand': 'str', 'push_support': 'str', 'revoke_reason': 'str', 'signature_algrithm':", "return self._signature_algrithm @signature_algrithm.setter def signature_algrithm(self, signature_algrithm): \"\"\"Sets the signature_algrithm of this ShowCertificateResponse. 签名算法。", "(item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() )) else: if attr in", "this ShowCertificateResponse. 证书绑定的附加域名信息。 :param sans: The sans of this ShowCertificateResponse. :type: str \"\"\"", "ShowCertificateResponse. 域名所有权认证信息,详情请参见Authentification字段数据结构说明。 :param authentification: The authentification of this ShowCertificateResponse. :type: list[Authentification] \"\"\" self._authentification", "str \"\"\" self._not_after = not_after @property def validity_period(self): \"\"\"Gets the validity_period of this", "\"\"\" self._sans = sans @property def domain_count(self): \"\"\"Gets the domain_count of this ShowCertificateResponse.", "if hasattr(x, \"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict()", "@not_before.setter def not_before(self, not_before): \"\"\"Sets the not_before of this ShowCertificateResponse. 证书生效时间,没有获取到有效值时为空。 :param not_before:", "The issue_time of this ShowCertificateResponse. :type: str \"\"\" self._issue_time = issue_time @property def", "self.push_support = push_support if revoke_reason is not None: self.revoke_reason = revoke_reason if signature_algrithm", "not None: self.validation_method = validation_method if domain_type is not None: self.domain_type = domain_type", "name=None, type=None, brand=None, push_support=None, revoke_reason=None, signature_algrithm=None, issue_time=None, not_before=None, not_after=None, validity_period=None, validation_method=None, domain_type=None, domain=None,", "str \"\"\" return self._domain_type @domain_type.setter def domain_type(self, domain_type): \"\"\"Sets the domain_type of this", "wildcard_count(self, wildcard_count): \"\"\"Sets the wildcard_count of this ShowCertificateResponse. 证书可绑定附加域名个数。 :param wildcard_count: The wildcard_count", "'id': 'id', 'status': 'status', 'order_id': 'order_id', 'name': 'name', 'type': 'type', 'brand': 'brand', 'push_support':", "of this ShowCertificateResponse. 订单id。 :return: The order_id of this ShowCertificateResponse. :rtype: str \"\"\"" ]
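The n-grams in the list above come from an auto-generated SDK response model: private attributes, a getter and setter property per field, an openapi_types map, and to_dict/to_str helpers built on that map. A minimal sketch of that pattern, assuming only two illustrative fields rather than the real certificate attributes (this is not the SDK source):

# Minimal sketch of the generated-model pattern; field names are illustrative,
# not the actual ShowCertificateResponse attributes.
class ResponseModelSketch(object):
    openapi_types = {'id': 'str', 'domain_count': 'int'}

    def __init__(self, id=None, domain_count=None):
        self._id = None
        self._domain_count = None
        if id is not None:
            self.id = id
        if domain_count is not None:
            self.domain_count = domain_count

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def domain_count(self):
        return self._domain_count

    @domain_count.setter
    def domain_count(self, domain_count):
        self._domain_count = domain_count

    def to_dict(self):
        # collect every declared attribute into a plain dict
        return {attr: getattr(self, attr) for attr in self.openapi_types}


print(ResponseModelSketch(id='cert-1', domain_count=2).to_dict())

The real generated class additionally special-cases lists, nested models, and a sensitive_list when serializing; the sketch keeps only the core property/type-map mechanics.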
[ ".tissue import ( epithelium_watershed, largest_object_mask, select_border_adjacent, select_in_field, select_mask_adjacent, segment_hemijunctions, cell_edges_mask, cell_interiors_mask, cell_vertices_mask, neighbor_array_nr,", "from .tissue import ( epithelium_watershed, largest_object_mask, select_border_adjacent, select_in_field, select_mask_adjacent, segment_hemijunctions, cell_edges_mask, cell_interiors_mask, cell_vertices_mask,", ".interface import ( interface_endpoints_mask, interface_endpoints_coords, interface_shape_edge_method, trim_interface, refine_junction, edge_between_neighbors, ) from .timelapse import", "neighbor_array_nr, ) __all__ = [ \"interface_endpoints_mask\", \"interface_endpoints_coords\", \"interface_shape_edge_method\", \"trim_interface\", \"refine_junction\", \"edge_between_neighbors\", \"segment_epithelium_timelapse\", \"largest_object_mask_timelapse\",", ".timelapse import ( segment_epithelium_timelapse, largest_object_mask_timelapse, segment_hemijunctions_timelapse, ) from .tissue import ( epithelium_watershed, largest_object_mask,", "from .timelapse import ( segment_epithelium_timelapse, largest_object_mask_timelapse, segment_hemijunctions_timelapse, ) from .tissue import ( epithelium_watershed,", "\"segment_epithelium_timelapse\", \"largest_object_mask_timelapse\", \"segment_hemijunctions_timelapse\", \"epithelium_watershed\", \"largest_object_mask\", \"select_border_adjacent\", \"select_in_field\", \"select_mask_adjacent\", \"segment_hemijunctions\", \"cell_edges_mask\", \"cell_interiors_mask\", \"cell_vertices_mask\", \"neighbor_array_nr\"", "images.\"\"\" from .interface import ( interface_endpoints_mask, interface_endpoints_coords, interface_shape_edge_method, trim_interface, refine_junction, edge_between_neighbors, ) from", "( epithelium_watershed, largest_object_mask, select_border_adjacent, select_in_field, select_mask_adjacent, segment_hemijunctions, cell_edges_mask, cell_interiors_mask, cell_vertices_mask, neighbor_array_nr, ) __all__", "<reponame>a9w/Fat2_polarizes_WAVE<filename>code/functions/segment/__init__.py \"\"\"Functions for segmenting images.\"\"\" from .interface import ( interface_endpoints_mask, interface_endpoints_coords, interface_shape_edge_method, trim_interface,", "import ( interface_endpoints_mask, interface_endpoints_coords, interface_shape_edge_method, trim_interface, refine_junction, edge_between_neighbors, ) from .timelapse import (", "select_border_adjacent, select_in_field, select_mask_adjacent, segment_hemijunctions, cell_edges_mask, cell_interiors_mask, cell_vertices_mask, neighbor_array_nr, ) __all__ = [ \"interface_endpoints_mask\",", "cell_interiors_mask, cell_vertices_mask, neighbor_array_nr, ) __all__ = [ \"interface_endpoints_mask\", \"interface_endpoints_coords\", \"interface_shape_edge_method\", \"trim_interface\", \"refine_junction\", \"edge_between_neighbors\",", "segment_hemijunctions_timelapse, ) from .tissue import ( epithelium_watershed, largest_object_mask, select_border_adjacent, select_in_field, select_mask_adjacent, segment_hemijunctions, cell_edges_mask,", "cell_vertices_mask, neighbor_array_nr, ) __all__ = [ \"interface_endpoints_mask\", \"interface_endpoints_coords\", \"interface_shape_edge_method\", \"trim_interface\", \"refine_junction\", \"edge_between_neighbors\", \"segment_epithelium_timelapse\",", "select_in_field, select_mask_adjacent, segment_hemijunctions, cell_edges_mask, cell_interiors_mask, cell_vertices_mask, neighbor_array_nr, ) __all__ = [ \"interface_endpoints_mask\", 
\"interface_endpoints_coords\",", "\"edge_between_neighbors\", \"segment_epithelium_timelapse\", \"largest_object_mask_timelapse\", \"segment_hemijunctions_timelapse\", \"epithelium_watershed\", \"largest_object_mask\", \"select_border_adjacent\", \"select_in_field\", \"select_mask_adjacent\", \"segment_hemijunctions\", \"cell_edges_mask\", \"cell_interiors_mask\", \"cell_vertices_mask\",", "edge_between_neighbors, ) from .timelapse import ( segment_epithelium_timelapse, largest_object_mask_timelapse, segment_hemijunctions_timelapse, ) from .tissue import", "interface_endpoints_coords, interface_shape_edge_method, trim_interface, refine_junction, edge_between_neighbors, ) from .timelapse import ( segment_epithelium_timelapse, largest_object_mask_timelapse, segment_hemijunctions_timelapse,", "from .interface import ( interface_endpoints_mask, interface_endpoints_coords, interface_shape_edge_method, trim_interface, refine_junction, edge_between_neighbors, ) from .timelapse", "( segment_epithelium_timelapse, largest_object_mask_timelapse, segment_hemijunctions_timelapse, ) from .tissue import ( epithelium_watershed, largest_object_mask, select_border_adjacent, select_in_field,", "__all__ = [ \"interface_endpoints_mask\", \"interface_endpoints_coords\", \"interface_shape_edge_method\", \"trim_interface\", \"refine_junction\", \"edge_between_neighbors\", \"segment_epithelium_timelapse\", \"largest_object_mask_timelapse\", \"segment_hemijunctions_timelapse\", \"epithelium_watershed\",", "for segmenting images.\"\"\" from .interface import ( interface_endpoints_mask, interface_endpoints_coords, interface_shape_edge_method, trim_interface, refine_junction, edge_between_neighbors,", "= [ \"interface_endpoints_mask\", \"interface_endpoints_coords\", \"interface_shape_edge_method\", \"trim_interface\", \"refine_junction\", \"edge_between_neighbors\", \"segment_epithelium_timelapse\", \"largest_object_mask_timelapse\", \"segment_hemijunctions_timelapse\", \"epithelium_watershed\", \"largest_object_mask\",", ") __all__ = [ \"interface_endpoints_mask\", \"interface_endpoints_coords\", \"interface_shape_edge_method\", \"trim_interface\", \"refine_junction\", \"edge_between_neighbors\", \"segment_epithelium_timelapse\", \"largest_object_mask_timelapse\", \"segment_hemijunctions_timelapse\",", "select_mask_adjacent, segment_hemijunctions, cell_edges_mask, cell_interiors_mask, cell_vertices_mask, neighbor_array_nr, ) __all__ = [ \"interface_endpoints_mask\", \"interface_endpoints_coords\", \"interface_shape_edge_method\",", "interface_endpoints_mask, interface_endpoints_coords, interface_shape_edge_method, trim_interface, refine_junction, edge_between_neighbors, ) from .timelapse import ( segment_epithelium_timelapse, largest_object_mask_timelapse,", ") from .timelapse import ( segment_epithelium_timelapse, largest_object_mask_timelapse, segment_hemijunctions_timelapse, ) from .tissue import (", "segment_epithelium_timelapse, largest_object_mask_timelapse, segment_hemijunctions_timelapse, ) from .tissue import ( epithelium_watershed, largest_object_mask, select_border_adjacent, select_in_field, select_mask_adjacent,", "\"trim_interface\", \"refine_junction\", \"edge_between_neighbors\", \"segment_epithelium_timelapse\", \"largest_object_mask_timelapse\", \"segment_hemijunctions_timelapse\", \"epithelium_watershed\", \"largest_object_mask\", \"select_border_adjacent\", \"select_in_field\", \"select_mask_adjacent\", \"segment_hemijunctions\", \"cell_edges_mask\",", "\"interface_shape_edge_method\", 
\"trim_interface\", \"refine_junction\", \"edge_between_neighbors\", \"segment_epithelium_timelapse\", \"largest_object_mask_timelapse\", \"segment_hemijunctions_timelapse\", \"epithelium_watershed\", \"largest_object_mask\", \"select_border_adjacent\", \"select_in_field\", \"select_mask_adjacent\", \"segment_hemijunctions\",", "largest_object_mask, select_border_adjacent, select_in_field, select_mask_adjacent, segment_hemijunctions, cell_edges_mask, cell_interiors_mask, cell_vertices_mask, neighbor_array_nr, ) __all__ = [", "trim_interface, refine_junction, edge_between_neighbors, ) from .timelapse import ( segment_epithelium_timelapse, largest_object_mask_timelapse, segment_hemijunctions_timelapse, ) from", "import ( segment_epithelium_timelapse, largest_object_mask_timelapse, segment_hemijunctions_timelapse, ) from .tissue import ( epithelium_watershed, largest_object_mask, select_border_adjacent,", "[ \"interface_endpoints_mask\", \"interface_endpoints_coords\", \"interface_shape_edge_method\", \"trim_interface\", \"refine_junction\", \"edge_between_neighbors\", \"segment_epithelium_timelapse\", \"largest_object_mask_timelapse\", \"segment_hemijunctions_timelapse\", \"epithelium_watershed\", \"largest_object_mask\", \"select_border_adjacent\",", "\"\"\"Functions for segmenting images.\"\"\" from .interface import ( interface_endpoints_mask, interface_endpoints_coords, interface_shape_edge_method, trim_interface, refine_junction,", "( interface_endpoints_mask, interface_endpoints_coords, interface_shape_edge_method, trim_interface, refine_junction, edge_between_neighbors, ) from .timelapse import ( segment_epithelium_timelapse,", "\"largest_object_mask_timelapse\", \"segment_hemijunctions_timelapse\", \"epithelium_watershed\", \"largest_object_mask\", \"select_border_adjacent\", \"select_in_field\", \"select_mask_adjacent\", \"segment_hemijunctions\", \"cell_edges_mask\", \"cell_interiors_mask\", \"cell_vertices_mask\", \"neighbor_array_nr\" ]", ") from .tissue import ( epithelium_watershed, largest_object_mask, select_border_adjacent, select_in_field, select_mask_adjacent, segment_hemijunctions, cell_edges_mask, cell_interiors_mask,", "largest_object_mask_timelapse, segment_hemijunctions_timelapse, ) from .tissue import ( epithelium_watershed, largest_object_mask, select_border_adjacent, select_in_field, select_mask_adjacent, segment_hemijunctions,", "segment_hemijunctions, cell_edges_mask, cell_interiors_mask, cell_vertices_mask, neighbor_array_nr, ) __all__ = [ \"interface_endpoints_mask\", \"interface_endpoints_coords\", \"interface_shape_edge_method\", \"trim_interface\",", "interface_shape_edge_method, trim_interface, refine_junction, edge_between_neighbors, ) from .timelapse import ( segment_epithelium_timelapse, largest_object_mask_timelapse, segment_hemijunctions_timelapse, )", "import ( epithelium_watershed, largest_object_mask, select_border_adjacent, select_in_field, select_mask_adjacent, segment_hemijunctions, cell_edges_mask, cell_interiors_mask, cell_vertices_mask, neighbor_array_nr, )", "epithelium_watershed, largest_object_mask, select_border_adjacent, select_in_field, select_mask_adjacent, segment_hemijunctions, cell_edges_mask, cell_interiors_mask, cell_vertices_mask, neighbor_array_nr, ) __all__ =", "cell_edges_mask, cell_interiors_mask, cell_vertices_mask, neighbor_array_nr, ) __all__ = [ \"interface_endpoints_mask\", \"interface_endpoints_coords\", \"interface_shape_edge_method\", \"trim_interface\", \"refine_junction\",", "\"refine_junction\", 
\"edge_between_neighbors\", \"segment_epithelium_timelapse\", \"largest_object_mask_timelapse\", \"segment_hemijunctions_timelapse\", \"epithelium_watershed\", \"largest_object_mask\", \"select_border_adjacent\", \"select_in_field\", \"select_mask_adjacent\", \"segment_hemijunctions\", \"cell_edges_mask\", \"cell_interiors_mask\",", "segmenting images.\"\"\" from .interface import ( interface_endpoints_mask, interface_endpoints_coords, interface_shape_edge_method, trim_interface, refine_junction, edge_between_neighbors, )", "\"interface_endpoints_coords\", \"interface_shape_edge_method\", \"trim_interface\", \"refine_junction\", \"edge_between_neighbors\", \"segment_epithelium_timelapse\", \"largest_object_mask_timelapse\", \"segment_hemijunctions_timelapse\", \"epithelium_watershed\", \"largest_object_mask\", \"select_border_adjacent\", \"select_in_field\", \"select_mask_adjacent\",", "refine_junction, edge_between_neighbors, ) from .timelapse import ( segment_epithelium_timelapse, largest_object_mask_timelapse, segment_hemijunctions_timelapse, ) from .tissue", "\"interface_endpoints_mask\", \"interface_endpoints_coords\", \"interface_shape_edge_method\", \"trim_interface\", \"refine_junction\", \"edge_between_neighbors\", \"segment_epithelium_timelapse\", \"largest_object_mask_timelapse\", \"segment_hemijunctions_timelapse\", \"epithelium_watershed\", \"largest_object_mask\", \"select_border_adjacent\", \"select_in_field\"," ]
[ "or NodeGateway() def get(self) -> Optional[dict]: network = self.node_gateway.get_network() if network is not", "from gateways.node_gateway import NodeGateway class GetNetwork: def __init__(self, node_gateway: Optional[NodeGateway] = None) ->", "self.node_gateway = node_gateway or NodeGateway() def get(self) -> Optional[dict]: network = self.node_gateway.get_network() if", "node_gateway or NodeGateway() def get(self) -> Optional[dict]: network = self.node_gateway.get_network() if network is", "-> Optional[dict]: network = self.node_gateway.get_network() if network is not None: return network.to_dict() return", "def __init__(self, node_gateway: Optional[NodeGateway] = None) -> None: self.node_gateway = node_gateway or NodeGateway()", "NodeGateway() def get(self) -> Optional[dict]: network = self.node_gateway.get_network() if network is not None:", "node_gateway: Optional[NodeGateway] = None) -> None: self.node_gateway = node_gateway or NodeGateway() def get(self)", "def get(self) -> Optional[dict]: network = self.node_gateway.get_network() if network is not None: return", "= node_gateway or NodeGateway() def get(self) -> Optional[dict]: network = self.node_gateway.get_network() if network", "Optional from gateways.node_gateway import NodeGateway class GetNetwork: def __init__(self, node_gateway: Optional[NodeGateway] = None)", "None) -> None: self.node_gateway = node_gateway or NodeGateway() def get(self) -> Optional[dict]: network", "get(self) -> Optional[dict]: network = self.node_gateway.get_network() if network is not None: return network.to_dict()", "gateways.node_gateway import NodeGateway class GetNetwork: def __init__(self, node_gateway: Optional[NodeGateway] = None) -> None:", "typing import Optional from gateways.node_gateway import NodeGateway class GetNetwork: def __init__(self, node_gateway: Optional[NodeGateway]", "class GetNetwork: def __init__(self, node_gateway: Optional[NodeGateway] = None) -> None: self.node_gateway = node_gateway", "Optional[dict]: network = self.node_gateway.get_network() if network is not None: return network.to_dict() return None", "GetNetwork: def __init__(self, node_gateway: Optional[NodeGateway] = None) -> None: self.node_gateway = node_gateway or", "-> None: self.node_gateway = node_gateway or NodeGateway() def get(self) -> Optional[dict]: network =", "Optional[NodeGateway] = None) -> None: self.node_gateway = node_gateway or NodeGateway() def get(self) ->", "__init__(self, node_gateway: Optional[NodeGateway] = None) -> None: self.node_gateway = node_gateway or NodeGateway() def", "import Optional from gateways.node_gateway import NodeGateway class GetNetwork: def __init__(self, node_gateway: Optional[NodeGateway] =", "= None) -> None: self.node_gateway = node_gateway or NodeGateway() def get(self) -> Optional[dict]:", "<gh_stars>0 from typing import Optional from gateways.node_gateway import NodeGateway class GetNetwork: def __init__(self,", "NodeGateway class GetNetwork: def __init__(self, node_gateway: Optional[NodeGateway] = None) -> None: self.node_gateway =", "from typing import Optional from gateways.node_gateway import NodeGateway class GetNetwork: def __init__(self, node_gateway:", "import NodeGateway class GetNetwork: def __init__(self, node_gateway: Optional[NodeGateway] = None) -> None: self.node_gateway", "None: self.node_gateway = node_gateway or NodeGateway() def get(self) -> Optional[dict]: network = self.node_gateway.get_network()" ]
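This row shingles a small clean-architecture use case: GetNetwork takes an optional gateway in its constructor, falls back to a default NodeGateway, and returns the network as a dict (or None). That constructor injection is what makes the class easy to exercise without a running node; a hedged sketch, with FakeNetwork and FakeGateway as hypothetical stand-ins for the real gateway layer:

from typing import Optional


class FakeNetwork:
    # stand-in for whatever object the real gateway returns
    def to_dict(self) -> dict:
        return {"nodes": 3}


class FakeGateway:
    def get_network(self) -> Optional[FakeNetwork]:
        return FakeNetwork()


class GetNetwork:
    # same shape as the fragments above, except the default here is the fake
    # gateway because the real NodeGateway is not available in this sketch
    def __init__(self, node_gateway=None) -> None:
        self.node_gateway = node_gateway or FakeGateway()

    def get(self) -> Optional[dict]:
        network = self.node_gateway.get_network()
        if network is not None:
            return network.to_dict()
        return None


if __name__ == "__main__":
    print(GetNetwork(node_gateway=FakeGateway()).get())  # {'nodes': 3}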
[ "coding: utf-8 -*- \"\"\" A simple plugin loading mechanism \"\"\" # Copyright ©", "mechanism \"\"\" # Copyright © 2015 Tiger Computing Ltd # This file is", "# Load directories containing __init__.py full_path = os.path.join(plugin_dir, dirent) if os.path.isdir(full_path): if os.path.isfile(os.path.join(full_path,", "a directory. This function will list the contents of ``plugin_dir`` and load any", "a package name as supplied to this function in the optional ``package`` parameter.", "directories containing __init__.py full_path = os.path.join(plugin_dir, dirent) if os.path.isdir(full_path): if os.path.isfile(os.path.join(full_path, '__init__.py')): plugins.append(dirent)", "the plugins into. .. versionadded:: 1.1.0 \"\"\" plugin_dir = os.path.realpath(plugin_dir) # Discover the", "this is not provided, this defaults to ``pytiger.utils.plugins``. The module name supplied in", "known to Python (i.e. in ``sys.modules``). The function returns a list of python", "loaded within a package name as supplied to this function in the optional", "this defaults to ``pytiger.utils.plugins``. The module name supplied in ``package`` must already be", "load(plugin_dir, package=__name__): \"\"\" Load Python modules and packages from a directory. This function", "file is part of pytiger and distributed under the terms # of a", "= os.path.realpath(plugin_dir) # Discover the list of plugins plugins = [] for dirent", "[] for dirent in os.listdir(plugin_dir): # skip __init__.py if dirent.startswith('__'): continue # Load", "of a BSD-like license # See the file COPYING for details # Idea", "and packages from a directory. This function will list the contents of ``plugin_dir``", "parameter. If this is not provided, this defaults to ``pytiger.utils.plugins``. The module name", "``__init__.py`` file) found within it. Sub-directories are not searched. Modules are compiled as", "of plugins plugins = [] for dirent in os.listdir(plugin_dir): # skip __init__.py if", "\"\"\" # Copyright © 2015 Tiger Computing Ltd # This file is part", "directory. This function will list the contents of ``plugin_dir`` and load any Python", "Computing Ltd # This file is part of pytiger and distributed under the", "COPYING for details # Idea borrowed and adapted from: # https://copyninja.info/blog/dynamic-module-loading.html # http://stackoverflow.com/a/3381582", "and load any Python modules (files ending ``.py``) or packages (directories with a", "packages (directories with a ``__init__.py`` file) found within it. Sub-directories are not searched.", "are not searched. Modules are compiled as they are loaded, if necessary. Plugins", "simple plugin loading mechanism \"\"\" # Copyright © 2015 Tiger Computing Ltd #", "for details # Idea borrowed and adapted from: # https://copyninja.info/blog/dynamic-module-loading.html # http://stackoverflow.com/a/3381582 import", "name supplied in ``package`` must already be known to Python (i.e. in ``sys.modules``).", "# See the file COPYING for details # Idea borrowed and adapted from:", "the directory to load plugins from. :param str package: Python package to load", "within it. Sub-directories are not searched. 
Modules are compiled as they are loaded,", "plugins.append(dirent) # Now load the plugin modules modules = [] for plugin in", "Plugins are loaded within a package name as supplied to this function in", "\"\"\" plugin_dir = os.path.realpath(plugin_dir) # Discover the list of plugins plugins = []", "Idea borrowed and adapted from: # https://copyninja.info/blog/dynamic-module-loading.html # http://stackoverflow.com/a/3381582 import imp import os", "__init__.py full_path = os.path.join(plugin_dir, dirent) if os.path.isdir(full_path): if os.path.isfile(os.path.join(full_path, '__init__.py')): plugins.append(dirent) # Now", "dirent in os.listdir(plugin_dir): # skip __init__.py if dirent.startswith('__'): continue # Load .py files", "not provided, this defaults to ``pytiger.utils.plugins``. The module name supplied in ``package`` must", "# https://copyninja.info/blog/dynamic-module-loading.html # http://stackoverflow.com/a/3381582 import imp import os def load(plugin_dir, package=__name__): \"\"\" Load", "Python modules (files ending ``.py``) or packages (directories with a ``__init__.py`` file) found", "# Copyright © 2015 Tiger Computing Ltd # This file is part of", "# skip __init__.py if dirent.startswith('__'): continue # Load .py files as plugins if", "in the optional ``package`` parameter. If this is not provided, this defaults to", "full_path = os.path.join(plugin_dir, dirent) if os.path.isdir(full_path): if os.path.isfile(os.path.join(full_path, '__init__.py')): plugins.append(dirent) # Now load", ":param str plugin_dir: The path to the directory to load plugins from. :param", "[plugin_dir]) module = imp.load_module(package + '.' + plugin, f, path, desc) modules.append(module) return", "skip __init__.py if dirent.startswith('__'): continue # Load .py files as plugins if dirent.endswith('.py'):", "# Discover the list of plugins plugins = [] for dirent in os.listdir(plugin_dir):", "and distributed under the terms # of a BSD-like license # See the", "If this is not provided, this defaults to ``pytiger.utils.plugins``. The module name supplied", "= [] for plugin in plugins: f, path, desc = imp.find_module(plugin, [plugin_dir]) module", "plugin_dir: The path to the directory to load plugins from. :param str package:", "per loaded module or package. :param str plugin_dir: The path to the directory", "to ``pytiger.utils.plugins``. The module name supplied in ``package`` must already be known to", "The module name supplied in ``package`` must already be known to Python (i.e.", "terms # of a BSD-like license # See the file COPYING for details", "it. Sub-directories are not searched. Modules are compiled as they are loaded, if", ".. versionadded:: 1.1.0 \"\"\" plugin_dir = os.path.realpath(plugin_dir) # Discover the list of plugins", "plugin_dir = os.path.realpath(plugin_dir) # Discover the list of plugins plugins = [] for", "desc = imp.find_module(plugin, [plugin_dir]) module = imp.load_module(package + '.' + plugin, f, path,", "from: # https://copyninja.info/blog/dynamic-module-loading.html # http://stackoverflow.com/a/3381582 import imp import os def load(plugin_dir, package=__name__): \"\"\"", "= os.path.join(plugin_dir, dirent) if os.path.isdir(full_path): if os.path.isfile(os.path.join(full_path, '__init__.py')): plugins.append(dirent) # Now load the", "borrowed and adapted from: # https://copyninja.info/blog/dynamic-module-loading.html # http://stackoverflow.com/a/3381582 import imp import os def", "a list of python module objects, one per loaded module or package. 
:param", "packages from a directory. This function will list the contents of ``plugin_dir`` and", "or packages (directories with a ``__init__.py`` file) found within it. Sub-directories are not", "-*- coding: utf-8 -*- \"\"\" A simple plugin loading mechanism \"\"\" # Copyright", "the terms # of a BSD-like license # See the file COPYING for", "package. :param str plugin_dir: The path to the directory to load plugins from.", "of pytiger and distributed under the terms # of a BSD-like license #", "modules modules = [] for plugin in plugins: f, path, desc = imp.find_module(plugin,", "-*- \"\"\" A simple plugin loading mechanism \"\"\" # Copyright © 2015 Tiger", "BSD-like license # See the file COPYING for details # Idea borrowed and", "os.path.realpath(plugin_dir) # Discover the list of plugins plugins = [] for dirent in", "1.1.0 \"\"\" plugin_dir = os.path.realpath(plugin_dir) # Discover the list of plugins plugins =", "ending ``.py``) or packages (directories with a ``__init__.py`` file) found within it. Sub-directories", "package=__name__): \"\"\" Load Python modules and packages from a directory. This function will", "or package. :param str plugin_dir: The path to the directory to load plugins", "module or package. :param str plugin_dir: The path to the directory to load", "searched. Modules are compiled as they are loaded, if necessary. Plugins are loaded", "of python module objects, one per loaded module or package. :param str plugin_dir:", "loaded, if necessary. Plugins are loaded within a package name as supplied to", "Ltd # This file is part of pytiger and distributed under the terms", "the optional ``package`` parameter. If this is not provided, this defaults to ``pytiger.utils.plugins``.", "list the contents of ``plugin_dir`` and load any Python modules (files ending ``.py``)", "is not provided, this defaults to ``pytiger.utils.plugins``. The module name supplied in ``package``", "into. .. versionadded:: 1.1.0 \"\"\" plugin_dir = os.path.realpath(plugin_dir) # Discover the list of", "function in the optional ``package`` parameter. If this is not provided, this defaults", "A simple plugin loading mechanism \"\"\" # Copyright © 2015 Tiger Computing Ltd", "plugins.append(os.path.splitext(dirent)[0]) continue # Load directories containing __init__.py full_path = os.path.join(plugin_dir, dirent) if os.path.isdir(full_path):", "Load Python modules and packages from a directory. This function will list the", "os.path.isdir(full_path): if os.path.isfile(os.path.join(full_path, '__init__.py')): plugins.append(dirent) # Now load the plugin modules modules =", "= imp.find_module(plugin, [plugin_dir]) module = imp.load_module(package + '.' + plugin, f, path, desc)", "the file COPYING for details # Idea borrowed and adapted from: # https://copyninja.info/blog/dynamic-module-loading.html", "returns a list of python module objects, one per loaded module or package.", "they are loaded, if necessary. Plugins are loaded within a package name as", "to Python (i.e. in ``sys.modules``). The function returns a list of python module", "f, path, desc = imp.find_module(plugin, [plugin_dir]) module = imp.load_module(package + '.' + plugin,", "file) found within it. Sub-directories are not searched. 
Modules are compiled as they", "continue # Load directories containing __init__.py full_path = os.path.join(plugin_dir, dirent) if os.path.isdir(full_path): if", "2015 Tiger Computing Ltd # This file is part of pytiger and distributed", "os def load(plugin_dir, package=__name__): \"\"\" Load Python modules and packages from a directory.", "the plugin modules modules = [] for plugin in plugins: f, path, desc", "as they are loaded, if necessary. Plugins are loaded within a package name", "a BSD-like license # See the file COPYING for details # Idea borrowed", "list of plugins plugins = [] for dirent in os.listdir(plugin_dir): # skip __init__.py", "if necessary. Plugins are loaded within a package name as supplied to this", "plugins plugins = [] for dirent in os.listdir(plugin_dir): # skip __init__.py if dirent.startswith('__'):", "under the terms # of a BSD-like license # See the file COPYING", "found within it. Sub-directories are not searched. Modules are compiled as they are", "supplied to this function in the optional ``package`` parameter. If this is not", "must already be known to Python (i.e. in ``sys.modules``). The function returns a", "http://stackoverflow.com/a/3381582 import imp import os def load(plugin_dir, package=__name__): \"\"\" Load Python modules and", "function will list the contents of ``plugin_dir`` and load any Python modules (files", "plugins if dirent.endswith('.py'): plugins.append(os.path.splitext(dirent)[0]) continue # Load directories containing __init__.py full_path = os.path.join(plugin_dir,", "this function in the optional ``package`` parameter. If this is not provided, this", "plugins into. .. versionadded:: 1.1.0 \"\"\" plugin_dir = os.path.realpath(plugin_dir) # Discover the list", "os.path.join(plugin_dir, dirent) if os.path.isdir(full_path): if os.path.isfile(os.path.join(full_path, '__init__.py')): plugins.append(dirent) # Now load the plugin", "Load directories containing __init__.py full_path = os.path.join(plugin_dir, dirent) if os.path.isdir(full_path): if os.path.isfile(os.path.join(full_path, '__init__.py')):", "# http://stackoverflow.com/a/3381582 import imp import os def load(plugin_dir, package=__name__): \"\"\" Load Python modules", "The function returns a list of python module objects, one per loaded module", "already be known to Python (i.e. in ``sys.modules``). The function returns a list", "dirent.endswith('.py'): plugins.append(os.path.splitext(dirent)[0]) continue # Load directories containing __init__.py full_path = os.path.join(plugin_dir, dirent) if", ":param str package: Python package to load the plugins into. .. versionadded:: 1.1.0", "plugins: f, path, desc = imp.find_module(plugin, [plugin_dir]) module = imp.load_module(package + '.' +", "distributed under the terms # of a BSD-like license # See the file", "(i.e. in ``sys.modules``). The function returns a list of python module objects, one", "objects, one per loaded module or package. :param str plugin_dir: The path to", "modules (files ending ``.py``) or packages (directories with a ``__init__.py`` file) found within", "modules = [] for plugin in plugins: f, path, desc = imp.find_module(plugin, [plugin_dir])", "the contents of ``plugin_dir`` and load any Python modules (files ending ``.py``) or", "See the file COPYING for details # Idea borrowed and adapted from: #", "This file is part of pytiger and distributed under the terms # of", "def load(plugin_dir, package=__name__): \"\"\" Load Python modules and packages from a directory. This", "``package`` parameter. 
If this is not provided, this defaults to ``pytiger.utils.plugins``. The module", "plugins from. :param str package: Python package to load the plugins into. ..", "https://copyninja.info/blog/dynamic-module-loading.html # http://stackoverflow.com/a/3381582 import imp import os def load(plugin_dir, package=__name__): \"\"\" Load Python", "``plugin_dir`` and load any Python modules (files ending ``.py``) or packages (directories with", "utf-8 -*- \"\"\" A simple plugin loading mechanism \"\"\" # Copyright © 2015", "within a package name as supplied to this function in the optional ``package``", "loaded module or package. :param str plugin_dir: The path to the directory to", "to the directory to load plugins from. :param str package: Python package to", "contents of ``plugin_dir`` and load any Python modules (files ending ``.py``) or packages", "list of python module objects, one per loaded module or package. :param str", "not searched. Modules are compiled as they are loaded, if necessary. Plugins are", "The path to the directory to load plugins from. :param str package: Python", "as plugins if dirent.endswith('.py'): plugins.append(os.path.splitext(dirent)[0]) continue # Load directories containing __init__.py full_path =", "load any Python modules (files ending ``.py``) or packages (directories with a ``__init__.py``", "one per loaded module or package. :param str plugin_dir: The path to the", "package to load the plugins into. .. versionadded:: 1.1.0 \"\"\" plugin_dir = os.path.realpath(plugin_dir)", "load the plugin modules modules = [] for plugin in plugins: f, path,", "© 2015 Tiger Computing Ltd # This file is part of pytiger and", "function returns a list of python module objects, one per loaded module or", "in ``package`` must already be known to Python (i.e. in ``sys.modules``). The function", "\"\"\" A simple plugin loading mechanism \"\"\" # Copyright © 2015 Tiger Computing", "package: Python package to load the plugins into. .. versionadded:: 1.1.0 \"\"\" plugin_dir", "dirent.startswith('__'): continue # Load .py files as plugins if dirent.endswith('.py'): plugins.append(os.path.splitext(dirent)[0]) continue #", "# Now load the plugin modules modules = [] for plugin in plugins:", "if dirent.endswith('.py'): plugins.append(os.path.splitext(dirent)[0]) continue # Load directories containing __init__.py full_path = os.path.join(plugin_dir, dirent)", "str plugin_dir: The path to the directory to load plugins from. :param str", "__init__.py if dirent.startswith('__'): continue # Load .py files as plugins if dirent.endswith('.py'): plugins.append(os.path.splitext(dirent)[0])", "will list the contents of ``plugin_dir`` and load any Python modules (files ending", "provided, this defaults to ``pytiger.utils.plugins``. The module name supplied in ``package`` must already", "in ``sys.modules``). The function returns a list of python module objects, one per", "Discover the list of plugins plugins = [] for dirent in os.listdir(plugin_dir): #", "plugins = [] for dirent in os.listdir(plugin_dir): # skip __init__.py if dirent.startswith('__'): continue", "from a directory. This function will list the contents of ``plugin_dir`` and load", "Now load the plugin modules modules = [] for plugin in plugins: f,", "module = imp.load_module(package + '.' 
+ plugin, f, path, desc) modules.append(module) return modules", "file COPYING for details # Idea borrowed and adapted from: # https://copyninja.info/blog/dynamic-module-loading.html #", "Tiger Computing Ltd # This file is part of pytiger and distributed under", "with a ``__init__.py`` file) found within it. Sub-directories are not searched. Modules are", "optional ``package`` parameter. If this is not provided, this defaults to ``pytiger.utils.plugins``. The", "and adapted from: # https://copyninja.info/blog/dynamic-module-loading.html # http://stackoverflow.com/a/3381582 import imp import os def load(plugin_dir,", "compiled as they are loaded, if necessary. Plugins are loaded within a package", "name as supplied to this function in the optional ``package`` parameter. If this", "Load .py files as plugins if dirent.endswith('.py'): plugins.append(os.path.splitext(dirent)[0]) continue # Load directories containing", "if os.path.isfile(os.path.join(full_path, '__init__.py')): plugins.append(dirent) # Now load the plugin modules modules = []", "``package`` must already be known to Python (i.e. in ``sys.modules``). The function returns", "as supplied to this function in the optional ``package`` parameter. If this is", "# Idea borrowed and adapted from: # https://copyninja.info/blog/dynamic-module-loading.html # http://stackoverflow.com/a/3381582 import imp import", "continue # Load .py files as plugins if dirent.endswith('.py'): plugins.append(os.path.splitext(dirent)[0]) continue # Load", "from. :param str package: Python package to load the plugins into. .. versionadded::", "defaults to ``pytiger.utils.plugins``. The module name supplied in ``package`` must already be known", "# Load .py files as plugins if dirent.endswith('.py'): plugins.append(os.path.splitext(dirent)[0]) continue # Load directories", "modules and packages from a directory. This function will list the contents of", ".py files as plugins if dirent.endswith('.py'): plugins.append(os.path.splitext(dirent)[0]) continue # Load directories containing __init__.py", "os.listdir(plugin_dir): # skip __init__.py if dirent.startswith('__'): continue # Load .py files as plugins", "<gh_stars>1-10 # -*- coding: utf-8 -*- \"\"\" A simple plugin loading mechanism \"\"\"", "(directories with a ``__init__.py`` file) found within it. Sub-directories are not searched. Modules", "is part of pytiger and distributed under the terms # of a BSD-like", "any Python modules (files ending ``.py``) or packages (directories with a ``__init__.py`` file)", "load the plugins into. .. versionadded:: 1.1.0 \"\"\" plugin_dir = os.path.realpath(plugin_dir) # Discover", "import imp import os def load(plugin_dir, package=__name__): \"\"\" Load Python modules and packages", "# -*- coding: utf-8 -*- \"\"\" A simple plugin loading mechanism \"\"\" #", "are loaded within a package name as supplied to this function in the", "part of pytiger and distributed under the terms # of a BSD-like license", "# This file is part of pytiger and distributed under the terms #", "files as plugins if dirent.endswith('.py'): plugins.append(os.path.splitext(dirent)[0]) continue # Load directories containing __init__.py full_path", "plugin in plugins: f, path, desc = imp.find_module(plugin, [plugin_dir]) module = imp.load_module(package +", "Python modules and packages from a directory. This function will list the contents", "be known to Python (i.e. in ``sys.modules``). 
The function returns a list of", "if dirent.startswith('__'): continue # Load .py files as plugins if dirent.endswith('.py'): plugins.append(os.path.splitext(dirent)[0]) continue", "package name as supplied to this function in the optional ``package`` parameter. If", "Sub-directories are not searched. Modules are compiled as they are loaded, if necessary.", "path, desc = imp.find_module(plugin, [plugin_dir]) module = imp.load_module(package + '.' + plugin, f,", "are compiled as they are loaded, if necessary. Plugins are loaded within a", "[] for plugin in plugins: f, path, desc = imp.find_module(plugin, [plugin_dir]) module =", "This function will list the contents of ``plugin_dir`` and load any Python modules", "to load plugins from. :param str package: Python package to load the plugins", "for dirent in os.listdir(plugin_dir): # skip __init__.py if dirent.startswith('__'): continue # Load .py", "os.path.isfile(os.path.join(full_path, '__init__.py')): plugins.append(dirent) # Now load the plugin modules modules = [] for", "str package: Python package to load the plugins into. .. versionadded:: 1.1.0 \"\"\"", "adapted from: # https://copyninja.info/blog/dynamic-module-loading.html # http://stackoverflow.com/a/3381582 import imp import os def load(plugin_dir, package=__name__):", "# of a BSD-like license # See the file COPYING for details #", "``.py``) or packages (directories with a ``__init__.py`` file) found within it. Sub-directories are", "if os.path.isdir(full_path): if os.path.isfile(os.path.join(full_path, '__init__.py')): plugins.append(dirent) # Now load the plugin modules modules", "Copyright © 2015 Tiger Computing Ltd # This file is part of pytiger", "plugin modules modules = [] for plugin in plugins: f, path, desc =", "module name supplied in ``package`` must already be known to Python (i.e. in", "imp.find_module(plugin, [plugin_dir]) module = imp.load_module(package + '.' + plugin, f, path, desc) modules.append(module)", "Modules are compiled as they are loaded, if necessary. Plugins are loaded within", "(files ending ``.py``) or packages (directories with a ``__init__.py`` file) found within it.", "versionadded:: 1.1.0 \"\"\" plugin_dir = os.path.realpath(plugin_dir) # Discover the list of plugins plugins", "containing __init__.py full_path = os.path.join(plugin_dir, dirent) if os.path.isdir(full_path): if os.path.isfile(os.path.join(full_path, '__init__.py')): plugins.append(dirent) #", "to this function in the optional ``package`` parameter. If this is not provided,", "imp import os def load(plugin_dir, package=__name__): \"\"\" Load Python modules and packages from", "in os.listdir(plugin_dir): # skip __init__.py if dirent.startswith('__'): continue # Load .py files as", "``pytiger.utils.plugins``. The module name supplied in ``package`` must already be known to Python", "supplied in ``package`` must already be known to Python (i.e. in ``sys.modules``). The", "pytiger and distributed under the terms # of a BSD-like license # See", "python module objects, one per loaded module or package. :param str plugin_dir: The", "the list of plugins plugins = [] for dirent in os.listdir(plugin_dir): # skip", "'__init__.py')): plugins.append(dirent) # Now load the plugin modules modules = [] for plugin", "Python package to load the plugins into. .. 
versionadded:: 1.1.0 \"\"\" plugin_dir =", "for plugin in plugins: f, path, desc = imp.find_module(plugin, [plugin_dir]) module = imp.load_module(package", "plugin loading mechanism \"\"\" # Copyright © 2015 Tiger Computing Ltd # This", "in plugins: f, path, desc = imp.find_module(plugin, [plugin_dir]) module = imp.load_module(package + '.'", "of ``plugin_dir`` and load any Python modules (files ending ``.py``) or packages (directories", "necessary. Plugins are loaded within a package name as supplied to this function", "are loaded, if necessary. Plugins are loaded within a package name as supplied", "path to the directory to load plugins from. :param str package: Python package", "directory to load plugins from. :param str package: Python package to load the", "module objects, one per loaded module or package. :param str plugin_dir: The path", "= [] for dirent in os.listdir(plugin_dir): # skip __init__.py if dirent.startswith('__'): continue #", "Python (i.e. in ``sys.modules``). The function returns a list of python module objects,", "a ``__init__.py`` file) found within it. Sub-directories are not searched. Modules are compiled", "details # Idea borrowed and adapted from: # https://copyninja.info/blog/dynamic-module-loading.html # http://stackoverflow.com/a/3381582 import imp", "\"\"\" Load Python modules and packages from a directory. This function will list", "load plugins from. :param str package: Python package to load the plugins into.", "dirent) if os.path.isdir(full_path): if os.path.isfile(os.path.join(full_path, '__init__.py')): plugins.append(dirent) # Now load the plugin modules", "license # See the file COPYING for details # Idea borrowed and adapted", "import os def load(plugin_dir, package=__name__): \"\"\" Load Python modules and packages from a", "``sys.modules``). The function returns a list of python module objects, one per loaded", "to load the plugins into. .. versionadded:: 1.1.0 \"\"\" plugin_dir = os.path.realpath(plugin_dir) #", "loading mechanism \"\"\" # Copyright © 2015 Tiger Computing Ltd # This file" ]
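The loader shingled above discovers .py files and __init__.py packages in a directory and imports them with imp.find_module/imp.load_module. The imp module has been deprecated since Python 3.4 and was removed in 3.12, so a rough importlib-based equivalent is sketched below; load_plugins is a hypothetical name, it only handles plain .py files (the original also loads packages), and it is not the pytiger API:

import importlib.util
import os
import sys


def load_plugins(plugin_dir, package="plugins"):
    """Load every .py file in plugin_dir and return the module objects."""
    plugin_dir = os.path.realpath(plugin_dir)
    modules = []
    for dirent in sorted(os.listdir(plugin_dir)):
        # skip dunder files and anything that is not a Python source file
        if dirent.startswith("__") or not dirent.endswith(".py"):
            continue
        name = package + "." + os.path.splitext(dirent)[0]
        spec = importlib.util.spec_from_file_location(
            name, os.path.join(plugin_dir, dirent))
        module = importlib.util.module_from_spec(spec)
        sys.modules[name] = module       # register before executing, as a normal import would
        spec.loader.exec_module(module)  # compiles and runs the plugin source
        modules.append(module)
    return modules

As in the original docstring, the caller should pass a package name that is already present in sys.modules if the plugins are expected to refer to each other by their dotted names.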
[ "read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer) buffer_reader = DataReader.from_buffer(thumb_read_buffer) byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length) if not os.path.exists('static'): os.makedirs('static') filename=\"./static/media_thumb.jpg\"", "thumb_read_buffer) buffer_reader = DataReader.from_buffer(thumb_read_buffer) byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length) if not os.path.exists('static'): os.makedirs('static') filename=\"./static/media_thumb.jpg\" if", "# Then set TARGET_ID to the string this call returns. current_session = sessions.get_current_session()", "larger thumb_read_buffer = Buffer(5000000) await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer) buffer_reader = DataReader.from_buffer(thumb_read_buffer) byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length)", "= buffer_reader.read_bytes(thumb_read_buffer.length) if not os.path.exists('static'): os.makedirs('static') filename=\"./static/media_thumb.jpg\" if not len(bytearray(byte_buffer)) ==0: with open(filename,", "async def get_media_info(): sessions = await MediaManager.request_async() # This source_app_user_model_id check and if", "only get a certain player/program's media # (e.g. only chrome.exe's media not any", "Buffer(5000000) await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer) buffer_reader = DataReader.from_buffer(thumb_read_buffer) byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length) if not os.path.exists('static'):", "song_attr[0] != '_'} # converts winrt vector to list info_dict['genres'] = list(info_dict['genres']) #", "any other program's). # To get the ID, use a breakpoint() to run", "import json from winrt.windows.media.control import \\ GlobalSystemMediaTransportControlsSessionManager as MediaManager from winrt.windows.storage.streams import \\", "buffer_reader.read_bytes(thumb_read_buffer.length) if not os.path.exists('static'): os.makedirs('static') filename=\"./static/media_thumb.jpg\" if not len(bytearray(byte_buffer)) ==0: with open(filename, 'wb+')", "async def read_stream_into_buffer(stream_ref, buffer): readable_stream = await stream_ref.open_read_async() readable_stream.read_async(buffer, buffer.capacity, InputStreamOptions.READ_AHEAD) if __name__", "def read_stream_into_buffer(stream_ref, buffer): readable_stream = await stream_ref.open_read_async() readable_stream.read_async(buffer, buffer.capacity, InputStreamOptions.READ_AHEAD) if __name__ ==", "media # (e.g. only chrome.exe's media not any other program's). # To get", "buffer): readable_stream = await stream_ref.open_read_async() readable_stream.read_async(buffer, buffer.capacity, InputStreamOptions.READ_AHEAD) if __name__ == '__main__': print(json.dumps(asyncio.run(get_media_info())))", "# song_attr[0] != '_' ignores system attributes info_dict = {song_attr: info.__getattribute__(song_attr) for song_attr", "to be larger thumb_read_buffer = Buffer(5000000) await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer) buffer_reader = DataReader.from_buffer(thumb_read_buffer) byte_buffer", "# while the media you want to get is playing. # Then set", "sessions.get_current_session() # while the media you want to get is playing. 
# Then", "if statement is optional # Use it if you want to only get", "a breakpoint() to run sessions.get_current_session() # while the media you want to get", "= Buffer(5000000) await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer) buffer_reader = DataReader.from_buffer(thumb_read_buffer) byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length) if not", "call returns. current_session = sessions.get_current_session() if current_session: # there needs to be a", "# print(\"something went wrong with getting thumbnail\") info_dict[\"thumbnail\"]=\" \" return info_dict return None", "not any other program's). # To get the ID, use a breakpoint() to", "get the ID, use a breakpoint() to run sessions.get_current_session() # while the media", "# there needs to be a media session running info = await current_session.try_get_media_properties_async()", "if not os.path.exists('static'): os.makedirs('static') filename=\"./static/media_thumb.jpg\" if not len(bytearray(byte_buffer)) ==0: with open(filename, 'wb+') as", "# To get the ID, use a breakpoint() to run sessions.get_current_session() # while", "(e.g. only chrome.exe's media not any other program's). # To get the ID,", "player/program's media # (e.g. only chrome.exe's media not any other program's). # To", "first thumb_stream_ref = info_dict['thumbnail'] try: filename=\"./static/media_thumb.jpg\" if os.path.exists(filename): os.remove(filename) # 5MB (5 million", "if not len(bytearray(byte_buffer)) ==0: with open(filename, 'wb+') as fobj: fobj.write(bytearray(byte_buffer)) info_dict[\"thumbnail\"]=filename[1:] except Exception", "buffer - thumbnail unlikely to be larger thumb_read_buffer = Buffer(5000000) await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer)", "source_app_user_model_id check and if statement is optional # Use it if you want", "= sessions.get_current_session() if current_session: # there needs to be a media session running", "None async def read_stream_into_buffer(stream_ref, buffer): readable_stream = await stream_ref.open_read_async() readable_stream.read_async(buffer, buffer.capacity, InputStreamOptions.READ_AHEAD) if", "winrt vector to list info_dict['genres'] = list(info_dict['genres']) # create the current_media_info dict with", "a media session running info = await current_session.try_get_media_properties_async() # song_attr[0] != '_' ignores", "= DataReader.from_buffer(thumb_read_buffer) byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length) if not os.path.exists('static'): os.makedirs('static') filename=\"./static/media_thumb.jpg\" if not len(bytearray(byte_buffer))", "the current_media_info dict with the earlier code first thumb_stream_ref = info_dict['thumbnail'] try: filename=\"./static/media_thumb.jpg\"", "if song_attr[0] != '_'} # converts winrt vector to list info_dict['genres'] = list(info_dict['genres'])", "= await current_session.try_get_media_properties_async() # song_attr[0] != '_' ignores system attributes info_dict = {song_attr:", "fobj.write(bytearray(byte_buffer)) info_dict[\"thumbnail\"]=filename[1:] except Exception as e: # print(e) # print(\"something went wrong with", "- thumbnail unlikely to be larger thumb_read_buffer = Buffer(5000000) await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer) buffer_reader", "getting thumbnail\") info_dict[\"thumbnail\"]=\" \" return info_dict return None async def read_stream_into_buffer(stream_ref, buffer): readable_stream", "json from winrt.windows.media.control import \\ 
GlobalSystemMediaTransportControlsSessionManager as MediaManager from winrt.windows.storage.streams import \\ DataReader,", "you want to only get a certain player/program's media # (e.g. only chrome.exe's", "(5 million byte) buffer - thumbnail unlikely to be larger thumb_read_buffer = Buffer(5000000)", "want to get is playing. # Then set TARGET_ID to the string this", "be larger thumb_read_buffer = Buffer(5000000) await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer) buffer_reader = DataReader.from_buffer(thumb_read_buffer) byte_buffer =", "thumb_read_buffer = Buffer(5000000) await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer) buffer_reader = DataReader.from_buffer(thumb_read_buffer) byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length) if", "optional # Use it if you want to only get a certain player/program's", "InputStreamOptions async def get_media_info(): sessions = await MediaManager.request_async() # This source_app_user_model_id check and", "await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer) buffer_reader = DataReader.from_buffer(thumb_read_buffer) byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length) if not os.path.exists('static'): os.makedirs('static')", "byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length) if not os.path.exists('static'): os.makedirs('static') filename=\"./static/media_thumb.jpg\" if not len(bytearray(byte_buffer)) ==0: with", "TARGET_ID to the string this call returns. current_session = sessions.get_current_session() if current_session: #", "filename=\"./static/media_thumb.jpg\" if os.path.exists(filename): os.remove(filename) # 5MB (5 million byte) buffer - thumbnail unlikely", "with open(filename, 'wb+') as fobj: fobj.write(bytearray(byte_buffer)) info_dict[\"thumbnail\"]=filename[1:] except Exception as e: # print(e)", "vector to list info_dict['genres'] = list(info_dict['genres']) # create the current_media_info dict with the", "if current_session: # there needs to be a media session running info =", "certain player/program's media # (e.g. only chrome.exe's media not any other program's). #", "ID, use a breakpoint() to run sessions.get_current_session() # while the media you want", "as MediaManager from winrt.windows.storage.streams import \\ DataReader, Buffer, InputStreamOptions async def get_media_info(): sessions", "dir(info) if song_attr[0] != '_'} # converts winrt vector to list info_dict['genres'] =", "the string this call returns. 
current_session = sessions.get_current_session() if current_session: # there needs", "try: filename=\"./static/media_thumb.jpg\" if os.path.exists(filename): os.remove(filename) # 5MB (5 million byte) buffer - thumbnail", "except Exception as e: # print(e) # print(\"something went wrong with getting thumbnail\")", "MediaManager from winrt.windows.storage.streams import \\ DataReader, Buffer, InputStreamOptions async def get_media_info(): sessions =", "MediaManager.request_async() # This source_app_user_model_id check and if statement is optional # Use it", "list info_dict['genres'] = list(info_dict['genres']) # create the current_media_info dict with the earlier code", "song_attr in dir(info) if song_attr[0] != '_'} # converts winrt vector to list", "thumbnail\") info_dict[\"thumbnail\"]=\" \" return info_dict return None async def read_stream_into_buffer(stream_ref, buffer): readable_stream =", "to run sessions.get_current_session() # while the media you want to get is playing.", "info = await current_session.try_get_media_properties_async() # song_attr[0] != '_' ignores system attributes info_dict =", "as fobj: fobj.write(bytearray(byte_buffer)) info_dict[\"thumbnail\"]=filename[1:] except Exception as e: # print(e) # print(\"something went", "'_' ignores system attributes info_dict = {song_attr: info.__getattribute__(song_attr) for song_attr in dir(info) if", "\" return info_dict return None async def read_stream_into_buffer(stream_ref, buffer): readable_stream = await stream_ref.open_read_async()", "print(e) # print(\"something went wrong with getting thumbnail\") info_dict[\"thumbnail\"]=\" \" return info_dict return", "return None async def read_stream_into_buffer(stream_ref, buffer): readable_stream = await stream_ref.open_read_async() readable_stream.read_async(buffer, buffer.capacity, InputStreamOptions.READ_AHEAD)", "be a media session running info = await current_session.try_get_media_properties_async() # song_attr[0] != '_'", "if os.path.exists(filename): os.remove(filename) # 5MB (5 million byte) buffer - thumbnail unlikely to", "not os.path.exists('static'): os.makedirs('static') filename=\"./static/media_thumb.jpg\" if not len(bytearray(byte_buffer)) ==0: with open(filename, 'wb+') as fobj:", "get a certain player/program's media # (e.g. only chrome.exe's media not any other", "while the media you want to get is playing. # Then set TARGET_ID", "'_'} # converts winrt vector to list info_dict['genres'] = list(info_dict['genres']) # create the", "to the string this call returns. current_session = sessions.get_current_session() if current_session: # there", "This source_app_user_model_id check and if statement is optional # Use it if you", "ignores system attributes info_dict = {song_attr: info.__getattribute__(song_attr) for song_attr in dir(info) if song_attr[0]", "import \\ DataReader, Buffer, InputStreamOptions async def get_media_info(): sessions = await MediaManager.request_async() #", "to list info_dict['genres'] = list(info_dict['genres']) # create the current_media_info dict with the earlier", "dict with the earlier code first thumb_stream_ref = info_dict['thumbnail'] try: filename=\"./static/media_thumb.jpg\" if os.path.exists(filename):", "Use it if you want to only get a certain player/program's media #", "returns. 
current_session = sessions.get_current_session() if current_session: # there needs to be a media", "current_session: # there needs to be a media session running info = await", "current_session.try_get_media_properties_async() # song_attr[0] != '_' ignores system attributes info_dict = {song_attr: info.__getattribute__(song_attr) for", "!= '_' ignores system attributes info_dict = {song_attr: info.__getattribute__(song_attr) for song_attr in dir(info)", "info_dict['genres'] = list(info_dict['genres']) # create the current_media_info dict with the earlier code first", "needs to be a media session running info = await current_session.try_get_media_properties_async() # song_attr[0]", "'wb+') as fobj: fobj.write(bytearray(byte_buffer)) info_dict[\"thumbnail\"]=filename[1:] except Exception as e: # print(e) # print(\"something", "the earlier code first thumb_stream_ref = info_dict['thumbnail'] try: filename=\"./static/media_thumb.jpg\" if os.path.exists(filename): os.remove(filename) #", "return info_dict return None async def read_stream_into_buffer(stream_ref, buffer): readable_stream = await stream_ref.open_read_async() readable_stream.read_async(buffer,", "print(\"something went wrong with getting thumbnail\") info_dict[\"thumbnail\"]=\" \" return info_dict return None async", "# converts winrt vector to list info_dict['genres'] = list(info_dict['genres']) # create the current_media_info", "info_dict = {song_attr: info.__getattribute__(song_attr) for song_attr in dir(info) if song_attr[0] != '_'} #", "current_media_info dict with the earlier code first thumb_stream_ref = info_dict['thumbnail'] try: filename=\"./static/media_thumb.jpg\" if", "session running info = await current_session.try_get_media_properties_async() # song_attr[0] != '_' ignores system attributes", "running info = await current_session.try_get_media_properties_async() # song_attr[0] != '_' ignores system attributes info_dict", "unlikely to be larger thumb_read_buffer = Buffer(5000000) await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer) buffer_reader = DataReader.from_buffer(thumb_read_buffer)", "# This source_app_user_model_id check and if statement is optional # Use it if", "went wrong with getting thumbnail\") info_dict[\"thumbnail\"]=\" \" return info_dict return None async def", "fobj: fobj.write(bytearray(byte_buffer)) info_dict[\"thumbnail\"]=filename[1:] except Exception as e: # print(e) # print(\"something went wrong", "byte) buffer - thumbnail unlikely to be larger thumb_read_buffer = Buffer(5000000) await read_stream_into_buffer(thumb_stream_ref,", "buffer_reader = DataReader.from_buffer(thumb_read_buffer) byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length) if not os.path.exists('static'): os.makedirs('static') filename=\"./static/media_thumb.jpg\" if not", "the media you want to get is playing. # Then set TARGET_ID to", "DataReader, Buffer, InputStreamOptions async def get_media_info(): sessions = await MediaManager.request_async() # This source_app_user_model_id", "other program's). 
# To get the ID, use a breakpoint() to run sessions.get_current_session()", "use a breakpoint() to run sessions.get_current_session() # while the media you want to", "!= '_'} # converts winrt vector to list info_dict['genres'] = list(info_dict['genres']) # create", "len(bytearray(byte_buffer)) ==0: with open(filename, 'wb+') as fobj: fobj.write(bytearray(byte_buffer)) info_dict[\"thumbnail\"]=filename[1:] except Exception as e:", "info_dict return None async def read_stream_into_buffer(stream_ref, buffer): readable_stream = await stream_ref.open_read_async() readable_stream.read_async(buffer, buffer.capacity,", "million byte) buffer - thumbnail unlikely to be larger thumb_read_buffer = Buffer(5000000) await", "check and if statement is optional # Use it if you want to", "chrome.exe's media not any other program's). # To get the ID, use a", "this call returns. current_session = sessions.get_current_session() if current_session: # there needs to be", "{song_attr: info.__getattribute__(song_attr) for song_attr in dir(info) if song_attr[0] != '_'} # converts winrt", "= info_dict['thumbnail'] try: filename=\"./static/media_thumb.jpg\" if os.path.exists(filename): os.remove(filename) # 5MB (5 million byte) buffer", "# print(e) # print(\"something went wrong with getting thumbnail\") info_dict[\"thumbnail\"]=\" \" return info_dict", "converts winrt vector to list info_dict['genres'] = list(info_dict['genres']) # create the current_media_info dict", "To get the ID, use a breakpoint() to run sessions.get_current_session() # while the", "earlier code first thumb_stream_ref = info_dict['thumbnail'] try: filename=\"./static/media_thumb.jpg\" if os.path.exists(filename): os.remove(filename) # 5MB", "it if you want to only get a certain player/program's media # (e.g.", "media session running info = await current_session.try_get_media_properties_async() # song_attr[0] != '_' ignores system", "info.__getattribute__(song_attr) for song_attr in dir(info) if song_attr[0] != '_'} # converts winrt vector", "==0: with open(filename, 'wb+') as fobj: fobj.write(bytearray(byte_buffer)) info_dict[\"thumbnail\"]=filename[1:] except Exception as e: #", "not len(bytearray(byte_buffer)) ==0: with open(filename, 'wb+') as fobj: fobj.write(bytearray(byte_buffer)) info_dict[\"thumbnail\"]=filename[1:] except Exception as", "Then set TARGET_ID to the string this call returns. current_session = sessions.get_current_session() if", "get_media_info(): sessions = await MediaManager.request_async() # This source_app_user_model_id check and if statement is", "current_session = sessions.get_current_session() if current_session: # there needs to be a media session", "string this call returns. 
current_session = sessions.get_current_session() if current_session: # there needs to", "with getting thumbnail\") info_dict[\"thumbnail\"]=\" \" return info_dict return None async def read_stream_into_buffer(stream_ref, buffer):", "await MediaManager.request_async() # This source_app_user_model_id check and if statement is optional # Use", "os.remove(filename) # 5MB (5 million byte) buffer - thumbnail unlikely to be larger", "# 5MB (5 million byte) buffer - thumbnail unlikely to be larger thumb_read_buffer", "is optional # Use it if you want to only get a certain", "wrong with getting thumbnail\") info_dict[\"thumbnail\"]=\" \" return info_dict return None async def read_stream_into_buffer(stream_ref,", "= await MediaManager.request_async() # This source_app_user_model_id check and if statement is optional #", "winrt.windows.media.control import \\ GlobalSystemMediaTransportControlsSessionManager as MediaManager from winrt.windows.storage.streams import \\ DataReader, Buffer, InputStreamOptions", "\\ GlobalSystemMediaTransportControlsSessionManager as MediaManager from winrt.windows.storage.streams import \\ DataReader, Buffer, InputStreamOptions async def", "os.path.exists('static'): os.makedirs('static') filename=\"./static/media_thumb.jpg\" if not len(bytearray(byte_buffer)) ==0: with open(filename, 'wb+') as fobj: fobj.write(bytearray(byte_buffer))", "# Use it if you want to only get a certain player/program's media", "from winrt.windows.media.control import \\ GlobalSystemMediaTransportControlsSessionManager as MediaManager from winrt.windows.storage.streams import \\ DataReader, Buffer,", "5MB (5 million byte) buffer - thumbnail unlikely to be larger thumb_read_buffer =", "thumbnail unlikely to be larger thumb_read_buffer = Buffer(5000000) await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer) buffer_reader =", "from winrt.windows.storage.streams import \\ DataReader, Buffer, InputStreamOptions async def get_media_info(): sessions = await", "# (e.g. only chrome.exe's media not any other program's). # To get the", "media not any other program's). # To get the ID, use a breakpoint()", "os import json from winrt.windows.media.control import \\ GlobalSystemMediaTransportControlsSessionManager as MediaManager from winrt.windows.storage.streams import", "and if statement is optional # Use it if you want to only", "if you want to only get a certain player/program's media # (e.g. only", "to get is playing. # Then set TARGET_ID to the string this call", "code first thumb_stream_ref = info_dict['thumbnail'] try: filename=\"./static/media_thumb.jpg\" if os.path.exists(filename): os.remove(filename) # 5MB (5", "for song_attr in dir(info) if song_attr[0] != '_'} # converts winrt vector to", "thumb_stream_ref = info_dict['thumbnail'] try: filename=\"./static/media_thumb.jpg\" if os.path.exists(filename): os.remove(filename) # 5MB (5 million byte)", "attributes info_dict = {song_attr: info.__getattribute__(song_attr) for song_attr in dir(info) if song_attr[0] != '_'}", "create the current_media_info dict with the earlier code first thumb_stream_ref = info_dict['thumbnail'] try:", "media you want to get is playing. 
# Then set TARGET_ID to the", "info_dict[\"thumbnail\"]=\" \" return info_dict return None async def read_stream_into_buffer(stream_ref, buffer): readable_stream = await", "os.path.exists(filename): os.remove(filename) # 5MB (5 million byte) buffer - thumbnail unlikely to be", "sessions.get_current_session() if current_session: # there needs to be a media session running info", "# create the current_media_info dict with the earlier code first thumb_stream_ref = info_dict['thumbnail']", "= list(info_dict['genres']) # create the current_media_info dict with the earlier code first thumb_stream_ref", "set TARGET_ID to the string this call returns. current_session = sessions.get_current_session() if current_session:", "is playing. # Then set TARGET_ID to the string this call returns. current_session", "e: # print(e) # print(\"something went wrong with getting thumbnail\") info_dict[\"thumbnail\"]=\" \" return", "info_dict['thumbnail'] try: filename=\"./static/media_thumb.jpg\" if os.path.exists(filename): os.remove(filename) # 5MB (5 million byte) buffer -", "import asyncio import os import json from winrt.windows.media.control import \\ GlobalSystemMediaTransportControlsSessionManager as MediaManager", "open(filename, 'wb+') as fobj: fobj.write(bytearray(byte_buffer)) info_dict[\"thumbnail\"]=filename[1:] except Exception as e: # print(e) #", "to only get a certain player/program's media # (e.g. only chrome.exe's media not", "info_dict[\"thumbnail\"]=filename[1:] except Exception as e: # print(e) # print(\"something went wrong with getting", "as e: # print(e) # print(\"something went wrong with getting thumbnail\") info_dict[\"thumbnail\"]=\" \"", "= {song_attr: info.__getattribute__(song_attr) for song_attr in dir(info) if song_attr[0] != '_'} # converts", "DataReader.from_buffer(thumb_read_buffer) byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length) if not os.path.exists('static'): os.makedirs('static') filename=\"./static/media_thumb.jpg\" if not len(bytearray(byte_buffer)) ==0:", "def get_media_info(): sessions = await MediaManager.request_async() # This source_app_user_model_id check and if statement", "sessions = await MediaManager.request_async() # This source_app_user_model_id check and if statement is optional", "you want to get is playing. # Then set TARGET_ID to the string", "with the earlier code first thumb_stream_ref = info_dict['thumbnail'] try: filename=\"./static/media_thumb.jpg\" if os.path.exists(filename): os.remove(filename)", "filename=\"./static/media_thumb.jpg\" if not len(bytearray(byte_buffer)) ==0: with open(filename, 'wb+') as fobj: fobj.write(bytearray(byte_buffer)) info_dict[\"thumbnail\"]=filename[1:] except", "statement is optional # Use it if you want to only get a", "list(info_dict['genres']) # create the current_media_info dict with the earlier code first thumb_stream_ref =", "playing. # Then set TARGET_ID to the string this call returns. 
current_session =", "the ID, use a breakpoint() to run sessions.get_current_session() # while the media you", "GlobalSystemMediaTransportControlsSessionManager as MediaManager from winrt.windows.storage.streams import \\ DataReader, Buffer, InputStreamOptions async def get_media_info():", "os.makedirs('static') filename=\"./static/media_thumb.jpg\" if not len(bytearray(byte_buffer)) ==0: with open(filename, 'wb+') as fobj: fobj.write(bytearray(byte_buffer)) info_dict[\"thumbnail\"]=filename[1:]", "Buffer, InputStreamOptions async def get_media_info(): sessions = await MediaManager.request_async() # This source_app_user_model_id check", "run sessions.get_current_session() # while the media you want to get is playing. #", "winrt.windows.storage.streams import \\ DataReader, Buffer, InputStreamOptions async def get_media_info(): sessions = await MediaManager.request_async()", "program's). # To get the ID, use a breakpoint() to run sessions.get_current_session() #", "in dir(info) if song_attr[0] != '_'} # converts winrt vector to list info_dict['genres']", "import \\ GlobalSystemMediaTransportControlsSessionManager as MediaManager from winrt.windows.storage.streams import \\ DataReader, Buffer, InputStreamOptions async", "system attributes info_dict = {song_attr: info.__getattribute__(song_attr) for song_attr in dir(info) if song_attr[0] !=", "only chrome.exe's media not any other program's). # To get the ID, use", "breakpoint() to run sessions.get_current_session() # while the media you want to get is", "asyncio import os import json from winrt.windows.media.control import \\ GlobalSystemMediaTransportControlsSessionManager as MediaManager from", "get is playing. # Then set TARGET_ID to the string this call returns.", "await current_session.try_get_media_properties_async() # song_attr[0] != '_' ignores system attributes info_dict = {song_attr: info.__getattribute__(song_attr)", "import os import json from winrt.windows.media.control import \\ GlobalSystemMediaTransportControlsSessionManager as MediaManager from winrt.windows.storage.streams", "there needs to be a media session running info = await current_session.try_get_media_properties_async() #", "\\ DataReader, Buffer, InputStreamOptions async def get_media_info(): sessions = await MediaManager.request_async() # This", "to be a media session running info = await current_session.try_get_media_properties_async() # song_attr[0] !=", "read_stream_into_buffer(stream_ref, buffer): readable_stream = await stream_ref.open_read_async() readable_stream.read_async(buffer, buffer.capacity, InputStreamOptions.READ_AHEAD) if __name__ == '__main__':", "a certain player/program's media # (e.g. only chrome.exe's media not any other program's).", "song_attr[0] != '_' ignores system attributes info_dict = {song_attr: info.__getattribute__(song_attr) for song_attr in", "want to only get a certain player/program's media # (e.g. only chrome.exe's media", "Exception as e: # print(e) # print(\"something went wrong with getting thumbnail\") info_dict[\"thumbnail\"]=\"" ]
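Likewise, the row closed just above collects fragments of a small asyncio script that reads the current Windows media session through the winrt bindings and saves its thumbnail. A sketch reassembled from those fragments follows; the ordering of statements and the omitted TARGET_ID / source_app_user_model_id filtering mentioned in the comment fragments are inferred, so this is illustrative rather than authoritative.

# Reconstruction of the media-info script described by the fragments above.
import asyncio
import os
import json

from winrt.windows.media.control import \
    GlobalSystemMediaTransportControlsSessionManager as MediaManager
from winrt.windows.storage.streams import \
    DataReader, Buffer, InputStreamOptions


async def read_stream_into_buffer(stream_ref, buffer):
    readable_stream = await stream_ref.open_read_async()
    readable_stream.read_async(buffer, buffer.capacity, InputStreamOptions.READ_AHEAD)


async def get_media_info():
    sessions = await MediaManager.request_async()
    # The fragments describe an optional source_app_user_model_id check here,
    # used to restrict the lookup to one player (e.g. only chrome.exe's media);
    # it is omitted in this sketch.
    current_session = sessions.get_current_session()
    if current_session:
        # there needs to be a media session running
        info = await current_session.try_get_media_properties_async()
        # song_attr[0] != '_' ignores system attributes
        info_dict = {song_attr: info.__getattribute__(song_attr)
                     for song_attr in dir(info) if song_attr[0] != '_'}
        # converts winrt vector to list
        info_dict['genres'] = list(info_dict['genres'])

        thumb_stream_ref = info_dict['thumbnail']
        try:
            filename = "./static/media_thumb.jpg"
            if os.path.exists(filename):
                os.remove(filename)
            # 5MB (5 million byte) buffer - thumbnail unlikely to be larger
            thumb_read_buffer = Buffer(5000000)
            await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer)
            buffer_reader = DataReader.from_buffer(thumb_read_buffer)
            byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length)
            if not os.path.exists('static'):
                os.makedirs('static')
            if not len(bytearray(byte_buffer)) == 0:
                with open(filename, 'wb+') as fobj:
                    fobj.write(bytearray(byte_buffer))
                info_dict["thumbnail"] = filename[1:]
        except Exception:
            # the fragments fall back to a blank thumbnail on any error
            info_dict["thumbnail"] = " "
        return info_dict
    return None


if __name__ == '__main__':
    print(json.dumps(asyncio.run(get_media_info())))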
[ "import FWCore.ParameterSet.Config as cms #from ..modules.hltL1TkMuons_cfi import * from ..modules.hltDoubleMuon7DZ1p0_cfi import * from", "* L1T_DoubleTkMuon_15_7 = cms.Path( HLTBeginSequence + # hltL1TkMuons + hltL1TkDoubleMuFiltered7 + hltL1TkSingleMuFiltered15 +", "import * from ..modules.hltDoubleMuon7DZ1p0_cfi import * from ..modules.hltL1TkDoubleMuFiltered7_cfi import * from ..modules.hltL1TkSingleMuFiltered15_cfi import", "..sequences.HLTEndSequence_cfi import * L1T_DoubleTkMuon_15_7 = cms.Path( HLTBeginSequence + # hltL1TkMuons + hltL1TkDoubleMuFiltered7 +", "import * from ..sequences.HLTBeginSequence_cfi import * from ..sequences.HLTEndSequence_cfi import * L1T_DoubleTkMuon_15_7 = cms.Path(", "* from ..sequences.HLTBeginSequence_cfi import * from ..sequences.HLTEndSequence_cfi import * L1T_DoubleTkMuon_15_7 = cms.Path( HLTBeginSequence", "* from ..modules.hltL1TkSingleMuFiltered15_cfi import * from ..sequences.HLTBeginSequence_cfi import * from ..sequences.HLTEndSequence_cfi import *", "from ..sequences.HLTEndSequence_cfi import * L1T_DoubleTkMuon_15_7 = cms.Path( HLTBeginSequence + # hltL1TkMuons + hltL1TkDoubleMuFiltered7", "import * L1T_DoubleTkMuon_15_7 = cms.Path( HLTBeginSequence + # hltL1TkMuons + hltL1TkDoubleMuFiltered7 + hltL1TkSingleMuFiltered15", "L1T_DoubleTkMuon_15_7 = cms.Path( HLTBeginSequence + # hltL1TkMuons + hltL1TkDoubleMuFiltered7 + hltL1TkSingleMuFiltered15 + hltDoubleMuon7DZ1p0", "cms #from ..modules.hltL1TkMuons_cfi import * from ..modules.hltDoubleMuon7DZ1p0_cfi import * from ..modules.hltL1TkDoubleMuFiltered7_cfi import *", "cms.Path( HLTBeginSequence + # hltL1TkMuons + hltL1TkDoubleMuFiltered7 + hltL1TkSingleMuFiltered15 + hltDoubleMuon7DZ1p0 + HLTEndSequence", "from ..sequences.HLTBeginSequence_cfi import * from ..sequences.HLTEndSequence_cfi import * L1T_DoubleTkMuon_15_7 = cms.Path( HLTBeginSequence +", "from ..modules.hltL1TkDoubleMuFiltered7_cfi import * from ..modules.hltL1TkSingleMuFiltered15_cfi import * from ..sequences.HLTBeginSequence_cfi import * from", "import * from ..sequences.HLTEndSequence_cfi import * L1T_DoubleTkMuon_15_7 = cms.Path( HLTBeginSequence + # hltL1TkMuons", "FWCore.ParameterSet.Config as cms #from ..modules.hltL1TkMuons_cfi import * from ..modules.hltDoubleMuon7DZ1p0_cfi import * from ..modules.hltL1TkDoubleMuFiltered7_cfi", "as cms #from ..modules.hltL1TkMuons_cfi import * from ..modules.hltDoubleMuon7DZ1p0_cfi import * from ..modules.hltL1TkDoubleMuFiltered7_cfi import", "* from ..modules.hltL1TkDoubleMuFiltered7_cfi import * from ..modules.hltL1TkSingleMuFiltered15_cfi import * from ..sequences.HLTBeginSequence_cfi import *", "..modules.hltDoubleMuon7DZ1p0_cfi import * from ..modules.hltL1TkDoubleMuFiltered7_cfi import * from ..modules.hltL1TkSingleMuFiltered15_cfi import * from ..sequences.HLTBeginSequence_cfi", "..modules.hltL1TkDoubleMuFiltered7_cfi import * from ..modules.hltL1TkSingleMuFiltered15_cfi import * from ..sequences.HLTBeginSequence_cfi import * from ..sequences.HLTEndSequence_cfi", "..modules.hltL1TkMuons_cfi import * from ..modules.hltDoubleMuon7DZ1p0_cfi import * from ..modules.hltL1TkDoubleMuFiltered7_cfi import * from ..modules.hltL1TkSingleMuFiltered15_cfi", "= cms.Path( HLTBeginSequence + # hltL1TkMuons + hltL1TkDoubleMuFiltered7 + hltL1TkSingleMuFiltered15 + hltDoubleMuon7DZ1p0 +", "..modules.hltL1TkSingleMuFiltered15_cfi import * from ..sequences.HLTBeginSequence_cfi import * from ..sequences.HLTEndSequence_cfi import * L1T_DoubleTkMuon_15_7 =", "..sequences.HLTBeginSequence_cfi import * from 
..sequences.HLTEndSequence_cfi import * L1T_DoubleTkMuon_15_7 = cms.Path( HLTBeginSequence + #", "from ..modules.hltDoubleMuon7DZ1p0_cfi import * from ..modules.hltL1TkDoubleMuFiltered7_cfi import * from ..modules.hltL1TkSingleMuFiltered15_cfi import * from", "import * from ..modules.hltL1TkSingleMuFiltered15_cfi import * from ..sequences.HLTBeginSequence_cfi import * from ..sequences.HLTEndSequence_cfi import", "HLTBeginSequence + # hltL1TkMuons + hltL1TkDoubleMuFiltered7 + hltL1TkSingleMuFiltered15 + hltDoubleMuon7DZ1p0 + HLTEndSequence )", "import * from ..modules.hltL1TkDoubleMuFiltered7_cfi import * from ..modules.hltL1TkSingleMuFiltered15_cfi import * from ..sequences.HLTBeginSequence_cfi import", "from ..modules.hltL1TkSingleMuFiltered15_cfi import * from ..sequences.HLTBeginSequence_cfi import * from ..sequences.HLTEndSequence_cfi import * L1T_DoubleTkMuon_15_7", "* from ..sequences.HLTEndSequence_cfi import * L1T_DoubleTkMuon_15_7 = cms.Path( HLTBeginSequence + # hltL1TkMuons +", "* from ..modules.hltDoubleMuon7DZ1p0_cfi import * from ..modules.hltL1TkDoubleMuFiltered7_cfi import * from ..modules.hltL1TkSingleMuFiltered15_cfi import *", "#from ..modules.hltL1TkMuons_cfi import * from ..modules.hltDoubleMuon7DZ1p0_cfi import * from ..modules.hltL1TkDoubleMuFiltered7_cfi import * from" ]
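The short row above holds fragments of a CMSSW HLT path configuration. Put back together, it reads roughly as follows; only the import and path lines visible in the fragments are included, and the hltL1TkMuons pieces are kept commented out exactly as they appear there.

# Reconstruction of the HLT path configuration described by the fragments above.
import FWCore.ParameterSet.Config as cms

#from ..modules.hltL1TkMuons_cfi import *
from ..modules.hltDoubleMuon7DZ1p0_cfi import *
from ..modules.hltL1TkDoubleMuFiltered7_cfi import *
from ..modules.hltL1TkSingleMuFiltered15_cfi import *
from ..sequences.HLTBeginSequence_cfi import *
from ..sequences.HLTEndSequence_cfi import *

L1T_DoubleTkMuon_15_7 = cms.Path(
    HLTBeginSequence +
    # hltL1TkMuons +
    hltL1TkDoubleMuFiltered7 +
    hltL1TkSingleMuFiltered15 +
    hltDoubleMuon7DZ1p0 +
    HLTEndSequence
)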
[ "dim == 2: lambdas = eigs_2d(M) else: lambdas = eigs_3d(M) # lambdas =", "mat[0] + mat[4] + mat[8] t = sqr(mat[1]) + sqr(mat[2]) + sqr(mat[5]) p", "* mat[0, 1]**2 tmp1 = Piecewise( (a / 2, delta < 1e-10), ((a", "j in range(0, dim): if i <= j: M[i, j] = Symbol('m[' +", "p = 2.0 * sqrt(p) tmp1 = Piecewise( (b / 3.0, p <", "* (mat[0] * sqr(mat[0]) + mat[4] * sqr(mat[4]) + mat[8] * sqr(mat[8])) q", "}\\n\\n\" lambdaa = Symbol('lambda', real=True) for dim in dims: print(\"processing \" + str(dim))", "* (sqr(mat[0] - mat[4]) + sqr(mat[0] - mat[8]) + sqr(mat[4] - mat[8])) p", "mat[1, 1])**2 + 4 * mat[0, 1]**2 tmp1 = Piecewise( (a / 2,", "= 0.5 * (sqr(mat[0] - mat[4]) + sqr(mat[0] - mat[8]) + sqr(mat[4] -", "2.0 * pi) / 3.0)) / 3.0, True) ) tmp3 = Piecewise( (b", "= parse_args() dims = [2, 3] cpp = \"#include <polyfem/auto_eigs.hpp>\\n\\n\\n\" hpp = \"#pragma", "b = mat[0] + mat[4] + mat[8] t = sqr(mat[1]) + sqr(mat[2]) +", "27.0 * (mat[0] * sqr(mat[5]) + mat[4] * sqr(mat[2]) + mat[8] * sqr(mat[1]))", "9.0 * b * t q -= 3.0 * (mat[0] + mat[4]) *", "* mat[8] + 3.0 * mat[1] * mat[2] * mat[5]) q += 2.0", "mat[8]) * (mat[4] + mat[8]) q -= 27.0 * (mat[0] * sqr(mat[5]) +", "type=str, help=\"path to the output folder\") return parser.parse_args() if __name__ == \"__main__\": args", "= cpp + \"\\n\" hpp = hpp + \"\\n\" cpp = cpp +", "= mat[0] + mat[4] + mat[8] t = sqr(mat[1]) + sqr(mat[2]) + sqr(mat[5])", "return parser.parse_args() if __name__ == \"__main__\": args = parse_args() dims = [2, 3]", "eigs_3d(mat): b = mat[0] + mat[4] + mat[8] t = sqr(mat[1]) + sqr(mat[2])", "polyfem {\\nnamespace autogen \" + \"{\\n\" hpp = hpp + \"template<typename T>\\nT int_pow(T", "* sqrt(p) tmp1 = Piecewise( (b / 3.0, p < 1e-10), ((b +", "2.0 * (mat[0] * sqr(mat[0]) + mat[4] * sqr(mat[4]) + mat[8] * sqr(mat[8]))", "\"__main__\": args = parse_args() dims = [2, 3] cpp = \"#include <polyfem/auto_eigs.hpp>\\n\\n\\n\" hpp", "real=True) if dim == 2: lambdas = eigs_2d(M) else: lambdas = eigs_3d(M) #", "autogen \" + \"{\\n\" hpp = hpp + \"namespace polyfem {\\nnamespace autogen \"", "res = res*val; return res; }\\n\\n\" lambdaa = Symbol('lambda', real=True) for dim in", "return tmp1.subs(p, p), tmp2.subs(p, p), tmp3.subs(p, p) def parse_args(): parser = argparse.ArgumentParser( description=__doc__,", "<= -1.0), (acos(x), True)) return tmp.subs(x, x) def eigs_2d(mat): a = mat[0, 0]", "# lambdas = simplify(lambdas) c99 = pretty_print.C99_print(lambdas) c99 = re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\", r'm(\\1,\\2)', c99) c99", "simplify(lambdas) c99 = pretty_print.C99_print(lambdas) c99 = re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\", r'm(\\1,\\2)', c99) c99 = re.sub(r\"result_(\\d{1})\", r'res(\\1)',", "trunc_acos(0.5 * q / sqrt(p * sqr(p))) p = 2.0 * sqrt(p) tmp1", "= res*val; return res; }\\n\\n\" lambdaa = Symbol('lambda', real=True) for dim in dims:", "mat[8] * sqr(mat[8])) q += 9.0 * b * t q -= 3.0", "< 1e-10), ((b + p * cos((delta - 2.0 * pi) / 3.0))", "0] - mat[1, 1])**2 + 4 * mat[0, 1]**2 tmp1 = Piecewise( (a", "a = mat[0, 0] + mat[1, 1] delta = (mat[0, 0] - mat[1,", "exp <=0 ? 
T(0.): val; for(int i = 1; i < exp; ++i)", "delta) def eigs_3d(mat): b = mat[0] + mat[4] + mat[8] t = sqr(mat[1])", "os import re import argparse # local import pretty_print def sqr(a): return a", "1e-10), ((b + p * cos((delta + 2.0 * pi) / 3.0)) /", "= re.sub(r\"result_(\\d{1})\", r'res(\\1)', c99) c99 = c99.replace(\"0.0\", \"T(0)\") c99 = c99.replace(\" M_PI\", \"", "(mat[0] * sqr(mat[0]) + mat[4] * sqr(mat[4]) + mat[8] * sqr(mat[8])) q +=", "return res; }\\n\\n\" lambdaa = Symbol('lambda', real=True) for dim in dims: print(\"processing \"", "to the output folder\") return parser.parse_args() if __name__ == \"__main__\": args = parse_args()", "argparse # local import pretty_print def sqr(a): return a * a def trunc_acos(x):", "parse_args(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"output\", type=str, help=\"path to the output folder\")", "j] = Symbol('m[' + str(j) + ',' + str(i) + ']', real=True) if", "4 * mat[0, 1]**2 tmp1 = Piecewise( (a / 2, delta < 1e-10),", "1, 0, 3, 1> &res)\" hpp = hpp + signature + \" {\\nres.resize(\"", "True) ) return tmp1.subs(p, p), tmp2.subs(p, p), tmp3.subs(p, p) def parse_args(): parser =", "\" T(M_PI)\") signature = \"template<typename T>\\nvoid eigs_\" + str(dim) + \"d(const Eigen::Matrix<T, Eigen::Dynamic,", "']', real=True) else: M[i, j] = Symbol('m[' + str(j) + ',' + str(i)", "cos((delta + 2.0 * pi) / 3.0)) / 3.0, True) ) tmp3 =", "sqr(mat[1])) delta = trunc_acos(0.5 * q / sqrt(p * sqr(p))) p = 2.0", "T>\\nT int_pow(T val, int exp) { T res = exp <=0 ? T(0.):", "= hpp + signature + \" {\\nres.resize(\" + str(dim) + \");\\n\" + c99", "3.0, True) ) tmp2 = Piecewise( (b / 3.0, p < 1e-10), ((b", "pi) / 3.0)) / 3.0, True) ) return tmp1.subs(p, p), tmp2.subs(p, p), tmp3.subs(p,", "+ sqr(mat[0] - mat[8]) + sqr(mat[4] - mat[8])) p += 3.0 * t", "mat[4] * mat[8] + 3.0 * mat[1] * mat[2] * mat[5]) q +=", "with open(os.path.join(path, \"auto_eigs.cpp\"), \"w\") as file: file.write(cpp) with open(os.path.join(path, \"auto_eigs.hpp\"), \"w\") as file:", ") tmp3 = Piecewise( (b / 3.0, p < 1e-10), ((b + p", "= hpp + \"\\n\" cpp = cpp + \"\\n}}\\n\" hpp = hpp +", "sqrt(delta)) / 2.0, True) ) tmp2 = Piecewise( (a / 2, delta <", "',' + str(j) + ']', real=True) else: M[i, j] = Symbol('m[' + str(j)", "1> &res)\" hpp = hpp + signature + \" {\\nres.resize(\" + str(dim) +", "trunc_acos(x): tmp = Piecewise((0.0, x >= 1.0), (pi, x <= -1.0), (acos(x), True))", "\"Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3, 1> &res)\" hpp = hpp + signature +", "/ 2.0, True) ) tmp2 = Piecewise( (a / 2, delta < 1e-10),", "import * import os import re import argparse # local import pretty_print def", "&m, \" signature += \"Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3, 1> &res)\" hpp =", "cpp = cpp + \"namespace polyfem {\\nnamespace autogen \" + \"{\\n\" hpp =", "c99.replace(\" M_PI\", \" T(M_PI)\") signature = \"template<typename T>\\nvoid eigs_\" + str(dim) + \"d(const", "__name__ == \"__main__\": args = parse_args() dims = [2, 3] cpp = \"#include", "description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"output\", type=str, help=\"path to the output folder\") return parser.parse_args() if __name__", "zeros(dim, dim) for i in range(0, dim): for j in range(0, dim): if", ") tmp2 = Piecewise( (b / 3.0, p < 1e-10), ((b + p", "= eigs_2d(M) else: lambdas = eigs_3d(M) # lambdas = simplify(lambdas) c99 = pretty_print.C99_print(lambdas)", "x >= 1.0), (pi, x <= -1.0), (acos(x), True)) 
return tmp.subs(x, x) def", "\"{\\n\" hpp = hpp + \"template<typename T>\\nT int_pow(T val, int exp) { T", "= Piecewise( (a / 2, delta < 1e-10), ((a + sqrt(delta)) / 2.0,", "+= 2.0 * (mat[0] * sqr(mat[0]) + mat[4] * sqr(mat[4]) + mat[8] *", "3.0, True) ) tmp3 = Piecewise( (b / 3.0, p < 1e-10), ((b", "18.0 * (mat[0] * mat[4] * mat[8] + 3.0 * mat[1] * mat[2]", "lambdaa = Symbol('lambda', real=True) for dim in dims: print(\"processing \" + str(dim)) M", "mat[8]) + sqr(mat[4] - mat[8])) p += 3.0 * t q = 18.0", "= 2.0 * sqrt(p) tmp1 = Piecewise( (b / 3.0, p < 1e-10),", "* (mat[0] * mat[4] * mat[8] + 3.0 * mat[1] * mat[2] *", "(mat[0] * mat[4] * mat[8] + 3.0 * mat[1] * mat[2] * mat[5])", "+ mat[4] * sqr(mat[2]) + mat[8] * sqr(mat[1])) delta = trunc_acos(0.5 * q", "i in range(0, dim): for j in range(0, dim): if i <= j:", "signature = \"template<typename T>\\nvoid eigs_\" + str(dim) + \"d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0,", "\"\\n}}\\n\" hpp = hpp + \"\\n}}\\n\" path = os.path.abspath(args.output) print(\"saving...\") with open(os.path.join(path, \"auto_eigs.cpp\"),", "os.path.abspath(args.output) print(\"saving...\") with open(os.path.join(path, \"auto_eigs.cpp\"), \"w\") as file: file.write(cpp) with open(os.path.join(path, \"auto_eigs.hpp\"), \"w\")", "cpp = \"#include <polyfem/auto_eigs.hpp>\\n\\n\\n\" hpp = \"#pragma once\\n\\n#include <Eigen/Dense>\\n\\n\" cpp = cpp +", "def trunc_acos(x): tmp = Piecewise((0.0, x >= 1.0), (pi, x <= -1.0), (acos(x),", "local import pretty_print def sqr(a): return a * a def trunc_acos(x): tmp =", "+ \"template<typename T>\\nT int_pow(T val, int exp) { T res = exp <=0", "sympy import * from sympy.matrices import * import os import re import argparse", "Symbol('m[' + str(i) + ',' + str(j) + ']', real=True) else: M[i, j]", "+ ',' + str(i) + ']', real=True) if dim == 2: lambdas =", "eigs_2d(M) else: lambdas = eigs_3d(M) # lambdas = simplify(lambdas) c99 = pretty_print.C99_print(lambdas) c99", "\"\\n\" cpp = cpp + \"\\n}}\\n\" hpp = hpp + \"\\n}}\\n\" path =", "lambdas = simplify(lambdas) c99 = pretty_print.C99_print(lambdas) c99 = re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\", r'm(\\1,\\2)', c99) c99 =", "signature += \"Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3, 1> &res)\" hpp = hpp +", "<polyfem/auto_eigs.hpp>\\n\\n\\n\" hpp = \"#pragma once\\n\\n#include <Eigen/Dense>\\n\\n\" cpp = cpp + \"namespace polyfem {\\nnamespace", "+= \"Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3, 1> &res)\" hpp = hpp + signature", "autogen \" + \"{\\n\" hpp = hpp + \"template<typename T>\\nT int_pow(T val, int", "p * cos((delta + 2.0 * pi) / 3.0)) / 3.0, True) )", "i < exp; ++i) res = res*val; return res; }\\n\\n\" lambdaa = Symbol('lambda',", "p) def parse_args(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"output\", type=str, help=\"path to the", "1e-10), ((a + sqrt(delta)) / 2.0, True) ) return tmp1.subs(delta, delta), tmp2.subs(delta, delta)", "tmp1.subs(p, p), tmp2.subs(p, p), tmp3.subs(p, p) def parse_args(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)", "mat[5]) q += 2.0 * (mat[0] * sqr(mat[0]) + mat[4] * sqr(mat[4]) +", "= Piecewise( (a / 2, delta < 1e-10), ((a - sqrt(delta)) / 2.0,", "+ mat[8]) * (mat[4] + mat[8]) q -= 27.0 * (mat[0] * sqr(mat[5])", "sqr(p))) p = 2.0 * sqrt(p) tmp1 = Piecewise( (b / 3.0, p", "+ \"\\n}}\\n\" hpp = hpp + \"\\n}}\\n\" path = os.path.abspath(args.output) print(\"saving...\") with 
open(os.path.join(path,", "mat[4]) * (mat[0] + mat[8]) * (mat[4] + mat[8]) q -= 27.0 *", "/ sqrt(p * sqr(p))) p = 2.0 * sqrt(p) tmp1 = Piecewise( (b", "for i in range(0, dim): for j in range(0, dim): if i <=", "+ sqrt(delta)) / 2.0, True) ) return tmp1.subs(delta, delta), tmp2.subs(delta, delta) def eigs_3d(mat):", "\"T(0)\") c99 = c99.replace(\" M_PI\", \" T(M_PI)\") signature = \"template<typename T>\\nvoid eigs_\" +", "\" + \"{\\n\" hpp = hpp + \"template<typename T>\\nT int_pow(T val, int exp)", "q -= 27.0 * (mat[0] * sqr(mat[5]) + mat[4] * sqr(mat[2]) + mat[8]", "for j in range(0, dim): if i <= j: M[i, j] = Symbol('m['", "p += 3.0 * t q = 18.0 * (mat[0] * mat[4] *", "c99 = c99.replace(\" M_PI\", \" T(M_PI)\") signature = \"template<typename T>\\nvoid eigs_\" + str(dim)", "1])**2 + 4 * mat[0, 1]**2 tmp1 = Piecewise( (a / 2, delta", "((b + p * cos(delta / 3.0)) / 3.0, True) ) tmp2 =", "dim) for i in range(0, dim): for j in range(0, dim): if i", "q = 18.0 * (mat[0] * mat[4] * mat[8] + 3.0 * mat[1]", "val; for(int i = 1; i < exp; ++i) res = res*val; return", "res*val; return res; }\\n\\n\" lambdaa = Symbol('lambda', real=True) for dim in dims: print(\"processing", "/ 3.0, True) ) tmp3 = Piecewise( (b / 3.0, p < 1e-10),", "\"namespace polyfem {\\nnamespace autogen \" + \"{\\n\" hpp = hpp + \"namespace polyfem", "help=\"path to the output folder\") return parser.parse_args() if __name__ == \"__main__\": args =", "lambdas = eigs_2d(M) else: lambdas = eigs_3d(M) # lambdas = simplify(lambdas) c99 =", "for dim in dims: print(\"processing \" + str(dim)) M = zeros(dim, dim) for", "cpp = cpp + \"\\n}}\\n\" hpp = hpp + \"\\n}}\\n\" path = os.path.abspath(args.output)", "def parse_args(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"output\", type=str, help=\"path to the output", "+ str(dim) + \"d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &m, \" signature", "tmp2 = Piecewise( (b / 3.0, p < 1e-10), ((b + p *", "+ str(dim)) M = zeros(dim, dim) for i in range(0, dim): for j", "{\\nres.resize(\" + str(dim) + \");\\n\" + c99 + \"\\n}\\n\\n\" cpp = cpp +", "\"{\\n\" hpp = hpp + \"namespace polyfem {\\nnamespace autogen \" + \"{\\n\" hpp", "= \"#include <polyfem/auto_eigs.hpp>\\n\\n\\n\" hpp = \"#pragma once\\n\\n#include <Eigen/Dense>\\n\\n\" cpp = cpp + \"namespace", "(mat[0, 0] - mat[1, 1])**2 + 4 * mat[0, 1]**2 tmp1 = Piecewise(", "/ 3.0, p < 1e-10), ((b + p * cos(delta / 3.0)) /", "int_pow(T val, int exp) { T res = exp <=0 ? 
T(0.): val;", "real=True) for dim in dims: print(\"processing \" + str(dim)) M = zeros(dim, dim)", "T(M_PI)\") signature = \"template<typename T>\\nvoid eigs_\" + str(dim) + \"d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic,", "3.0, p < 1e-10), ((b + p * cos(delta / 3.0)) / 3.0,", "hpp = hpp + \"template<typename T>\\nT int_pow(T val, int exp) { T res", "&res)\" hpp = hpp + signature + \" {\\nres.resize(\" + str(dim) + \");\\n\"", "(mat[0] + mat[4]) * (mat[0] + mat[8]) * (mat[4] + mat[8]) q -=", "def sqr(a): return a * a def trunc_acos(x): tmp = Piecewise((0.0, x >=", "+ ']', real=True) else: M[i, j] = Symbol('m[' + str(j) + ',' +", "+= 9.0 * b * t q -= 3.0 * (mat[0] + mat[4])", "True)) return tmp.subs(x, x) def eigs_2d(mat): a = mat[0, 0] + mat[1, 1]", "* sqr(mat[4]) + mat[8] * sqr(mat[8])) q += 9.0 * b * t", "\"#include <polyfem/auto_eigs.hpp>\\n\\n\\n\" hpp = \"#pragma once\\n\\n#include <Eigen/Dense>\\n\\n\" cpp = cpp + \"namespace polyfem", "argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"output\", type=str, help=\"path to the output folder\") return parser.parse_args() if", "((a - sqrt(delta)) / 2.0, True) ) tmp2 = Piecewise( (a / 2,", "c99) c99 = c99.replace(\"0.0\", \"T(0)\") c99 = c99.replace(\" M_PI\", \" T(M_PI)\") signature =", "3.0)) / 3.0, True) ) tmp3 = Piecewise( (b / 3.0, p <", "= Symbol('lambda', real=True) for dim in dims: print(\"processing \" + str(dim)) M =", "sqr(mat[1]) + sqr(mat[2]) + sqr(mat[5]) p = 0.5 * (sqr(mat[0] - mat[4]) +", "3.0)) / 3.0, True) ) tmp2 = Piecewise( (b / 3.0, p <", "< 1e-10), ((a + sqrt(delta)) / 2.0, True) ) return tmp1.subs(delta, delta), tmp2.subs(delta,", "args = parse_args() dims = [2, 3] cpp = \"#include <polyfem/auto_eigs.hpp>\\n\\n\\n\" hpp =", "= Piecewise((0.0, x >= 1.0), (pi, x <= -1.0), (acos(x), True)) return tmp.subs(x,", "Symbol('lambda', real=True) for dim in dims: print(\"processing \" + str(dim)) M = zeros(dim,", "real=True) else: M[i, j] = Symbol('m[' + str(j) + ',' + str(i) +", "cpp = cpp + \"\\n\" hpp = hpp + \"\\n\" cpp = cpp", "T res = exp <=0 ? 
T(0.): val; for(int i = 1; i", "* (mat[0] + mat[4]) * (mat[0] + mat[8]) * (mat[4] + mat[8]) q", "str(j) + ']', real=True) else: M[i, j] = Symbol('m[' + str(j) + ','", "+ mat[8] t = sqr(mat[1]) + sqr(mat[2]) + sqr(mat[5]) p = 0.5 *", "2, delta < 1e-10), ((a + sqrt(delta)) / 2.0, True) ) return tmp1.subs(delta,", "dims: print(\"processing \" + str(dim)) M = zeros(dim, dim) for i in range(0,", "1]**2 tmp1 = Piecewise( (a / 2, delta < 1e-10), ((a - sqrt(delta))", "delta = (mat[0, 0] - mat[1, 1])**2 + 4 * mat[0, 1]**2 tmp1", "(a / 2, delta < 1e-10), ((a - sqrt(delta)) / 2.0, True) )", "{\\nnamespace autogen \" + \"{\\n\" hpp = hpp + \"template<typename T>\\nT int_pow(T val,", "+ 2.0 * pi) / 3.0)) / 3.0, True) ) tmp3 = Piecewise(", "hpp + \"\\n\" cpp = cpp + \"\\n}}\\n\" hpp = hpp + \"\\n}}\\n\"", "2: lambdas = eigs_2d(M) else: lambdas = eigs_3d(M) # lambdas = simplify(lambdas) c99", "if dim == 2: lambdas = eigs_2d(M) else: lambdas = eigs_3d(M) # lambdas", "2.0 * pi) / 3.0)) / 3.0, True) ) return tmp1.subs(p, p), tmp2.subs(p,", "((b + p * cos((delta - 2.0 * pi) / 3.0)) / 3.0,", "mat[1, 1] delta = (mat[0, 0] - mat[1, 1])**2 + 4 * mat[0,", "c99) c99 = re.sub(r\"result_(\\d{1})\", r'res(\\1)', c99) c99 = c99.replace(\"0.0\", \"T(0)\") c99 = c99.replace(\"", "mat[8])) p += 3.0 * t q = 18.0 * (mat[0] * mat[4]", "formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"output\", type=str, help=\"path to the output folder\") return parser.parse_args() if __name__ ==", "p < 1e-10), ((b + p * cos((delta + 2.0 * pi) /", "+ sqr(mat[5]) p = 0.5 * (sqr(mat[0] - mat[4]) + sqr(mat[0] - mat[8])", "parser.add_argument(\"output\", type=str, help=\"path to the output folder\") return parser.parse_args() if __name__ == \"__main__\":", "/ 2.0, True) ) return tmp1.subs(delta, delta), tmp2.subs(delta, delta) def eigs_3d(mat): b =", "2.0, True) ) return tmp1.subs(delta, delta), tmp2.subs(delta, delta) def eigs_3d(mat): b = mat[0]", "- mat[8]) + sqr(mat[4] - mat[8])) p += 3.0 * t q =", "sqr(mat[4] - mat[8])) p += 3.0 * t q = 18.0 * (mat[0]", "* pi) / 3.0)) / 3.0, True) ) tmp3 = Piecewise( (b /", "[2, 3] cpp = \"#include <polyfem/auto_eigs.hpp>\\n\\n\\n\" hpp = \"#pragma once\\n\\n#include <Eigen/Dense>\\n\\n\" cpp =", "+ mat[1, 1] delta = (mat[0, 0] - mat[1, 1])**2 + 4 *", "tmp = Piecewise((0.0, x >= 1.0), (pi, x <= -1.0), (acos(x), True)) return", "= hpp + \"template<typename T>\\nT int_pow(T val, int exp) { T res =", "Symbol('m[' + str(j) + ',' + str(i) + ']', real=True) if dim ==", "== 2: lambdas = eigs_2d(M) else: lambdas = eigs_3d(M) # lambdas = simplify(lambdas)", "val, int exp) { T res = exp <=0 ? T(0.): val; for(int", "= \"#pragma once\\n\\n#include <Eigen/Dense>\\n\\n\" cpp = cpp + \"namespace polyfem {\\nnamespace autogen \"", "pretty_print.C99_print(lambdas) c99 = re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\", r'm(\\1,\\2)', c99) c99 = re.sub(r\"result_(\\d{1})\", r'res(\\1)', c99) c99 =", "* (mat[0] + mat[8]) * (mat[4] + mat[8]) q -= 27.0 * (mat[0]", "0.5 * (sqr(mat[0] - mat[4]) + sqr(mat[0] - mat[8]) + sqr(mat[4] - mat[8]))", "* sqr(mat[0]) + mat[4] * sqr(mat[4]) + mat[8] * sqr(mat[8])) q += 9.0", "\" + str(dim)) M = zeros(dim, dim) for i in range(0, dim): for", "+ c99 + \"\\n}\\n\\n\" cpp = cpp + \"\\n\" hpp = hpp +", "* t q -= 3.0 * (mat[0] + mat[4]) * (mat[0] + mat[8])", "delta), tmp2.subs(delta, delta) def eigs_3d(mat): b = mat[0] + mat[4] + mat[8] t", "* cos(delta / 3.0)) / 3.0, True) ) tmp2 = Piecewise( (b /", "= exp <=0 ? 
T(0.): val; for(int i = 1; i < exp;", "= \"template<typename T>\\nvoid eigs_\" + str(dim) + \"d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3,", "= argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"output\", type=str, help=\"path to the output folder\") return parser.parse_args()", "res; }\\n\\n\" lambdaa = Symbol('lambda', real=True) for dim in dims: print(\"processing \" +", "Eigen::Dynamic, 1, 0, 3, 1> &res)\" hpp = hpp + signature + \"", "sqr(mat[2]) + mat[8] * sqr(mat[1])) delta = trunc_acos(0.5 * q / sqrt(p *", "\"template<typename T>\\nT int_pow(T val, int exp) { T res = exp <=0 ?", "tmp2.subs(p, p), tmp3.subs(p, p) def parse_args(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"output\", type=str,", "0, 3, 3> &m, \" signature += \"Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3, 1>", "+ p * cos((delta - 2.0 * pi) / 3.0)) / 3.0, True)", "once\\n\\n#include <Eigen/Dense>\\n\\n\" cpp = cpp + \"namespace polyfem {\\nnamespace autogen \" + \"{\\n\"", "3.0 * mat[1] * mat[2] * mat[5]) q += 2.0 * (mat[0] *", "Piecewise( (b / 3.0, p < 1e-10), ((b + p * cos((delta +", "Eigen::Dynamic, 0, 3, 3> &m, \" signature += \"Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3,", "* cos((delta + 2.0 * pi) / 3.0)) / 3.0, True) ) tmp3", "polyfem {\\nnamespace autogen \" + \"{\\n\" hpp = hpp + \"namespace polyfem {\\nnamespace", "= re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\", r'm(\\1,\\2)', c99) c99 = re.sub(r\"result_(\\d{1})\", r'res(\\1)', c99) c99 = c99.replace(\"0.0\", \"T(0)\")", "if i <= j: M[i, j] = Symbol('m[' + str(i) + ',' +", "* sqr(mat[8])) q += 9.0 * b * t q -= 3.0 *", "= Piecewise( (b / 3.0, p < 1e-10), ((b + p * cos((delta", "M[i, j] = Symbol('m[' + str(j) + ',' + str(i) + ']', real=True)", "+ sqr(mat[4] - mat[8])) p += 3.0 * t q = 18.0 *", "hpp = hpp + \"\\n\" cpp = cpp + \"\\n}}\\n\" hpp = hpp", "(acos(x), True)) return tmp.subs(x, x) def eigs_2d(mat): a = mat[0, 0] + mat[1,", "+ mat[4]) * (mat[0] + mat[8]) * (mat[4] + mat[8]) q -= 27.0", "+ mat[8] * sqr(mat[8])) q += 9.0 * b * t q -=", "str(i) + ']', real=True) if dim == 2: lambdas = eigs_2d(M) else: lambdas", "a def trunc_acos(x): tmp = Piecewise((0.0, x >= 1.0), (pi, x <= -1.0),", "* mat[4] * mat[8] + 3.0 * mat[1] * mat[2] * mat[5]) q", "= Piecewise( (b / 3.0, p < 1e-10), ((b + p * cos(delta", "range(0, dim): for j in range(0, dim): if i <= j: M[i, j]", "folder\") return parser.parse_args() if __name__ == \"__main__\": args = parse_args() dims = [2,", "\" signature += \"Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3, 1> &res)\" hpp = hpp", "cpp + \"\\n}}\\n\" hpp = hpp + \"\\n}}\\n\" path = os.path.abspath(args.output) print(\"saving...\") with", "* mat[1] * mat[2] * mat[5]) q += 2.0 * (mat[0] * sqr(mat[0])", "* cos((delta - 2.0 * pi) / 3.0)) / 3.0, True) ) return", "= zeros(dim, dim) for i in range(0, dim): for j in range(0, dim):", "+ \"{\\n\" hpp = hpp + \"template<typename T>\\nT int_pow(T val, int exp) {", "= Symbol('m[' + str(j) + ',' + str(i) + ']', real=True) if dim", "1e-10), ((b + p * cos((delta - 2.0 * pi) / 3.0)) /", "\" + \"{\\n\" hpp = hpp + \"namespace polyfem {\\nnamespace autogen \" +", "hpp + \"\\n}}\\n\" path = os.path.abspath(args.output) print(\"saving...\") with open(os.path.join(path, \"auto_eigs.cpp\"), \"w\") as file:", "def eigs_3d(mat): b = mat[0] + mat[4] + mat[8] t = sqr(mat[1]) +", "((a + sqrt(delta)) / 2.0, True) ) return tmp1.subs(delta, delta), tmp2.subs(delta, 
delta) def", "* q / sqrt(p * sqr(p))) p = 2.0 * sqrt(p) tmp1 =", "c99 = c99.replace(\"0.0\", \"T(0)\") c99 = c99.replace(\" M_PI\", \" T(M_PI)\") signature = \"template<typename", "hpp = \"#pragma once\\n\\n#include <Eigen/Dense>\\n\\n\" cpp = cpp + \"namespace polyfem {\\nnamespace autogen", "< 1e-10), ((b + p * cos((delta + 2.0 * pi) / 3.0))", "(mat[0] + mat[8]) * (mat[4] + mat[8]) q -= 27.0 * (mat[0] *", "print(\"processing \" + str(dim)) M = zeros(dim, dim) for i in range(0, dim):", "<Eigen/Dense>\\n\\n\" cpp = cpp + \"namespace polyfem {\\nnamespace autogen \" + \"{\\n\" hpp", "sqr(mat[8])) q += 9.0 * b * t q -= 3.0 * (mat[0]", "in dims: print(\"processing \" + str(dim)) M = zeros(dim, dim) for i in", "\"#pragma once\\n\\n#include <Eigen/Dense>\\n\\n\" cpp = cpp + \"namespace polyfem {\\nnamespace autogen \" +", "0, 3, 1> &res)\" hpp = hpp + signature + \" {\\nres.resize(\" +", "sqr(mat[4]) + mat[8] * sqr(mat[8])) q += 9.0 * b * t q", "t q -= 3.0 * (mat[0] + mat[4]) * (mat[0] + mat[8]) *", "* mat[5]) q += 2.0 * (mat[0] * sqr(mat[0]) + mat[4] * sqr(mat[4])", "+ \"namespace polyfem {\\nnamespace autogen \" + \"{\\n\" hpp = hpp + \"namespace", "+ str(i) + ',' + str(j) + ']', real=True) else: M[i, j] =", "parse_args() dims = [2, 3] cpp = \"#include <polyfem/auto_eigs.hpp>\\n\\n\\n\" hpp = \"#pragma once\\n\\n#include", "dims = [2, 3] cpp = \"#include <polyfem/auto_eigs.hpp>\\n\\n\\n\" hpp = \"#pragma once\\n\\n#include <Eigen/Dense>\\n\\n\"", "+= 3.0 * t q = 18.0 * (mat[0] * mat[4] * mat[8]", "parser.parse_args() if __name__ == \"__main__\": args = parse_args() dims = [2, 3] cpp", "mat[4] * sqr(mat[2]) + mat[8] * sqr(mat[1])) delta = trunc_acos(0.5 * q /", "cpp + \"\\n\" hpp = hpp + \"\\n\" cpp = cpp + \"\\n}}\\n\"", "hpp = hpp + \"namespace polyfem {\\nnamespace autogen \" + \"{\\n\" hpp =", "str(dim) + \"d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &m, \" signature +=", "+ sqr(mat[2]) + sqr(mat[5]) p = 0.5 * (sqr(mat[0] - mat[4]) + sqr(mat[0]", "(a / 2, delta < 1e-10), ((a + sqrt(delta)) / 2.0, True) )", "/ 3.0, p < 1e-10), ((b + p * cos((delta - 2.0 *", "eigs_\" + str(dim) + \"d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &m, \"", "str(i) + ',' + str(j) + ']', real=True) else: M[i, j] = Symbol('m['", "= sqr(mat[1]) + sqr(mat[2]) + sqr(mat[5]) p = 0.5 * (sqr(mat[0] - mat[4])", "Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &m, \" signature += \"Eigen::Matrix<T, Eigen::Dynamic, 1, 0,", "* b * t q -= 3.0 * (mat[0] + mat[4]) * (mat[0]", "+ \"namespace polyfem {\\nnamespace autogen \" + \"{\\n\" hpp = hpp + \"template<typename", "sqr(mat[2]) + sqr(mat[5]) p = 0.5 * (sqr(mat[0] - mat[4]) + sqr(mat[0] -", "- sqrt(delta)) / 2.0, True) ) tmp2 = Piecewise( (a / 2, delta", "c99 = pretty_print.C99_print(lambdas) c99 = re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\", r'm(\\1,\\2)', c99) c99 = re.sub(r\"result_(\\d{1})\", r'res(\\1)', c99)", "pretty_print def sqr(a): return a * a def trunc_acos(x): tmp = Piecewise((0.0, x", "* (mat[0] * sqr(mat[5]) + mat[4] * sqr(mat[2]) + mat[8] * sqr(mat[1])) delta", "= hpp + \"\\n}}\\n\" path = os.path.abspath(args.output) print(\"saving...\") with open(os.path.join(path, \"auto_eigs.cpp\"), \"w\") as", "import os import re import argparse # local import pretty_print def sqr(a): return", "parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"output\", type=str, help=\"path to the output folder\") return", "+ signature + \" {\\nres.resize(\" + str(dim) + \");\\n\" 
+ c99 + \"\\n}\\n\\n\"", "\" {\\nres.resize(\" + str(dim) + \");\\n\" + c99 + \"\\n}\\n\\n\" cpp = cpp", "\"\\n\" hpp = hpp + \"\\n\" cpp = cpp + \"\\n}}\\n\" hpp =", "sqrt(p * sqr(p))) p = 2.0 * sqrt(p) tmp1 = Piecewise( (b /", "* (mat[4] + mat[8]) q -= 27.0 * (mat[0] * sqr(mat[5]) + mat[4]", "= eigs_3d(M) # lambdas = simplify(lambdas) c99 = pretty_print.C99_print(lambdas) c99 = re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\", r'm(\\1,\\2)',", "3.0, True) ) return tmp1.subs(p, p), tmp2.subs(p, p), tmp3.subs(p, p) def parse_args(): parser", "= pretty_print.C99_print(lambdas) c99 = re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\", r'm(\\1,\\2)', c99) c99 = re.sub(r\"result_(\\d{1})\", r'res(\\1)', c99) c99", "-= 27.0 * (mat[0] * sqr(mat[5]) + mat[4] * sqr(mat[2]) + mat[8] *", "= trunc_acos(0.5 * q / sqrt(p * sqr(p))) p = 2.0 * sqrt(p)", "(b / 3.0, p < 1e-10), ((b + p * cos((delta + 2.0", "{ T res = exp <=0 ? T(0.): val; for(int i = 1;", "1; i < exp; ++i) res = res*val; return res; }\\n\\n\" lambdaa =", "= (mat[0, 0] - mat[1, 1])**2 + 4 * mat[0, 1]**2 tmp1 =", "+ p * cos((delta + 2.0 * pi) / 3.0)) / 3.0, True)", "q += 9.0 * b * t q -= 3.0 * (mat[0] +", "dim): if i <= j: M[i, j] = Symbol('m[' + str(i) + ','", "p), tmp3.subs(p, p) def parse_args(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"output\", type=str, help=\"path", "(b / 3.0, p < 1e-10), ((b + p * cos(delta / 3.0))", "def eigs_2d(mat): a = mat[0, 0] + mat[1, 1] delta = (mat[0, 0]", "-1.0), (acos(x), True)) return tmp.subs(x, x) def eigs_2d(mat): a = mat[0, 0] +", "p = 0.5 * (sqr(mat[0] - mat[4]) + sqr(mat[0] - mat[8]) + sqr(mat[4]", "+ \");\\n\" + c99 + \"\\n}\\n\\n\" cpp = cpp + \"\\n\" hpp =", "sqr(mat[0] - mat[8]) + sqr(mat[4] - mat[8])) p += 3.0 * t q", "the output folder\") return parser.parse_args() if __name__ == \"__main__\": args = parse_args() dims", "* import os import re import argparse # local import pretty_print def sqr(a):", "cos(delta / 3.0)) / 3.0, True) ) tmp2 = Piecewise( (b / 3.0,", "= cpp + \"\\n}}\\n\" hpp = hpp + \"\\n}}\\n\" path = os.path.abspath(args.output) print(\"saving...\")", "import re import argparse # local import pretty_print def sqr(a): return a *", "\"\\n}}\\n\" path = os.path.abspath(args.output) print(\"saving...\") with open(os.path.join(path, \"auto_eigs.cpp\"), \"w\") as file: file.write(cpp) with", "mat[1] * mat[2] * mat[5]) q += 2.0 * (mat[0] * sqr(mat[0]) +", "3] cpp = \"#include <polyfem/auto_eigs.hpp>\\n\\n\\n\" hpp = \"#pragma once\\n\\n#include <Eigen/Dense>\\n\\n\" cpp = cpp", "res = exp <=0 ? 
T(0.): val; for(int i = 1; i <", "+ str(j) + ',' + str(i) + ']', real=True) if dim == 2:", "hpp + signature + \" {\\nres.resize(\" + str(dim) + \");\\n\" + c99 +", "r'res(\\1)', c99) c99 = c99.replace(\"0.0\", \"T(0)\") c99 = c99.replace(\" M_PI\", \" T(M_PI)\") signature", "c99 + \"\\n}\\n\\n\" cpp = cpp + \"\\n\" hpp = hpp + \"\\n\"", "\"template<typename T>\\nvoid eigs_\" + str(dim) + \"d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3>", "i = 1; i < exp; ++i) res = res*val; return res; }\\n\\n\"", "\"namespace polyfem {\\nnamespace autogen \" + \"{\\n\" hpp = hpp + \"template<typename T>\\nT", "+ \" {\\nres.resize(\" + str(dim) + \");\\n\" + c99 + \"\\n}\\n\\n\" cpp =", ") return tmp1.subs(delta, delta), tmp2.subs(delta, delta) def eigs_3d(mat): b = mat[0] + mat[4]", "* a def trunc_acos(x): tmp = Piecewise((0.0, x >= 1.0), (pi, x <=", "/ 3.0, True) ) return tmp1.subs(p, p), tmp2.subs(p, p), tmp3.subs(p, p) def parse_args():", "3, 3> &m, \" signature += \"Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3, 1> &res)\"", "\"\\n}\\n\\n\" cpp = cpp + \"\\n\" hpp = hpp + \"\\n\" cpp =", "Piecewise( (a / 2, delta < 1e-10), ((a - sqrt(delta)) / 2.0, True)", "++i) res = res*val; return res; }\\n\\n\" lambdaa = Symbol('lambda', real=True) for dim", "True) ) tmp2 = Piecewise( (b / 3.0, p < 1e-10), ((b +", "1e-10), ((a - sqrt(delta)) / 2.0, True) ) tmp2 = Piecewise( (a /", "+ \"d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &m, \" signature += \"Eigen::Matrix<T,", "+ ',' + str(j) + ']', real=True) else: M[i, j] = Symbol('m[' +", "open(os.path.join(path, \"auto_eigs.cpp\"), \"w\") as file: file.write(cpp) with open(os.path.join(path, \"auto_eigs.hpp\"), \"w\") as file: file.write(hpp)", "sqr(mat[5]) + mat[4] * sqr(mat[2]) + mat[8] * sqr(mat[1])) delta = trunc_acos(0.5 *", "j: M[i, j] = Symbol('m[' + str(i) + ',' + str(j) + ']',", "q / sqrt(p * sqr(p))) p = 2.0 * sqrt(p) tmp1 = Piecewise(", "mat[0, 0] + mat[1, 1] delta = (mat[0, 0] - mat[1, 1])**2 +", "exp; ++i) res = res*val; return res; }\\n\\n\" lambdaa = Symbol('lambda', real=True) for", "tmp1 = Piecewise( (a / 2, delta < 1e-10), ((a - sqrt(delta)) /", "pi) / 3.0)) / 3.0, True) ) tmp3 = Piecewise( (b / 3.0,", "p), tmp2.subs(p, p), tmp3.subs(p, p) def parse_args(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"output\",", "import argparse # local import pretty_print def sqr(a): return a * a def", "* from sympy.matrices import * import os import re import argparse # local", "return a * a def trunc_acos(x): tmp = Piecewise((0.0, x >= 1.0), (pi,", "+ \"\\n}\\n\\n\" cpp = cpp + \"\\n\" hpp = hpp + \"\\n\" cpp", "/ 3.0)) / 3.0, True) ) tmp3 = Piecewise( (b / 3.0, p", "hpp + \"namespace polyfem {\\nnamespace autogen \" + \"{\\n\" hpp = hpp +", "True) ) tmp2 = Piecewise( (a / 2, delta < 1e-10), ((a +", "< exp; ++i) res = res*val; return res; }\\n\\n\" lambdaa = Symbol('lambda', real=True)", "(sqr(mat[0] - mat[4]) + sqr(mat[0] - mat[8]) + sqr(mat[4] - mat[8])) p +=", "re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\", r'm(\\1,\\2)', c99) c99 = re.sub(r\"result_(\\d{1})\", r'res(\\1)', c99) c99 = c99.replace(\"0.0\", \"T(0)\") c99", "# local import pretty_print def sqr(a): return a * a def trunc_acos(x): tmp", "eigs_2d(mat): a = mat[0, 0] + mat[1, 1] delta = (mat[0, 0] -", "in range(0, dim): if i <= j: M[i, j] = Symbol('m[' + str(i)", "2.0, True) ) tmp2 = Piecewise( (a / 2, delta < 1e-10), ((a", "True) ) return tmp1.subs(delta, delta), tmp2.subs(delta, delta) 
def eigs_3d(mat): b = mat[0] +", "return tmp.subs(x, x) def eigs_2d(mat): a = mat[0, 0] + mat[1, 1] delta", "= simplify(lambdas) c99 = pretty_print.C99_print(lambdas) c99 = re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\", r'm(\\1,\\2)', c99) c99 = re.sub(r\"result_(\\d{1})\",", "x <= -1.0), (acos(x), True)) return tmp.subs(x, x) def eigs_2d(mat): a = mat[0,", "* sqr(mat[5]) + mat[4] * sqr(mat[2]) + mat[8] * sqr(mat[1])) delta = trunc_acos(0.5", "+ str(dim) + \");\\n\" + c99 + \"\\n}\\n\\n\" cpp = cpp + \"\\n\"", "mat[8]) q -= 27.0 * (mat[0] * sqr(mat[5]) + mat[4] * sqr(mat[2]) +", "+ \"{\\n\" hpp = hpp + \"namespace polyfem {\\nnamespace autogen \" + \"{\\n\"", "mat[4]) + sqr(mat[0] - mat[8]) + sqr(mat[4] - mat[8])) p += 3.0 *", "- mat[8])) p += 3.0 * t q = 18.0 * (mat[0] *", "dim): for j in range(0, dim): if i <= j: M[i, j] =", "range(0, dim): if i <= j: M[i, j] = Symbol('m[' + str(i) +", "* t q = 18.0 * (mat[0] * mat[4] * mat[8] + 3.0", "1e-10), ((b + p * cos(delta / 3.0)) / 3.0, True) ) tmp2", "3.0, p < 1e-10), ((b + p * cos((delta - 2.0 * pi)", "re.sub(r\"result_(\\d{1})\", r'res(\\1)', c99) c99 = c99.replace(\"0.0\", \"T(0)\") c99 = c99.replace(\" M_PI\", \" T(M_PI)\")", "c99 = re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\", r'm(\\1,\\2)', c99) c99 = re.sub(r\"result_(\\d{1})\", r'res(\\1)', c99) c99 = c99.replace(\"0.0\",", "(pi, x <= -1.0), (acos(x), True)) return tmp.subs(x, x) def eigs_2d(mat): a =", "\");\\n\" + c99 + \"\\n}\\n\\n\" cpp = cpp + \"\\n\" hpp = hpp", "Piecewise( (a / 2, delta < 1e-10), ((a + sqrt(delta)) / 2.0, True)", "2.0 * sqrt(p) tmp1 = Piecewise( (b / 3.0, p < 1e-10), ((b", "from sympy.matrices import * import os import re import argparse # local import", "= cpp + \"namespace polyfem {\\nnamespace autogen \" + \"{\\n\" hpp = hpp", "hpp + \"template<typename T>\\nT int_pow(T val, int exp) { T res = exp", "sqr(mat[0]) + mat[4] * sqr(mat[4]) + mat[8] * sqr(mat[8])) q += 9.0 *", "r'm(\\1,\\2)', c99) c99 = re.sub(r\"result_(\\d{1})\", r'res(\\1)', c99) c99 = c99.replace(\"0.0\", \"T(0)\") c99 =", "M[i, j] = Symbol('m[' + str(i) + ',' + str(j) + ']', real=True)", "+ 4 * mat[0, 1]**2 tmp1 = Piecewise( (a / 2, delta <", "lambdas = eigs_3d(M) # lambdas = simplify(lambdas) c99 = pretty_print.C99_print(lambdas) c99 = re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\",", "+ \"\\n\" cpp = cpp + \"\\n}}\\n\" hpp = hpp + \"\\n}}\\n\" path", "sqrt(delta)) / 2.0, True) ) return tmp1.subs(delta, delta), tmp2.subs(delta, delta) def eigs_3d(mat): b", "sqr(mat[5]) p = 0.5 * (sqr(mat[0] - mat[4]) + sqr(mat[0] - mat[8]) +", "- mat[1, 1])**2 + 4 * mat[0, 1]**2 tmp1 = Piecewise( (a /", "True) ) tmp3 = Piecewise( (b / 3.0, p < 1e-10), ((b +", "+ mat[8] * sqr(mat[1])) delta = trunc_acos(0.5 * q / sqrt(p * sqr(p)))", "i <= j: M[i, j] = Symbol('m[' + str(i) + ',' + str(j)", "a * a def trunc_acos(x): tmp = Piecewise((0.0, x >= 1.0), (pi, x", "mat[8] t = sqr(mat[1]) + sqr(mat[2]) + sqr(mat[5]) p = 0.5 * (sqr(mat[0]", "T>\\nvoid eigs_\" + str(dim) + \"d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &m,", "Piecewise((0.0, x >= 1.0), (pi, x <= -1.0), (acos(x), True)) return tmp.subs(x, x)", "* sqr(mat[1])) delta = trunc_acos(0.5 * q / sqrt(p * sqr(p))) p =", "3.0)) / 3.0, True) ) return tmp1.subs(p, p), tmp2.subs(p, p), tmp3.subs(p, p) def", "\"d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &m, \" signature += \"Eigen::Matrix<T, Eigen::Dynamic,", "p * cos((delta - 2.0 * pi) / 3.0)) / 3.0, True) )", "sqrt(p) tmp1 = Piecewise( (b / 3.0, p < 1e-10), ((b + p", "return tmp1.subs(delta, delta), 
tmp2.subs(delta, delta) def eigs_3d(mat): b = mat[0] + mat[4] +", "eigs_3d(M) # lambdas = simplify(lambdas) c99 = pretty_print.C99_print(lambdas) c99 = re.sub(r\"m\\[(\\d{1}),(\\d{1})\\]\", r'm(\\1,\\2)', c99)", "< 1e-10), ((b + p * cos(delta / 3.0)) / 3.0, True) )", "\"auto_eigs.cpp\"), \"w\") as file: file.write(cpp) with open(os.path.join(path, \"auto_eigs.hpp\"), \"w\") as file: file.write(hpp) print(\"done!\")", "3, 1> &res)\" hpp = hpp + signature + \" {\\nres.resize(\" + str(dim)", "3.0 * t q = 18.0 * (mat[0] * mat[4] * mat[8] +", "p < 1e-10), ((b + p * cos((delta - 2.0 * pi) /", "print(\"saving...\") with open(os.path.join(path, \"auto_eigs.cpp\"), \"w\") as file: file.write(cpp) with open(os.path.join(path, \"auto_eigs.hpp\"), \"w\") as", "- 2.0 * pi) / 3.0)) / 3.0, True) ) return tmp1.subs(p, p),", "= os.path.abspath(args.output) print(\"saving...\") with open(os.path.join(path, \"auto_eigs.cpp\"), \"w\") as file: file.write(cpp) with open(os.path.join(path, \"auto_eigs.hpp\"),", "+ mat[4] + mat[8] t = sqr(mat[1]) + sqr(mat[2]) + sqr(mat[5]) p =", "- mat[4]) + sqr(mat[0] - mat[8]) + sqr(mat[4] - mat[8])) p += 3.0", "+ p * cos(delta / 3.0)) / 3.0, True) ) tmp2 = Piecewise(", "mat[2] * mat[5]) q += 2.0 * (mat[0] * sqr(mat[0]) + mat[4] *", "delta = trunc_acos(0.5 * q / sqrt(p * sqr(p))) p = 2.0 *", "str(j) + ',' + str(i) + ']', real=True) if dim == 2: lambdas", "hpp = hpp + signature + \" {\\nres.resize(\" + str(dim) + \");\\n\" +", "= hpp + \"namespace polyfem {\\nnamespace autogen \" + \"{\\n\" hpp = hpp", "tmp1.subs(delta, delta), tmp2.subs(delta, delta) def eigs_3d(mat): b = mat[0] + mat[4] + mat[8]", "+ ']', real=True) if dim == 2: lambdas = eigs_2d(M) else: lambdas =", "= [2, 3] cpp = \"#include <polyfem/auto_eigs.hpp>\\n\\n\\n\" hpp = \"#pragma once\\n\\n#include <Eigen/Dense>\\n\\n\" cpp", "T(0.): val; for(int i = 1; i < exp; ++i) res = res*val;", "+ str(i) + ']', real=True) if dim == 2: lambdas = eigs_2d(M) else:", "mat[0, 1]**2 tmp1 = Piecewise( (a / 2, delta < 1e-10), ((a -", "j] = Symbol('m[' + str(i) + ',' + str(j) + ']', real=True) else:", "b * t q -= 3.0 * (mat[0] + mat[4]) * (mat[0] +", "* pi) / 3.0)) / 3.0, True) ) return tmp1.subs(p, p), tmp2.subs(p, p),", "t = sqr(mat[1]) + sqr(mat[2]) + sqr(mat[5]) p = 0.5 * (sqr(mat[0] -", "int exp) { T res = exp <=0 ? T(0.): val; for(int i", "= 1; i < exp; ++i) res = res*val; return res; }\\n\\n\" lambdaa", "= mat[0, 0] + mat[1, 1] delta = (mat[0, 0] - mat[1, 1])**2", ">= 1.0), (pi, x <= -1.0), (acos(x), True)) return tmp.subs(x, x) def eigs_2d(mat):", "sqr(a): return a * a def trunc_acos(x): tmp = Piecewise((0.0, x >= 1.0),", "/ 2, delta < 1e-10), ((a - sqrt(delta)) / 2.0, True) ) tmp2", "/ 3.0, p < 1e-10), ((b + p * cos((delta + 2.0 *", "/ 3.0)) / 3.0, True) ) tmp2 = Piecewise( (b / 3.0, p", "3> &m, \" signature += \"Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3, 1> &res)\" hpp", "/ 3.0)) / 3.0, True) ) return tmp1.subs(p, p), tmp2.subs(p, p), tmp3.subs(p, p)", "mat[8] + 3.0 * mat[1] * mat[2] * mat[5]) q += 2.0 *", "* sqr(p))) p = 2.0 * sqrt(p) tmp1 = Piecewise( (b / 3.0,", "',' + str(i) + ']', real=True) if dim == 2: lambdas = eigs_2d(M)", "= Symbol('m[' + str(i) + ',' + str(j) + ']', real=True) else: M[i,", "* mat[2] * mat[5]) q += 2.0 * (mat[0] * sqr(mat[0]) + mat[4]", "from sympy import * from sympy.matrices import * import os import re import", "{\\nnamespace autogen \" + \"{\\n\" hpp = hpp + \"namespace polyfem {\\nnamespace autogen", "exp) { T res = exp <=0 ? 
T(0.): val; for(int i =", "re import argparse # local import pretty_print def sqr(a): return a * a", "* sqr(mat[2]) + mat[8] * sqr(mat[1])) delta = trunc_acos(0.5 * q / sqrt(p", "+ 3.0 * mat[1] * mat[2] * mat[5]) q += 2.0 * (mat[0]", "p * cos(delta / 3.0)) / 3.0, True) ) tmp2 = Piecewise( (b", "-= 3.0 * (mat[0] + mat[4]) * (mat[0] + mat[8]) * (mat[4] +", "+ mat[4] * sqr(mat[4]) + mat[8] * sqr(mat[8])) q += 9.0 * b", "? T(0.): val; for(int i = 1; i < exp; ++i) res =", ") return tmp1.subs(p, p), tmp2.subs(p, p), tmp3.subs(p, p) def parse_args(): parser = argparse.ArgumentParser(", "== \"__main__\": args = parse_args() dims = [2, 3] cpp = \"#include <polyfem/auto_eigs.hpp>\\n\\n\\n\"", "p < 1e-10), ((b + p * cos(delta / 3.0)) / 3.0, True)", "output folder\") return parser.parse_args() if __name__ == \"__main__\": args = parse_args() dims =", "= 18.0 * (mat[0] * mat[4] * mat[8] + 3.0 * mat[1] *", "cpp + \"namespace polyfem {\\nnamespace autogen \" + \"{\\n\" hpp = hpp +", "+ str(j) + ']', real=True) else: M[i, j] = Symbol('m[' + str(j) +", "import pretty_print def sqr(a): return a * a def trunc_acos(x): tmp = Piecewise((0.0,", "sympy.matrices import * import os import re import argparse # local import pretty_print", "else: M[i, j] = Symbol('m[' + str(j) + ',' + str(i) + ']',", "t q = 18.0 * (mat[0] * mat[4] * mat[8] + 3.0 *", "cos((delta - 2.0 * pi) / 3.0)) / 3.0, True) ) return tmp1.subs(p,", "for(int i = 1; i < exp; ++i) res = res*val; return res;", "= c99.replace(\"0.0\", \"T(0)\") c99 = c99.replace(\" M_PI\", \" T(M_PI)\") signature = \"template<typename T>\\nvoid", "3.0, p < 1e-10), ((b + p * cos((delta + 2.0 * pi)", "x) def eigs_2d(mat): a = mat[0, 0] + mat[1, 1] delta = (mat[0,", "Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &m, \" signature += \"Eigen::Matrix<T, Eigen::Dynamic, 1,", "<=0 ? 
T(0.): val; for(int i = 1; i < exp; ++i) res", ") tmp2 = Piecewise( (a / 2, delta < 1e-10), ((a + sqrt(delta))", "import * from sympy.matrices import * import os import re import argparse #", "dim in dims: print(\"processing \" + str(dim)) M = zeros(dim, dim) for i", "mat[4] + mat[8] t = sqr(mat[1]) + sqr(mat[2]) + sqr(mat[5]) p = 0.5", "+ \"\\n\" hpp = hpp + \"\\n\" cpp = cpp + \"\\n}}\\n\" hpp", "(mat[4] + mat[8]) q -= 27.0 * (mat[0] * sqr(mat[5]) + mat[4] *", "c99.replace(\"0.0\", \"T(0)\") c99 = c99.replace(\" M_PI\", \" T(M_PI)\") signature = \"template<typename T>\\nvoid eigs_\"", "Piecewise( (b / 3.0, p < 1e-10), ((b + p * cos((delta -", "(b / 3.0, p < 1e-10), ((b + p * cos((delta - 2.0", "1.0), (pi, x <= -1.0), (acos(x), True)) return tmp.subs(x, x) def eigs_2d(mat): a", "M = zeros(dim, dim) for i in range(0, dim): for j in range(0,", "mat[4] * sqr(mat[4]) + mat[8] * sqr(mat[8])) q += 9.0 * b *", "(mat[0] * sqr(mat[5]) + mat[4] * sqr(mat[2]) + mat[8] * sqr(mat[1])) delta =", "tmp2 = Piecewise( (a / 2, delta < 1e-10), ((a + sqrt(delta)) /", "/ 2, delta < 1e-10), ((a + sqrt(delta)) / 2.0, True) ) return", "delta < 1e-10), ((a - sqrt(delta)) / 2.0, True) ) tmp2 = Piecewise(", "tmp1 = Piecewise( (b / 3.0, p < 1e-10), ((b + p *", "<= j: M[i, j] = Symbol('m[' + str(i) + ',' + str(j) +", "']', real=True) if dim == 2: lambdas = eigs_2d(M) else: lambdas = eigs_3d(M)", "M_PI\", \" T(M_PI)\") signature = \"template<typename T>\\nvoid eigs_\" + str(dim) + \"d(const Eigen::Matrix<T,", "in range(0, dim): for j in range(0, dim): if i <= j: M[i,", "3.0 * (mat[0] + mat[4]) * (mat[0] + mat[8]) * (mat[4] + mat[8])", "/ 3.0, True) ) tmp2 = Piecewise( (b / 3.0, p < 1e-10),", "+ \"\\n}}\\n\" path = os.path.abspath(args.output) print(\"saving...\") with open(os.path.join(path, \"auto_eigs.cpp\"), \"w\") as file: file.write(cpp)", "mat[8] * sqr(mat[1])) delta = trunc_acos(0.5 * q / sqrt(p * sqr(p))) p", "else: lambdas = eigs_3d(M) # lambdas = simplify(lambdas) c99 = pretty_print.C99_print(lambdas) c99 =", "+ mat[8]) q -= 27.0 * (mat[0] * sqr(mat[5]) + mat[4] * sqr(mat[2])", "signature + \" {\\nres.resize(\" + str(dim) + \");\\n\" + c99 + \"\\n}\\n\\n\" cpp", "2, delta < 1e-10), ((a - sqrt(delta)) / 2.0, True) ) tmp2 =", "= c99.replace(\" M_PI\", \" T(M_PI)\") signature = \"template<typename T>\\nvoid eigs_\" + str(dim) +", "tmp2.subs(delta, delta) def eigs_3d(mat): b = mat[0] + mat[4] + mat[8] t =", "< 1e-10), ((a - sqrt(delta)) / 2.0, True) ) tmp2 = Piecewise( (a", "delta < 1e-10), ((a + sqrt(delta)) / 2.0, True) ) return tmp1.subs(delta, delta),", "tmp3.subs(p, p) def parse_args(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"output\", type=str, help=\"path to", "0] + mat[1, 1] delta = (mat[0, 0] - mat[1, 1])**2 + 4", "str(dim)) M = zeros(dim, dim) for i in range(0, dim): for j in", "1] delta = (mat[0, 0] - mat[1, 1])**2 + 4 * mat[0, 1]**2", "((b + p * cos((delta + 2.0 * pi) / 3.0)) / 3.0,", "q += 2.0 * (mat[0] * sqr(mat[0]) + mat[4] * sqr(mat[4]) + mat[8]", "q -= 3.0 * (mat[0] + mat[4]) * (mat[0] + mat[8]) * (mat[4]", "Piecewise( (b / 3.0, p < 1e-10), ((b + p * cos(delta /", "tmp3 = Piecewise( (b / 3.0, p < 1e-10), ((b + p *", "str(dim) + \");\\n\" + c99 + \"\\n}\\n\\n\" cpp = cpp + \"\\n\" hpp", "path = os.path.abspath(args.output) print(\"saving...\") with open(os.path.join(path, \"auto_eigs.cpp\"), \"w\") as file: file.write(cpp) with open(os.path.join(path,", "if __name__ == \"__main__\": args = parse_args() dims 
= [2, 3] cpp =", "c99 = re.sub(r\"result_(\\d{1})\", r'res(\\1)', c99) c99 = c99.replace(\"0.0\", \"T(0)\") c99 = c99.replace(\" M_PI\",", "hpp = hpp + \"\\n}}\\n\" path = os.path.abspath(args.output) print(\"saving...\") with open(os.path.join(path, \"auto_eigs.cpp\"), \"w\")", "tmp.subs(x, x) def eigs_2d(mat): a = mat[0, 0] + mat[1, 1] delta =" ]
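# --- Optional sanity check (illustrative sketch only; not part of the generator
# above). The 2x2 expressions returned by eigs_2d() are the closed-form
# trace/discriminant eigenvalues of a symmetric matrix,
# lambda = (m00 + m11 -/+ sqrt((m00 - m11)^2 + 4*m01^2)) / 2, so they can be
# cross-checked numerically. numpy and the helper name _check_eigs_2d are
# assumptions introduced for this sketch, not part of the original script.
def _check_eigs_2d(seed=0):
    import numpy as np

    rng = np.random.default_rng(seed)
    m = rng.standard_normal((2, 2))
    m = 0.5 * (m + m.T)  # symmetrize so the closed form applies

    a = m[0, 0] + m[1, 1]
    delta = (m[0, 0] - m[1, 1]) ** 2 + 4.0 * m[0, 1] ** 2
    lam = sorted([(a - delta ** 0.5) / 2.0, (a + delta ** 0.5) / 2.0])

    assert np.allclose(lam, np.linalg.eigvalsh(m))
    return lam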
[ "meridian.acupoints import xuanlu22 from meridian.acupoints import xuanli22 from meridian.acupoints import qubin14 from meridian.acupoints", "from meridian.acupoints import daimai44 from meridian.acupoints import wushu31 from meridian.acupoints import weidao24 from", "touqiaoyin241 from meridian.acupoints import wangu23 from meridian.acupoints import benshen32 from meridian.acupoints import yangbai22", "import xuanlu22 from meridian.acupoints import xuanli22 from meridian.acupoints import qubin14 from meridian.acupoints import", "from meridian.acupoints import juliao12 from meridian.acupoints import huantiao24 from meridian.acupoints import fengshi14 from", "from meridian.acupoints import heyan24 from meridian.acupoints import xuanlu22 from meridian.acupoints import xuanli22 from", "import wushu31 from meridian.acupoints import weidao24 from meridian.acupoints import juliao12 from meridian.acupoints import", "waiqiu41 from meridian.acupoints import guangming12 from meridian.acupoints import yangfu23 from meridian.acupoints import xuanzhong21", "from meridian.acupoints import qiuxu11 from meridian.acupoints import zulinqi224 from meridian.acupoints import diwuhui434 from", "import wangu23 from meridian.acupoints import benshen32 from meridian.acupoints import yangbai22 from meridian.acupoints import", "from meridian.acupoints import benshen32 from meridian.acupoints import yangbai22 from meridian.acupoints import toulinqi221 from", "meridian.acupoints import toulinqi221 from meridian.acupoints import muchuang41 from meridian.acupoints import zhengying42 from meridian.acupoints", "from meridian.acupoints import xuanlu22 from meridian.acupoints import xuanli22 from meridian.acupoints import qubin14 from", "meridian.acupoints import daimai44 from meridian.acupoints import wushu31 from meridian.acupoints import weidao24 from meridian.acupoints", "meridian.acupoints import heyan24 from meridian.acupoints import xuanlu22 from meridian.acupoints import xuanli22 from meridian.acupoints", "from meridian.acupoints import zhongdu12 from meridian.acupoints import xiyangguan121 from meridian.acupoints import yanglingquan222 from", "import xiyangguan121 from meridian.acupoints import yanglingquan222 from meridian.acupoints import yangjiao21 from meridian.acupoints import", "yangfu23 from meridian.acupoints import xuanzhong21 from meridian.acupoints import qiuxu11 from meridian.acupoints import zulinqi224", "from meridian.acupoints import zuqiaoyin241 SPELL=u'zúshàoyángdǎnjīng' CN=u'足少阳胆经' ABBR=u'GB' NAME='gallbladder' FULLNAME='GallbladderChannelofFoot-Shaoyang' SEQ=8 if __name__ ==", "meridian.acupoints import wangu23 from meridian.acupoints import benshen32 from meridian.acupoints import yangbai22 from meridian.acupoints", "jianjing13 from meridian.acupoints import yuanye14 from meridian.acupoints import zhejin21 from meridian.acupoints import riyue44", "''' from meridian.acupoints import tongziliao232 from meridian.acupoints import tinghui14 from meridian.acupoints import shangguan41", "fubai22 from meridian.acupoints import touqiaoyin241 from meridian.acupoints import wangu23 from meridian.acupoints import benshen32", "meridian.acupoints import benshen32 from meridian.acupoints import yangbai22 from meridian.acupoints import toulinqi221 from meridian.acupoints", "from meridian.acupoints import xuanli22 from meridian.acupoints import qubin14 from meridian.acupoints import shuaigu43 from", "shangguan41 from meridian.acupoints import heyan24 from meridian.acupoints import xuanlu22 from 
meridian.acupoints import xuanli22", "import zhengying42 from meridian.acupoints import chengling22 from meridian.acupoints import naokong31 from meridian.acupoints import", "from meridian.acupoints import jingmen12 from meridian.acupoints import daimai44 from meridian.acupoints import wushu31 from", "meridian.acupoints import yangfu23 from meridian.acupoints import xuanzhong21 from meridian.acupoints import qiuxu11 from meridian.acupoints", "import juliao12 from meridian.acupoints import huantiao24 from meridian.acupoints import fengshi14 from meridian.acupoints import", "import diwuhui434 from meridian.acupoints import xiaxi21 from meridian.acupoints import zuqiaoyin241 SPELL=u'zúshàoyángdǎnjīng' CN=u'足少阳胆经' ABBR=u'GB'", "meridian.acupoints import yanglingquan222 from meridian.acupoints import yangjiao21 from meridian.acupoints import waiqiu41 from meridian.acupoints", "weidao24 from meridian.acupoints import juliao12 from meridian.acupoints import huantiao24 from meridian.acupoints import fengshi14", "from meridian.acupoints import fengchi12 from meridian.acupoints import jianjing13 from meridian.acupoints import yuanye14 from", "meridian.acupoints import yuanye14 from meridian.acupoints import zhejin21 from meridian.acupoints import riyue44 from meridian.acupoints", "meridian.acupoints import naokong31 from meridian.acupoints import fengchi12 from meridian.acupoints import jianjing13 from meridian.acupoints", "meridian.acupoints import jianjing13 from meridian.acupoints import yuanye14 from meridian.acupoints import zhejin21 from meridian.acupoints", "from meridian.acupoints import yuanye14 from meridian.acupoints import zhejin21 from meridian.acupoints import riyue44 from", "from meridian.acupoints import wangu23 from meridian.acupoints import benshen32 from meridian.acupoints import yangbai22 from", "import shuaigu43 from meridian.acupoints import tianchong11 from meridian.acupoints import fubai22 from meridian.acupoints import", "xuanzhong21 from meridian.acupoints import qiuxu11 from meridian.acupoints import zulinqi224 from meridian.acupoints import diwuhui434", "import yuanye14 from meridian.acupoints import zhejin21 from meridian.acupoints import riyue44 from meridian.acupoints import", "meridian.acupoints import yangjiao21 from meridian.acupoints import waiqiu41 from meridian.acupoints import guangming12 from meridian.acupoints", "huantiao24 from meridian.acupoints import fengshi14 from meridian.acupoints import zhongdu12 from meridian.acupoints import xiyangguan121", "import naokong31 from meridian.acupoints import fengchi12 from meridian.acupoints import jianjing13 from meridian.acupoints import", "from meridian.acupoints import wushu31 from meridian.acupoints import weidao24 from meridian.acupoints import juliao12 from", "from meridian.acupoints import xiaxi21 from meridian.acupoints import zuqiaoyin241 SPELL=u'zúshàoyángdǎnjīng' CN=u'足少阳胆经' ABBR=u'GB' NAME='gallbladder' FULLNAME='GallbladderChannelofFoot-Shaoyang'", "meridian.acupoints import yangbai22 from meridian.acupoints import toulinqi221 from meridian.acupoints import muchuang41 from meridian.acupoints", "import touqiaoyin241 from meridian.acupoints import wangu23 from meridian.acupoints import benshen32 from meridian.acupoints import", "import qiuxu11 from meridian.acupoints import zulinqi224 from meridian.acupoints import diwuhui434 from meridian.acupoints import", "from meridian.acupoints import fubai22 from meridian.acupoints import touqiaoyin241 from meridian.acupoints import wangu23 from", "toulinqi221 from 
meridian.acupoints import muchuang41 from meridian.acupoints import zhengying42 from meridian.acupoints import chengling22", "meridian.acupoints import juliao12 from meridian.acupoints import huantiao24 from meridian.acupoints import fengshi14 from meridian.acupoints", "from meridian.acupoints import guangming12 from meridian.acupoints import yangfu23 from meridian.acupoints import xuanzhong21 from", "from meridian.acupoints import yanglingquan222 from meridian.acupoints import yangjiao21 from meridian.acupoints import waiqiu41 from", "import xuanzhong21 from meridian.acupoints import qiuxu11 from meridian.acupoints import zulinqi224 from meridian.acupoints import", "diwuhui434 from meridian.acupoints import xiaxi21 from meridian.acupoints import zuqiaoyin241 SPELL=u'zúshàoyángdǎnjīng' CN=u'足少阳胆经' ABBR=u'GB' NAME='gallbladder'", "xuanlu22 from meridian.acupoints import xuanli22 from meridian.acupoints import qubin14 from meridian.acupoints import shuaigu43", "import tongziliao232 from meridian.acupoints import tinghui14 from meridian.acupoints import shangguan41 from meridian.acupoints import", "meridian.acupoints import tinghui14 from meridian.acupoints import shangguan41 from meridian.acupoints import heyan24 from meridian.acupoints", "fengshi14 from meridian.acupoints import zhongdu12 from meridian.acupoints import xiyangguan121 from meridian.acupoints import yanglingquan222", "meridian.acupoints import tongziliao232 from meridian.acupoints import tinghui14 from meridian.acupoints import shangguan41 from meridian.acupoints", "meridian.acupoints import xuanli22 from meridian.acupoints import qubin14 from meridian.acupoints import shuaigu43 from meridian.acupoints", "meridian.acupoints import zuqiaoyin241 SPELL=u'zúshàoyángdǎnjīng' CN=u'足少阳胆经' ABBR=u'GB' NAME='gallbladder' FULLNAME='GallbladderChannelofFoot-Shaoyang' SEQ=8 if __name__ == '__main__':", "meridian.acupoints import xiaxi21 from meridian.acupoints import zuqiaoyin241 SPELL=u'zúshàoyángdǎnjīng' CN=u'足少阳胆经' ABBR=u'GB' NAME='gallbladder' FULLNAME='GallbladderChannelofFoot-Shaoyang' SEQ=8", "from meridian.acupoints import diwuhui434 from meridian.acupoints import xiaxi21 from meridian.acupoints import zuqiaoyin241 SPELL=u'zúshàoyángdǎnjīng'", "juliao12 from meridian.acupoints import huantiao24 from meridian.acupoints import fengshi14 from meridian.acupoints import zhongdu12", "import tinghui14 from meridian.acupoints import shangguan41 from meridian.acupoints import heyan24 from meridian.acupoints import", "jingmen12 from meridian.acupoints import daimai44 from meridian.acupoints import wushu31 from meridian.acupoints import weidao24", "import yangfu23 from meridian.acupoints import xuanzhong21 from meridian.acupoints import qiuxu11 from meridian.acupoints import", "meridian.acupoints import guangming12 from meridian.acupoints import yangfu23 from meridian.acupoints import xuanzhong21 from meridian.acupoints", "''' @author: sheng @license: ''' from meridian.acupoints import tongziliao232 from meridian.acupoints import tinghui14", "meridian.acupoints import chengling22 from meridian.acupoints import naokong31 from meridian.acupoints import fengchi12 from meridian.acupoints", "meridian.acupoints import qiuxu11 from meridian.acupoints import zulinqi224 from meridian.acupoints import diwuhui434 from meridian.acupoints", "sheng @license: ''' from meridian.acupoints import tongziliao232 from meridian.acupoints import tinghui14 from meridian.acupoints", "from meridian.acupoints import tongziliao232 from meridian.acupoints import 
tinghui14 from meridian.acupoints import shangguan41 from", "zulinqi224 from meridian.acupoints import diwuhui434 from meridian.acupoints import xiaxi21 from meridian.acupoints import zuqiaoyin241", "from meridian.acupoints import yangfu23 from meridian.acupoints import xuanzhong21 from meridian.acupoints import qiuxu11 from", "from meridian.acupoints import xiyangguan121 from meridian.acupoints import yanglingquan222 from meridian.acupoints import yangjiao21 from", "import xuanli22 from meridian.acupoints import qubin14 from meridian.acupoints import shuaigu43 from meridian.acupoints import", "from meridian.acupoints import muchuang41 from meridian.acupoints import zhengying42 from meridian.acupoints import chengling22 from", "zhejin21 from meridian.acupoints import riyue44 from meridian.acupoints import jingmen12 from meridian.acupoints import daimai44", "from meridian.acupoints import fengshi14 from meridian.acupoints import zhongdu12 from meridian.acupoints import xiyangguan121 from", "import zhongdu12 from meridian.acupoints import xiyangguan121 from meridian.acupoints import yanglingquan222 from meridian.acupoints import", "import heyan24 from meridian.acupoints import xuanlu22 from meridian.acupoints import xuanli22 from meridian.acupoints import", "meridian.acupoints import weidao24 from meridian.acupoints import juliao12 from meridian.acupoints import huantiao24 from meridian.acupoints", "qubin14 from meridian.acupoints import shuaigu43 from meridian.acupoints import tianchong11 from meridian.acupoints import fubai22", "meridian.acupoints import fengchi12 from meridian.acupoints import jianjing13 from meridian.acupoints import yuanye14 from meridian.acupoints", "qiuxu11 from meridian.acupoints import zulinqi224 from meridian.acupoints import diwuhui434 from meridian.acupoints import xiaxi21", "from meridian.acupoints import waiqiu41 from meridian.acupoints import guangming12 from meridian.acupoints import yangfu23 from", "from meridian.acupoints import tinghui14 from meridian.acupoints import shangguan41 from meridian.acupoints import heyan24 from", "heyan24 from meridian.acupoints import xuanlu22 from meridian.acupoints import xuanli22 from meridian.acupoints import qubin14", "from meridian.acupoints import xuanzhong21 from meridian.acupoints import qiuxu11 from meridian.acupoints import zulinqi224 from", "xuanli22 from meridian.acupoints import qubin14 from meridian.acupoints import shuaigu43 from meridian.acupoints import tianchong11", "import zhejin21 from meridian.acupoints import riyue44 from meridian.acupoints import jingmen12 from meridian.acupoints import", "wangu23 from meridian.acupoints import benshen32 from meridian.acupoints import yangbai22 from meridian.acupoints import toulinqi221", "import yangbai22 from meridian.acupoints import toulinqi221 from meridian.acupoints import muchuang41 from meridian.acupoints import", "import qubin14 from meridian.acupoints import shuaigu43 from meridian.acupoints import tianchong11 from meridian.acupoints import", "meridian.acupoints import waiqiu41 from meridian.acupoints import guangming12 from meridian.acupoints import yangfu23 from meridian.acupoints", "from meridian.acupoints import zulinqi224 from meridian.acupoints import diwuhui434 from meridian.acupoints import xiaxi21 from", "from meridian.acupoints import weidao24 from meridian.acupoints import juliao12 from meridian.acupoints import huantiao24 from", "from meridian.acupoints import shuaigu43 from meridian.acupoints import tianchong11 from meridian.acupoints import 
fubai22 from", "meridian.acupoints import zulinqi224 from meridian.acupoints import diwuhui434 from meridian.acupoints import xiaxi21 from meridian.acupoints", "chengling22 from meridian.acupoints import naokong31 from meridian.acupoints import fengchi12 from meridian.acupoints import jianjing13", "zhengying42 from meridian.acupoints import chengling22 from meridian.acupoints import naokong31 from meridian.acupoints import fengchi12", "from meridian.acupoints import riyue44 from meridian.acupoints import jingmen12 from meridian.acupoints import daimai44 from", "import guangming12 from meridian.acupoints import yangfu23 from meridian.acupoints import xuanzhong21 from meridian.acupoints import", "meridian.acupoints import huantiao24 from meridian.acupoints import fengshi14 from meridian.acupoints import zhongdu12 from meridian.acupoints", "meridian.acupoints import fubai22 from meridian.acupoints import touqiaoyin241 from meridian.acupoints import wangu23 from meridian.acupoints", "@author: sheng @license: ''' from meridian.acupoints import tongziliao232 from meridian.acupoints import tinghui14 from", "tongziliao232 from meridian.acupoints import tinghui14 from meridian.acupoints import shangguan41 from meridian.acupoints import heyan24", "@license: ''' from meridian.acupoints import tongziliao232 from meridian.acupoints import tinghui14 from meridian.acupoints import", "import fengshi14 from meridian.acupoints import zhongdu12 from meridian.acupoints import xiyangguan121 from meridian.acupoints import", "meridian.acupoints import muchuang41 from meridian.acupoints import zhengying42 from meridian.acupoints import chengling22 from meridian.acupoints", "from meridian.acupoints import yangjiao21 from meridian.acupoints import waiqiu41 from meridian.acupoints import guangming12 from", "import shangguan41 from meridian.acupoints import heyan24 from meridian.acupoints import xuanlu22 from meridian.acupoints import", "import toulinqi221 from meridian.acupoints import muchuang41 from meridian.acupoints import zhengying42 from meridian.acupoints import", "import waiqiu41 from meridian.acupoints import guangming12 from meridian.acupoints import yangfu23 from meridian.acupoints import", "import daimai44 from meridian.acupoints import wushu31 from meridian.acupoints import weidao24 from meridian.acupoints import", "from meridian.acupoints import zhengying42 from meridian.acupoints import chengling22 from meridian.acupoints import naokong31 from", "from meridian.acupoints import tianchong11 from meridian.acupoints import fubai22 from meridian.acupoints import touqiaoyin241 from", "xiyangguan121 from meridian.acupoints import yanglingquan222 from meridian.acupoints import yangjiao21 from meridian.acupoints import waiqiu41", "meridian.acupoints import fengshi14 from meridian.acupoints import zhongdu12 from meridian.acupoints import xiyangguan121 from meridian.acupoints", "import jingmen12 from meridian.acupoints import daimai44 from meridian.acupoints import wushu31 from meridian.acupoints import", "yangjiao21 from meridian.acupoints import waiqiu41 from meridian.acupoints import guangming12 from meridian.acupoints import yangfu23", "meridian.acupoints import riyue44 from meridian.acupoints import jingmen12 from meridian.acupoints import daimai44 from meridian.acupoints", "import riyue44 from meridian.acupoints import jingmen12 from meridian.acupoints import daimai44 from meridian.acupoints import", "meridian.acupoints import wushu31 from meridian.acupoints import weidao24 from meridian.acupoints import 
juliao12 from meridian.acupoints", "#!/usr/bin/python #coding=utf-8 ''' @author: sheng @license: ''' from meridian.acupoints import tongziliao232 from meridian.acupoints", "tinghui14 from meridian.acupoints import shangguan41 from meridian.acupoints import heyan24 from meridian.acupoints import xuanlu22", "benshen32 from meridian.acupoints import yangbai22 from meridian.acupoints import toulinqi221 from meridian.acupoints import muchuang41", "meridian.acupoints import qubin14 from meridian.acupoints import shuaigu43 from meridian.acupoints import tianchong11 from meridian.acupoints", "meridian.acupoints import shuaigu43 from meridian.acupoints import tianchong11 from meridian.acupoints import fubai22 from meridian.acupoints", "yangbai22 from meridian.acupoints import toulinqi221 from meridian.acupoints import muchuang41 from meridian.acupoints import zhengying42", "import fengchi12 from meridian.acupoints import jianjing13 from meridian.acupoints import yuanye14 from meridian.acupoints import", "import tianchong11 from meridian.acupoints import fubai22 from meridian.acupoints import touqiaoyin241 from meridian.acupoints import", "from meridian.acupoints import qubin14 from meridian.acupoints import shuaigu43 from meridian.acupoints import tianchong11 from", "zhongdu12 from meridian.acupoints import xiyangguan121 from meridian.acupoints import yanglingquan222 from meridian.acupoints import yangjiao21", "shuaigu43 from meridian.acupoints import tianchong11 from meridian.acupoints import fubai22 from meridian.acupoints import touqiaoyin241", "yanglingquan222 from meridian.acupoints import yangjiao21 from meridian.acupoints import waiqiu41 from meridian.acupoints import guangming12", "from meridian.acupoints import huantiao24 from meridian.acupoints import fengshi14 from meridian.acupoints import zhongdu12 from", "import xiaxi21 from meridian.acupoints import zuqiaoyin241 SPELL=u'zúshàoyángdǎnjīng' CN=u'足少阳胆经' ABBR=u'GB' NAME='gallbladder' FULLNAME='GallbladderChannelofFoot-Shaoyang' SEQ=8 if", "from meridian.acupoints import zhejin21 from meridian.acupoints import riyue44 from meridian.acupoints import jingmen12 from", "meridian.acupoints import xiyangguan121 from meridian.acupoints import yanglingquan222 from meridian.acupoints import yangjiao21 from meridian.acupoints", "from meridian.acupoints import shangguan41 from meridian.acupoints import heyan24 from meridian.acupoints import xuanlu22 from", "import weidao24 from meridian.acupoints import juliao12 from meridian.acupoints import huantiao24 from meridian.acupoints import", "import yangjiao21 from meridian.acupoints import waiqiu41 from meridian.acupoints import guangming12 from meridian.acupoints import", "daimai44 from meridian.acupoints import wushu31 from meridian.acupoints import weidao24 from meridian.acupoints import juliao12", "from meridian.acupoints import jianjing13 from meridian.acupoints import yuanye14 from meridian.acupoints import zhejin21 from", "meridian.acupoints import zhejin21 from meridian.acupoints import riyue44 from meridian.acupoints import jingmen12 from meridian.acupoints", "meridian.acupoints import touqiaoyin241 from meridian.acupoints import wangu23 from meridian.acupoints import benshen32 from meridian.acupoints", "meridian.acupoints import tianchong11 from meridian.acupoints import fubai22 from meridian.acupoints import touqiaoyin241 from meridian.acupoints", "from meridian.acupoints import toulinqi221 from meridian.acupoints import muchuang41 from meridian.acupoints import zhengying42 from", "from 
meridian.acupoints import touqiaoyin241 from meridian.acupoints import wangu23 from meridian.acupoints import benshen32 from", "naokong31 from meridian.acupoints import fengchi12 from meridian.acupoints import jianjing13 from meridian.acupoints import yuanye14", "guangming12 from meridian.acupoints import yangfu23 from meridian.acupoints import xuanzhong21 from meridian.acupoints import qiuxu11", "from meridian.acupoints import yangbai22 from meridian.acupoints import toulinqi221 from meridian.acupoints import muchuang41 from", "yuanye14 from meridian.acupoints import zhejin21 from meridian.acupoints import riyue44 from meridian.acupoints import jingmen12", "riyue44 from meridian.acupoints import jingmen12 from meridian.acupoints import daimai44 from meridian.acupoints import wushu31", "xiaxi21 from meridian.acupoints import zuqiaoyin241 SPELL=u'zúshàoyángdǎnjīng' CN=u'足少阳胆经' ABBR=u'GB' NAME='gallbladder' FULLNAME='GallbladderChannelofFoot-Shaoyang' SEQ=8 if __name__", "meridian.acupoints import jingmen12 from meridian.acupoints import daimai44 from meridian.acupoints import wushu31 from meridian.acupoints", "import benshen32 from meridian.acupoints import yangbai22 from meridian.acupoints import toulinqi221 from meridian.acupoints import", "from meridian.acupoints import chengling22 from meridian.acupoints import naokong31 from meridian.acupoints import fengchi12 from", "from meridian.acupoints import naokong31 from meridian.acupoints import fengchi12 from meridian.acupoints import jianjing13 from", "import zuqiaoyin241 SPELL=u'zúshàoyángdǎnjīng' CN=u'足少阳胆经' ABBR=u'GB' NAME='gallbladder' FULLNAME='GallbladderChannelofFoot-Shaoyang' SEQ=8 if __name__ == '__main__': pass", "meridian.acupoints import zhengying42 from meridian.acupoints import chengling22 from meridian.acupoints import naokong31 from meridian.acupoints", "import muchuang41 from meridian.acupoints import zhengying42 from meridian.acupoints import chengling22 from meridian.acupoints import", "meridian.acupoints import xuanzhong21 from meridian.acupoints import qiuxu11 from meridian.acupoints import zulinqi224 from meridian.acupoints", "import huantiao24 from meridian.acupoints import fengshi14 from meridian.acupoints import zhongdu12 from meridian.acupoints import", "tianchong11 from meridian.acupoints import fubai22 from meridian.acupoints import touqiaoyin241 from meridian.acupoints import wangu23", "import fubai22 from meridian.acupoints import touqiaoyin241 from meridian.acupoints import wangu23 from meridian.acupoints import", "fengchi12 from meridian.acupoints import jianjing13 from meridian.acupoints import yuanye14 from meridian.acupoints import zhejin21", "import chengling22 from meridian.acupoints import naokong31 from meridian.acupoints import fengchi12 from meridian.acupoints import", "meridian.acupoints import zhongdu12 from meridian.acupoints import xiyangguan121 from meridian.acupoints import yanglingquan222 from meridian.acupoints", "wushu31 from meridian.acupoints import weidao24 from meridian.acupoints import juliao12 from meridian.acupoints import huantiao24", "meridian.acupoints import shangguan41 from meridian.acupoints import heyan24 from meridian.acupoints import xuanlu22 from meridian.acupoints", "import zulinqi224 from meridian.acupoints import diwuhui434 from meridian.acupoints import xiaxi21 from meridian.acupoints import", "#coding=utf-8 ''' @author: sheng @license: ''' from meridian.acupoints import tongziliao232 from meridian.acupoints import", "import jianjing13 from meridian.acupoints import 
yuanye14 from meridian.acupoints import zhejin21 from meridian.acupoints import", "import yanglingquan222 from meridian.acupoints import yangjiao21 from meridian.acupoints import waiqiu41 from meridian.acupoints import", "meridian.acupoints import diwuhui434 from meridian.acupoints import xiaxi21 from meridian.acupoints import zuqiaoyin241 SPELL=u'zúshàoyángdǎnjīng' CN=u'足少阳胆经'", "muchuang41 from meridian.acupoints import zhengying42 from meridian.acupoints import chengling22 from meridian.acupoints import naokong31" ]
[ "as nn import torch.nn.functional as F # from loglinear import LogLinear class DeepSet(nn.Module):", "import LogLinear class DeepSet(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet, self).__init__() self.in_features = in_features", "self.regressor = nn.Sequential( nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512,", "nn.Sequential( nn.Linear(in_features, 256), nn.ReLU(inplace=True), nn.Linear(256, 256), nn.ReLU(inplace=True), nn.Linear(256, set_features), nn.ReLU(inplace=True) ) self.regressor =", "= nn.Sequential( nn.Linear(in_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, set_features) ) self.regressor =", "nn.ReLU(inplace=True), nn.Linear(50, 50), nn.ReLU(inplace=True), nn.Linear(50, set_features), nn.ReLU(inplace=True) ) self.l1 = nn.Linear(set_features*2, 30) self.l2", "x.sum(dim=1) x1 = self.l1(x) x2 = self.lp(x) + 0.001 x2 = self.l2(x2) x", "self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512,", "nn.Linear(50, 50), nn.ELU(inplace=True), nn.Linear(50, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ReLU(inplace=True), nn.Linear(50,", "str(self.feature_extractor) \\ + '\\n Set Feature' + str(self.regressor) + ')' class DeepSet2(nn.Module): def", "class DeepSet1(nn.Module): def __init__(self, in_features, set_features=512): super(DeepSet1, self).__init__() self.in_features = in_features self.out_features =", "nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 512), nn.ELU(inplace=True), nn.Linear(512,", "Set Feature' + str(self.regressor) + ')' class DeepSet1(nn.Module): def __init__(self, in_features, set_features=512): super(DeepSet1,", "set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, set_features) )", "= set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ELU(inplace=True), nn.Linear(256, 256), nn.ELU(inplace=True), nn.Linear(256, set_features)", "super(DeepSet3, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50),", "Feature' + str(self.regressor) + ')' class DeepSet3(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet3, self).__init__()", "+ 0.001 x2 = x2.log() x = torch.cat((x1, x2), 2) x = x.sum(dim=1)", "x2 = x2.log() x = torch.cat((x1, x2), 2) x = x.sum(dim=1) x1 =", "')' class DeepSet3(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet3, self).__init__() self.in_features = in_features self.out_features", "self.feature_extractor(x) x = x.sum(dim=1) x = self.regressor(x) return x def __repr__(self): return self.__class__.__name__", "DeepSet2(nn.Module): def __init__(self, in_features, set_features=256): super(DeepSet2, self).__init__() self.in_features = in_features self.out_features = set_features", ") self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self): for module in self.children(): reset_op =", "= nn.Sequential( nn.Linear(in_features, 50), nn.ReLU(inplace=True), nn.Linear(50, 50), nn.ReLU(inplace=True), 
nn.Linear(50, set_features), nn.ReLU(inplace=True) ) self.l1", "50), nn.ReLU(inplace=True), nn.Linear(50, set_features), nn.ReLU(inplace=True) ) self.l1 = nn.Linear(set_features*2, 30) self.l2 = LogLinear(set_features*2,", "\\ + '\\n Set Feature' + str(self.regressor) + ')' class DeepSet1(nn.Module): def __init__(self,", "512), nn.ELU(inplace=True), nn.Linear(512, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self): for module", "F # from loglinear import LogLinear class DeepSet(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet,", "set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 30), nn.ELU(inplace=True), nn.Linear(30, 30), nn.ELU(inplace=True), nn.Linear(30, 10),", "+ ')' class DeepSet1(nn.Module): def __init__(self, in_features, set_features=512): super(DeepSet1, self).__init__() self.in_features = in_features", "nn.Linear(30, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor)", "self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ReLU(inplace=True), nn.Linear(50, 50), nn.ReLU(inplace=True), nn.Linear(50, set_features), nn.ReLU(inplace=True) )", "nn.ELU(inplace=True), nn.Linear(60, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1',", "Feature' + str(self.regressor) + ')' class DeepSet2(nn.Module): def __init__(self, in_features, set_features=256): super(DeepSet2, self).__init__()", "nn.ReLU(inplace=True) ) self.regressor = nn.Sequential( nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512),", "__init__(self, in_features, set_features=256): super(DeepSet2, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor =", "30), nn.ELU(inplace=True), nn.Linear(30, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0', self.feature_extractor)", "= x.sum(dim=1) x = self.regressor(x) return x def __repr__(self): return self.__class__.__name__ + '('", "nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1',", "self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 100), nn.ELU(inplace=True), nn.Linear(100,", "__init__(self, in_features, set_features=50): super(DeepSet3, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor =", "set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ELU(inplace=True), nn.Linear(256, 256), nn.ELU(inplace=True), nn.Linear(256, set_features) )", "x = x.sum(dim=1) x = self.regressor(x) return x def __repr__(self): return self.__class__.__name__ +", "512), nn.ELU(inplace=True), nn.Linear(512, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512),", "getattr(module, \"reset_parameters\", None) if callable(reset_op): reset_op() def forward(self, input): x = input x", "None) if callable(reset_op): reset_op() def forward(self, input): x = input x1 = self.feature_extractor(x)", "self.feature_extractor = 
nn.Sequential( nn.Linear(in_features, 256), nn.ELU(inplace=True), nn.Linear(256, 256), nn.ELU(inplace=True), nn.Linear(256, set_features) ) self.log_feature_extractor", "= set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 100), nn.ELU(inplace=True), nn.Linear(100, set_features)", "nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self):", "= in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 50),", "x1 = self.feature_extractor(x) x2 = self.log_feature_extractor(x) + 0.001 x2 = x2.log() x =", "nn.Linear(50, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ReLU(inplace=True), nn.Linear(50, 50), nn.ReLU(inplace=True), nn.Linear(50,", "0.001 x2 = x2.log() x = torch.cat((x1, x2), 2) x = x.sum(dim=1) x", "50), nn.ELU(inplace=True), nn.Linear(50, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ReLU(inplace=True), nn.Linear(50, 50),", "nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 50), nn.ELU(inplace=True), nn.Linear(50, set_features) ) self.log_feature_extractor = nn.Sequential(", "self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50,", "nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 512), nn.ELU(inplace=True),", "self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ELU(inplace=True), nn.Linear(256, 256), nn.ELU(inplace=True), nn.Linear(256,", "512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 1), ) self.add_module('0', self.feature_extractor)", "nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self): for module in", "self.log_feature_extractor(x) + 0.001 x2 = x2.log() x = torch.cat((x1, x2), 2) x =", "torch.nn as nn import torch.nn.functional as F # from loglinear import LogLinear class", "nn.ELU(inplace=True), nn.Linear(100, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 30), nn.ELU(inplace=True), nn.Linear(30, 30), nn.ELU(inplace=True),", "'(' \\ + 'Feature Exctractor=' + str(self.feature_extractor) \\ + '\\n Set Feature' +", "= input x1 = self.feature_extractor(x) x2 = self.log_feature_extractor(x) + 0.001 x2 = x2.log()", "<filename>PopStats/model.py import torch import torch.nn as nn import torch.nn.functional as F # from", "'\\n Set Feature' + str(self.regressor) + ')' class DeepSet1(nn.Module): def __init__(self, in_features, set_features=512):", "nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 100), nn.ELU(inplace=True), nn.Linear(100, set_features) ) self.regressor = nn.Sequential(", "self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ELU(inplace=True),", "nn.Linear(256, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ReLU(inplace=True), 
nn.Linear(256, 256), nn.ReLU(inplace=True), nn.Linear(256,", "x2), 2) x = x.sum(dim=1) x1 = self.l1(x) x2 = self.lp(x) + 0.001", "= LogLinear(set_features*2, 30) self.lp = nn.ReLU() self.regressor = nn.Sequential( #nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(60,", "1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self): for module in self.children(): reset_op", "= nn.Sequential( nn.Linear(set_features, 30), nn.ELU(inplace=True), nn.Linear(30, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1),", "torch.cat((x1, x2), 2) x = x.sum(dim=1) x = self.regressor(x) return x def __repr__(self):", "str(self.regressor) + ')' class DeepSet1(nn.Module): def __init__(self, in_features, set_features=512): super(DeepSet1, self).__init__() self.in_features =", "= torch.cat((x1, x2), 1) x = self.regressor(x) return x def __repr__(self): return self.__class__.__name__", "= set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, set_features)", "nn.ReLU() self.regressor = nn.Sequential( #nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(60, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True),", "x2 = self.l2(x2) x = torch.cat((x1, x2), 1) x = self.regressor(x) return x", "nn.Linear(in_features, 50), nn.ReLU(inplace=True), nn.Linear(50, 50), nn.ReLU(inplace=True), nn.Linear(50, set_features), nn.ReLU(inplace=True) ) self.l1 = nn.Linear(set_features*2,", "__init__(self, in_features, set_features=512): super(DeepSet1, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor =", "nn.Sequential( nn.Linear(in_features, 50), nn.ReLU(inplace=True), nn.Linear(50, 50), nn.ReLU(inplace=True), nn.Linear(50, set_features), nn.ReLU(inplace=True) ) self.l1 =", "callable(reset_op): reset_op() def forward(self, input): x = input x = self.feature_extractor(x) x =", "self.lp(x) + 0.001 x2 = self.l2(x2) x = torch.cat((x1, x2), 1) x =", "import torch import torch.nn as nn import torch.nn.functional as F # from loglinear", "nn.Sequential( nn.Linear(in_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, set_features) ) self.regressor = nn.Sequential(", "nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self): for", "'\\n Set Feature' + str(self.regressor) + ')' class DeepSet2(nn.Module): def __init__(self, in_features, set_features=256):", "self.feature_extractor(x) x2 = self.log_feature_extractor(x) + 0.001 x2 = x2.log() x = torch.cat((x1, x2),", "nn.Linear(256, 256), nn.ReLU(inplace=True), nn.Linear(256, set_features), nn.ReLU(inplace=True) ) self.regressor = nn.Sequential( nn.Linear(set_features*2, 512), nn.ELU(inplace=True),", "def __repr__(self): return self.__class__.__name__ + '(' \\ + 'Feature Exctractor=' + str(self.feature_extractor) \\", "+ str(self.regressor) + ')' class DeepSet3(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet3, self).__init__() self.in_features", "nn.Sequential( nn.Linear(set_features, 30), nn.ELU(inplace=True), nn.Linear(30, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), )", "= torch.cat((x1, x2), 2) x = x.sum(dim=1) x = self.regressor(x) return x def", "self.feature_extractor = 
nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 100), nn.ELU(inplace=True), nn.Linear(100, set_features) ) self.regressor", "30) self.lp = nn.ReLU() self.regressor = nn.Sequential( #nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(60, 30), nn.ELU(inplace=True),", "+ '\\n Set Feature' + str(self.regressor) + ')' class DeepSet1(nn.Module): def __init__(self, in_features,", "nn.Sequential( nn.Linear(in_features, 256), nn.ELU(inplace=True), nn.Linear(256, 256), nn.ELU(inplace=True), nn.Linear(256, set_features) ) self.log_feature_extractor = nn.Sequential(", ") self.regressor = nn.Sequential( nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True),", "x2 = self.lp(x) + 0.001 x2 = self.l2(x2) x = torch.cat((x1, x2), 1)", "set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512),", "\\ + 'Feature Exctractor=' + str(self.feature_extractor) \\ + '\\n Set Feature' + str(self.regressor)", "torch.cat((x1, x2), 2) x = x.sum(dim=1) x1 = self.l1(x) x2 = self.lp(x) +", "nn.Linear(256, set_features), nn.ReLU(inplace=True) ) self.regressor = nn.Sequential( nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True),", "256), nn.ReLU(inplace=True), nn.Linear(256, 256), nn.ReLU(inplace=True), nn.Linear(256, set_features), nn.ReLU(inplace=True) ) self.regressor = nn.Sequential( nn.Linear(set_features*2,", "x2.log() x = torch.cat((x1, x2), 2) x = x.sum(dim=1) x = self.regressor(x) return", "forward(self, input): x = input x1 = self.feature_extractor(x) x2 = self.log_feature_extractor(x) + 0.001", "set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ReLU(inplace=True), nn.Linear(256, 256), nn.ReLU(inplace=True), nn.Linear(256, set_features),", "in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True),", "+ 'Feature Exctractor=' + str(self.feature_extractor) \\ + '\\n Set Feature' + str(self.regressor) +", "nn.ELU(inplace=True), nn.Linear(256, 256), nn.ELU(inplace=True), nn.Linear(256, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ReLU(inplace=True),", "nn import torch.nn.functional as F # from loglinear import LogLinear class DeepSet(nn.Module): def", "set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ReLU(inplace=True), nn.Linear(50, 50), nn.ReLU(inplace=True), nn.Linear(50, set_features),", "str(self.feature_extractor) \\ + '\\n Set Feature' + str(self.regressor) + ')' class DeepSet1(nn.Module): def", "x2 = self.log_feature_extractor(x) + 0.001 x2 = x2.log() x = torch.cat((x1, x2), 2)", "50), nn.ELU(inplace=True), nn.Linear(50, 50), nn.ELU(inplace=True), nn.Linear(50, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 50),", "= nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 100), nn.ELU(inplace=True), nn.Linear(100, set_features) ) self.regressor =", "0.001 x2 = x2.log() x = torch.cat((x1, x2), 2) x = x.sum(dim=1) x1", "set_features=512): super(DeepSet1, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features,", ") self.log_feature_extractor = 
nn.Sequential( nn.Linear(in_features, 256), nn.ReLU(inplace=True), nn.Linear(256, 256), nn.ReLU(inplace=True), nn.Linear(256, set_features), nn.ReLU(inplace=True)", "in_features, set_features=50): super(DeepSet, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential(", "= nn.Sequential( nn.Linear(in_features, 256), nn.ELU(inplace=True), nn.Linear(256, 256), nn.ELU(inplace=True), nn.Linear(256, set_features) ) self.log_feature_extractor =", "self.l1(x) x2 = self.lp(x) + 0.001 x2 = self.l2(x2) x = torch.cat((x1, x2),", "= self.lp(x) + 0.001 x2 = self.l2(x2) x = torch.cat((x1, x2), 1) x", "self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ELU(inplace=True), nn.Linear(256,", "for module in self.children(): reset_op = getattr(module, \"reset_parameters\", None) if callable(reset_op): reset_op() def", "self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 512), nn.ELU(inplace=True),", "class DeepSet(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet, self).__init__() self.in_features = in_features self.out_features =", "nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor)", "Set Feature' + str(self.regressor) + ')' class DeepSet3(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet3,", "in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ELU(inplace=True), nn.Linear(256, 256), nn.ELU(inplace=True),", "class DeepSet3(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet3, self).__init__() self.in_features = in_features self.out_features =", "50), nn.ReLU(inplace=True), nn.Linear(50, 50), nn.ReLU(inplace=True), nn.Linear(50, set_features), nn.ReLU(inplace=True) ) self.l1 = nn.Linear(set_features*2, 30)", "= torch.cat((x1, x2), 2) x = x.sum(dim=1) x1 = self.l1(x) x2 = self.lp(x)", "if callable(reset_op): reset_op() def forward(self, input): x = input x1 = self.feature_extractor(x) x2", "set_features), nn.ReLU(inplace=True) ) self.regressor = nn.Sequential( nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512,", "0.001 x2 = self.l2(x2) x = torch.cat((x1, x2), 1) x = self.regressor(x) return", "def __init__(self, in_features, set_features=256): super(DeepSet2, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor", "nn.Linear(in_features, 256), nn.ReLU(inplace=True), nn.Linear(256, 256), nn.ReLU(inplace=True), nn.Linear(256, set_features), nn.ReLU(inplace=True) ) self.regressor = nn.Sequential(", "= nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 50), nn.ELU(inplace=True), nn.Linear(50, set_features) ) self.log_feature_extractor =", "self.__class__.__name__ + '(' \\ + 'Feature Exctractor=' + str(self.feature_extractor) \\ + '\\n Set", "nn.Linear(set_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 1), ) self.add_module('0',", "def __init__(self, in_features, set_features=512): super(DeepSet1, self).__init__() self.in_features = in_features self.out_features = set_features 
self.feature_extractor", "= nn.Sequential( nn.Linear(set_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 1),", "+ str(self.feature_extractor) \\ + '\\n Set Feature' + str(self.regressor) + ')' class DeepSet2(nn.Module):", "nn.ELU(inplace=True), nn.Linear(50, 50), nn.ELU(inplace=True), nn.Linear(50, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ReLU(inplace=True),", "in_features, set_features=50): super(DeepSet3, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential(", "nn.ELU(inplace=True), nn.Linear(512, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True),", "\\ + '\\n Set Feature' + str(self.regressor) + ')' class DeepSet3(nn.Module): def __init__(self,", "x = torch.cat((x1, x2), 1) x = self.regressor(x) return x def __repr__(self): return", "input): x = input x1 = self.feature_extractor(x) x2 = self.log_feature_extractor(x) + 0.001 x2", "30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def", "DeepSet3(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet3, self).__init__() self.in_features = in_features self.out_features = set_features", "import torch.nn.functional as F # from loglinear import LogLinear class DeepSet(nn.Module): def __init__(self,", "nn.Linear(512, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self): for module in self.children():", "loglinear import LogLinear class DeepSet(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet, self).__init__() self.in_features =", "= self.l1(x) x2 = self.lp(x) + 0.001 x2 = self.l2(x2) x = torch.cat((x1,", "super(DeepSet1, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 512),", "self.l2(x2) x = torch.cat((x1, x2), 1) x = self.regressor(x) return x def __repr__(self):", ") self.regressor = nn.Sequential( nn.Linear(set_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True),", "+ str(self.regressor) + ')' class DeepSet2(nn.Module): def __init__(self, in_features, set_features=256): super(DeepSet2, self).__init__() self.in_features", "nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 100), nn.ELU(inplace=True), nn.Linear(100, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features,", "= self.log_feature_extractor(x) + 0.001 x2 = x2.log() x = torch.cat((x1, x2), 2) x", "forward(self, input): x = input x = self.feature_extractor(x) x = x.sum(dim=1) x =", "set_features=50): super(DeepSet3, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features,", "+ 0.001 x2 = self.l2(x2) x = torch.cat((x1, x2), 1) x = self.regressor(x)", "512), nn.ELU(inplace=True), nn.Linear(60, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0', self.feature_extractor)", "= self.feature_extractor(x) x = x.sum(dim=1) x = self.regressor(x) return x def __repr__(self): return", "str(self.regressor) + ')' class DeepSet3(nn.Module): def 
__init__(self, in_features, set_features=50): super(DeepSet3, self).__init__() self.in_features =", ") self.regressor = nn.Sequential( nn.Linear(set_features, 30), nn.ELU(inplace=True), nn.Linear(30, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True),", "Feature' + str(self.regressor) + ')' class DeepSet1(nn.Module): def __init__(self, in_features, set_features=512): super(DeepSet1, self).__init__()", "input x = self.feature_extractor(x) x = x.sum(dim=1) x = self.regressor(x) return x def", "= nn.ReLU() self.regressor = nn.Sequential( #nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(60, 30), nn.ELU(inplace=True), nn.Linear(30, 10),", "self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 512), nn.ELU(inplace=True), nn.Linear(512,", "self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self): for module in self.children(): reset_op = getattr(module, \"reset_parameters\",", "= set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 50), nn.ELU(inplace=True), nn.Linear(50, set_features)", "x = torch.cat((x1, x2), 2) x = x.sum(dim=1) x1 = self.l1(x) x2 =", "str(self.regressor) + ')' class DeepSet2(nn.Module): def __init__(self, in_features, set_features=256): super(DeepSet2, self).__init__() self.in_features =", "return self.__class__.__name__ + '(' \\ + 'Feature Exctractor=' + str(self.feature_extractor) \\ + '\\n", "= nn.Sequential( #nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(60, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1),", "nn.Sequential( #nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(60, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), )", "input x1 = self.feature_extractor(x) x2 = self.log_feature_extractor(x) + 0.001 x2 = x2.log() x", "self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 50), nn.ELU(inplace=True), nn.Linear(50, set_features) ) self.log_feature_extractor", "= self.regressor(x) return x def __repr__(self): return self.__class__.__name__ + '(' \\ + 'Feature", "in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 50), nn.ELU(inplace=True),", "')' class DeepSet2(nn.Module): def __init__(self, in_features, set_features=256): super(DeepSet2, self).__init__() self.in_features = in_features self.out_features", "50), nn.ELU(inplace=True), nn.Linear(50, 100), nn.ELU(inplace=True), nn.Linear(100, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 30),", "reset_op() def forward(self, input): x = input x1 = self.feature_extractor(x) x2 = self.log_feature_extractor(x)", "from loglinear import LogLinear class DeepSet(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet, self).__init__() self.in_features", "+ str(self.feature_extractor) \\ + '\\n Set Feature' + str(self.regressor) + ')' class DeepSet3(nn.Module):", "self.regressor(x) return x def __repr__(self): return self.__class__.__name__ + '(' \\ + 'Feature Exctractor='", "set_features=50): super(DeepSet, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features,", "256), nn.ReLU(inplace=True), nn.Linear(256, set_features), nn.ReLU(inplace=True) ) 
self.regressor = nn.Sequential( nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(512,", "nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self):", "nn.Linear(10, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self): for module in self.children():", "getattr(module, \"reset_parameters\", None) if callable(reset_op): reset_op() def forward(self, input): x = input x1", "self.l1 = nn.Linear(set_features*2, 30) self.l2 = LogLinear(set_features*2, 30) self.lp = nn.ReLU() self.regressor =", "as F # from loglinear import LogLinear class DeepSet(nn.Module): def __init__(self, in_features, set_features=50):", "512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def", "10), nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self): for module", "= getattr(module, \"reset_parameters\", None) if callable(reset_op): reset_op() def forward(self, input): x = input", "x.sum(dim=1) x = self.regressor(x) return x def __repr__(self): return self.__class__.__name__ + '(' \\", "torch import torch.nn as nn import torch.nn.functional as F # from loglinear import", "= x.sum(dim=1) x1 = self.l1(x) x2 = self.lp(x) + 0.001 x2 = self.l2(x2)", "LogLinear(set_features*2, 30) self.lp = nn.ReLU() self.regressor = nn.Sequential( #nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(60, 30),", "= input x = self.feature_extractor(x) x = x.sum(dim=1) x = self.regressor(x) return x", "30) self.l2 = LogLinear(set_features*2, 30) self.lp = nn.ReLU() self.regressor = nn.Sequential( #nn.Linear(set_features*2, 512),", "x = x.sum(dim=1) x1 = self.l1(x) x2 = self.lp(x) + 0.001 x2 =", "if callable(reset_op): reset_op() def forward(self, input): x = input x = self.feature_extractor(x) x", "__repr__(self): return self.__class__.__name__ + '(' \\ + 'Feature Exctractor=' + str(self.feature_extractor) \\ +", "= in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ELU(inplace=True), nn.Linear(256, 256),", "Set Feature' + str(self.regressor) + ')' class DeepSet2(nn.Module): def __init__(self, in_features, set_features=256): super(DeepSet2,", "nn.Sequential( nn.Linear(set_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 1), )", "reset_parameters(self): for module in self.children(): reset_op = getattr(module, \"reset_parameters\", None) if callable(reset_op): reset_op()", "\"reset_parameters\", None) if callable(reset_op): reset_op() def forward(self, input): x = input x =", "nn.ELU(inplace=True), nn.Linear(512, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self): for module in", "+ ')' class DeepSet2(nn.Module): def __init__(self, in_features, set_features=256): super(DeepSet2, self).__init__() self.in_features = in_features", "256), nn.ELU(inplace=True), nn.Linear(256, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ReLU(inplace=True), nn.Linear(256, 256),", "+ '\\n Set Feature' + str(self.regressor) + ')' class DeepSet3(nn.Module): def 
__init__(self, in_features,", "self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 50), nn.ELU(inplace=True), nn.Linear(50,", "self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ReLU(inplace=True), nn.Linear(256, 256), nn.ReLU(inplace=True), nn.Linear(256, set_features), nn.ReLU(inplace=True) )", ") self.l1 = nn.Linear(set_features*2, 30) self.l2 = LogLinear(set_features*2, 30) self.lp = nn.ReLU() self.regressor", "nn.ELU(inplace=True), nn.Linear(30, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1',", "= x2.log() x = torch.cat((x1, x2), 2) x = x.sum(dim=1) x = self.regressor(x)", "= in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512),", "nn.ELU(inplace=True), nn.Linear(50, 100), nn.ELU(inplace=True), nn.Linear(100, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 30), nn.ELU(inplace=True),", "2) x = x.sum(dim=1) x1 = self.l1(x) x2 = self.lp(x) + 0.001 x2", "x = input x1 = self.feature_extractor(x) x2 = self.log_feature_extractor(x) + 0.001 x2 =", "x1 = self.l1(x) x2 = self.lp(x) + 0.001 x2 = self.l2(x2) x =", "+ '\\n Set Feature' + str(self.regressor) + ')' class DeepSet2(nn.Module): def __init__(self, in_features,", "def forward(self, input): x = input x1 = self.feature_extractor(x) x2 = self.log_feature_extractor(x) +", "'Feature Exctractor=' + str(self.feature_extractor) \\ + '\\n Set Feature' + str(self.regressor) + ')'", "x = self.regressor(x) return x def __repr__(self): return self.__class__.__name__ + '(' \\ +", "LogLinear class DeepSet(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet, self).__init__() self.in_features = in_features self.out_features", "nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 1), ) self.add_module('0',", "callable(reset_op): reset_op() def forward(self, input): x = input x1 = self.feature_extractor(x) x2 =", "nn.ELU(inplace=True), nn.Linear(50, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ReLU(inplace=True), nn.Linear(50, 50), nn.ReLU(inplace=True),", "nn.Sequential( nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 1), )", "+ ')' class DeepSet3(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet3, self).__init__() self.in_features = in_features", "nn.ReLU(inplace=True), nn.Linear(256, 256), nn.ReLU(inplace=True), nn.Linear(256, set_features), nn.ReLU(inplace=True) ) self.regressor = nn.Sequential( nn.Linear(set_features*2, 512),", "256), nn.ELU(inplace=True), nn.Linear(256, 256), nn.ELU(inplace=True), nn.Linear(256, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 256),", "in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 100), nn.ELU(inplace=True),", "nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self): for", "x def __repr__(self): return self.__class__.__name__ + '(' \\ 
+ 'Feature Exctractor=' + str(self.feature_extractor)", "= nn.Sequential( nn.Linear(in_features, 256), nn.ReLU(inplace=True), nn.Linear(256, 256), nn.ReLU(inplace=True), nn.Linear(256, set_features), nn.ReLU(inplace=True) ) self.regressor", "def __init__(self, in_features, set_features=50): super(DeepSet3, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor", "class DeepSet2(nn.Module): def __init__(self, in_features, set_features=256): super(DeepSet2, self).__init__() self.in_features = in_features self.out_features =", "1) x = self.regressor(x) return x def __repr__(self): return self.__class__.__name__ + '(' \\", "2) x = x.sum(dim=1) x = self.regressor(x) return x def __repr__(self): return self.__class__.__name__", "nn.Linear(50, set_features), nn.ReLU(inplace=True) ) self.l1 = nn.Linear(set_features*2, 30) self.l2 = LogLinear(set_features*2, 30) self.lp", "x2), 2) x = x.sum(dim=1) x = self.regressor(x) return x def __repr__(self): return", "nn.Linear(set_features*2, 30) self.l2 = LogLinear(set_features*2, 30) self.lp = nn.ReLU() self.regressor = nn.Sequential( #nn.Linear(set_features*2,", "self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True),", "DeepSet1(nn.Module): def __init__(self, in_features, set_features=512): super(DeepSet1, self).__init__() self.in_features = in_features self.out_features = set_features", "#nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(60, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0',", "in_features, set_features=256): super(DeepSet2, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential(", "= self.feature_extractor(x) x2 = self.log_feature_extractor(x) + 0.001 x2 = x2.log() x = torch.cat((x1,", "nn.ReLU(inplace=True), nn.Linear(50, set_features), nn.ReLU(inplace=True) ) self.l1 = nn.Linear(set_features*2, 30) self.l2 = LogLinear(set_features*2, 30)", "None) if callable(reset_op): reset_op() def forward(self, input): x = input x = self.feature_extractor(x)", "nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 50), nn.ELU(inplace=True), nn.Linear(50, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features,", "nn.Linear(256, 256), nn.ELU(inplace=True), nn.Linear(256, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ReLU(inplace=True), nn.Linear(256,", "= self.l2(x2) x = torch.cat((x1, x2), 1) x = self.regressor(x) return x def", "self.children(): reset_op = getattr(module, \"reset_parameters\", None) if callable(reset_op): reset_op() def forward(self, input): x", "set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 50), nn.ELU(inplace=True), nn.Linear(50, set_features) )", "def __init__(self, in_features, set_features=50): super(DeepSet, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor", "reset_op = getattr(module, \"reset_parameters\", None) if callable(reset_op): reset_op() def forward(self, input): x =", "nn.Linear(in_features, 256), nn.ELU(inplace=True), nn.Linear(256, 256), nn.ELU(inplace=True), nn.Linear(256, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features,", "x2), 1) x = self.regressor(x) return x def 
__repr__(self): return self.__class__.__name__ + '('", "set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), nn.Linear(50, 100), nn.ELU(inplace=True), nn.Linear(100, set_features) )", "self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor) def reset_parameters(self): for module in self.children(): reset_op = getattr(module,", "torch.nn.functional as F # from loglinear import LogLinear class DeepSet(nn.Module): def __init__(self, in_features,", "reset_op() def forward(self, input): x = input x = self.feature_extractor(x) x = x.sum(dim=1)", "nn.Linear(in_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features,", "self.feature_extractor = nn.Sequential( nn.Linear(in_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, set_features) ) self.regressor", "set_features=256): super(DeepSet2, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features,", "__init__(self, in_features, set_features=50): super(DeepSet, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor =", "nn.ELU(inplace=True), nn.Linear(256, set_features) ) self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 256), nn.ReLU(inplace=True), nn.Linear(256, 256), nn.ReLU(inplace=True),", "import torch.nn as nn import torch.nn.functional as F # from loglinear import LogLinear", "512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 512),", "def forward(self, input): x = input x = self.feature_extractor(x) x = x.sum(dim=1) x", "# from loglinear import LogLinear class DeepSet(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet, self).__init__()", "nn.Linear(50, 50), nn.ReLU(inplace=True), nn.Linear(50, set_features), nn.ReLU(inplace=True) ) self.l1 = nn.Linear(set_features*2, 30) self.l2 =", "self.l2 = LogLinear(set_features*2, 30) self.lp = nn.ReLU() self.regressor = nn.Sequential( #nn.Linear(set_features*2, 512), nn.ELU(inplace=True),", "\"reset_parameters\", None) if callable(reset_op): reset_op() def forward(self, input): x = input x1 =", "nn.Linear(50, 100), nn.ELU(inplace=True), nn.Linear(100, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 30), nn.ELU(inplace=True), nn.Linear(30,", "nn.ReLU(inplace=True), nn.Linear(256, set_features), nn.ReLU(inplace=True) ) self.regressor = nn.Sequential( nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(512, 512),", "x = torch.cat((x1, x2), 2) x = x.sum(dim=1) x = self.regressor(x) return x", "self.regressor = nn.Sequential( #nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(60, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10,", "super(DeepSet, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50),", "self.regressor = nn.Sequential( nn.Linear(set_features, 30), nn.ELU(inplace=True), nn.Linear(30, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10,", "'\\n Set Feature' + str(self.regressor) + ')' class DeepSet3(nn.Module): def __init__(self, in_features, set_features=50):", "x = input x = 
self.feature_extractor(x) x = x.sum(dim=1) x = self.regressor(x) return", "in_features, set_features=512): super(DeepSet1, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential(", "+ '(' \\ + 'Feature Exctractor=' + str(self.feature_extractor) \\ + '\\n Set Feature'", ") self.log_feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ReLU(inplace=True), nn.Linear(50, 50), nn.ReLU(inplace=True), nn.Linear(50, set_features), nn.ReLU(inplace=True)", "x2.log() x = torch.cat((x1, x2), 2) x = x.sum(dim=1) x1 = self.l1(x) x2", "100), nn.ELU(inplace=True), nn.Linear(100, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 30), nn.ELU(inplace=True), nn.Linear(30, 30),", "+ str(self.regressor) + ')' class DeepSet1(nn.Module): def __init__(self, in_features, set_features=512): super(DeepSet1, self).__init__() self.in_features", "DeepSet(nn.Module): def __init__(self, in_features, set_features=50): super(DeepSet, self).__init__() self.in_features = in_features self.out_features = set_features", "in self.children(): reset_op = getattr(module, \"reset_parameters\", None) if callable(reset_op): reset_op() def forward(self, input):", "nn.Linear(set_features, 30), nn.ELU(inplace=True), nn.Linear(30, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0',", "nn.Linear(100, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 30), nn.ELU(inplace=True), nn.Linear(30, 30), nn.ELU(inplace=True), nn.Linear(30,", "nn.ReLU(inplace=True) ) self.l1 = nn.Linear(set_features*2, 30) self.l2 = LogLinear(set_features*2, 30) self.lp = nn.ReLU()", "self.add_module('1', self.regressor) def reset_parameters(self): for module in self.children(): reset_op = getattr(module, \"reset_parameters\", None)", "')' class DeepSet1(nn.Module): def __init__(self, in_features, set_features=512): super(DeepSet1, self).__init__() self.in_features = in_features self.out_features", "nn.Linear(512, set_features) ) self.regressor = nn.Sequential( nn.Linear(set_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512,", "super(DeepSet2, self).__init__() self.in_features = in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 256),", "x = self.feature_extractor(x) x = x.sum(dim=1) x = self.regressor(x) return x def __repr__(self):", "self.lp = nn.ReLU() self.regressor = nn.Sequential( #nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(60, 30), nn.ELU(inplace=True), nn.Linear(30,", "= nn.Sequential( nn.Linear(set_features*2, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 1),", "self.regressor) def reset_parameters(self): for module in self.children(): reset_op = getattr(module, \"reset_parameters\", None) if", "module in self.children(): reset_op = getattr(module, \"reset_parameters\", None) if callable(reset_op): reset_op() def forward(self,", "= nn.Linear(set_features*2, 30) self.l2 = LogLinear(set_features*2, 30) self.lp = nn.ReLU() self.regressor = nn.Sequential(", "self.regressor = nn.Sequential( nn.Linear(set_features, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512, 512), nn.ELU(inplace=True), nn.Linear(512,", "= in_features self.out_features = set_features self.feature_extractor = nn.Sequential( nn.Linear(in_features, 50), nn.ELU(inplace=True), 
nn.Linear(50, 100),", "= x2.log() x = torch.cat((x1, x2), 2) x = x.sum(dim=1) x1 = self.l1(x)", "Exctractor=' + str(self.feature_extractor) \\ + '\\n Set Feature' + str(self.regressor) + ')' class", "torch.cat((x1, x2), 1) x = self.regressor(x) return x def __repr__(self): return self.__class__.__name__ +", "input): x = input x = self.feature_extractor(x) x = x.sum(dim=1) x = self.regressor(x)", "+ str(self.feature_extractor) \\ + '\\n Set Feature' + str(self.regressor) + ')' class DeepSet1(nn.Module):", "nn.Linear(60, 30), nn.ELU(inplace=True), nn.Linear(30, 10), nn.ELU(inplace=True), nn.Linear(10, 1), ) self.add_module('0', self.feature_extractor) self.add_module('1', self.regressor)", "set_features), nn.ReLU(inplace=True) ) self.l1 = nn.Linear(set_features*2, 30) self.l2 = LogLinear(set_features*2, 30) self.lp =", "x2 = x2.log() x = torch.cat((x1, x2), 2) x = x.sum(dim=1) x =", "str(self.feature_extractor) \\ + '\\n Set Feature' + str(self.regressor) + ')' class DeepSet3(nn.Module): def", "\\ + '\\n Set Feature' + str(self.regressor) + ')' class DeepSet2(nn.Module): def __init__(self,", "def reset_parameters(self): for module in self.children(): reset_op = getattr(module, \"reset_parameters\", None) if callable(reset_op):", "return x def __repr__(self): return self.__class__.__name__ + '(' \\ + 'Feature Exctractor=' +" ]
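# Usage sketch (illustrative only, not part of the original module): the extractors
# above expect a 3-D batch of sets shaped (batch, set_size, in_features); summing over
# dim=1 makes the prediction invariant to the ordering of elements within each set.
# The sizes below are made-up example values.
if __name__ == "__main__":
    batch, set_size, in_features = 4, 10, 2
    model = DeepSet(in_features)
    sets = torch.randn(batch, set_size, in_features)
    out = model(sets)                 # shape: (batch, 1)
    print(out.shape)
    # shuffling elements within each set leaves the output (numerically) unchanged
    perm = torch.randperm(set_size)
    print(torch.allclose(out, model(sets[:, perm, :]), atol=1e-5))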
# global_finprint set and benthic habitat models
from decimal import Decimal
from collections import Counter

from django.contrib.gis.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.urlresolvers import reverse
from django.contrib.gis.geos import Point

from global_finprint.annotation.models.observation import Observation, MasterRecord
from global_finprint.annotation.models.video import Video, Assignment
from global_finprint.core.version import VersionInfo
from global_finprint.core.models import AuditableModel
from global_finprint.trip.models import Trip
from global_finprint.habitat.models import ReefHabitat, Substrate, SubstrateComplexity
from mptt.models import MPTTModel, TreeForeignKey
from django.contrib.postgres.fields import ArrayField, JSONField


# todo: move some of these out to the db?
EQUIPMENT_BAIT_CONTAINER = {
    ('B', 'Bag'),
    ('C', 'Cage'),
}
CURRENT_DIRECTION = {
    ('N', 'North'),
    ('NE', 'Northeast'),
    ('E', 'East'),
    ('SE', 'Southeast'),
    ('S', 'South'),
    ('SW', 'Southwest'),
    ('W', 'West'),
    ('NW', 'Northwest'),
}
TIDE_CHOICES = {
    ('F', 'Flood'),
    ('E', 'Ebb'),
    ('S', 'Slack'),
    ('S2F', 'Slack to Flood'),
    ('S2E', 'Slack to Ebb'),
}
SURFACE_CHOP_CHOICES = {
    ('L', 'Light'),
    ('M', 'Medium'),
    ('H', 'Heavy'),
}
BAIT_TYPE_CHOICES = {
    ('CHP', 'Chopped'),
    ('CRS', 'Crushed'),
    ('WHL', 'Whole'),
}
VISIBILITY_CHOICES = {
    ('V0-2', 'V0-2'),
    ('V2-4', 'V2-4'),
    ('V4-6', 'V4-6'),
    ('V6-8', 'V6-8'),
    ('V8-10', 'V8-10'),
    ('V10+', 'V10+')
}
FIELD_OF_VIEW_CHOICES = {
    ('FU', 'Facing Up'),
    ('FD', 'Facing Down'),
    ('L', 'Limited'),
    ('O', 'Open')
}


class BaitContainer(models.Model):
    # starting seed: cage, bag
    type = models.CharField(max_length=32)

    def __str__(self):
        return u"{0}".format(self.type)


class FrameType(models.Model):
    # starting seed: rebar, stainless rebar, PVC, mixed
    type = models.CharField(max_length=32)
    image = models.ImageField(null=True, blank=True)

    def __str__(self):
        return u"{0}".format(self.type)


class Equipment(AuditableModel):
    camera = models.CharField(max_length=32)
    stereo = models.BooleanField(default=False)
    frame_type = models.ForeignKey(to=FrameType)
    container = models.ForeignKey(to=BaitContainer)
    arm_length = models.PositiveIntegerField(null=True, help_text='centimeters')
    camera_height = models.PositiveIntegerField(null=True, help_text='centimeters')

    def __str__(self):
        return u"{0} / {1} / {2}{3}".format(self.frame_type.type, self.container.type,
                                            self.camera, ' (Stereo)' if self.stereo else '')

    class Meta:
        verbose_name_plural = "Equipment"
        ordering = ['frame_type__type', 'container__type', 'camera']


class EnvironmentMeasure(AuditableModel):
    water_temperature = models.DecimalField(null=True, blank=True, max_digits=4, decimal_places=1, help_text='C')  # C
    salinity = models.DecimalField(null=True, blank=True, decimal_places=2, help_text='ppt')  # ppt .0
    conductivity = models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=2, help_text='S/m')  # S/m .00
    dissolved_oxygen = models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=1)
    current_flow = models.DecimalField(null=True, blank=True, max_digits=5, decimal_places=2, help_text='m/s')  # m/s .00
    current_direction = models.CharField(max_length=2, null=True, blank=True,
                                         choices=CURRENT_DIRECTION, help_text='compass direction')  # eight point compass
    estimated_wind_speed = models.IntegerField(null=True, blank=True, help_text='Beaufort')
    measured_wind_speed = models.IntegerField(null=True, blank=True, help_text='kts')
    wind_direction = models.CharField(max_length=2, null=True, blank=True,
                                      choices=CURRENT_DIRECTION, help_text='compass direction')  # eight point compass
    cloud_cover = models.IntegerField(null=True, blank=True, help_text='%')  # percentage
    surface_chop = models.CharField(max_length=1, null=True, blank=True, choices=SURFACE_CHOP_CHOICES)

    def __str__(self):
        return u'{0} {1}'.format('Env measure for', str(self.set))


class Bait(AuditableModel):
    description = models.CharField(max_length=32, help_text='1kg')
    type = models.CharField(max_length=3, choices=BAIT_TYPE_CHOICES)
    oiled = models.BooleanField(default=False, help_text='20ml menhaden oil')

    def __str__(self):
        return u'{0} {1}{2}'.format(self.get_type_display(), self.description,
                                    ' (m)' if self.oiled else '')

    class Meta:
        unique_together = ('description', 'type', 'oiled')


# needed for SetTag#get_choices because python doesn't have this somehow (!!!)
def flatten(x):
    if type(x) is list:
        return [a for i in x for a in flatten(i)]
    else:
        return [x]


class SetTag(MPTTModel):
    name = models.CharField(max_length=50, unique=True)
    description = models.TextField(null=True, blank=True)
    active = models.BooleanField(default=True, help_text='overridden if parent is inactive')
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)

    class MPTTMeta:
        order_insertion_by = ['name']

    def __str__(self):
        return u"{0}".format(self.name)

    @classmethod
    def get_choices(cls, node=None):
        if node is None:
            nodes = [cls.get_choices(node=node) for node in cls.objects.filter(parent=None, active=True)]
            return [(node.pk, node.name) for node in flatten(nodes)]
        elif node.is_leaf_node():
            return node
        else:
            return [node] + [cls.get_choices(node=node) for node in node.get_children().filter(active=True)]


class BenthicCategory(MPTTModel):
    name = models.CharField(max_length=50, unique=True)
    description = models.TextField(null=True, blank=True)
    active = models.BooleanField(default=True, help_text='overridden if parent is inactive')
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)

    class MPTTMeta:
        order_insertion_by = ['name']

    def __str__(self):
        return u"{0}".format(self.name)

    class Meta:
        verbose_name_plural = 'benthic categories'


class Set(AuditableModel):
    # suggested code pattern:
    # [site.code][reef.code]_[set number within reef]
    code = models.CharField(max_length=32, db_index=True, help_text='[site + reef code]_xxx', null=True, blank=True)
    set_date = models.DateField()
    coordinates = models.PointField(null=True)
    latitude = models.DecimalField(max_digits=12, decimal_places=8)
    longitude = models.DecimalField(max_digits=12, decimal_places=8)
    drop_time = models.TimeField()
    haul_date = models.DateField(null=True, blank=True)
    haul_time = models.TimeField(null=True, blank=True)
    depth = models.DecimalField(help_text='m', decimal_places=2, max_digits=12,
                                validators=[MinValueValidator(Decimal('0.01'))])
    comments = models.TextField(null=True, blank=True)
    message_to_annotators = models.TextField(null=True, blank=True)
    tags = models.ManyToManyField(to=SetTag)
    current_flow_estimated = models.CharField(max_length=50, null=True, blank=True, help_text='H, M, L')
    current_flow_instrumented = models.DecimalField(null=True, blank=True, max_digits=5, decimal_places=2, help_text='m/s')  # m/s .00
    bruv_image_url = models.CharField(max_length=200, null=True, blank=True)
    splendor_image_url = models.CharField(max_length=200, null=True, blank=True)
    benthic_category = models.ManyToManyField(BenthicCategory, through='BenthicCategoryValue')

    # new fields
    substrate_relief_mean = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12)
    substrate_relief_sd = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12)
    visibility = models.CharField(db_column='visibility_str', max_length=10, null=True, blank=True, choices=VISIBILITY_CHOICES)
    field_of_view = models.CharField(max_length=10, null=True, blank=True, choices=FIELD_OF_VIEW_CHOICES)
    custom_field_value = JSONField(db_column='custom_fields', null=True)

    # todo: need some form changes here ...
    bait = models.ForeignKey(Bait, null=True)
    equipment = models.ForeignKey(Equipment)
    reef_habitat = models.ForeignKey(ReefHabitat, blank=True)
    trip = models.ForeignKey(Trip)
    drop_measure = models.OneToOneField(EnvironmentMeasure, on_delete=models.CASCADE, null=True, related_name='drop_parent_set')
    haul_measure = models.OneToOneField(EnvironmentMeasure, on_delete=models.CASCADE, null=True, related_name='haul_parent_set')
    video = models.OneToOneField(Video, on_delete=models.CASCADE, null=True, related_name='set')
    bulk_loaded = models.BooleanField(default=False)

    class Meta:
        unique_together = ('trip', 'code')

    @property
    def environmentmeasure_set(self):
        return [x for x in [self.haul_measure, self.drop_measure] if x is not None]

    @property
    def next_by_code(self):
        return self.trip.get_next_set_by_code(self.code)

    def save(self, *args, **kwargs):
        # todo: we're assuming the input is latitude & longitude! this should be checked!
        self.coordinates = Point(float(self.longitude), float(self.latitude))
        if not self.code:  # set code if it hasn't been set
            self.code = u'{}{}_xxx'.format(self.reef().site.code, self.reef().code)
        super(Set, self).save(*args, **kwargs)
        self.refresh_from_db()
        if self.code == u'{}{}_xxx'.format(self.reef().site.code, self.reef().code):
            next_id = str(len(Set.objects.filter(trip=self.trip, reef_habitat__reef=self.reef()))).zfill(3)
            self.code = self.code.replace('_xxx', u'_{}'.format(next_id))
            super(Set, self).save(*args, **kwargs)

    def reef(self):
        return self.reef_habitat.reef

    def get_absolute_url(self):
        return reverse('set_update', args=[str(self.id)])

    def observations(self):
        if self.video:
            return Observation.objects.filter(assignment__in=self.video.assignment_set.all())

    def habitat_filename(self, image_type):
        server_env = VersionInfo.get_server_env()
        return '/{0}/{1}/{2}/{3}.png'.format(server_env, self.trip.code, self.code, image_type)

    # todo: "property-ize" this?
    def master(self, project=1):
        try:
            return MasterRecord.objects.get(set=self, project_id=project)
        except MasterRecord.DoesNotExist:
            return None

    def assignment_counts(self, project=1):
        status_list = {'Total': 0}
        if self.video:
            status_list.update(Counter(Assignment.objects.filter(
                video=self.video, project=project).values_list('status__id', flat=True)))
            status_list['Total'] = sum(status_list.values())
        return status_list

    def required_fields(self):
        # need to make this data-driven, not hard-coded field choices
        # currently required:
        # 1) visibility
        # 2) current flow (either)
        # 3) substrate
        # 4) substrate complexity
        return bool(self.visibility and (self.current_flow_estimated or self.current_flow_instrumented))

    def completed(self):
        # we consider the following for "completion":
        # 1) complete annotations have been promoted into a master
        # 2) a master annotation record has been completed
        # 3) other 'required' fields have been completed (see above)
        master = self.master()
        return master \
            and (master.status.is_finished) \
            and self.required_fields()

    def __str__(self):
        return u"{0}_{1}".format(self.trip.code, self.code)


class BenthicCategoryValue(models.Model):
    set = models.ForeignKey(Set)
    benthic_category = TreeForeignKey(BenthicCategory)
MinValueValidator, MaxValueValidator from django.core.urlresolvers import reverse from django.contrib.gis.geos import Point from", "master annotation record has been completed # 3) other 'required' fields have been", "models.ForeignKey(Equipment) reef_habitat = models.ForeignKey(ReefHabitat, blank=True) trip = models.ForeignKey(Trip) drop_measure = models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE,", "master = self.master() return master \\ and (master.status.is_finished) \\ and self.required_fields() def __str__(self):", "('V4-6', 'V4-6'), ('V6-8', 'V6-8'), ('V8-10', 'V8-10'), ('V10+', 'V10+') } FIELD_OF_VIEW_CHOICES = { ('FU',", "global_finprint.habitat.models import ReefHabitat, Substrate, SubstrateComplexity from mptt.models import MPTTModel, TreeForeignKey from django.contrib.postgres.fields import", "= models.CharField(max_length=2, null=True, blank=True, choices=CURRENT_DIRECTION, help_text='compass direction') # eight point compass cloud_cover =", "+ [cls.get_choices(node=node) for node in node.get_children().filter(active=True)] class BenthicCategory(MPTTModel): name = models.CharField(max_length=50, unique=True) description", "elif node.is_leaf_node(): return node else: return [node] + [cls.get_choices(node=node) for node in node.get_children().filter(active=True)]", "decimal_places=2, help_text='m/s') # m/s .00 bruv_image_url = models.CharField(max_length=200, null=True, blank=True) splendor_image_url = models.CharField(max_length=200,", "('E', 'Ebb'), ('S', 'Slack'), ('S2F', 'Slack to Flood'), ('S2E', 'Slack to Ebb'), }", "unique_together = ('description', 'type', 'oiled') # needed for SetTag#get_choices because python doesn't have", "2) a master annotation record has been completed # 3) other 'required' fields", "('SE', 'Southeast'), ('S', 'South'), ('SW', 'Southwest'), ('W', 'West'), ('NW', 'Northwest'), } TIDE_CHOICES =", "blank=True) benthic_category = models.ManyToManyField(BenthicCategory, through='BenthicCategoryValue') # new fields substrate_relief_mean = models.DecimalField(null=True, blank=True, decimal_places=4,", "EQUIPMENT_BAIT_CONTAINER = { ('B', 'Bag'), ('C', 'Cage'), } CURRENT_DIRECTION = { ('N', 'North'),", "rebar, PVC, mixed type = models.CharField(max_length=32) image = models.ImageField(null=True, blank=True) def __str__(self): return", "out ot the db? 
# todo: move some of these out to the db?
EQUIPMENT_BAIT_CONTAINER = {
    ('B', 'Bag'),
    ('C', 'Cage'),
}
CURRENT_DIRECTION = {
    ('N', 'North'),
    ('NE', 'Northeast'),
    ('E', 'East'),
    ('SE', 'Southeast'),
    ('S', 'South'),
    ('SW', 'Southwest'),
    ('W', 'West'),
    ('NW', 'Northwest'),
}
TIDE_CHOICES = {
    ('F', 'Flood'),
    ('E', 'Ebb'),
    ('S', 'Slack'),
    ('S2F', 'Slack to Flood'),
    ('S2E', 'Slack to Ebb'),
}
SURFACE_CHOP_CHOICES = {
    ('L', 'Light'),
    ('M', 'Medium'),
    ('H', 'Heavy'),
}
BAIT_TYPE_CHOICES = {
    ('CHP', 'Chopped'),
    ('CRS', 'Crushed'),
    ('WHL', 'Whole'),
}
VISIBILITY_CHOICES = {
    ('V0-2', 'V0-2'),
    ('V2-4', 'V2-4'),
    ('V4-6', 'V4-6'),
    ('V6-8', 'V6-8'),
    ('V8-10', 'V8-10'),
    ('V10+', 'V10+'),
}
FIELD_OF_VIEW_CHOICES = {
    ('FU', 'Facing Up'),
    ('FD', 'Facing Down'),
    ('L', 'Limited'),
    ('O', 'Open'),
}

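# Note (illustrative, not part of the original source): each constant above is a collection of
# (stored value, display label) pairs handed to a field's `choices=` argument further down, e.g.
# EnvironmentMeasure.tide_state uses TIDE_CHOICES and Set.visibility uses VISIBILITY_CHOICES.
# Django surfaces the label side through the standard `get_<field>_display()` accessor.
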
class BaitContainer(models.Model):
    # starting seed: cage, bag
    type = models.CharField(max_length=32)

    def __str__(self):
        return u"{0}".format(self.type)


class FrameType(models.Model):
    # starting seed: rebar, stainless rebar, PVC, mixed
    type = models.CharField(max_length=32)
    image = models.ImageField(null=True, blank=True)

    def __str__(self):
        return u"{0}".format(self.type)


class Equipment(AuditableModel):
    camera = models.CharField(max_length=32)
    stereo = models.BooleanField(default=False)
    frame_type = models.ForeignKey(to=FrameType)
    container = models.ForeignKey(to=BaitContainer)
    arm_length = models.PositiveIntegerField(null=True, help_text='centimeters')
    camera_height = models.PositiveIntegerField(null=True, help_text='centimeters')

    def __str__(self):
        return u"{0} / {1} / {2}{3}".format(
            self.frame_type.type,
            self.container.type,
            self.camera,
            ' (Stereo)' if self.stereo else '')

    class Meta:
        verbose_name_plural = "Equipment"
        ordering = ['frame_type__type', 'container__type', 'camera']

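# Illustrative example (hypothetical values, not taken from any real fixture): an Equipment row
# with frame_type.type='rebar', container.type='Cage', camera='GoPro' and stereo=True renders via
# __str__ above as
#   "rebar / Cage / GoPro (Stereo)"
# and the trailing ' (Stereo)' marker is omitted when stereo is False.
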
class EnvironmentMeasure(AuditableModel):
    water_temperature = models.DecimalField(null=True, blank=True,
                                            max_digits=4, decimal_places=1,
                                            help_text='C')  # C
    salinity = models.DecimalField(null=True, blank=True,
                                   max_digits=4, decimal_places=2,
                                   help_text='ppt')  # ppt .0
    conductivity = models.DecimalField(null=True, blank=True,
                                       max_digits=8, decimal_places=2,
                                       help_text='S/m')  # S/m .00
    dissolved_oxygen = models.DecimalField(null=True, blank=True,
                                           max_digits=8, decimal_places=1)
    current_flow = models.DecimalField(null=True, blank=True,
                                       max_digits=5, decimal_places=2,
                                       help_text='m/s')  # m/s .00
    current_direction = models.CharField(max_length=2, null=True, blank=True,
                                         choices=CURRENT_DIRECTION,
                                         help_text='compass direction')  # eight point compass
    tide_state = models.CharField(max_length=3, null=True, blank=True, choices=TIDE_CHOICES)
    estimated_wind_speed = models.IntegerField(null=True, blank=True, help_text='Beaufort')
    measured_wind_speed = models.IntegerField(null=True, blank=True, help_text='kts')
    wind_direction = models.CharField(max_length=2, null=True, blank=True,
                                      choices=CURRENT_DIRECTION,
                                      help_text='compass direction')  # eight point compass
    cloud_cover = models.IntegerField(null=True, blank=True, help_text='%')  # percentage
    surface_chop = models.CharField(max_length=1, null=True, blank=True, choices=SURFACE_CHOP_CHOICES)

    def __str__(self):
        return u'{0} {1}'.format('Env measure for', str(self.set))


class Bait(AuditableModel):
    description = models.CharField(max_length=32, help_text='1kg')
    type = models.CharField(max_length=3, choices=BAIT_TYPE_CHOICES)
    oiled = models.BooleanField(default=False, help_text='20ml menhaden oil')

    def __str__(self):
        return u'{0} {1}{2}'.format(self.get_type_display(), self.description, ' (m)' if self.oiled else '')

    class Meta:
        unique_together = ('description', 'type', 'oiled')

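# Illustrative example (hypothetical values): Bait(description='1kg sardines', type='CHP', oiled=True)
# renders as "Chopped 1kg sardines (m)" -- get_type_display() resolves 'CHP' to 'Chopped' through
# BAIT_TYPE_CHOICES, and the ' (m)' suffix is appended only when oiled is True.
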
# needed for SetTag#get_choices because python doesn't have this somehow (!!!)
def flatten(x):
    if type(x) is list:
        return [a for i in x for a in flatten(i)]
    else:
        return [x]


class SetTag(MPTTModel):
    name = models.CharField(max_length=50, unique=True)
    description = models.TextField(null=True, blank=True)
    active = models.BooleanField(default=True, help_text='overridden if parent is inactive')
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)

    class MPTTMeta:
        order_insertion_by = ['name']

    def __str__(self):
        return u"{0}".format(self.name)

    @classmethod
    def get_choices(cls, node=None):
        if node is None:
            nodes = [cls.get_choices(node=node) for node in cls.objects.filter(parent=None, active=True)]
            return [(node.pk, node.name) for node in flatten(nodes)]
        elif node.is_leaf_node():
            return node
        else:
            return [node] + [cls.get_choices(node=node) for node in node.get_children().filter(active=True)]


class BenthicCategory(MPTTModel):
    name = models.CharField(max_length=50, unique=True)
    description = models.TextField(null=True, blank=True)
    active = models.BooleanField(default=True, help_text='overridden if parent is inactive')
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)

    class MPTTMeta:
        order_insertion_by = ['name']

    def __str__(self):
        return u"{0}".format(self.name)

    class Meta:
        verbose_name_plural = 'benthic categories'

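# Sketch of what SetTag.get_choices() produces, assuming a small hypothetical tag tree: a root tag
# "Weather" (pk=1) with active children "Storm" (pk=2) and "Calm" (pk=3) yields
#   [(1, 'Weather'), (2, 'Storm'), (3, 'Calm')]
# The recursive branch builds nested lists of nodes (parent followed by its subtree) and flatten()
# collapses them into a flat list of (pk, name) tuples suitable for a form field's choices.
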
class Set(AuditableModel):
    # suggested code pattern:
    # [site.code][reef.code]_[set number within reef]
    code = models.CharField(max_length=32, db_index=True, help_text='[site + reef code]_xxx',
                            null=True, blank=True)

    set_date = models.DateField()
    coordinates = models.PointField(null=True)
    latitude = models.DecimalField(max_digits=12, decimal_places=8)
    longitude = models.DecimalField(max_digits=12, decimal_places=8)
    drop_time = models.TimeField()
    haul_date = models.DateField(null=True, blank=True)
    haul_time = models.TimeField(null=True, blank=True)
    depth = models.DecimalField(help_text='m', decimal_places=2, max_digits=12,
                                validators=[MinValueValidator(Decimal('0.01'))])
    comments = models.TextField(null=True, blank=True)
    message_to_annotators = models.TextField(null=True, blank=True)
    tags = models.ManyToManyField(to=SetTag)

    current_flow_estimated = models.CharField(max_length=50, null=True, blank=True, help_text='H, M, L')
    current_flow_instrumented = models.DecimalField(null=True, blank=True, max_digits=5, decimal_places=2,
                                                    help_text='m/s')  # m/s .00

    bruv_image_url = models.CharField(max_length=200, null=True, blank=True)
    splendor_image_url = models.CharField(max_length=200, null=True, blank=True)

    benthic_category = models.ManyToManyField(BenthicCategory, through='BenthicCategoryValue')

    # new fields
    substrate_relief_mean = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12)
    substrate_relief_sd = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12)
    visibility = models.CharField(db_column='visibility_str', max_length=10,
                                  null=True, blank=True, choices=VISIBILITY_CHOICES)
    field_of_view = models.CharField(max_length=10, null=True, blank=True, choices=FIELD_OF_VIEW_CHOICES)
    custom_field_value = JSONField(db_column='custom_fields', null=True)

    # todo: need some form changes here ...
    bait = models.ForeignKey(Bait, null=True)
    equipment = models.ForeignKey(Equipment)
    reef_habitat = models.ForeignKey(ReefHabitat, blank=True)
    trip = models.ForeignKey(Trip)

    drop_measure = models.OneToOneField(
        EnvironmentMeasure,
        on_delete=models.CASCADE,
        null=True,
        related_name='drop_parent_set')
    haul_measure = models.OneToOneField(
        EnvironmentMeasure,
        on_delete=models.CASCADE,
        null=True,
        related_name='haul_parent_set')
    video = models.OneToOneField(
        Video,
        on_delete=models.CASCADE,
        null=True,
        related_name='set'
    )

    bulk_loaded = models.BooleanField(default=False)

    class Meta:
        unique_together = ('trip', 'code')

    @property
    def environmentmeasure_set(self):
        return [x for x in [self.haul_measure, self.drop_measure] if x is not None]

    @property
    def next_by_code(self):
        return self.trip.get_next_set_by_code(self.code)

    def save(self, *args, **kwargs):
        # todo: we're assuming the input is latitude & longitude! this should be checked!
        self.coordinates = Point(float(self.longitude), float(self.latitude))
        if not self.code:  # set code if it hasn't been set
            self.code = u'{}{}_xxx'.format(self.reef().site.code, self.reef().code)
        super(Set, self).save(*args, **kwargs)
        self.refresh_from_db()
        if self.code == u'{}{}_xxx'.format(self.reef().site.code, self.reef().code):
            next_id = str(len(Set.objects.filter(trip=self.trip, reef_habitat__reef=self.reef()))).zfill(3)
            self.code = self.code.replace('_xxx', u'_{}'.format(next_id))
            super(Set, self).save(*args, **kwargs)

    def reef(self):
        return self.reef_habitat.reef

    def get_absolute_url(self):
        return reverse('set_update', args=[str(self.id)])

    def observations(self):
        if self.video:
            return Observation.objects.filter(assignment__in=self.video.assignment_set.all())

    def habitat_filename(self, image_type):
        server_env = VersionInfo.get_server_env()
        return '/{0}/{1}/{2}/{3}.png'.format(server_env, self.trip.code, self.code, image_type)

    # todo: "property-ize" this?
    def master(self, project=1):
        try:
            return MasterRecord.objects.get(set=self, project_id=project)
        except MasterRecord.DoesNotExist:
            return None

    def assignment_counts(self, project=1):
        status_list = {'Total': 0}
        if self.video:
            status_list.update(Counter(Assignment.objects.filter(
                video=self.video, project=project).values_list('status__id', flat=True)))
        status_list['Total'] = sum(status_list.values())
        return status_list

    def required_fields(self):
        # need to make this data-driven, not hard-coded field choices
        # currently required:
        # 1) visibility
        # 2) current flow (either)
        # 3) substrate
        # 4) substrate complexity
        return bool(self.visibility and (self.current_flow_estimated or self.current_flow_instrumented))

    def completed(self):
        # we consider the following for "completion":
        # 1) complete annotations have been promoted into a master
        # 2) a master annotation record has been completed
        # 3) other 'required' fields have been completed (see above)
        master = self.master()
        return master \
            and (master.status.is_finished) \
            and self.required_fields()

    def __str__(self):
        return u"{0}_{1}".format(self.trip.code, self.code)

def master(self, project=1):", "SetTag(MPTTModel): name = models.CharField(max_length=50, unique=True) description = models.TextField(null=True, blank=True) active = models.BooleanField( default=True,", "master # 2) a master annotation record has been completed # 3) other", "('trip', 'code') @property def environmentmeasure_set(self): return [x for x in [self.haul_measure, self.drop_measure] if", "{ ('B', 'Bag'), ('C', 'Cage'), } CURRENT_DIRECTION = { ('N', 'North'), ('NE', 'Northeast'),", "['frame_type__type', 'container__type', 'camera'] class EnvironmentMeasure(AuditableModel): water_temperature = models.DecimalField(null=True, blank=True, max_digits=4, decimal_places=1, help_text='C') #", "models.BooleanField( default=True, help_text='overridden if parent is inactive') parent = TreeForeignKey('self', null=True, blank=True, related_name='children',", "= models.TimeField(null=True, blank=True) depth = models.DecimalField(help_text='m', decimal_places=2, max_digits=12, validators=[MinValueValidator(Decimal('0.01'))]) comments = models.TextField(null=True, blank=True)", "current_direction = models.CharField(max_length=2, null=True, blank=True, choices=CURRENT_DIRECTION, help_text='compass direction') # eight point compass tide_state", "node is None: nodes = [cls.get_choices(node=node) for node in cls.objects.filter(parent=None, active=True)] return [(node.pk,", "substrate complexity return bool(self.visibility and (self.current_flow_estimated or self.current_flow_instrumented)) def completed(self): # we consider", "node in cls.objects.filter(parent=None, active=True)] return [(node.pk, node.name) for node in flatten(nodes)] elif node.is_leaf_node():", "'V10+') } FIELD_OF_VIEW_CHOICES = { ('FU', 'Facing Up'), ('FD', 'Facing Down'), ('L', 'Limited'),", "decimal_places=2, help_text='S/m') # S/m .00 dissolved_oxygen = models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=1) current_flow =", "self.oiled else '') class Meta: unique_together = ('description', 'type', 'oiled') # needed for", "models.CharField(max_length=200, null=True, blank=True) benthic_category = models.ManyToManyField(BenthicCategory, through='BenthicCategoryValue') # new fields substrate_relief_mean = models.DecimalField(null=True,", "completed(self): # we consider the following for \"completion\": # 1) complete annotations have", "annotation record has been completed # 3) other 'required' fields have been completed", ".00 bruv_image_url = models.CharField(max_length=200, null=True, blank=True) splendor_image_url = models.CharField(max_length=200, null=True, blank=True) benthic_category =", "class MPTTMeta: order_insertion_by = ['name'] def __str__(self): return u\"{0}\".format(self.name) class Meta: verbose_name_plural =", "class Meta: unique_together = ('description', 'type', 'oiled') # needed for SetTag#get_choices because python", "('SW', 'Southwest'), ('W', 'West'), ('NW', 'Northwest'), } TIDE_CHOICES = { ('F', 'Flood'), ('E',", "direction') # eight point compass cloud_cover = models.IntegerField(null=True, blank=True, help_text='%') # percentage surface_chop", "set_date = models.DateField() coordinates = models.PointField(null=True) latitude = models.DecimalField(max_digits=12, decimal_places=8) longitude = models.DecimalField(max_digits=12,", "None: nodes = [cls.get_choices(node=node) for node in cls.objects.filter(parent=None, active=True)] return [(node.pk, node.name) for", "a in flatten(i)] else: return [x] class SetTag(MPTTModel): name = models.CharField(max_length=50, unique=True) description", 
"else '') class Meta: unique_together = ('description', 'type', 'oiled') # needed for SetTag#get_choices", "= models.IntegerField(null=True, blank=True, help_text='kts') wind_direction = models.CharField(max_length=2, null=True, blank=True, choices=CURRENT_DIRECTION, help_text='compass direction') #", "models.CharField(max_length=2, null=True, blank=True, choices=CURRENT_DIRECTION, help_text='compass direction') # eight point compass tide_state = models.CharField(max_length=3,", "measure for',str(self.set)) class Bait(AuditableModel): description = models.CharField(max_length=32, help_text='1kg') type = models.CharField(max_length=3, choices=BAIT_TYPE_CHOICES) oiled", "models.DecimalField(null=True, blank=True, max_digits=4, decimal_places=2, help_text='ppt') # ppt .0 conductivity = models.DecimalField(null=True, blank=True, max_digits=8,", "'Slack to Flood'), ('S2E', 'Slack to Ebb'), } SURFACE_CHOP_CHOICES = { ('L', 'Light'),", "= models.CharField(max_length=3, choices=BAIT_TYPE_CHOICES) oiled = models.BooleanField(default=False, help_text='20ml menhaden oil') def __str__(self): return u'{0}", "('W', 'West'), ('NW', 'Northwest'), } TIDE_CHOICES = { ('F', 'Flood'), ('E', 'Ebb'), ('S',", "flatten(i)] else: return [x] class SetTag(MPTTModel): name = models.CharField(max_length=50, unique=True) description = models.TextField(null=True,", "# suggested code pattern: # [site.code][reef.code]_[set number within reef] code = models.CharField(max_length=32, db_index=True,", "'V2-4'), ('V4-6', 'V4-6'), ('V6-8', 'V6-8'), ('V8-10', 'V8-10'), ('V10+', 'V10+') } FIELD_OF_VIEW_CHOICES = {", "= str(len(Set.objects.filter(trip=self.trip, reef_habitat__reef=self.reef()))).zfill(3) self.code = self.code.replace('_xxx', u'_{}'.format(next_id)) super(Set, self).save(*args, **kwargs) def reef(self): return", "data-driven, not hard-coded field choices # currently required: # 1) visibility # 2)", "type = models.CharField(max_length=3, choices=BAIT_TYPE_CHOICES) oiled = models.BooleanField(default=False, help_text='20ml menhaden oil') def __str__(self): return", "habitat_filename(self, image_type): server_env = VersionInfo.get_server_env() return '/{0}/{1}/{2}/{3}.png'.format(server_env, self.trip.code, self.code, image_type) # todo: \"property-ize\"", "pattern: # [site.code][reef.code]_[set number within reef] code = models.CharField(max_length=32, db_index=True, help_text='[site + reef", "= ['frame_type__type', 'container__type', 'camera'] class EnvironmentMeasure(AuditableModel): water_temperature = models.DecimalField(null=True, blank=True, max_digits=4, decimal_places=1, help_text='C')", "blank=True, max_digits=5, decimal_places=2, help_text='m/s') # m/s .00 bruv_image_url = models.CharField(max_length=200, null=True, blank=True) splendor_image_url", "field choices # currently required: # 1) visibility # 2) current flow (either)", "Counter from django.contrib.gis.db import models from django.core.validators import MinValueValidator, MaxValueValidator from django.core.urlresolvers import", "return Observation.objects.filter(assignment__in=self.video.assignment_set.all()) def habitat_filename(self, image_type): server_env = VersionInfo.get_server_env() return '/{0}/{1}/{2}/{3}.png'.format(server_env, self.trip.code, self.code, image_type)", "__str__(self): return u'{0} {1}{2}'.format(self.get_type_display(), self.description, ' (m)' if self.oiled else '') class Meta:", "<filename>global_finprint/bruv/models.py from decimal import Decimal from collections import Counter from django.contrib.gis.db import models", 
"unique=True) description = models.TextField(null=True, blank=True) active = models.BooleanField( default=True, help_text='overridden if parent is", "= ['name'] def __str__(self): return u\"{0}\".format(self.name) class Meta: verbose_name_plural = 'benthic categories' class", "flow (either) # 3) substrate # 4) substrate complexity return bool(self.visibility and (self.current_flow_estimated", "return self.reef_habitat.reef def get_absolute_url(self): return reverse('set_update', args=[str(self.id)]) def observations(self): if self.video: return Observation.objects.filter(assignment__in=self.video.assignment_set.all())", "if self.video: return Observation.objects.filter(assignment__in=self.video.assignment_set.all()) def habitat_filename(self, image_type): server_env = VersionInfo.get_server_env() return '/{0}/{1}/{2}/{3}.png'.format(server_env, self.trip.code,", "models.OneToOneField( Video, on_delete=models.CASCADE, null=True, related_name='set' ) bulk_loaded = models.BooleanField(default=False) class Meta: unique_together =", "completed (see above) master = self.master() return master \\ and (master.status.is_finished) \\ and", "('L', 'Limited'), ('O', 'Open') } class BaitContainer(models.Model): # starting seed: cage, bag type", "return u\"{0} / {1} / {2}{3}\".format(self.frame_type.type, self.container.type, self.camera, ' (Stereo)' if self.stereo else", "Ebb'), } SURFACE_CHOP_CHOICES = { ('L', 'Light'), ('M', 'Medium'), ('H', 'Heavy'), } BAIT_TYPE_CHOICES", "'North'), ('NE', 'Northeast'), ('E', 'East'), ('SE', 'Southeast'), ('S', 'South'), ('SW', 'Southwest'), ('W', 'West'),", "frame_type = models.ForeignKey(to=FrameType) container = models.ForeignKey(to=BaitContainer) arm_length = models.PositiveIntegerField(null=True, help_text='centimeters') camera_height = models.PositiveIntegerField(null=True,", "max_digits=12) visibility = models.CharField(db_column='visibility_str', max_length=10, null=True, blank=True, choices=VISIBILITY_CHOICES) field_of_view = models.CharField(max_length=10, null=True, blank=True,", "= models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE, null=True, related_name='haul_parent_set') video = models.OneToOneField( Video, on_delete=models.CASCADE, null=True, related_name='set'", "eight point compass cloud_cover = models.IntegerField(null=True, blank=True, help_text='%') # percentage surface_chop = models.CharField(max_length=1,", "MasterRecord.objects.get(set=self, project_id=project) except MasterRecord.DoesNotExist: return None def assignment_counts(self, project=1): status_list = {'Total': 0}", "= models.ForeignKey(to=BaitContainer) arm_length = models.PositiveIntegerField(null=True, help_text='centimeters') camera_height = models.PositiveIntegerField(null=True, help_text='centimeters') def __str__(self): return", "related_name='children', db_index=True) class MPTTMeta: order_insertion_by = ['name'] def __str__(self): return u\"{0}\".format(self.name) @classmethod def", "= models.ForeignKey(Trip) drop_measure = models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE, null=True, related_name='drop_parent_set') haul_measure = models.OneToOneField( EnvironmentMeasure,", "('V10+', 'V10+') } FIELD_OF_VIEW_CHOICES = { ('FU', 'Facing Up'), ('FD', 'Facing Down'), ('L',", "null=True, related_name='set' ) bulk_loaded = models.BooleanField(default=False) class Meta: unique_together = ('trip', 'code') @property", "0} if self.video: status_list.update(Counter(Assignment.objects.filter( video=self.video, project=project).values_list('status__id', 
flat=True))) status_list['Total'] = sum(status_list.values()) return status_list def", "u\"{0}\".format(self.type) class FrameType(models.Model): # starting seed: rebar, stainless rebar, PVC, mixed type =", "else: return [node] + [cls.get_choices(node=node) for node in node.get_children().filter(active=True)] class BenthicCategory(MPTTModel): name =", "Meta: unique_together = ('trip', 'code') @property def environmentmeasure_set(self): return [x for x in", "models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=1) current_flow = models.DecimalField(null=True, blank=True, max_digits=5, decimal_places=2, help_text='m/s') # m/s", "measured_wind_speed = models.IntegerField(null=True, blank=True, help_text='kts') wind_direction = models.CharField(max_length=2, null=True, blank=True, choices=CURRENT_DIRECTION, help_text='compass direction')", "menhaden oil') def __str__(self): return u'{0} {1}{2}'.format(self.get_type_display(), self.description, ' (m)' if self.oiled else", "1) complete annotations have been promoted into a master # 2) a master", "blank=True, help_text='kts') wind_direction = models.CharField(max_length=2, null=True, blank=True, choices=CURRENT_DIRECTION, help_text='compass direction') # eight point", "class Equipment(AuditableModel): camera = models.CharField(max_length=32) stereo = models.BooleanField(default=False) frame_type = models.ForeignKey(to=FrameType) container =", "django.core.urlresolvers import reverse from django.contrib.gis.geos import Point from global_finprint.annotation.models.observation import Observation, MasterRecord from", "custom_field_value = JSONField(db_column='custom_fields', null=True) # todo: need some form changes here ... bait", "help_text='ppt') # ppt .0 conductivity = models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=2, help_text='S/m') # S/m", "= models.ManyToManyField(to=SetTag) current_flow_estimated = models.CharField(max_length=50, null=True, blank=True, help_text='H, M, L') current_flow_instrumented = models.DecimalField(null=True,", "inactive') parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True) class MPTTMeta: order_insertion_by = ['name']", "= models.DecimalField(max_digits=12, decimal_places=8) drop_time = models.TimeField() haul_date = models.DateField(null=True, blank=True) haul_time = models.TimeField(null=True,", "starting seed: rebar, stainless rebar, PVC, mixed type = models.CharField(max_length=32) image = models.ImageField(null=True,", "('V8-10', 'V8-10'), ('V10+', 'V10+') } FIELD_OF_VIEW_CHOICES = { ('FU', 'Facing Up'), ('FD', 'Facing", "to Flood'), ('S2E', 'Slack to Ebb'), } SURFACE_CHOP_CHOICES = { ('L', 'Light'), ('M',", "'V6-8'), ('V8-10', 'V8-10'), ('V10+', 'V10+') } FIELD_OF_VIEW_CHOICES = { ('FU', 'Facing Up'), ('FD',", "type = models.CharField(max_length=32) def __str__(self): return u\"{0}\".format(self.type) class FrameType(models.Model): # starting seed: rebar,", "bait = models.ForeignKey(Bait, null=True) equipment = models.ForeignKey(Equipment) reef_habitat = models.ForeignKey(ReefHabitat, blank=True) trip =", "= { ('F', 'Flood'), ('E', 'Ebb'), ('S', 'Slack'), ('S2F', 'Slack to Flood'), ('S2E',", "('FD', 'Facing Down'), ('L', 'Limited'), ('O', 'Open') } class BaitContainer(models.Model): # starting seed:", "= models.CharField(max_length=32) def __str__(self): return u\"{0}\".format(self.type) class FrameType(models.Model): # starting seed: rebar, stainless", "{ ('L', 'Light'), ('M', 'Medium'), ('H', 'Heavy'), } BAIT_TYPE_CHOICES = { 
('CHP', 'Chopped'),", "video = models.OneToOneField( Video, on_delete=models.CASCADE, null=True, related_name='set' ) bulk_loaded = models.BooleanField(default=False) class Meta:", "def __str__(self): return u\"{0} / {1} / {2}{3}\".format(self.frame_type.type, self.container.type, self.camera, ' (Stereo)' if", "has been completed # 3) other 'required' fields have been completed (see above)", "' (m)' if self.oiled else '') class Meta: unique_together = ('description', 'type', 'oiled')", "blank=True, choices=VISIBILITY_CHOICES) field_of_view = models.CharField(max_length=10, null=True, blank=True, choices=FIELD_OF_VIEW_CHOICES) custom_field_value = JSONField(db_column='custom_fields', null=True) #", "node=None): if node is None: nodes = [cls.get_choices(node=node) for node in cls.objects.filter(parent=None, active=True)]", "= u'{}{}_xxx'.format(self.reef().site.code, self.reef().code) super(Set, self).save(*args, **kwargs) self.refresh_from_db() if self.code == u'{}{}_xxx'.format(self.reef().site.code, self.reef().code): next_id", "('F', 'Flood'), ('E', 'Ebb'), ('S', 'Slack'), ('S2F', 'Slack to Flood'), ('S2E', 'Slack to", "= TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True) class MPTTMeta: order_insertion_by = ['name'] def __str__(self):", "longitude! this should be checked! self.coordinates = Point(float(self.longitude), float(self.latitude)) if not self.code: #", "help_text='compass direction') # eight point compass tide_state = models.CharField(max_length=3, null=True, blank=True, choices=TIDE_CHOICES) estimated_wind_speed", "for',str(self.set)) class Bait(AuditableModel): description = models.CharField(max_length=32, help_text='1kg') type = models.CharField(max_length=3, choices=BAIT_TYPE_CHOICES) oiled =", "@property def environmentmeasure_set(self): return [x for x in [self.haul_measure, self.drop_measure] if x is", "current_flow_estimated = models.CharField(max_length=50, null=True, blank=True, help_text='H, M, L') current_flow_instrumented = models.DecimalField(null=True, blank=True, max_digits=5,", "{ ('N', 'North'), ('NE', 'Northeast'), ('E', 'East'), ('SE', 'Southeast'), ('S', 'South'), ('SW', 'Southwest'),", "models.TextField(null=True, blank=True) active = models.BooleanField( default=True, help_text='overridden if parent is inactive') parent =", "models.CharField(max_length=32, db_index=True, help_text='[site + reef code]_xxx', null=True, blank=True) set_date = models.DateField() coordinates =", "max_digits=4, decimal_places=2, help_text='ppt') # ppt .0 conductivity = models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=2, help_text='S/m')", "= models.CharField(max_length=200, null=True, blank=True) splendor_image_url = models.CharField(max_length=200, null=True, blank=True) benthic_category = models.ManyToManyField(BenthicCategory, through='BenthicCategoryValue')", "code if it hasn't been set self.code = u'{}{}_xxx'.format(self.reef().site.code, self.reef().code) super(Set, self).save(*args, **kwargs)", "master \\ and (master.status.is_finished) \\ and self.required_fields() def __str__(self): return u\"{0}_{1}\".format(self.trip.code, self.code) class", "decimal import Decimal from collections import Counter from django.contrib.gis.db import models from django.core.validators", "m/s .00 current_direction = models.CharField(max_length=2, null=True, blank=True, choices=CURRENT_DIRECTION, help_text='compass direction') # eight point", "{1}'.format('Env measure for',str(self.set)) class Bait(AuditableModel): description = 
models.CharField(max_length=32, help_text='1kg') type = models.CharField(max_length=3, choices=BAIT_TYPE_CHOICES)", "decimal_places=4, max_digits=12) substrate_relief_sd = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12) visibility = models.CharField(db_column='visibility_str', max_length=10, null=True,", "__str__(self): return u\"{0}\".format(self.type) class Equipment(AuditableModel): camera = models.CharField(max_length=32) stereo = models.BooleanField(default=False) frame_type =", "models.CharField(max_length=50, unique=True) description = models.TextField(null=True, blank=True) active = models.BooleanField( default=True, help_text='overridden if parent", "\\ and (master.status.is_finished) \\ and self.required_fields() def __str__(self): return u\"{0}_{1}\".format(self.trip.code, self.code) class BenthicCategoryValue(models.Model):", "models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE, null=True, related_name='drop_parent_set') haul_measure = models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE, null=True, related_name='haul_parent_set') video", "help_text='centimeters') def __str__(self): return u\"{0} / {1} / {2}{3}\".format(self.frame_type.type, self.container.type, self.camera, ' (Stereo)'", "'Slack'), ('S2F', 'Slack to Flood'), ('S2E', 'Slack to Ebb'), } SURFACE_CHOP_CHOICES = {", "def assignment_counts(self, project=1): status_list = {'Total': 0} if self.video: status_list.update(Counter(Assignment.objects.filter( video=self.video, project=project).values_list('status__id', flat=True)))", "u\"{0}\".format(self.name) @classmethod def get_choices(cls, node=None): if node is None: nodes = [cls.get_choices(node=node) for", "move some of these out ot the db? EQUIPMENT_BAIT_CONTAINER = { ('B', 'Bag'),", "max_digits=12, validators=[MinValueValidator(Decimal('0.01'))]) comments = models.TextField(null=True, blank=True) message_to_annotators = models.TextField(null=True, blank=True) tags = models.ManyToManyField(to=SetTag)", "django.contrib.gis.db import models from django.core.validators import MinValueValidator, MaxValueValidator from django.core.urlresolvers import reverse from", "('V2-4', 'V2-4'), ('V4-6', 'V4-6'), ('V6-8', 'V6-8'), ('V8-10', 'V8-10'), ('V10+', 'V10+') } FIELD_OF_VIEW_CHOICES =", "global_finprint.trip.models import Trip from global_finprint.habitat.models import ReefHabitat, Substrate, SubstrateComplexity from mptt.models import MPTTModel,", "models.BooleanField(default=False) frame_type = models.ForeignKey(to=FrameType) container = models.ForeignKey(to=BaitContainer) arm_length = models.PositiveIntegerField(null=True, help_text='centimeters') camera_height =", "blank=True) tags = models.ManyToManyField(to=SetTag) current_flow_estimated = models.CharField(max_length=50, null=True, blank=True, help_text='H, M, L') current_flow_instrumented", "null=True, blank=True, choices=FIELD_OF_VIEW_CHOICES) custom_field_value = JSONField(db_column='custom_fields', null=True) # todo: need some form changes", "def environmentmeasure_set(self): return [x for x in [self.haul_measure, self.drop_measure] if x is not", "in node.get_children().filter(active=True)] class BenthicCategory(MPTTModel): name = models.CharField(max_length=50, unique=True) description = models.TextField(null=True, blank=True) active", "= models.BooleanField(default=False) class Meta: unique_together = ('trip', 'code') @property def environmentmeasure_set(self): return [x", "return MasterRecord.objects.get(set=self, project_id=project) except 
MasterRecord.DoesNotExist: return None def assignment_counts(self, project=1): status_list = {'Total':", "= models.DecimalField(null=True, blank=True, max_digits=4, decimal_places=2, help_text='ppt') # ppt .0 conductivity = models.DecimalField(null=True, blank=True,", "current_flow_instrumented = models.DecimalField(null=True, blank=True, max_digits=5, decimal_places=2, help_text='m/s') # m/s .00 bruv_image_url = models.CharField(max_length=200,", "args=[str(self.id)]) def observations(self): if self.video: return Observation.objects.filter(assignment__in=self.video.assignment_set.all()) def habitat_filename(self, image_type): server_env = VersionInfo.get_server_env()", "active = models.BooleanField( default=True, help_text='overridden if parent is inactive') parent = TreeForeignKey('self', null=True,", "promoted into a master # 2) a master annotation record has been completed", "== u'{}{}_xxx'.format(self.reef().site.code, self.reef().code): next_id = str(len(Set.objects.filter(trip=self.trip, reef_habitat__reef=self.reef()))).zfill(3) self.code = self.code.replace('_xxx', u'_{}'.format(next_id)) super(Set, self).save(*args,", "= { ('V0-2', 'V0-2'), ('V2-4', 'V2-4'), ('V4-6', 'V4-6'), ('V6-8', 'V6-8'), ('V8-10', 'V8-10'), ('V10+',", "= { ('B', 'Bag'), ('C', 'Cage'), } CURRENT_DIRECTION = { ('N', 'North'), ('NE',", "} CURRENT_DIRECTION = { ('N', 'North'), ('NE', 'Northeast'), ('E', 'East'), ('SE', 'Southeast'), ('S',", "# todo: we're assuming the input is latitude & longitude! this should be", "into a master # 2) a master annotation record has been completed #", "# percentage surface_chop = models.CharField(max_length=1, null=True, blank=True, choices=SURFACE_CHOP_CHOICES) def __str__(self): return u'{0} {1}'.format('Env", "from django.contrib.gis.db import models from django.core.validators import MinValueValidator, MaxValueValidator from django.core.urlresolvers import reverse", "('E', 'East'), ('SE', 'Southeast'), ('S', 'South'), ('SW', 'Southwest'), ('W', 'West'), ('NW', 'Northwest'), }", "flatten(x): if type(x) is list: return [a for i in x for a", "def habitat_filename(self, image_type): server_env = VersionInfo.get_server_env() return '/{0}/{1}/{2}/{3}.png'.format(server_env, self.trip.code, self.code, image_type) # todo:", "tide_state = models.CharField(max_length=3, null=True, blank=True, choices=TIDE_CHOICES) estimated_wind_speed = models.IntegerField(null=True, blank=True, help_text='Beaufort') measured_wind_speed =", "= ('trip', 'code') @property def environmentmeasure_set(self): return [x for x in [self.haul_measure, self.drop_measure]", "= sum(status_list.values()) return status_list def required_fields(self): # need to make this data-driven, not", "= models.TextField(null=True, blank=True) active = models.BooleanField( default=True, help_text='overridden if parent is inactive') parent", "max_length=10, null=True, blank=True, choices=VISIBILITY_CHOICES) field_of_view = models.CharField(max_length=10, null=True, blank=True, choices=FIELD_OF_VIEW_CHOICES) custom_field_value = JSONField(db_column='custom_fields',", "latitude = models.DecimalField(max_digits=12, decimal_places=8) longitude = models.DecimalField(max_digits=12, decimal_places=8) drop_time = models.TimeField() haul_date =", "('V0-2', 'V0-2'), ('V2-4', 'V2-4'), ('V4-6', 'V4-6'), ('V6-8', 'V6-8'), ('V8-10', 'V8-10'), ('V10+', 'V10+') }", "models.CharField(max_length=200, null=True, blank=True) splendor_image_url = models.CharField(max_length=200, null=True, blank=True) benthic_category = 
models.ManyToManyField(BenthicCategory, through='BenthicCategoryValue') #", "currently required: # 1) visibility # 2) current flow (either) # 3) substrate", "/ {2}{3}\".format(self.frame_type.type, self.container.type, self.camera, ' (Stereo)' if self.stereo else '') class Meta: verbose_name_plural", "this data-driven, not hard-coded field choices # currently required: # 1) visibility #", "} class BaitContainer(models.Model): # starting seed: cage, bag type = models.CharField(max_length=32) def __str__(self):", "{ ('V0-2', 'V0-2'), ('V2-4', 'V2-4'), ('V4-6', 'V4-6'), ('V6-8', 'V6-8'), ('V8-10', 'V8-10'), ('V10+', 'V10+')", "'Heavy'), } BAIT_TYPE_CHOICES = { ('CHP', 'Chopped'), ('CRS', 'Crushed'), ('WHL', 'Whole'), } VISIBILITY_CHOICES", "def __str__(self): return u'{0} {1}{2}'.format(self.get_type_display(), self.description, ' (m)' if self.oiled else '') class", "assuming the input is latitude & longitude! this should be checked! self.coordinates =", "class Meta: verbose_name_plural = 'benthic categories' class Set(AuditableModel): # suggested code pattern: #", "collections import Counter from django.contrib.gis.db import models from django.core.validators import MinValueValidator, MaxValueValidator from", "= { ('N', 'North'), ('NE', 'Northeast'), ('E', 'East'), ('SE', 'Southeast'), ('S', 'South'), ('SW',", "in x for a in flatten(i)] else: return [x] class SetTag(MPTTModel): name =", "def save(self, *args, **kwargs): # todo: we're assuming the input is latitude &", "[site.code][reef.code]_[set number within reef] code = models.CharField(max_length=32, db_index=True, help_text='[site + reef code]_xxx', null=True,", "haul_date = models.DateField(null=True, blank=True) haul_time = models.TimeField(null=True, blank=True) depth = models.DecimalField(help_text='m', decimal_places=2, max_digits=12,", "class FrameType(models.Model): # starting seed: rebar, stainless rebar, PVC, mixed type = models.CharField(max_length=32)", "= models.ImageField(null=True, blank=True) def __str__(self): return u\"{0}\".format(self.type) class Equipment(AuditableModel): camera = models.CharField(max_length=32) stereo", "mixed type = models.CharField(max_length=32) image = models.ImageField(null=True, blank=True) def __str__(self): return u\"{0}\".format(self.type) class", "drop_measure = models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE, null=True, related_name='drop_parent_set') haul_measure = models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE, null=True,", "& longitude! this should be checked! 
self.coordinates = Point(float(self.longitude), float(self.latitude)) if not self.code:", "= models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=1) current_flow = models.DecimalField(null=True, blank=True, max_digits=5, decimal_places=2, help_text='m/s') #", "super(Set, self).save(*args, **kwargs) self.refresh_from_db() if self.code == u'{}{}_xxx'.format(self.reef().site.code, self.reef().code): next_id = str(len(Set.objects.filter(trip=self.trip, reef_habitat__reef=self.reef()))).zfill(3)", "u\"{0}\".format(self.type) class Equipment(AuditableModel): camera = models.CharField(max_length=32) stereo = models.BooleanField(default=False) frame_type = models.ForeignKey(to=FrameType) container", "+ reef code]_xxx', null=True, blank=True) set_date = models.DateField() coordinates = models.PointField(null=True) latitude =", "def flatten(x): if type(x) is list: return [a for i in x for", "help_text='S/m') # S/m .00 dissolved_oxygen = models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=1) current_flow = models.DecimalField(null=True,", "# needed for SetTag#get_choices because python doesn't have this somehow (!!!) def flatten(x):", "surface_chop = models.CharField(max_length=1, null=True, blank=True, choices=SURFACE_CHOP_CHOICES) def __str__(self): return u'{0} {1}'.format('Env measure for',str(self.set))", "self.code = u'{}{}_xxx'.format(self.reef().site.code, self.reef().code) super(Set, self).save(*args, **kwargs) self.refresh_from_db() if self.code == u'{}{}_xxx'.format(self.reef().site.code, self.reef().code):", "choices=SURFACE_CHOP_CHOICES) def __str__(self): return u'{0} {1}'.format('Env measure for',str(self.set)) class Bait(AuditableModel): description = models.CharField(max_length=32,", "global_finprint.core.models import AuditableModel from global_finprint.trip.models import Trip from global_finprint.habitat.models import ReefHabitat, Substrate, SubstrateComplexity", "and (self.current_flow_estimated or self.current_flow_instrumented)) def completed(self): # we consider the following for \"completion\":", "ReefHabitat, Substrate, SubstrateComplexity from mptt.models import MPTTModel, TreeForeignKey from django.contrib.postgres.fields import ArrayField, JSONField", "'V8-10'), ('V10+', 'V10+') } FIELD_OF_VIEW_CHOICES = { ('FU', 'Facing Up'), ('FD', 'Facing Down'),", "node.get_children().filter(active=True)] class BenthicCategory(MPTTModel): name = models.CharField(max_length=50, unique=True) description = models.TextField(null=True, blank=True) active =", "verbose_name_plural = 'benthic categories' class Set(AuditableModel): # suggested code pattern: # [site.code][reef.code]_[set number", "x in [self.haul_measure, self.drop_measure] if x is not None] @property def next_by_code(self): return", "and (master.status.is_finished) \\ and self.required_fields() def __str__(self): return u\"{0}_{1}\".format(self.trip.code, self.code) class BenthicCategoryValue(models.Model): set", "= models.CharField(max_length=2, null=True, blank=True, choices=CURRENT_DIRECTION, help_text='compass direction') # eight point compass tide_state =", "the input is latitude & longitude! this should be checked! 
self.coordinates = Point(float(self.longitude),", "'Chopped'), ('CRS', 'Crushed'), ('WHL', 'Whole'), } VISIBILITY_CHOICES = { ('V0-2', 'V0-2'), ('V2-4', 'V2-4'),", "u'{0} {1}{2}'.format(self.get_type_display(), self.description, ' (m)' if self.oiled else '') class Meta: unique_together =", "models.CharField(max_length=32, help_text='1kg') type = models.CharField(max_length=3, choices=BAIT_TYPE_CHOICES) oiled = models.BooleanField(default=False, help_text='20ml menhaden oil') def", "self.refresh_from_db() if self.code == u'{}{}_xxx'.format(self.reef().site.code, self.reef().code): next_id = str(len(Set.objects.filter(trip=self.trip, reef_habitat__reef=self.reef()))).zfill(3) self.code = self.code.replace('_xxx',", "choices=VISIBILITY_CHOICES) field_of_view = models.CharField(max_length=10, null=True, blank=True, choices=FIELD_OF_VIEW_CHOICES) custom_field_value = JSONField(db_column='custom_fields', null=True) # todo:", "blank=True, max_digits=4, decimal_places=1, help_text='C') # C salinity = models.DecimalField(null=True, blank=True, max_digits=4, decimal_places=2, help_text='ppt')", "JSONField # todo: move some of these out ot the db? EQUIPMENT_BAIT_CONTAINER =", "import reverse from django.contrib.gis.geos import Point from global_finprint.annotation.models.observation import Observation, MasterRecord from global_finprint.annotation.models.video", "'South'), ('SW', 'Southwest'), ('W', 'West'), ('NW', 'Northwest'), } TIDE_CHOICES = { ('F', 'Flood'),", "= models.PositiveIntegerField(null=True, help_text='centimeters') def __str__(self): return u\"{0} / {1} / {2}{3}\".format(self.frame_type.type, self.container.type, self.camera,", "= ['name'] def __str__(self): return u\"{0}\".format(self.name) @classmethod def get_choices(cls, node=None): if node is", "null=True, blank=True, help_text='H, M, L') current_flow_instrumented = models.DecimalField(null=True, blank=True, max_digits=5, decimal_places=2, help_text='m/s') #", "models.DateField() coordinates = models.PointField(null=True) latitude = models.DecimalField(max_digits=12, decimal_places=8) longitude = models.DecimalField(max_digits=12, decimal_places=8) drop_time", "db_index=True) class MPTTMeta: order_insertion_by = ['name'] def __str__(self): return u\"{0}\".format(self.name) class Meta: verbose_name_plural", "help_text='centimeters') camera_height = models.PositiveIntegerField(null=True, help_text='centimeters') def __str__(self): return u\"{0} / {1} / {2}{3}\".format(self.frame_type.type,", "[x] class SetTag(MPTTModel): name = models.CharField(max_length=50, unique=True) description = models.TextField(null=True, blank=True) active =", "todo: \"property-ize\" this? 
def master(self, project=1): try: return MasterRecord.objects.get(set=self, project_id=project) except MasterRecord.DoesNotExist: return", "if self.video: status_list.update(Counter(Assignment.objects.filter( video=self.video, project=project).values_list('status__id', flat=True))) status_list['Total'] = sum(status_list.values()) return status_list def required_fields(self):", "{2}{3}\".format(self.frame_type.type, self.container.type, self.camera, ' (Stereo)' if self.stereo else '') class Meta: verbose_name_plural =", "for node in flatten(nodes)] elif node.is_leaf_node(): return node else: return [node] + [cls.get_choices(node=node)", "Point(float(self.longitude), float(self.latitude)) if not self.code: # set code if it hasn't been set", "= models.DecimalField(max_digits=12, decimal_places=8) longitude = models.DecimalField(max_digits=12, decimal_places=8) drop_time = models.TimeField() haul_date = models.DateField(null=True,", "('C', 'Cage'), } CURRENT_DIRECTION = { ('N', 'North'), ('NE', 'Northeast'), ('E', 'East'), ('SE',", "container = models.ForeignKey(to=BaitContainer) arm_length = models.PositiveIntegerField(null=True, help_text='centimeters') camera_height = models.PositiveIntegerField(null=True, help_text='centimeters') def __str__(self):", "blank=True, max_digits=4, decimal_places=2, help_text='ppt') # ppt .0 conductivity = models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=2,", "null=True, blank=True, choices=CURRENT_DIRECTION, help_text='compass direction') # eight point compass cloud_cover = models.IntegerField(null=True, blank=True,", "def completed(self): # we consider the following for \"completion\": # 1) complete annotations", "return u\"{0}_{1}\".format(self.trip.code, self.code) class BenthicCategoryValue(models.Model): set = models.ForeignKey(Set) benthic_category = TreeForeignKey(BenthicCategory) value =", "'') class Meta: verbose_name_plural = \"Equipment\" ordering = ['frame_type__type', 'container__type', 'camera'] class EnvironmentMeasure(AuditableModel):", "seed: cage, bag type = models.CharField(max_length=32) def __str__(self): return u\"{0}\".format(self.type) class FrameType(models.Model): #", "following for \"completion\": # 1) complete annotations have been promoted into a master", "and self.required_fields() def __str__(self): return u\"{0}_{1}\".format(self.trip.code, self.code) class BenthicCategoryValue(models.Model): set = models.ForeignKey(Set) benthic_category", "related_name='children', db_index=True) class MPTTMeta: order_insertion_by = ['name'] def __str__(self): return u\"{0}\".format(self.name) class Meta:", "= models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE, null=True, related_name='drop_parent_set') haul_measure = models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE, null=True, related_name='haul_parent_set')", "('WHL', 'Whole'), } VISIBILITY_CHOICES = { ('V0-2', 'V0-2'), ('V2-4', 'V2-4'), ('V4-6', 'V4-6'), ('V6-8',", "compass tide_state = models.CharField(max_length=3, null=True, blank=True, choices=TIDE_CHOICES) estimated_wind_speed = models.IntegerField(null=True, blank=True, help_text='Beaufort') measured_wind_speed", "MasterRecord from global_finprint.annotation.models.video import Video, Assignment from global_finprint.core.version import VersionInfo from global_finprint.core.models import", "models.ManyToManyField(to=SetTag) current_flow_estimated = models.CharField(max_length=50, null=True, blank=True, help_text='H, M, L') current_flow_instrumented = models.DecimalField(null=True, 
blank=True,", "master(self, project=1): try: return MasterRecord.objects.get(set=self, project_id=project) except MasterRecord.DoesNotExist: return None def assignment_counts(self, project=1):", "# we consider the following for \"completion\": # 1) complete annotations have been", "('M', 'Medium'), ('H', 'Heavy'), } BAIT_TYPE_CHOICES = { ('CHP', 'Chopped'), ('CRS', 'Crushed'), ('WHL',", "null=True) # todo: need some form changes here ... bait = models.ForeignKey(Bait, null=True)", "bulk_loaded = models.BooleanField(default=False) class Meta: unique_together = ('trip', 'code') @property def environmentmeasure_set(self): return", "= models.BooleanField(default=False, help_text='20ml menhaden oil') def __str__(self): return u'{0} {1}{2}'.format(self.get_type_display(), self.description, ' (m)'", "from global_finprint.annotation.models.video import Video, Assignment from global_finprint.core.version import VersionInfo from global_finprint.core.models import AuditableModel", "from global_finprint.trip.models import Trip from global_finprint.habitat.models import ReefHabitat, Substrate, SubstrateComplexity from mptt.models import", "self.camera, ' (Stereo)' if self.stereo else '') class Meta: verbose_name_plural = \"Equipment\" ordering", "[a for i in x for a in flatten(i)] else: return [x] class", "**kwargs) self.refresh_from_db() if self.code == u'{}{}_xxx'.format(self.reef().site.code, self.reef().code): next_id = str(len(Set.objects.filter(trip=self.trip, reef_habitat__reef=self.reef()))).zfill(3) self.code =", "(either) # 3) substrate # 4) substrate complexity return bool(self.visibility and (self.current_flow_estimated or", "needed for SetTag#get_choices because python doesn't have this somehow (!!!) def flatten(x): if", "= VersionInfo.get_server_env() return '/{0}/{1}/{2}/{3}.png'.format(server_env, self.trip.code, self.code, image_type) # todo: \"property-ize\" this? def master(self,", "x for a in flatten(i)] else: return [x] class SetTag(MPTTModel): name = models.CharField(max_length=50,", "image = models.ImageField(null=True, blank=True) def __str__(self): return u\"{0}\".format(self.type) class Equipment(AuditableModel): camera = models.CharField(max_length=32)", "cage, bag type = models.CharField(max_length=32) def __str__(self): return u\"{0}\".format(self.type) class FrameType(models.Model): # starting", "form changes here ... bait = models.ForeignKey(Bait, null=True) equipment = models.ForeignKey(Equipment) reef_habitat =", "todo: we're assuming the input is latitude & longitude! 
this should be checked!", "('L', 'Light'), ('M', 'Medium'), ('H', 'Heavy'), } BAIT_TYPE_CHOICES = { ('CHP', 'Chopped'), ('CRS',", "choices=BAIT_TYPE_CHOICES) oiled = models.BooleanField(default=False, help_text='20ml menhaden oil') def __str__(self): return u'{0} {1}{2}'.format(self.get_type_display(), self.description,", "blank=True) trip = models.ForeignKey(Trip) drop_measure = models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE, null=True, related_name='drop_parent_set') haul_measure =", "'Facing Down'), ('L', 'Limited'), ('O', 'Open') } class BaitContainer(models.Model): # starting seed: cage,", "for x in [self.haul_measure, self.drop_measure] if x is not None] @property def next_by_code(self):", "('CRS', 'Crushed'), ('WHL', 'Whole'), } VISIBILITY_CHOICES = { ('V0-2', 'V0-2'), ('V2-4', 'V2-4'), ('V4-6',", "if node is None: nodes = [cls.get_choices(node=node) for node in cls.objects.filter(parent=None, active=True)] return", "= models.DecimalField(help_text='m', decimal_places=2, max_digits=12, validators=[MinValueValidator(Decimal('0.01'))]) comments = models.TextField(null=True, blank=True) message_to_annotators = models.TextField(null=True, blank=True)", "models.CharField(max_length=32) def __str__(self): return u\"{0}\".format(self.type) class FrameType(models.Model): # starting seed: rebar, stainless rebar,", "suggested code pattern: # [site.code][reef.code]_[set number within reef] code = models.CharField(max_length=32, db_index=True, help_text='[site", "models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE, null=True, related_name='haul_parent_set') video = models.OneToOneField( Video, on_delete=models.CASCADE, null=True, related_name='set' )", "description = models.TextField(null=True, blank=True) active = models.BooleanField( default=True, help_text='overridden if parent is inactive')", "m/s .00 bruv_image_url = models.CharField(max_length=200, null=True, blank=True) splendor_image_url = models.CharField(max_length=200, null=True, blank=True) benthic_category", "models.TextField(null=True, blank=True) message_to_annotators = models.TextField(null=True, blank=True) tags = models.ManyToManyField(to=SetTag) current_flow_estimated = models.CharField(max_length=50, null=True,", "MPTTModel, TreeForeignKey from django.contrib.postgres.fields import ArrayField, JSONField # todo: move some of these", "'code') @property def environmentmeasure_set(self): return [x for x in [self.haul_measure, self.drop_measure] if x", "models.ForeignKey(to=BaitContainer) arm_length = models.PositiveIntegerField(null=True, help_text='centimeters') camera_height = models.PositiveIntegerField(null=True, help_text='centimeters') def __str__(self): return u\"{0}", "= models.CharField(max_length=200, null=True, blank=True) benthic_category = models.ManyToManyField(BenthicCategory, through='BenthicCategoryValue') # new fields substrate_relief_mean =", "import Counter from django.contrib.gis.db import models from django.core.validators import MinValueValidator, MaxValueValidator from django.core.urlresolvers", "set self.code = u'{}{}_xxx'.format(self.reef().site.code, self.reef().code) super(Set, self).save(*args, **kwargs) self.refresh_from_db() if self.code == u'{}{}_xxx'.format(self.reef().site.code,", "required_fields(self): # need to make this data-driven, not hard-coded field choices # currently", "if type(x) is list: return [a for i in x for a in", "import ArrayField, JSONField # todo: move some of these out ot the db?", "ArrayField, JSONField # todo: move some of these out ot 
the db? EQUIPMENT_BAIT_CONTAINER", "S/m .00 dissolved_oxygen = models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=1) current_flow = models.DecimalField(null=True, blank=True, max_digits=5,", "return u'{0} {1}'.format('Env measure for',str(self.set)) class Bait(AuditableModel): description = models.CharField(max_length=32, help_text='1kg') type =", "Video, Assignment from global_finprint.core.version import VersionInfo from global_finprint.core.models import AuditableModel from global_finprint.trip.models import", "node else: return [node] + [cls.get_choices(node=node) for node in node.get_children().filter(active=True)] class BenthicCategory(MPTTModel): name", "oiled = models.BooleanField(default=False, help_text='20ml menhaden oil') def __str__(self): return u'{0} {1}{2}'.format(self.get_type_display(), self.description, '", "u'_{}'.format(next_id)) super(Set, self).save(*args, **kwargs) def reef(self): return self.reef_habitat.reef def get_absolute_url(self): return reverse('set_update', args=[str(self.id)])", "('S2E', 'Slack to Ebb'), } SURFACE_CHOP_CHOICES = { ('L', 'Light'), ('M', 'Medium'), ('H',", "from global_finprint.annotation.models.observation import Observation, MasterRecord from global_finprint.annotation.models.video import Video, Assignment from global_finprint.core.version import", "models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12) substrate_relief_sd = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12) visibility = models.CharField(db_column='visibility_str',", "import Decimal from collections import Counter from django.contrib.gis.db import models from django.core.validators import", "status_list = {'Total': 0} if self.video: status_list.update(Counter(Assignment.objects.filter( video=self.video, project=project).values_list('status__id', flat=True))) status_list['Total'] = sum(status_list.values())", "flat=True))) status_list['Total'] = sum(status_list.values()) return status_list def required_fields(self): # need to make this", "for a in flatten(i)] else: return [x] class SetTag(MPTTModel): name = models.CharField(max_length=50, unique=True)", "substrate_relief_mean = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12) substrate_relief_sd = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12) visibility", "import AuditableModel from global_finprint.trip.models import Trip from global_finprint.habitat.models import ReefHabitat, Substrate, SubstrateComplexity from", "of these out ot the db? EQUIPMENT_BAIT_CONTAINER = { ('B', 'Bag'), ('C', 'Cage'),", "= models.OneToOneField( Video, on_delete=models.CASCADE, null=True, related_name='set' ) bulk_loaded = models.BooleanField(default=False) class Meta: unique_together", "categories' class Set(AuditableModel): # suggested code pattern: # [site.code][reef.code]_[set number within reef] code", "some form changes here ... bait = models.ForeignKey(Bait, null=True) equipment = models.ForeignKey(Equipment) reef_habitat", "self.code: # set code if it hasn't been set self.code = u'{}{}_xxx'.format(self.reef().site.code, self.reef().code)", "somehow (!!!) 
def flatten(x): if type(x) is list: return [a for i in", "fields have been completed (see above) master = self.master() return master \\ and", "return u'{0} {1}{2}'.format(self.get_type_display(), self.description, ' (m)' if self.oiled else '') class Meta: unique_together", "Set(AuditableModel): # suggested code pattern: # [site.code][reef.code]_[set number within reef] code = models.CharField(max_length=32,", "max_digits=5, decimal_places=2, help_text='m/s') # m/s .00 bruv_image_url = models.CharField(max_length=200, null=True, blank=True) splendor_image_url =", "u\"{0}\".format(self.name) class Meta: verbose_name_plural = 'benthic categories' class Set(AuditableModel): # suggested code pattern:", "return reverse('set_update', args=[str(self.id)]) def observations(self): if self.video: return Observation.objects.filter(assignment__in=self.video.assignment_set.all()) def habitat_filename(self, image_type): server_env", "class EnvironmentMeasure(AuditableModel): water_temperature = models.DecimalField(null=True, blank=True, max_digits=4, decimal_places=1, help_text='C') # C salinity =", "[self.haul_measure, self.drop_measure] if x is not None] @property def next_by_code(self): return self.trip.get_next_set_by_code(self.code) def", "help_text='C') # C salinity = models.DecimalField(null=True, blank=True, max_digits=4, decimal_places=2, help_text='ppt') # ppt .0", "'Southwest'), ('W', 'West'), ('NW', 'Northwest'), } TIDE_CHOICES = { ('F', 'Flood'), ('E', 'Ebb'),", "{1} / {2}{3}\".format(self.frame_type.type, self.container.type, self.camera, ' (Stereo)' if self.stereo else '') class Meta:", "# set code if it hasn't been set self.code = u'{}{}_xxx'.format(self.reef().site.code, self.reef().code) super(Set,", "EnvironmentMeasure(AuditableModel): water_temperature = models.DecimalField(null=True, blank=True, max_digits=4, decimal_places=1, help_text='C') # C salinity = models.DecimalField(null=True,", "= models.CharField(db_column='visibility_str', max_length=10, null=True, blank=True, choices=VISIBILITY_CHOICES) field_of_view = models.CharField(max_length=10, null=True, blank=True, choices=FIELD_OF_VIEW_CHOICES) custom_field_value", "models from django.core.validators import MinValueValidator, MaxValueValidator from django.core.urlresolvers import reverse from django.contrib.gis.geos import", "'V0-2'), ('V2-4', 'V2-4'), ('V4-6', 'V4-6'), ('V6-8', 'V6-8'), ('V8-10', 'V8-10'), ('V10+', 'V10+') } FIELD_OF_VIEW_CHOICES", "trip = models.ForeignKey(Trip) drop_measure = models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE, null=True, related_name='drop_parent_set') haul_measure = models.OneToOneField(", "self.reef().code) super(Set, self).save(*args, **kwargs) self.refresh_from_db() if self.code == u'{}{}_xxx'.format(self.reef().site.code, self.reef().code): next_id = str(len(Set.objects.filter(trip=self.trip,", "reef_habitat = models.ForeignKey(ReefHabitat, blank=True) trip = models.ForeignKey(Trip) drop_measure = models.OneToOneField( EnvironmentMeasure, on_delete=models.CASCADE, null=True,", "seed: rebar, stainless rebar, PVC, mixed type = models.CharField(max_length=32) image = models.ImageField(null=True, blank=True)", "models.ImageField(null=True, blank=True) def __str__(self): return u\"{0}\".format(self.type) class Equipment(AuditableModel): camera = models.CharField(max_length=32) stereo =", "import ReefHabitat, Substrate, SubstrateComplexity from mptt.models import MPTTModel, TreeForeignKey from django.contrib.postgres.fields import ArrayField,", "for node in 
node.get_children().filter(active=True)] class BenthicCategory(MPTTModel): name = models.CharField(max_length=50, unique=True) description = models.TextField(null=True,", "project=project).values_list('status__id', flat=True))) status_list['Total'] = sum(status_list.values()) return status_list def required_fields(self): # need to make", "try: return MasterRecord.objects.get(set=self, project_id=project) except MasterRecord.DoesNotExist: return None def assignment_counts(self, project=1): status_list =", "BaitContainer(models.Model): # starting seed: cage, bag type = models.CharField(max_length=32) def __str__(self): return u\"{0}\".format(self.type)", "# new fields substrate_relief_mean = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12) substrate_relief_sd = models.DecimalField(null=True, blank=True,", "blank=True, decimal_places=4, max_digits=12) substrate_relief_sd = models.DecimalField(null=True, blank=True, decimal_places=4, max_digits=12) visibility = models.CharField(db_column='visibility_str', max_length=10,", "self).save(*args, **kwargs) self.refresh_from_db() if self.code == u'{}{}_xxx'.format(self.reef().site.code, self.reef().code): next_id = str(len(Set.objects.filter(trip=self.trip, reef_habitat__reef=self.reef()))).zfill(3) self.code", "# currently required: # 1) visibility # 2) current flow (either) # 3)", "3) other 'required' fields have been completed (see above) master = self.master() return", "= models.DecimalField(null=True, blank=True, max_digits=4, decimal_places=1, help_text='C') # C salinity = models.DecimalField(null=True, blank=True, max_digits=4,", "null=True, blank=True) splendor_image_url = models.CharField(max_length=200, null=True, blank=True) benthic_category = models.ManyToManyField(BenthicCategory, through='BenthicCategoryValue') # new", "models.CharField(max_length=2, null=True, blank=True, choices=CURRENT_DIRECTION, help_text='compass direction') # eight point compass cloud_cover = models.IntegerField(null=True,", "because python doesn't have this somehow (!!!) 
def flatten(x): if type(x) is list:", "help_text='20ml menhaden oil') def __str__(self): return u'{0} {1}{2}'.format(self.get_type_display(), self.description, ' (m)' if self.oiled", "'required' fields have been completed (see above) master = self.master() return master \\", "= models.CharField(max_length=10, null=True, blank=True, choices=FIELD_OF_VIEW_CHOICES) custom_field_value = JSONField(db_column='custom_fields', null=True) # todo: need some", "} FIELD_OF_VIEW_CHOICES = { ('FU', 'Facing Up'), ('FD', 'Facing Down'), ('L', 'Limited'), ('O',", "None] @property def next_by_code(self): return self.trip.get_next_set_by_code(self.code) def save(self, *args, **kwargs): # todo: we're", "def get_absolute_url(self): return reverse('set_update', args=[str(self.id)]) def observations(self): if self.video: return Observation.objects.filter(assignment__in=self.video.assignment_set.all()) def habitat_filename(self,", "in [self.haul_measure, self.drop_measure] if x is not None] @property def next_by_code(self): return self.trip.get_next_set_by_code(self.code)", "from mptt.models import MPTTModel, TreeForeignKey from django.contrib.postgres.fields import ArrayField, JSONField # todo: move", "# starting seed: cage, bag type = models.CharField(max_length=32) def __str__(self): return u\"{0}\".format(self.type) class", "import models from django.core.validators import MinValueValidator, MaxValueValidator from django.core.urlresolvers import reverse from django.contrib.gis.geos", "self.drop_measure] if x is not None] @property def next_by_code(self): return self.trip.get_next_set_by_code(self.code) def save(self,", "'Ebb'), ('S', 'Slack'), ('S2F', 'Slack to Flood'), ('S2E', 'Slack to Ebb'), } SURFACE_CHOP_CHOICES" ]
[ "{ 'data': data } return self.connection.bulk_post_reimbursements(payload) def sync(self): \"\"\" Syncs the latest API", "generator for retrieving data from the API. :return: Generator \"\"\" query_params = self.__construct_query_params()", "sync(self): \"\"\" Syncs the latest API data to DB. \"\"\" generator = self.__get_all_generator()", "\"\"\" Constructs the query params for the API call. :return: dict \"\"\" last_synced_record", "data): \"\"\" Post of reimbursements \"\"\" payload = { 'data': data } return", "\"\"\" query_params = self.__construct_query_params() return self.connection.list_all(query_params) def search_reimbursements(self, query_params): \"\"\" Get of reimbursements", "the query params for the API call. :return: dict \"\"\" last_synced_record = Reimbursement.get_last_synced_at(self.workspace_id)", "class Reimbursements(Base): \"\"\"Class for Reimbursements APIs.\"\"\" def __construct_query_params(self) -> dict: \"\"\" Constructs the", "Reimbursement class Reimbursements(Base): \"\"\"Class for Reimbursements APIs.\"\"\" def __construct_query_params(self) -> dict: \"\"\" Constructs", "query_params['order'] = 'updated_at.desc' return self.connection.list_all(query_params) def bulk_post_reimbursements(self, data): \"\"\" Post of reimbursements \"\"\"", "reimbursements \"\"\" payload = { 'data': data } return self.connection.bulk_post_reimbursements(payload) def sync(self): \"\"\"", "the latest API data to DB. \"\"\" generator = self.__get_all_generator() for items in", "last_synced_record else None query_params = {'order': 'updated_at.desc'} if updated_at: query_params['updated_at'] = updated_at return", "\"\"\" query_params['order'] = 'updated_at.desc' return self.connection.list_all(query_params) def bulk_post_reimbursements(self, data): \"\"\" Post of reimbursements", "API data to DB. \"\"\" generator = self.__get_all_generator() for items in generator: Reimbursement.create_or_update_reimbursement_objects(items['data'],", "retrieving data from the API. :return: Generator \"\"\" query_params = self.__construct_query_params() return self.connection.list_all(query_params)", "= self.__construct_query_params() return self.connection.list_all(query_params) def search_reimbursements(self, query_params): \"\"\" Get of reimbursements filtered on", "of reimbursements filtered on query parameters :return: Generator \"\"\" query_params['order'] = 'updated_at.desc' return", "Returns the generator for retrieving data from the API. 
:return: Generator \"\"\" query_params", "from .base import Base from apps.fyle.models import Reimbursement class Reimbursements(Base): \"\"\"Class for Reimbursements", "return self.connection.list_all(query_params) def search_reimbursements(self, query_params): \"\"\" Get of reimbursements filtered on query parameters", "if updated_at: query_params['updated_at'] = updated_at return query_params def __get_all_generator(self): \"\"\" Returns the generator", "on query parameters :return: Generator \"\"\" query_params['order'] = 'updated_at.desc' return self.connection.list_all(query_params) def bulk_post_reimbursements(self,", "} return self.connection.bulk_post_reimbursements(payload) def sync(self): \"\"\" Syncs the latest API data to DB.", ".base import Base from apps.fyle.models import Reimbursement class Reimbursements(Base): \"\"\"Class for Reimbursements APIs.\"\"\"", "data } return self.connection.bulk_post_reimbursements(payload) def sync(self): \"\"\" Syncs the latest API data to", "query_params = {'order': 'updated_at.desc'} if updated_at: query_params['updated_at'] = updated_at return query_params def __get_all_generator(self):", "APIs.\"\"\" def __construct_query_params(self) -> dict: \"\"\" Constructs the query params for the API", "from the API. :return: Generator \"\"\" query_params = self.__construct_query_params() return self.connection.list_all(query_params) def search_reimbursements(self,", ":return: dict \"\"\" last_synced_record = Reimbursement.get_last_synced_at(self.workspace_id) updated_at = self.format_date(last_synced_record.updated_at) if last_synced_record else None", "query_params): \"\"\" Get of reimbursements filtered on query parameters :return: Generator \"\"\" query_params['order']", "return self.connection.list_all(query_params) def bulk_post_reimbursements(self, data): \"\"\" Post of reimbursements \"\"\" payload = {", "def search_reimbursements(self, query_params): \"\"\" Get of reimbursements filtered on query parameters :return: Generator", "apps.fyle.models import Reimbursement class Reimbursements(Base): \"\"\"Class for Reimbursements APIs.\"\"\" def __construct_query_params(self) -> dict:", "updated_at return query_params def __get_all_generator(self): \"\"\" Returns the generator for retrieving data from", "None query_params = {'order': 'updated_at.desc'} if updated_at: query_params['updated_at'] = updated_at return query_params def", "the generator for retrieving data from the API. 
:return: Generator \"\"\" query_params =", "Reimbursements(Base): \"\"\"Class for Reimbursements APIs.\"\"\" def __construct_query_params(self) -> dict: \"\"\" Constructs the query", "Generator \"\"\" query_params = self.__construct_query_params() return self.connection.list_all(query_params) def search_reimbursements(self, query_params): \"\"\" Get of", "for Reimbursements APIs.\"\"\" def __construct_query_params(self) -> dict: \"\"\" Constructs the query params for", "Get of reimbursements filtered on query parameters :return: Generator \"\"\" query_params['order'] = 'updated_at.desc'", "filtered on query parameters :return: Generator \"\"\" query_params['order'] = 'updated_at.desc' return self.connection.list_all(query_params) def", "parameters :return: Generator \"\"\" query_params['order'] = 'updated_at.desc' return self.connection.list_all(query_params) def bulk_post_reimbursements(self, data): \"\"\"", "return query_params def __get_all_generator(self): \"\"\" Returns the generator for retrieving data from the", "\"\"\" Get of reimbursements filtered on query parameters :return: Generator \"\"\" query_params['order'] =", "def sync(self): \"\"\" Syncs the latest API data to DB. \"\"\" generator =", "<filename>connector/fyle_integrations_platform_connector/apis/reimbursements.py from .base import Base from apps.fyle.models import Reimbursement class Reimbursements(Base): \"\"\"Class for", "payload = { 'data': data } return self.connection.bulk_post_reimbursements(payload) def sync(self): \"\"\" Syncs the", "__get_all_generator(self): \"\"\" Returns the generator for retrieving data from the API. :return: Generator", "Constructs the query params for the API call. :return: dict \"\"\" last_synced_record =", "{'order': 'updated_at.desc'} if updated_at: query_params['updated_at'] = updated_at return query_params def __get_all_generator(self): \"\"\" Returns", "\"\"\" Returns the generator for retrieving data from the API. :return: Generator \"\"\"", "self.format_date(last_synced_record.updated_at) if last_synced_record else None query_params = {'order': 'updated_at.desc'} if updated_at: query_params['updated_at'] =", "Reimbursements APIs.\"\"\" def __construct_query_params(self) -> dict: \"\"\" Constructs the query params for the", "API call. :return: dict \"\"\" last_synced_record = Reimbursement.get_last_synced_at(self.workspace_id) updated_at = self.format_date(last_synced_record.updated_at) if last_synced_record", "the API. :return: Generator \"\"\" query_params = self.__construct_query_params() return self.connection.list_all(query_params) def search_reimbursements(self, query_params):", "if last_synced_record else None query_params = {'order': 'updated_at.desc'} if updated_at: query_params['updated_at'] = updated_at", "updated_at: query_params['updated_at'] = updated_at return query_params def __get_all_generator(self): \"\"\" Returns the generator for", "def __construct_query_params(self) -> dict: \"\"\" Constructs the query params for the API call.", "params for the API call. 
:return: dict \"\"\" last_synced_record = Reimbursement.get_last_synced_at(self.workspace_id) updated_at =", "Reimbursement.get_last_synced_at(self.workspace_id) updated_at = self.format_date(last_synced_record.updated_at) if last_synced_record else None query_params = {'order': 'updated_at.desc'} if", "self.__construct_query_params() return self.connection.list_all(query_params) def search_reimbursements(self, query_params): \"\"\" Get of reimbursements filtered on query", "= self.format_date(last_synced_record.updated_at) if last_synced_record else None query_params = {'order': 'updated_at.desc'} if updated_at: query_params['updated_at']", "def bulk_post_reimbursements(self, data): \"\"\" Post of reimbursements \"\"\" payload = { 'data': data", "Base from apps.fyle.models import Reimbursement class Reimbursements(Base): \"\"\"Class for Reimbursements APIs.\"\"\" def __construct_query_params(self)", "self.connection.bulk_post_reimbursements(payload) def sync(self): \"\"\" Syncs the latest API data to DB. \"\"\" generator", "\"\"\" Post of reimbursements \"\"\" payload = { 'data': data } return self.connection.bulk_post_reimbursements(payload)", "data from the API. :return: Generator \"\"\" query_params = self.__construct_query_params() return self.connection.list_all(query_params) def", "of reimbursements \"\"\" payload = { 'data': data } return self.connection.bulk_post_reimbursements(payload) def sync(self):", "reimbursements filtered on query parameters :return: Generator \"\"\" query_params['order'] = 'updated_at.desc' return self.connection.list_all(query_params)", "'data': data } return self.connection.bulk_post_reimbursements(payload) def sync(self): \"\"\" Syncs the latest API data", "-> dict: \"\"\" Constructs the query params for the API call. :return: dict", "__construct_query_params(self) -> dict: \"\"\" Constructs the query params for the API call. :return:", "Generator \"\"\" query_params['order'] = 'updated_at.desc' return self.connection.list_all(query_params) def bulk_post_reimbursements(self, data): \"\"\" Post of", "Post of reimbursements \"\"\" payload = { 'data': data } return self.connection.bulk_post_reimbursements(payload) def", "= updated_at return query_params def __get_all_generator(self): \"\"\" Returns the generator for retrieving data", "else None query_params = {'order': 'updated_at.desc'} if updated_at: query_params['updated_at'] = updated_at return query_params", "import Reimbursement class Reimbursements(Base): \"\"\"Class for Reimbursements APIs.\"\"\" def __construct_query_params(self) -> dict: \"\"\"", "'updated_at.desc' return self.connection.list_all(query_params) def bulk_post_reimbursements(self, data): \"\"\" Post of reimbursements \"\"\" payload =", "query_params def __get_all_generator(self): \"\"\" Returns the generator for retrieving data from the API.", "import Base from apps.fyle.models import Reimbursement class Reimbursements(Base): \"\"\"Class for Reimbursements APIs.\"\"\" def", ":return: Generator \"\"\" query_params = self.__construct_query_params() return self.connection.list_all(query_params) def search_reimbursements(self, query_params): \"\"\" Get", "\"\"\" last_synced_record = Reimbursement.get_last_synced_at(self.workspace_id) updated_at = self.format_date(last_synced_record.updated_at) if last_synced_record else None query_params =", "the API call. 
:return: dict \"\"\" last_synced_record = Reimbursement.get_last_synced_at(self.workspace_id) updated_at = self.format_date(last_synced_record.updated_at) if", "= 'updated_at.desc' return self.connection.list_all(query_params) def bulk_post_reimbursements(self, data): \"\"\" Post of reimbursements \"\"\" payload", "\"\"\" payload = { 'data': data } return self.connection.bulk_post_reimbursements(payload) def sync(self): \"\"\" Syncs", "API. :return: Generator \"\"\" query_params = self.__construct_query_params() return self.connection.list_all(query_params) def search_reimbursements(self, query_params): \"\"\"", "def __get_all_generator(self): \"\"\" Returns the generator for retrieving data from the API. :return:", "latest API data to DB. \"\"\" generator = self.__get_all_generator() for items in generator:", "for the API call. :return: dict \"\"\" last_synced_record = Reimbursement.get_last_synced_at(self.workspace_id) updated_at = self.format_date(last_synced_record.updated_at)", "bulk_post_reimbursements(self, data): \"\"\" Post of reimbursements \"\"\" payload = { 'data': data }", "= {'order': 'updated_at.desc'} if updated_at: query_params['updated_at'] = updated_at return query_params def __get_all_generator(self): \"\"\"", "query parameters :return: Generator \"\"\" query_params['order'] = 'updated_at.desc' return self.connection.list_all(query_params) def bulk_post_reimbursements(self, data):", "query params for the API call. :return: dict \"\"\" last_synced_record = Reimbursement.get_last_synced_at(self.workspace_id) updated_at", "call. :return: dict \"\"\" last_synced_record = Reimbursement.get_last_synced_at(self.workspace_id) updated_at = self.format_date(last_synced_record.updated_at) if last_synced_record else", "from apps.fyle.models import Reimbursement class Reimbursements(Base): \"\"\"Class for Reimbursements APIs.\"\"\" def __construct_query_params(self) ->", "dict \"\"\" last_synced_record = Reimbursement.get_last_synced_at(self.workspace_id) updated_at = self.format_date(last_synced_record.updated_at) if last_synced_record else None query_params", "updated_at = self.format_date(last_synced_record.updated_at) if last_synced_record else None query_params = {'order': 'updated_at.desc'} if updated_at:", "query_params['updated_at'] = updated_at return query_params def __get_all_generator(self): \"\"\" Returns the generator for retrieving", "last_synced_record = Reimbursement.get_last_synced_at(self.workspace_id) updated_at = self.format_date(last_synced_record.updated_at) if last_synced_record else None query_params = {'order':", "dict: \"\"\" Constructs the query params for the API call. :return: dict \"\"\"", "'updated_at.desc'} if updated_at: query_params['updated_at'] = updated_at return query_params def __get_all_generator(self): \"\"\" Returns the", "\"\"\" Syncs the latest API data to DB. \"\"\" generator = self.__get_all_generator() for", "\"\"\"Class for Reimbursements APIs.\"\"\" def __construct_query_params(self) -> dict: \"\"\" Constructs the query params", "self.connection.list_all(query_params) def search_reimbursements(self, query_params): \"\"\" Get of reimbursements filtered on query parameters :return:", "for retrieving data from the API. :return: Generator \"\"\" query_params = self.__construct_query_params() return", "data to DB. 
\"\"\" generator = self.__get_all_generator() for items in generator: Reimbursement.create_or_update_reimbursement_objects(items['data'], self.workspace_id)", "= Reimbursement.get_last_synced_at(self.workspace_id) updated_at = self.format_date(last_synced_record.updated_at) if last_synced_record else None query_params = {'order': 'updated_at.desc'}", "Syncs the latest API data to DB. \"\"\" generator = self.__get_all_generator() for items", ":return: Generator \"\"\" query_params['order'] = 'updated_at.desc' return self.connection.list_all(query_params) def bulk_post_reimbursements(self, data): \"\"\" Post", "self.connection.list_all(query_params) def bulk_post_reimbursements(self, data): \"\"\" Post of reimbursements \"\"\" payload = { 'data':", "= { 'data': data } return self.connection.bulk_post_reimbursements(payload) def sync(self): \"\"\" Syncs the latest", "search_reimbursements(self, query_params): \"\"\" Get of reimbursements filtered on query parameters :return: Generator \"\"\"", "query_params = self.__construct_query_params() return self.connection.list_all(query_params) def search_reimbursements(self, query_params): \"\"\" Get of reimbursements filtered", "return self.connection.bulk_post_reimbursements(payload) def sync(self): \"\"\" Syncs the latest API data to DB. \"\"\"" ]
[ "LOCKDOWNQUERY, DISTANCEQUERY, CTBQUERY]) TEXT = \"text\" csvreader = csv.DictReader(sys.stdin) csvwriter = csv.DictWriter(sys.stdout,fieldnames=csvreader.fieldnames) csvwriter.writeheader()", "stdin # usage: gunzip -c file.csv.gz | python3 query-csv-test.py # 20200525 erikt(at)xs4all.nl import", "\"text\" csvreader = csv.DictReader(sys.stdin) csvwriter = csv.DictWriter(sys.stdout,fieldnames=csvreader.fieldnames) csvwriter.writeheader() for row in csvreader: if", "extract social distancing tweets from csv file at stdin # usage: gunzip -c", "LOCKDOWNQUERY = \"lock.down|lockdown\" VACCINQUERY = \"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\" TESTQUERY = r'\\btest|getest|sneltest|pcr' CTBQUERY = r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)' QUERY", "usage: gunzip -c file.csv.gz | python3 query-csv-test.py # 20200525 erikt(at)xs4all.nl import csv import", "from csv file at stdin # usage: gunzip -c file.csv.gz | python3 query-csv-test.py", "-c file.csv.gz | python3 query-csv-test.py # 20200525 erikt(at)xs4all.nl import csv import re import", "-]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter\" LOCKDOWNQUERY = \"lock.down|lockdown\" VACCINQUERY = \"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\" TESTQUERY = r'\\btest|getest|sneltest|pcr' CTBQUERY =", "= \"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\" PANDEMICQUERY = \"|\".join([TOPICQUERY, r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+ r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+ r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin']) DISTANCEQUERY = \"1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter\"", "import sys TOPICQUERY = \"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\" PANDEMICQUERY = \"|\".join([TOPICQUERY, r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+ r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+ r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin']) DISTANCEQUERY =", "csv import re import sys TOPICQUERY = \"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\" PANDEMICQUERY = \"|\".join([TOPICQUERY, r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+ r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+", "PANDEMICQUERY = \"|\".join([TOPICQUERY, r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+ r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+ r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin']) DISTANCEQUERY = \"1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter\" LOCKDOWNQUERY =", "import csv import re import sys TOPICQUERY = \"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\" PANDEMICQUERY = \"|\".join([TOPICQUERY, r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+", 
"r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin']) DISTANCEQUERY = \"1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter\" LOCKDOWNQUERY = \"lock.down|lockdown\" VACCINQUERY = \"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\" TESTQUERY", "= \"text\" csvreader = csv.DictReader(sys.stdin) csvwriter = csv.DictWriter(sys.stdout,fieldnames=csvreader.fieldnames) csvwriter.writeheader() for row in csvreader:", "re import sys TOPICQUERY = \"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\" PANDEMICQUERY = \"|\".join([TOPICQUERY, r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+ r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+ r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin']) DISTANCEQUERY", "sys TOPICQUERY = \"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\" PANDEMICQUERY = \"|\".join([TOPICQUERY, r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+ r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+ r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin']) DISTANCEQUERY = \"1[.,]5[", "DISTANCEQUERY = \"1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter\" LOCKDOWNQUERY = \"lock.down|lockdown\" VACCINQUERY = \"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\" TESTQUERY =", "distancing tweets from csv file at stdin # usage: gunzip -c file.csv.gz |", "r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+ r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin']) DISTANCEQUERY = \"1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter\" LOCKDOWNQUERY = \"lock.down|lockdown\" VACCINQUERY = \"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\"", "at stdin # usage: gunzip -c file.csv.gz | python3 query-csv-test.py # 20200525 erikt(at)xs4all.nl", "TOPICQUERY = \"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\" PANDEMICQUERY = \"|\".join([TOPICQUERY, r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+ r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+ r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin']) DISTANCEQUERY = \"1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[", "\"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\" PANDEMICQUERY = \"|\".join([TOPICQUERY, r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+ r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+ r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin']) DISTANCEQUERY = \"1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter\" LOCKDOWNQUERY", "\"lock.down|lockdown\" VACCINQUERY = \"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\" 
TESTQUERY = r'\\btest|getest|sneltest|pcr' CTBQUERY = r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)' QUERY = \"|\".join([PANDEMICQUERY,", "import re import sys TOPICQUERY = \"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\" PANDEMICQUERY = \"|\".join([TOPICQUERY, r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+ r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+ r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin'])", "r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)' QUERY = \"|\".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY, LOCKDOWNQUERY, DISTANCEQUERY, CTBQUERY]) TEXT = \"text\" csvreader", "# query-csv-distance.py: extract social distancing tweets from csv file at stdin # usage:", "python3 query-csv-test.py # 20200525 erikt(at)xs4all.nl import csv import re import sys TOPICQUERY =", "query-csv-distance.py: extract social distancing tweets from csv file at stdin # usage: gunzip", "= r'\\btest|getest|sneltest|pcr' CTBQUERY = r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)' QUERY = \"|\".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY, LOCKDOWNQUERY, DISTANCEQUERY, CTBQUERY])", "DISTANCEQUERY, CTBQUERY]) TEXT = \"text\" csvreader = csv.DictReader(sys.stdin) csvwriter = csv.DictWriter(sys.stdout,fieldnames=csvreader.fieldnames) csvwriter.writeheader() for", "<filename>query-csv-pandemic.py<gh_stars>0 #/usr/bin/env python3 # query-csv-distance.py: extract social distancing tweets from csv file at", "file.csv.gz | python3 query-csv-test.py # 20200525 erikt(at)xs4all.nl import csv import re import sys", "csvreader = csv.DictReader(sys.stdin) csvwriter = csv.DictWriter(sys.stdout,fieldnames=csvreader.fieldnames) csvwriter.writeheader() for row in csvreader: if re.search(QUERY,row[TEXT],flags=re.IGNORECASE):", "python3 # query-csv-distance.py: extract social distancing tweets from csv file at stdin #", "#/usr/bin/env python3 # query-csv-distance.py: extract social distancing tweets from csv file at stdin", "20200525 erikt(at)xs4all.nl import csv import re import sys TOPICQUERY = \"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\" PANDEMICQUERY =", "\"|\".join([TOPICQUERY, r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+ r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+ r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin']) DISTANCEQUERY = \"1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter\" LOCKDOWNQUERY = \"lock.down|lockdown\" VACCINQUERY", "TESTQUERY = r'\\btest|getest|sneltest|pcr' CTBQUERY = r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)' QUERY = \"|\".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY, LOCKDOWNQUERY, DISTANCEQUERY,", "CTBQUERY = r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)' QUERY = \"|\".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY, LOCKDOWNQUERY, DISTANCEQUERY, CTBQUERY]) TEXT =", "erikt(at)xs4all.nl import csv import re import sys TOPICQUERY = \"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\" PANDEMICQUERY = \"|\".join([TOPICQUERY,", "VACCINQUERY, LOCKDOWNQUERY, 
DISTANCEQUERY, CTBQUERY]) TEXT = \"text\" csvreader = csv.DictReader(sys.stdin) csvwriter = csv.DictWriter(sys.stdout,fieldnames=csvreader.fieldnames)", "# 20200525 erikt(at)xs4all.nl import csv import re import sys TOPICQUERY = \"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\" PANDEMICQUERY", "social distancing tweets from csv file at stdin # usage: gunzip -c file.csv.gz", "csv file at stdin # usage: gunzip -c file.csv.gz | python3 query-csv-test.py #", "r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+ r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+ r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin']) DISTANCEQUERY = \"1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter\" LOCKDOWNQUERY = \"lock.down|lockdown\" VACCINQUERY =", "gunzip -c file.csv.gz | python3 query-csv-test.py # 20200525 erikt(at)xs4all.nl import csv import re", "\"|\".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY, LOCKDOWNQUERY, DISTANCEQUERY, CTBQUERY]) TEXT = \"text\" csvreader = csv.DictReader(sys.stdin) csvwriter", "= \"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\" TESTQUERY = r'\\btest|getest|sneltest|pcr' CTBQUERY = r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)' QUERY = \"|\".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY,", "TESTQUERY, VACCINQUERY, LOCKDOWNQUERY, DISTANCEQUERY, CTBQUERY]) TEXT = \"text\" csvreader = csv.DictReader(sys.stdin) csvwriter =", "# usage: gunzip -c file.csv.gz | python3 query-csv-test.py # 20200525 erikt(at)xs4all.nl import csv", "TEXT = \"text\" csvreader = csv.DictReader(sys.stdin) csvwriter = csv.DictWriter(sys.stdout,fieldnames=csvreader.fieldnames) csvwriter.writeheader() for row in", "tweets from csv file at stdin # usage: gunzip -c file.csv.gz | python3", "= \"1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter\" LOCKDOWNQUERY = \"lock.down|lockdown\" VACCINQUERY = \"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\" TESTQUERY = r'\\btest|getest|sneltest|pcr'", "= \"|\".join([TOPICQUERY, r'virus|besmet|ziekenhui|\\bic\\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|'+ r'mondkapje|quarantaine|\\bwho\\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|'+ r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin']) DISTANCEQUERY = \"1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter\" LOCKDOWNQUERY = \"lock.down|lockdown\"", "= r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)' QUERY = \"|\".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY, LOCKDOWNQUERY, DISTANCEQUERY, CTBQUERY]) TEXT = \"text\"", "file at stdin # usage: gunzip -c file.csv.gz | python3 query-csv-test.py # 20200525", "r'\\btest|getest|sneltest|pcr' CTBQUERY = r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)' QUERY = \"|\".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY, LOCKDOWNQUERY, DISTANCEQUERY, CTBQUERY]) TEXT", "= \"lock.down|lockdown\" VACCINQUERY = \"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\" TESTQUERY = r'\\btest|getest|sneltest|pcr' CTBQUERY = 
r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)' QUERY =", "query-csv-test.py # 20200525 erikt(at)xs4all.nl import csv import re import sys TOPICQUERY = \"corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol\"", "\"1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter\" LOCKDOWNQUERY = \"lock.down|lockdown\" VACCINQUERY = \"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\" TESTQUERY = r'\\btest|getest|sneltest|pcr' CTBQUERY", "\"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\" TESTQUERY = r'\\btest|getest|sneltest|pcr' CTBQUERY = r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)' QUERY = \"|\".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY, LOCKDOWNQUERY,", "-]*meter\" LOCKDOWNQUERY = \"lock.down|lockdown\" VACCINQUERY = \"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\" TESTQUERY = r'\\btest|getest|sneltest|pcr' CTBQUERY = r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)'", "QUERY = \"|\".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY, LOCKDOWNQUERY, DISTANCEQUERY, CTBQUERY]) TEXT = \"text\" csvreader =", "= \"|\".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY, LOCKDOWNQUERY, DISTANCEQUERY, CTBQUERY]) TEXT = \"text\" csvreader = csv.DictReader(sys.stdin)", "CTBQUERY]) TEXT = \"text\" csvreader = csv.DictReader(sys.stdin) csvwriter = csv.DictWriter(sys.stdout,fieldnames=csvreader.fieldnames) csvwriter.writeheader() for row", "| python3 query-csv-test.py # 20200525 erikt(at)xs4all.nl import csv import re import sys TOPICQUERY", "VACCINQUERY = \"vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject\" TESTQUERY = r'\\btest|getest|sneltest|pcr' CTBQUERY = r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\\bcodes\\b|2g|3g|1g|apartheid)' QUERY = \"|\".join([PANDEMICQUERY, TESTQUERY,", "= csv.DictReader(sys.stdin) csvwriter = csv.DictWriter(sys.stdout,fieldnames=csvreader.fieldnames) csvwriter.writeheader() for row in csvreader: if re.search(QUERY,row[TEXT],flags=re.IGNORECASE): csvwriter.writerow(row)" ]
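This row's n-grams likewise cover one short script end to end (marked `<filename>query-csv-pandemic.py<gh_stars>0` in the row; the header comment inside it still reads query-csv-distance.py, and the shebang appears without the `!`, both kept as-is). A stitched-together reconstruction follows as a readability aid: the statements are verbatim from the fragments, while line breaks and the grouping of the long regex literals are assumed.

```python
# Best-effort reconstruction of query-csv-pandemic.py from the n-gram row above
# (content verbatim from the fragments; line breaks assumed).
#/usr/bin/env python3
# query-csv-distance.py: extract social distancing tweets from csv file at stdin
# usage: gunzip -c file.csv.gz | python3 query-csv-test.py
# 20200525 erikt(at)xs4all.nl
import csv
import re
import sys

TOPICQUERY = "corona|covid|huisarts|mondkapje|rivm|blijfthuis|flattenthecurve|houvol"
PANDEMICQUERY = "|".join([TOPICQUERY,
    r'virus|besmet|ziekenhui|\bic\b|intensive.care|^zorg|vaccin|[^ad]arts|uitbraak|uitbrak|pandemie|ggd|' +
    r'mondkapje|quarantaine|\bwho\b|avondklok|variant|verple|sympto|e.golf|mutant|^omt$|umc|hcq|' +
    r'hydroxychloroquine|virolo|zkh|oversterfte|patiënt|patient|intensivist|🦠|ivermectin'])
DISTANCEQUERY = "1[.,]5[ -]*m|afstand.*hou|hou.*afstand|anderhalve[ -]*meter"
LOCKDOWNQUERY = "lock.down|lockdown"
VACCINQUERY = "vaccin|ingeënt|ingeent|inent|prik|spuit|bijwerking|-->|💉|pfizer|moderna|astrazeneca|astra|zeneca|novavax|biontech|booster|vax|mrna|inject"
TESTQUERY = r'\btest|getest|sneltest|pcr'
CTBQUERY = r'(ctb|qr|toegangsbewij|testbewij|coronapas|vaccinatiepas|vaccinpas|\bcodes\b|2g|3g|1g|apartheid)'
QUERY = "|".join([PANDEMICQUERY, TESTQUERY, VACCINQUERY, LOCKDOWNQUERY, DISTANCEQUERY, CTBQUERY])
TEXT = "text"

csvreader = csv.DictReader(sys.stdin)
csvwriter = csv.DictWriter(sys.stdout,fieldnames=csvreader.fieldnames)
csvwriter.writeheader()
for row in csvreader:
    if re.search(QUERY,row[TEXT],flags=re.IGNORECASE):
        csvwriter.writerow(row)
```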
[ "= TradeUpdate( trade_id=\"12345\", client_order_id=\"OID1\", exchange_order_id=\"EOID1\", trading_pair=\"HBOT-COINALPHA\", fill_timestamp=1640001112, fill_price=Decimal(\"1000.11\"), fill_base_amount=Decimal(\"2\"), fill_quote_amount=Decimal(\"2000.22\"), fee=fee, ) self.assertEqual(trade_update,", "self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_deducted_from_return_spot_fee_created_for_sell(self): schema = TradeFeeSchema( percent_fee_token=\"HBOT\", maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"),", "self.assertEqual(AddedToCostTradeFee, type(fee)) def test_added_to_cost_perpetual_fee_created_when_closing_position_but_schema_has_percent_fee_token(self): schema = TradeFeeSchema( percent_fee_token=\"HBOT\", maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, ) fee", "rate if the fee amount is calculated fee = DeductedFromReturnsTradeFee(percent=Decimal(\"0\"), percent_token=\"CO<PASSWORD>\") fee_amount =", "TradeFeeBase.from_json(fee.to_json())) def test_added_to_cost_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self): # Configure fee to use a percent token different from", "TradeFeeSchema( percent_fee_token=\"HBOT\", maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, ) fee = TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.CLOSE, percent=Decimal(\"1.1\"), percent_token=\"HBOT\",", "amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_deducted_from_return_spot_fee_created_for_buy_and_fee_deducted_from_return(self):", "percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee, type(fee)) def test_added_to_cost_perpetual_fee_created_when_closing_position_but_schema_has_percent_fee_token(self): schema = TradeFeeSchema( percent_fee_token=\"HBOT\", maker_percent_fee_decimal=Decimal(\"1\"),", "TradeUpdateTests(TestCase): def test_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount]", "\"0.5\", \"percent_token\": \"COINALPHA\", \"flat_fees\": [token_amount.to_json()] } self.assertEqual(expected_json, fee.to_json()) def test_added_to_cost_json_deserialization(self): token_amount = TokenAmount(token=\"COINALPHA\",", "[token_amount.to_json()] } self.assertEqual(expected_json, fee.to_json()) def test_deducted_from_returns_json_deserialization(self): token_amount = TokenAmount(token=\"CO<PASSWORD>\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee(", ") fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.BUY, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(DeductedFromReturnsTradeFee, type(fee))", "# That forces the logic to need the convertion rate if the fee", "import ( AddedToCostTradeFee, DeductedFromReturnsTradeFee, TokenAmount, TradeFeeBase, TradeFeeSchema, ) class 
TradeFeeTests(TestCase): def test_added_to_cost_spot_fee_created_for_buy_and_fee_not_deducted_from_return(self): schema", "calculated fee = AddedToCostTradeFee(percent=Decimal(\"0\"), percent_token=\"CO<PASSWORD>\") fee_amount = fee.fee_amount_in_token( trading_pair=\"HBOT-COINALPHA\", price=Decimal(\"1000\"), order_amount=Decimal(\"1\"), token=\"BNB\") self.assertEqual(Decimal(\"0\"),", "TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.CLOSE, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\",", "percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee, type(fee)) def test_added_to_cost_perpetual_fee_created_when_closing_position_but_schema_has_percent_fee_token(self): schema = TradeFeeSchema( percent_fee_token=\"HBOT\",", "DeductedFromReturnsTradeFee(percent=Decimal(\"0\"), percent_token=\"CO<PASSWORD>\") fee_amount = fee.fee_amount_in_token( trading_pair=\"HBOT-COINALPHA\", price=Decimal(\"1000\"), order_amount=Decimal(\"1\"), token=\"BNB\") self.assertEqual(Decimal(\"0\"), fee_amount) class TokenAmountTests(TestCase):", "{ \"fee_type\": DeductedFromReturnsTradeFee.type_descriptor_for_json(), \"percent\": \"0.5\", \"percent_token\": \"COINALPHA\", \"flat_fees\": [token_amount.to_json()] } self.assertEqual(expected_json, fee.to_json()) def", "decimal import Decimal from unittest import TestCase from hummingbot.core.data_type.common import TradeType, PositionAction from", "TradeFeeBase.from_json(fee.to_json())) def test_deducted_from_returns_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount]", "type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_added_to_cost_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\",", ") self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_deducted_from_return_spot_fee_created_for_sell(self): schema", "to request the fee value # That forces the logic to need the", "def test_deducted_from_returns_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] )", "TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.OPEN, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee, type(fee)) def test_added_to_cost_perpetual_fee_created_when_closing_position_but_schema_has_percent_fee_token(self): schema", "= TokenAmount(token=\"HBOT-COINALPHA\", amount=Decimal(\"1000.50\")) expected_json = { \"token\": \"HBOT-COINALPHA\", \"amount\": \"1000.50\", } self.assertEqual(expected_json, amount.to_json())", "TradeFeeSchema( 
maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, ) fee = TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.OPEN, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\",", "self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_added_to_cost_json_serialization(self): token_amount =", "fill_timestamp=1640001112, fill_price=Decimal(\"1000.11\"), fill_base_amount=Decimal(\"2\"), fill_quote_amount=Decimal(\"2000.22\"), fee=fee, ) expected_json = trade_update._asdict() expected_json.update({ \"fill_price\": \"1000.11\", \"fill_base_amount\":", "order_amount=Decimal(\"1\"), token=\"BNB\") self.assertEqual(Decimal(\"0\"), fee_amount) class TokenAmountTests(TestCase): def test_json_serialization(self): amount = TokenAmount(token=\"HBOT-COINALPHA\", amount=Decimal(\"1000.50\")) expected_json", "fee_schema=schema, trade_type=TradeType.SELL, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token)", "token=\"BNB\") self.assertEqual(Decimal(\"0\"), fee_amount) class TokenAmountTests(TestCase): def test_json_serialization(self): amount = TokenAmount(token=\"HBOT-COINALPHA\", amount=Decimal(\"1000.50\")) expected_json =", "class TokenAmountTests(TestCase): def test_json_serialization(self): amount = TokenAmount(token=\"HBOT-COINALPHA\", amount=Decimal(\"1000.50\")) expected_json = { \"token\": \"HBOT-COINALPHA\",", "buy_percent_fee_deducted_from_returns=True, ) fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.BUY, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(DeductedFromReturnsTradeFee,", "trade_type=TradeType.BUY, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\",", "type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_deducted_from_return_spot_fee_created_for_sell(self): schema = TradeFeeSchema(", "amount=Decimal(\"20.6\")) fee = AddedToCostTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) expected_json = { \"fee_type\": AddedToCostTradeFee.type_descriptor_for_json(),", "taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, ) fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.BUY, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] )", "TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = AddedToCostTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) self.assertEqual(fee, 
TradeFeeBase.from_json(fee.to_json())) def test_deducted_from_returns_json_serialization(self):", "test_deducted_from_return_spot_fee_created_for_buy_and_fee_deducted_from_return(self): schema = TradeFeeSchema( maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=True, ) fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.BUY,", "TradeUpdate( trade_id=\"12345\", client_order_id=\"OID1\", exchange_order_id=\"EOID1\", trading_pair=\"HBOT-COINALPHA\", fill_timestamp=1640001112, fill_price=Decimal(\"1000.11\"), fill_base_amount=Decimal(\"2\"), fill_quote_amount=Decimal(\"2000.22\"), fee=fee, ) expected_json =", "token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"CO<PASSWORD>\", flat_fees=[token_amount] ) trade_update =", "fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_deducted_from_returns_perpetual_fee_created_when_closing_position_and_no_percent_fee_token(self): schema = TradeFeeSchema( maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"),", "\"percent_token\": \"COINALPHA\", \"flat_fees\": [token_amount.to_json()] } self.assertEqual(expected_json, fee.to_json()) def test_deducted_from_returns_json_deserialization(self): token_amount = TokenAmount(token=\"CO<PASSWORD>\", amount=Decimal(\"20.6\"))", "fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.BUY, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"),", "TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.SELL, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) def test_added_to_cost_perpetual_fee_created_when_opening_positions(self): schema", "fee.flat_fees) schema.percent_fee_token = \"HBOT\" fee = TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.OPEN, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))]", "test_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) trade_update", "from hummingbot.core.data_type.in_flight_order import TradeUpdate from hummingbot.core.data_type.trade_fee import ( AddedToCostTradeFee, DeductedFromReturnsTradeFee, TokenAmount, TradeFeeBase, TradeFeeSchema,", "fill_price=Decimal(\"1000.11\"), fill_base_amount=Decimal(\"2\"), fill_quote_amount=Decimal(\"2000.22\"), fee=fee, ) expected_json = trade_update._asdict() expected_json.update({ \"fill_price\": \"1000.11\", \"fill_base_amount\": \"2\",", "percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json())) def test_added_to_cost_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self): # Configure fee to use", "\"fee_type\": DeductedFromReturnsTradeFee.type_descriptor_for_json(), \"percent\": \"0.5\", \"percent_token\": \"COINALPHA\", \"flat_fees\": 
[token_amount.to_json()] } self.assertEqual(expected_json, fee.to_json()) def test_deducted_from_returns_json_deserialization(self):", "price=Decimal(\"1000\"), order_amount=Decimal(\"1\"), token=\"BNB\") self.assertEqual(Decimal(\"0\"), fee_amount) class TokenAmountTests(TestCase): def test_json_serialization(self): amount = TokenAmount(token=\"HBOT-COINALPHA\", amount=Decimal(\"1000.50\"))", "the logic to need the convertion rate if the fee amount is calculated", "} self.assertEqual(expected_json, fee.to_json()) def test_added_to_cost_json_deserialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = AddedToCostTradeFee( percent=Decimal(\"0.5\"),", "percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))],", "self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) def test_added_to_cost_perpetual_fee_created_when_opening_positions(self): schema = TradeFeeSchema( maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, ) fee =", "PositionAction from hummingbot.core.data_type.in_flight_order import TradeUpdate from hummingbot.core.data_type.trade_fee import ( AddedToCostTradeFee, DeductedFromReturnsTradeFee, TokenAmount, TradeFeeBase,", "need the convertion rate if the fee amount is calculated fee = DeductedFromReturnsTradeFee(percent=Decimal(\"0\"),", "= TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.CLOSE, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent)", "token used to request the fee value # That forces the logic to", "flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee, type(fee)) def test_added_to_cost_perpetual_fee_created_when_closing_position_but_schema_has_percent_fee_token(self): schema = TradeFeeSchema( percent_fee_token=\"HBOT\", maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"),", "fee=fee, ) expected_json = trade_update._asdict() expected_json.update({ \"fill_price\": \"1000.11\", \"fill_base_amount\": \"2\", \"fill_quote_amount\": \"2000.22\", \"fee\":", "TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) trade_update = TradeUpdate( trade_id=\"12345\",", ") expected_json = trade_update._asdict() expected_json.update({ \"fill_price\": \"1000.11\", \"fill_base_amount\": \"2\", \"fill_quote_amount\": \"2000.22\", \"fee\": fee.to_json(),", "= { \"fee_type\": AddedToCostTradeFee.type_descriptor_for_json(), \"percent\": \"0.5\", \"percent_token\": \"COINALPHA\", \"flat_fees\": [token_amount.to_json()] } self.assertEqual(expected_json, fee.to_json())", "{ \"fee_type\": AddedToCostTradeFee.type_descriptor_for_json(), \"percent\": \"0.5\", \"percent_token\": \"COINALPHA\", \"flat_fees\": [token_amount.to_json()] } self.assertEqual(expected_json, fee.to_json()) def", "self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", 
amount=Decimal(\"20\"))], fee.flat_fees) schema.percent_fee_token = \"HBOT\" fee = TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.OPEN,", "rate if the fee amount is calculated fee = AddedToCostTradeFee(percent=Decimal(\"0\"), percent_token=\"CO<PASSWORD>\") fee_amount =", "TradeFeeSchema( maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=True, ) fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.BUY, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\",", "\"COINALPHA\", \"flat_fees\": [token_amount.to_json()] } self.assertEqual(expected_json, fee.to_json()) def test_deducted_from_returns_json_deserialization(self): token_amount = TokenAmount(token=\"CO<PASSWORD>\", amount=Decimal(\"20.6\")) fee", "= TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.BUY, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent)", "fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_deducted_from_returns_perpetual_fee_created_when_closing_position_and_no_percent_fee_token(self): schema = TradeFeeSchema( maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, )", "percent_token=\"COINALPHA\", flat_fees=[token_amount] ) trade_update = TradeUpdate( trade_id=\"12345\", client_order_id=\"OID1\", exchange_order_id=\"EOID1\", trading_pair=\"HBOT-COINALPHA\", fill_timestamp=1640001112, fill_price=Decimal(\"1000.11\"), fill_base_amount=Decimal(\"2\"),", ") self.assertEqual(AddedToCostTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) schema.percent_fee_token = \"HBOT\"", "the convertion rate if the fee amount is calculated fee = DeductedFromReturnsTradeFee(percent=Decimal(\"0\"), percent_token=\"CO<PASSWORD>\")", "TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.OPEN, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\",", "self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json())) def test_deducted_from_returns_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\",", "self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_deducted_from_return_spot_fee_created_for_buy_and_fee_deducted_from_return(self): schema = TradeFeeSchema( maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=True, ) fee", "schema = TradeFeeSchema( percent_fee_token=\"HBOT\", maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, ) fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.BUY,", "fee_amount) class TokenAmountTests(TestCase): def 
from decimal import Decimal
from unittest import TestCase

from hummingbot.core.data_type.common import TradeType, PositionAction
from hummingbot.core.data_type.in_flight_order import TradeUpdate
from hummingbot.core.data_type.trade_fee import (
    AddedToCostTradeFee,
    DeductedFromReturnsTradeFee,
    TokenAmount,
    TradeFeeBase,
    TradeFeeSchema,
)


class TradeFeeTests(TestCase):

    def test_added_to_cost_spot_fee_created_for_buy_and_fee_not_deducted_from_return(self):
        schema = TradeFeeSchema(
            percent_fee_token="HBOT",
            maker_percent_fee_decimal=Decimal("1"),
            taker_percent_fee_decimal=Decimal("1"),
            buy_percent_fee_deducted_from_returns=False,
        )

        fee = TradeFeeBase.new_spot_fee(
            fee_schema=schema,
            trade_type=TradeType.BUY,
            percent=Decimal("1.1"),
            percent_token="HBOT",
            flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
        )

        self.assertEqual(AddedToCostTradeFee, type(fee))
        self.assertEqual(Decimal("1.1"), fee.percent)
        self.assertEqual("HBOT", fee.percent_token)
        self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)

    def test_deducted_from_return_spot_fee_created_for_buy_and_fee_deducted_from_return(self):
        schema = TradeFeeSchema(
            maker_percent_fee_decimal=Decimal("1"),
            taker_percent_fee_decimal=Decimal("1"),
            buy_percent_fee_deducted_from_returns=True,
        )

        fee = TradeFeeBase.new_spot_fee(
            fee_schema=schema,
            trade_type=TradeType.BUY,
            percent=Decimal("1.1"),
            percent_token="HBOT",
            flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
        )

        self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
        self.assertEqual(Decimal("1.1"), fee.percent)
        self.assertEqual("HBOT", fee.percent_token)
        self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)

    def test_deducted_from_return_spot_fee_created_for_sell(self):
        schema = TradeFeeSchema(
            percent_fee_token="HBOT",
            maker_percent_fee_decimal=Decimal("1"),
            taker_percent_fee_decimal=Decimal("1"),
            buy_percent_fee_deducted_from_returns=False,
        )

        fee = TradeFeeBase.new_spot_fee(
            fee_schema=schema,
            trade_type=TradeType.SELL,
            percent=Decimal("1.1"),
            percent_token="HBOT",
            flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
        )

        self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
        self.assertEqual(Decimal("1.1"), fee.percent)
        self.assertEqual("HBOT", fee.percent_token)
        self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)

        schema.percent_fee_token = None
        schema.buy_percent_fee_deducted_from_returns = True

        fee = TradeFeeBase.new_spot_fee(
            fee_schema=schema,
            trade_type=TradeType.SELL,
            percent=Decimal("1.1"),
            percent_token="HBOT",
            flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
        )

        self.assertEqual(DeductedFromReturnsTradeFee, type(fee))

    def test_added_to_cost_perpetual_fee_created_when_opening_positions(self):
        schema = TradeFeeSchema(
            maker_percent_fee_decimal=Decimal("1"),
            taker_percent_fee_decimal=Decimal("1"),
            buy_percent_fee_deducted_from_returns=False,
        )

        fee = TradeFeeBase.new_perpetual_fee(
            fee_schema=schema,
            position_action=PositionAction.OPEN,
            percent=Decimal("1.1"),
            percent_token="HBOT",
            flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
        )

        self.assertEqual(AddedToCostTradeFee, type(fee))
        self.assertEqual(Decimal("1.1"), fee.percent)
        self.assertEqual("HBOT", fee.percent_token)
        self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)

        schema.percent_fee_token = "HBOT"

        fee = TradeFeeBase.new_perpetual_fee(
            fee_schema=schema,
            position_action=PositionAction.OPEN,
            percent=Decimal("1.1"),
            percent_token="HBOT",
            flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
        )

        self.assertEqual(AddedToCostTradeFee, type(fee))

    def test_added_to_cost_perpetual_fee_created_when_closing_position_but_schema_has_percent_fee_token(self):
        schema = TradeFeeSchema(
            percent_fee_token="HBOT",
            maker_percent_fee_decimal=Decimal("1"),
            taker_percent_fee_decimal=Decimal("1"),
            buy_percent_fee_deducted_from_returns=False,
        )

        fee = TradeFeeBase.new_perpetual_fee(
            fee_schema=schema,
            position_action=PositionAction.CLOSE,
            percent=Decimal("1.1"),
            percent_token="HBOT",
            flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
        )

        self.assertEqual(AddedToCostTradeFee, type(fee))
        self.assertEqual(Decimal("1.1"), fee.percent)
        self.assertEqual("HBOT", fee.percent_token)
        self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)

    def test_deducted_from_returns_perpetual_fee_created_when_closing_position_and_no_percent_fee_token(self):
        schema = TradeFeeSchema(
            maker_percent_fee_decimal=Decimal("1"),
            taker_percent_fee_decimal=Decimal("1"),
            buy_percent_fee_deducted_from_returns=False,
        )

        fee = TradeFeeBase.new_perpetual_fee(
            fee_schema=schema,
            position_action=PositionAction.CLOSE,
            percent=Decimal("1.1"),
            percent_token="HBOT",
            flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
        )

        self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
        self.assertEqual(Decimal("1.1"), fee.percent)
        self.assertEqual("HBOT", fee.percent_token)
        self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)

    def test_added_to_cost_json_serialization(self):
        token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
        fee = AddedToCostTradeFee(
            percent=Decimal("0.5"),
            percent_token="COINALPHA",
            flat_fees=[token_amount]
        )

        expected_json = {
            "fee_type": AddedToCostTradeFee.type_descriptor_for_json(),
            "percent": "0.5",
            "percent_token": "COINALPHA",
            "flat_fees": [token_amount.to_json()]
        }

        self.assertEqual(expected_json, fee.to_json())

    def test_added_to_cost_json_deserialization(self):
        token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
        fee = AddedToCostTradeFee(
            percent=Decimal("0.5"),
            percent_token="COINALPHA",
            flat_fees=[token_amount]
        )

        self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json()))

    def test_deducted_from_returns_json_serialization(self):
        token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
        fee = DeductedFromReturnsTradeFee(
            percent=Decimal("0.5"),
            percent_token="COINALPHA",
            flat_fees=[token_amount]
        )

        expected_json = {
            "fee_type": DeductedFromReturnsTradeFee.type_descriptor_for_json(),
            "percent": "0.5",
            "percent_token": "COINALPHA",
            "flat_fees": [token_amount.to_json()]
        }

        self.assertEqual(expected_json, fee.to_json())

    def test_deducted_from_returns_json_deserialization(self):
        token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
        fee = DeductedFromReturnsTradeFee(
            percent=Decimal("0.5"),
            percent_token="COINALPHA",
            flat_fees=[token_amount]
        )

        self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json()))

    def test_added_to_cost_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self):
        # Configure the fee to use a percent token different from the token used to request the fee value.
        # That forces the logic to need the conversion rate if the fee amount is calculated.
        fee = AddedToCostTradeFee(percent=Decimal("0"), percent_token="COINALPHA")

        fee_amount = fee.fee_amount_in_token(
            trading_pair="HBOT-COINALPHA",
            price=Decimal("1000"),
            order_amount=Decimal("1"),
            token="BNB")

        self.assertEqual(Decimal("0"), fee_amount)

    def test_deducted_from_returns_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self):
        # Configure the fee to use a percent token different from the token used to request the fee value.
        # That forces the logic to need the conversion rate if the fee amount is calculated.
        fee = DeductedFromReturnsTradeFee(percent=Decimal("0"), percent_token="COINALPHA")

        fee_amount = fee.fee_amount_in_token(
            trading_pair="HBOT-COINALPHA",
            price=Decimal("1000"),
            order_amount=Decimal("1"),
            token="BNB")

        self.assertEqual(Decimal("0"), fee_amount)


class TokenAmountTests(TestCase):

    def test_json_serialization(self):
        amount = TokenAmount(token="HBOT-COINALPHA", amount=Decimal("1000.50"))
        expected_json = {
            "token": "HBOT-COINALPHA",
            "amount": "1000.50",
        }

        self.assertEqual(expected_json, amount.to_json())

    def test_json_deserialization(self):
        amount = TokenAmount(token="HBOT-COINALPHA", amount=Decimal("1000.50"))

        self.assertEqual(amount, TokenAmount.from_json(amount.to_json()))


class TradeUpdateTests(TestCase):

    def test_json_serialization(self):
        token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
        fee = DeductedFromReturnsTradeFee(
            percent=Decimal("0.5"),
            percent_token="COINALPHA",
            flat_fees=[token_amount]
        )
        trade_update = TradeUpdate(
            trade_id="12345",
            client_order_id="OID1",
            exchange_order_id="EOID1",
            trading_pair="HBOT-COINALPHA",
            fill_timestamp=1640001112,
            fill_price=Decimal("1000.11"),
            fill_base_amount=Decimal("2"),
            fill_quote_amount=Decimal("2000.22"),
            fee=fee,
        )

        expected_json = trade_update._asdict()
        expected_json.update({
            "fill_price": "1000.11",
            "fill_base_amount": "2",
            "fill_quote_amount": "2000.22",
            "fee": fee.to_json(),
        })

        self.assertEqual(expected_json, trade_update.to_json())

    def test_json_deserialization(self):
        token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
        fee = DeductedFromReturnsTradeFee(
            percent=Decimal("0.5"),
            percent_token="COINALPHA",
            flat_fees=[token_amount]
        )
        trade_update = TradeUpdate(
            trade_id="12345",
            client_order_id="OID1",
            exchange_order_id="EOID1",
            trading_pair="HBOT-COINALPHA",
            fill_timestamp=1640001112,
            fill_price=Decimal("1000.11"),
            fill_base_amount=Decimal("2"),
            fill_quote_amount=Decimal("2000.22"),
            fee=fee,
        )

        self.assertEqual(trade_update, TradeUpdate.from_json(trade_update.to_json()))
test_deducted_from_returns_perpetual_fee_created_when_closing_position_and_no_percent_fee_token(self): schema = TradeFeeSchema(", "percent_fee_token=\"HBOT\", maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, ) fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.SELL, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\",", "token_amount = TokenAmount(token=\"CO<PASSWORD>\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json()))", "self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) schema.percent_fee_token = None schema.buy_percent_fee_deducted_from_returns", "\"0.5\", \"percent_token\": \"COINALPHA\", \"flat_fees\": [token_amount.to_json()] } self.assertEqual(expected_json, fee.to_json()) def test_deducted_from_returns_json_deserialization(self): token_amount = TokenAmount(token=\"CO<PASSWORD>\",", "TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.BUY, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\",", "fee = AddedToCostTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) expected_json = { \"fee_type\": AddedToCostTradeFee.type_descriptor_for_json(), \"percent\":", "self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_added_to_cost_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = AddedToCostTradeFee( percent=Decimal(\"0.5\"),", "the fee amount is calculated fee = AddedToCostTradeFee(percent=Decimal(\"0\"), percent_token=\"CO<PASSWORD>\") fee_amount = fee.fee_amount_in_token( trading_pair=\"HBOT-COINALPHA\",", "fee.fee_amount_in_token( trading_pair=\"HBOT-COINALPHA\", price=Decimal(\"1000\"), order_amount=Decimal(\"1\"), token=\"BNB\") self.assertEqual(Decimal(\"0\"), fee_amount) class TokenAmountTests(TestCase): def test_json_serialization(self): amount =", "\"HBOT\" fee = TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.OPEN, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee, type(fee))", "trade_update = TradeUpdate( trade_id=\"12345\", client_order_id=\"OID1\", exchange_order_id=\"EOID1\", trading_pair=\"HBOT-COINALPHA\", fill_timestamp=1640001112, fill_price=Decimal(\"1000.11\"), fill_base_amount=Decimal(\"2\"), fill_quote_amount=Decimal(\"2000.22\"), fee=fee, )", "AddedToCostTradeFee.type_descriptor_for_json(), \"percent\": \"0.5\", \"percent_token\": \"COINALPHA\", \"flat_fees\": [token_amount.to_json()] } self.assertEqual(expected_json, fee.to_json()) def test_added_to_cost_json_deserialization(self): token_amount", "None schema.buy_percent_fee_deducted_from_returns = True fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.SELL, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", 
flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))]", ") self.assertEqual(AddedToCostTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_deducted_from_return_spot_fee_created_for_buy_and_fee_deducted_from_return(self): schema", "taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, ) fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.SELL, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] )", "fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) schema.percent_fee_token = \"HBOT\" fee = TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.OPEN, percent=Decimal(\"1.1\"),", "fee = TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.CLOSE, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"),", "fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_added_to_cost_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee", "TestCase from hummingbot.core.data_type.common import TradeType, PositionAction from hummingbot.core.data_type.in_flight_order import TradeUpdate from hummingbot.core.data_type.trade_fee import", "AddedToCostTradeFee, DeductedFromReturnsTradeFee, TokenAmount, TradeFeeBase, TradeFeeSchema, ) class TradeFeeTests(TestCase): def test_added_to_cost_spot_fee_created_for_buy_and_fee_not_deducted_from_return(self): schema = TradeFeeSchema(", ") self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json())) def test_deducted_from_returns_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"),", "self.assertEqual(expected_json, amount.to_json()) def test_json_deserialization(self): amount = TokenAmount(token=\"HBOT-COINALPHA\", amount=Decimal(\"1000.50\")) self.assertEqual(amount, TokenAmount.from_json(amount.to_json())) class TradeUpdateTests(TestCase): def", "buy_percent_fee_deducted_from_returns=False, ) fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.BUY, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee,", "\"2000.22\", \"fee\": fee.to_json(), }) self.assertEqual(expected_json, trade_update.to_json()) def test_json_deserialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee", "def test_deducted_from_returns_json_deserialization(self): token_amount = TokenAmount(token=\"CO<PASSWORD>\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] )", "buy_percent_fee_deducted_from_returns=False, ) fee = TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.CLOSE, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) 
self.assertEqual(DeductedFromReturnsTradeFee,", "test_added_to_cost_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = AddedToCostTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) expected_json", "fee amount is calculated fee = AddedToCostTradeFee(percent=Decimal(\"0\"), percent_token=\"CO<PASSWORD>\") fee_amount = fee.fee_amount_in_token( trading_pair=\"HBOT-COINALPHA\", price=Decimal(\"1000\"),", ") class TradeFeeTests(TestCase): def test_added_to_cost_spot_fee_created_for_buy_and_fee_not_deducted_from_return(self): schema = TradeFeeSchema( percent_fee_token=\"HBOT\", maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, )", "a percent token different from the token used to request the fee value", "def test_deducted_from_return_spot_fee_created_for_sell(self): schema = TradeFeeSchema( percent_fee_token=\"HBOT\", maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, ) fee = TradeFeeBase.new_spot_fee(", "= fee.fee_amount_in_token( trading_pair=\"HBOT-COINALPHA\", price=Decimal(\"1000\"), order_amount=Decimal(\"1\"), token=\"BNB\") self.assertEqual(Decimal(\"0\"), fee_amount) class TokenAmountTests(TestCase): def test_json_serialization(self): amount", "TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"CO<PASSWORD>\", flat_fees=[token_amount] ) trade_update = TradeUpdate( trade_id=\"12345\",", "= None schema.buy_percent_fee_deducted_from_returns = True fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.SELL, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\",", "amount=Decimal(\"20\"))] ) self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) def test_added_to_cost_perpetual_fee_created_when_opening_positions(self): schema = TradeFeeSchema( maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, )", "\"flat_fees\": [token_amount.to_json()] } self.assertEqual(expected_json, fee.to_json()) def test_deducted_from_returns_json_deserialization(self): token_amount = TokenAmount(token=\"CO<PASSWORD>\", amount=Decimal(\"20.6\")) fee =", "from decimal import Decimal from unittest import TestCase from hummingbot.core.data_type.common import TradeType, PositionAction", "self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_added_to_cost_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee =", "percent_token=\"COINALPHA\", flat_fees=[token_amount] ) self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json())) def test_deducted_from_returns_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee =", "amount is calculated fee = DeductedFromReturnsTradeFee(percent=Decimal(\"0\"), percent_token=\"CO<PASSWORD>\") fee_amount = fee.fee_amount_in_token( trading_pair=\"HBOT-COINALPHA\", price=Decimal(\"1000\"), order_amount=Decimal(\"1\"),", "fee.fee_amount_in_token( trading_pair=\"HBOT-COINALPHA\", price=Decimal(\"1000\"), order_amount=Decimal(\"1\"), token=\"BNB\") self.assertEqual(Decimal(\"0\"), fee_amount) def 
test_deducted_from_returns_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self): # Configure fee to", "self.assertEqual(expected_json, fee.to_json()) def test_deducted_from_returns_json_deserialization(self): token_amount = TokenAmount(token=\"CO<PASSWORD>\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\",", "Configure fee to use a percent token different from the token used to", "TradeFeeBase, TradeFeeSchema, ) class TradeFeeTests(TestCase): def test_added_to_cost_spot_fee_created_for_buy_and_fee_not_deducted_from_return(self): schema = TradeFeeSchema( percent_fee_token=\"HBOT\", maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"),", "= TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) trade_update = TradeUpdate(", "convertion rate if the fee amount is calculated fee = DeductedFromReturnsTradeFee(percent=Decimal(\"0\"), percent_token=\"CO<PASSWORD>\") fee_amount", "need the convertion rate if the fee amount is calculated fee = AddedToCostTradeFee(percent=Decimal(\"0\"),", "test_deducted_from_returns_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) expected_json", "amount=Decimal(\"20\"))], fee.flat_fees) def test_added_to_cost_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = AddedToCostTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\",", "= TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = AddedToCostTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) expected_json = {", "\"COINALPHA\", \"flat_fees\": [token_amount.to_json()] } self.assertEqual(expected_json, fee.to_json()) def test_added_to_cost_json_deserialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee", "forces the logic to need the convertion rate if the fee amount is", ") self.assertEqual(AddedToCostTradeFee, type(fee)) def test_added_to_cost_perpetual_fee_created_when_closing_position_but_schema_has_percent_fee_token(self): schema = TradeFeeSchema( percent_fee_token=\"HBOT\", maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, )", "= trade_update._asdict() expected_json.update({ \"fill_price\": \"1000.11\", \"fill_base_amount\": \"2\", \"fill_quote_amount\": \"2000.22\", \"fee\": fee.to_json(), }) self.assertEqual(expected_json,", "test_deducted_from_returns_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self): # Configure fee to use a percent token different from the token", "schema = TradeFeeSchema( percent_fee_token=\"HBOT\", maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, ) fee = TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.CLOSE,", "TradeUpdate from hummingbot.core.data_type.trade_fee import ( AddedToCostTradeFee, DeductedFromReturnsTradeFee, TokenAmount, TradeFeeBase, TradeFeeSchema, ) class TradeFeeTests(TestCase):", "trade_type=TradeType.SELL, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", 
flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) def test_added_to_cost_perpetual_fee_created_when_opening_positions(self): schema = TradeFeeSchema(", "\"amount\": \"1000.50\", } self.assertEqual(expected_json, amount.to_json()) def test_json_deserialization(self): amount = TokenAmount(token=\"HBOT-COINALPHA\", amount=Decimal(\"1000.50\")) self.assertEqual(amount, TokenAmount.from_json(amount.to_json()))", "AddedToCostTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) expected_json = { \"fee_type\": AddedToCostTradeFee.type_descriptor_for_json(), \"percent\": \"0.5\", \"percent_token\":", "self.assertEqual(Decimal(\"0\"), fee_amount) def test_deducted_from_returns_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self): # Configure fee to use a percent token different", "token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) trade_update =", "That forces the logic to need the convertion rate if the fee amount", "DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"CO<PASSWORD>\", flat_fees=[token_amount] ) trade_update = TradeUpdate( trade_id=\"12345\", client_order_id=\"OID1\", exchange_order_id=\"EOID1\", trading_pair=\"HBOT-COINALPHA\", fill_timestamp=1640001112,", "amount=Decimal(\"1000.50\")) expected_json = { \"token\": \"HBOT-COINALPHA\", \"amount\": \"1000.50\", } self.assertEqual(expected_json, amount.to_json()) def test_json_deserialization(self):", "fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_added_to_cost_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = AddedToCostTradeFee(", "self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_deducted_from_return_spot_fee_created_for_buy_and_fee_deducted_from_return(self): schema = TradeFeeSchema( maker_percent_fee_decimal=Decimal(\"1\"),", "maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, ) fee = TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.CLOSE, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))]", "flat_fees=[token_amount] ) expected_json = { \"fee_type\": AddedToCostTradeFee.type_descriptor_for_json(), \"percent\": \"0.5\", \"percent_token\": \"COINALPHA\", \"flat_fees\": [token_amount.to_json()]", "self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) def test_deducted_from_returns_perpetual_fee_created_when_closing_position_and_no_percent_fee_token(self): schema = TradeFeeSchema( maker_percent_fee_decimal=Decimal(\"1\"),", "fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.BUY, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(AddedToCostTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"),", ") self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) 
self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))], fee.flat_fees) schema.percent_fee_token = None", "calculated fee = DeductedFromReturnsTradeFee(percent=Decimal(\"0\"), percent_token=\"CO<PASSWORD>\") fee_amount = fee.fee_amount_in_token( trading_pair=\"HBOT-COINALPHA\", price=Decimal(\"1000\"), order_amount=Decimal(\"1\"), token=\"BNB\") self.assertEqual(Decimal(\"0\"),", "import TradeUpdate from hummingbot.core.data_type.trade_fee import ( AddedToCostTradeFee, DeductedFromReturnsTradeFee, TokenAmount, TradeFeeBase, TradeFeeSchema, ) class", "maker_percent_fee_decimal=Decimal(\"1\"), taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=True, ) fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.BUY, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))]", "flat_fees=[token_amount] ) self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json())) def test_deducted_from_returns_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee(", "percent_token=\"COINALPHA\", flat_fees=[token_amount] ) self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json())) def test_added_to_cost_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self): # Configure fee to use a", "percent_token=\"COINALPHA\", flat_fees=[token_amount] ) expected_json = { \"fee_type\": AddedToCostTradeFee.type_descriptor_for_json(), \"percent\": \"0.5\", \"percent_token\": \"COINALPHA\", \"flat_fees\":", "fee.flat_fees) schema.percent_fee_token = None schema.buy_percent_fee_deducted_from_returns = True fee = TradeFeeBase.new_spot_fee( fee_schema=schema, trade_type=TradeType.SELL, percent=Decimal(\"1.1\"),", "amount=Decimal(\"20\"))], fee.flat_fees) schema.percent_fee_token = \"HBOT\" fee = TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.OPEN, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\",", "= TradeUpdate( trade_id=\"12345\", client_order_id=\"OID1\", exchange_order_id=\"EOID1\", trading_pair=\"HBOT-COINALPHA\", fill_timestamp=1640001112, fill_price=Decimal(\"1000.11\"), fill_base_amount=Decimal(\"2\"), fill_quote_amount=Decimal(\"2000.22\"), fee=fee, ) expected_json", "Decimal from unittest import TestCase from hummingbot.core.data_type.common import TradeType, PositionAction from hummingbot.core.data_type.in_flight_order import", "percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) expected_json = { \"fee_type\": DeductedFromReturnsTradeFee.type_descriptor_for_json(), \"percent\": \"0.5\", \"percent_token\": \"COINALPHA\",", "DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) expected_json = { \"fee_type\": DeductedFromReturnsTradeFee.type_descriptor_for_json(), \"percent\": \"0.5\", \"percent_token\":", "trade_type=TradeType.SELL, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] ) self.assertEqual(DeductedFromReturnsTradeFee, type(fee)) self.assertEqual(Decimal(\"1.1\"), fee.percent) self.assertEqual(\"HBOT\", fee.percent_token) self.assertEqual([TokenAmount(token=\"COINALPHA\",", "percent_token=\"CO<PASSWORD>\") fee_amount = fee.fee_amount_in_token( 
trading_pair=\"HBOT-COINALPHA\", price=Decimal(\"1000\"), order_amount=Decimal(\"1\"), token=\"BNB\") self.assertEqual(Decimal(\"0\"), fee_amount) def test_deducted_from_returns_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self): #", "amount=Decimal(\"20.6\")) fee = DeductedFromReturnsTradeFee( percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json())) def test_added_to_cost_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self): #", "convertion rate if the fee amount is calculated fee = AddedToCostTradeFee(percent=Decimal(\"0\"), percent_token=\"CO<PASSWORD>\") fee_amount", "fee_amount = fee.fee_amount_in_token( trading_pair=\"HBOT-COINALPHA\", price=Decimal(\"1000\"), order_amount=Decimal(\"1\"), token=\"BNB\") self.assertEqual(Decimal(\"0\"), fee_amount) def test_deducted_from_returns_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self): # Configure", "from hummingbot.core.data_type.common import TradeType, PositionAction from hummingbot.core.data_type.in_flight_order import TradeUpdate from hummingbot.core.data_type.trade_fee import (", "percent=Decimal(\"0.5\"), percent_token=\"COINALPHA\", flat_fees=[token_amount] ) self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json())) def test_deducted_from_returns_json_serialization(self): token_amount = TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20.6\")) fee", "taker_percent_fee_decimal=Decimal(\"1\"), buy_percent_fee_deducted_from_returns=False, ) fee = TradeFeeBase.new_perpetual_fee( fee_schema=schema, position_action=PositionAction.CLOSE, percent=Decimal(\"1.1\"), percent_token=\"HBOT\", flat_fees=[TokenAmount(token=\"COINALPHA\", amount=Decimal(\"20\"))] )" ]
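# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the tests above
# show that TradeFeeBase.new_spot_fee selects the concrete fee class from the
# TradeFeeSchema and the trade side. The fixture values below are hypothetical
# and simply mirror the ones used in the tests; run this file directly to see
# the selected class names printed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _schema = TradeFeeSchema(
        maker_percent_fee_decimal=Decimal("0.001"),
        taker_percent_fee_decimal=Decimal("0.002"),
        buy_percent_fee_deducted_from_returns=True,
    )
    # Per the tests above: a BUY with buy_percent_fee_deducted_from_returns=True,
    # and any SELL, produce a fee deducted from the returned amount.
    _buy_fee = TradeFeeBase.new_spot_fee(fee_schema=_schema, trade_type=TradeType.BUY, percent=Decimal("0.001"))
    _sell_fee = TradeFeeBase.new_spot_fee(fee_schema=_schema, trade_type=TradeType.SELL, percent=Decimal("0.002"))
    print(type(_buy_fee).__name__)   # expected: DeductedFromReturnsTradeFee
    print(type(_sell_fee).__name__)  # expected: DeductedFromReturnsTradeFee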
import math
import tensorflow as tf
import cv2
import numpy as np
from scipy import signal


def image_normalization(image: np.ndarray, new_min=0, new_max=255) -> np.ndarray:
    """Normalize the input image to a given range set by the min and max parameters.

    Args:
        image (np.ndarray): input image.
        new_min (int, optional): lower bound of the target range. Defaults to 0.
        new_max (int, optional): upper bound of the target range. Defaults to 255.

    Returns:
        np.ndarray: normalized image.
    """
    original_dtype = image.dtype
    image = image.astype(np.float32)
    image_min, image_max = np.min(image), np.max(image)
    image = tf.cast(image, np.float32)
    normalized_image = (new_max - new_min) / (image_max - image_min) * (image - image_min) + new_min
    return tf.cast(normalized_image, original_dtype)


def normalize_kernel(kernel: np.ndarray) -> np.ndarray:
    # Scale the kernel so its coefficients sum to one along the last axis.
    return kernel / np.sum(kernel, axis=-1)


def gaussian_kernel2d(kernel_size: int, sigma: float, dtype=np.float32) -> np.ndarray:
    # Build a kernel_size x kernel_size Gaussian kernel centered on the grid.
    krange = np.arange(kernel_size)
    x, y = np.meshgrid(krange, krange)
    constant = np.round(kernel_size / 2)
    x = x - constant
    y = y - constant
    # np.exp (rather than math.exp) is required because x and y are arrays.
    kernel = 1 / (2 * math.pi * sigma ** 2) * np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2))
    return normalize_kernel(kernel)


def gaussian_filter(
        image: np.ndarray, kernel_size: int, sigma: float, dtype=np.float32, strides: int = 1
) -> np.ndarray:
    """Apply a Gaussian convolution filter to the image.

    TODO: Verify this method against the TensorFlow implementation
    https://stackoverflow.com/questions/48097941/strided-convolution-of-2d-in-numpy

    Args:
        image (np.ndarray): input image.
        kernel_size (int): side length of the Gaussian kernel.
        sigma (float): standard deviation of the Gaussian kernel.
        dtype (type, optional): output dtype. Defaults to np.float32.
        strides (int, optional): subsampling step applied to the filtered image. Defaults to 1.

    Returns:
        np.ndarray: filtered (and optionally strided) image.
    """
    kernel = gaussian_kernel2d(kernel_size, sigma)
    if len(image.shape) == 3:
        image = image[np.newaxis, ...]
    image = tf.cast(image, tf.float32)
    image = image.astype(np.float32)
    image = signal.convolve2d(image, kernel[:, :, np.newaxis, np.newaxis], mode='same')[::strides, ::strides]
    return image.astype(dtype)


def image_shape(image: np.ndarray, dtype=np.int32) -> np.ndarray:
    # Return the spatial (height, width) shape for both HWC and NHWC tensors.
    shape = image.shape
    shape = shape[:2] if len(image.shape) == 3 else shape[1:3]
    return shape


def scale_shape(image: np.ndarray, scale: float):
    shape = image_shape(image, np.float32)
    # np.ceil is used because math.ceil cannot operate on arrays.
    shape = np.ceil(np.asarray(shape, dtype=np.float32) * scale)
    return shape.astype(np.float32)


def rescale(image: np.ndarray, scale: float, dtype=np.float32, **kwargs) -> np.ndarray:
    assert len(image.shape) in (3, 4), 'The tensor must be of dimension 3 or 4'
    image = image.astype(np.float32)
    rescale_size = scale_shape(image, scale)
    interpolation = kwargs.pop('interpolation', cv2.INTER_CUBIC)
    rescaled_image = cv2.resize(image, rescale_size, interpolation=interpolation)
    return rescaled_image.astype(dtype)


def read_image(filename: str, **kwargs) -> np.ndarray:
    mode = kwargs.pop('mode', cv2.IMREAD_UNCHANGED)
    return cv2.imread(filename, flags=mode)


def image_preprocess(image: np.ndarray, SCALING_FACTOR=1 / 4) -> np.ndarray:
    r"""#### Image Normalization

    The first step for DIQA is to pre-process the images. The image is converted into
    grayscale, and then a low-pass filter is applied. The low-pass filter is defined as:

    \begin{align*}
    \hat{I} = I_{gray} - I^{low}
    \end{align*}

    where the low-frequency image is the result of the following algorithm:

    1. Blur the grayscale image.
    2. Downscale it by a factor of SCALING_FACTOR.
    3. Upscale it back to the original size.

    The main reasons for this normalization are (1) the Human Visual System (HVS) is not
    sensitive to changes in the low-frequency band, and (2) image distortions barely affect
    the low-frequency component of images.

    Arguments:
        image {np.ndarray} -- input RGB image.

    Returns:
        np.ndarray -- grayscale image with its low-frequency component subtracted.
    """
    image = tf.cast(image, tf.float32)
    image = tf.image.rgb_to_grayscale(image)
    image_low = gaussian_filter(image, 16, 7 / 6)
    image_low = rescale(image_low, SCALING_FACTOR, method=tf.image.ResizeMethod.BICUBIC)
    image_low = tf.image.resize(image_low, size=image_shape(image), method=tf.image.ResizeMethod.BICUBIC)
    return image - tf.cast(image_low, image.dtype)
strides ([int],", "/ 2) x -= constant y -= constant kernel = 1 / (2", "\"\"\" Apply convolution filter to image with gaussian image kernel TODO: Verify this", "-> np.ndarray: assert len(image.shape) in (3, 4), 'The tensor must be of dimension", "shape = image.shape shape = shape[:2] if len(image.shape) == 3 else shape[1:3] return", "int, sigma: float, dtype=np.float32, strides: int = 1 ) -> np.ndarray: \"\"\" Apply", "\\hat{I} = I_{gray} - I^{low} \\end{align*} where the low-frequency image is the result", "scale) return shape.astype(np.float32) def rescale(image: np.ndarray, scale: float, dtype=np.float32, **kwargs) -> np.ndarray: assert", "image[np.newaxis, ...] image = tf.cast(image, tf.float32) image = image.astype(np.float32) image = signal.convolve2d(image, kernel[:,", "image \"\"\" original_dtype = image.dtype image = image.astype(np.float32) image_min, image_max = np.min(image), np.max(image)", "tensor must be of dimension 3 or 4' image = image.astype(np.float32) rescale_size =", "numpy as np from scipy import signal def image_normalization(image: np.ndarray, new_min=0, new_max=255) ->", "tf.float32) image = image.astype(np.float32) image = signal.convolve2d(image, kernel[:, :, np.newaxis, np.newaxis], mode='same', )[::strides,", "tensorflow as tf import cv2 import numpy as np from scipy import signal", "def gaussian_filter( image: np.ndarray, kernel_size: int, sigma: float, dtype=np.float32, strides: int = 1", "[description] \"\"\" kernel = gaussian_kernel2d(kernel_size, sigma) if len(image.shape) == 3: image = image[np.newaxis,", "tf.image.rgb_to_grayscale(image) image_low = gaussian_filter(image, 16, 7 / 6) image_low = rescale(image_low, SCALING_FACTOR, method=tf.image.ResizeMethod.BICUBIC)", "rescale(image: np.ndarray, scale: float, dtype=np.float32, **kwargs) -> np.ndarray: assert len(image.shape) in (3, 4),", "https://stackoverflow.com/questions/48097941/strided-convolution-of-2d-in-numpy Args: image ([np.ndarray]): [description] kernel_size ([int]): [description] sigma ([float]): [description] dtype ([type],", "pre-process the images. The image is converted into grayscale, and then a low-pass", "defined as: \\begin{align*} \\hat{I} = I_{gray} - I^{low} \\end{align*} where the low-frequency image", "-> np.ndarray: shape = image.shape shape = shape[:2] if len(image.shape) == 3 else", "a factor of SCALING_FACTOR. 3. Upscale it back to the original size. The", "else shape[1:3] return shape def scale_shape(image: np.ndarray, scale: float): shape = image_shape(image, np.float32)", "as: \\begin{align*} \\hat{I} = I_{gray} - I^{low} \\end{align*} where the low-frequency image is", "Normalization The first step for DIQA is to pre-process the images. The image", "float, dtype=np.float32, **kwargs) -> np.ndarray: assert len(image.shape) in (3, 4), 'The tensor must", "Args: image ([np.ndarray]): [description] kernel_size ([int]): [description] sigma ([float]): [description] dtype ([type], optional):", "it back to the original size. The main reasons for this normalization are", "size. The main reasons for this normalization are (1) the Human Visual System", "is applied. The low-pass filter is defined as: \\begin{align*} \\hat{I} = I_{gray} -", "shape = shape[:2] if len(image.shape) == 3 else shape[1:3] return shape def scale_shape(image:", "to the original size. 
The main reasons for this normalization are (1) the", "np.ndarray, scale: float): shape = image_shape(image, np.float32) shape = np.math.ceil(shape * scale) return", "- new_min) / (image_max - image_min) * (image - image_min) + new_min return", "image.dtype image = image.astype(np.float32) image_min, image_max = np.min(image), np.max(image) image = tf.cast(image, np.float32)", "**kwargs) -> np.ndarray: mode = kwargs.pop('mode', cv2.IMREAD_UNCHANGED) return cv2.imread(filename, flags=mode) def image_preprocess(image: np.ndarray,", "component of images. Arguments: image {np.ndarray} -- [description] Returns: np.ndarray -- [description] \"\"\"", "[description] new_min ([type], optional): [description]. Defaults to 0. new_max ([type], optional): [description]. Defaults", "= signal.convolve2d(image, kernel[:, :, np.newaxis, np.newaxis], mode='same', )[::strides, ::strides] return image.astype(dtype) def image_shape(image:", "3 or 4' image = image.astype(np.float32) rescale_size = scale_shape(image, scale) interpolation = kwargs.pop('interpolation',", "kwargs.pop('interpolation', cv2.INTER_CUBIC) rescaled_image = cv2.resize(image, rescale_size, interpolation=interpolation) return rescaled_image.astype(dtype) def read_image(filename: str, **kwargs)", "kernel[:, :, np.newaxis, np.newaxis], mode='same', )[::strides, ::strides] return image.astype(dtype) def image_shape(image: np.ndarray, dtype=np.int32)", "to image with gaussian image kernel TODO: Verify this methos with tensorflow https://stackoverflow.com/questions/48097941/strided-convolution-of-2d-in-numpy", "image = image.astype(np.float32) image_min, image_max = np.min(image), np.max(image) image = tf.cast(image, np.float32) normalized_image", "barely affect the low-frequency component of images. Arguments: image {np.ndarray} -- [description] Returns:", "is converted into grayscale, and then a low-pass filter is applied. The low-pass", "(3, 4), 'The tensor must be of dimension 3 or 4' image =", ") -> np.ndarray: \"\"\" Apply convolution filter to image with gaussian image kernel", "\"\"\" original_dtype = image.dtype image = image.astype(np.float32) image_min, image_max = np.min(image), np.max(image) image", "not sensitive to changes in the low-frequency band, and (2) image distortions barely", "the images. The image is converted into grayscale, and then a low-pass filter", "image_min, image_max = np.min(image), np.max(image) image = tf.cast(image, np.float32) normalized_image = (new_max -", "np.ndarray -- [description] \"\"\" image = tf.cast(image, tf.float32) image = tf.image.rgb_to_grayscale(image) image_low =", "a low-pass filter is applied. The low-pass filter is defined as: \\begin{align*} \\hat{I}", "changes in the low-frequency band, and (2) image distortions barely affect the low-frequency", "kernel_size ([int]): [description] sigma ([float]): [description] dtype ([type], optional): [description]. Defaults to np.float32.", "dtype ([type], optional): [description]. Defaults to np.float32. strides ([int], optional): [description]. Defaults to", "y = np.meshgrid(krange, krange) constant = np.round(kernel_size / 2) x -= constant y", "/ np.sum(kernel, axis=-1) def gaussian_kernel2d(kernel_size: int, sigma: float, dtype=np.float32) -> np.ndarray: krange =", "krange = np.arange(kernel_size) x, y = np.meshgrid(krange, krange) constant = np.round(kernel_size / 2)", "np.min(image), np.max(image) image = tf.cast(image, np.float32) normalized_image = (new_max - new_min) / (image_max", "then a low-pass filter is applied. 
The low-pass filter is defined as: \\begin{align*}", "result of the following algorithm: 1. Blur the grayscale image. 2. Downscale it", "grayscale, and then a low-pass filter is applied. The low-pass filter is defined", "(2) image distortions barely affect the low-frequency component of images. Arguments: image {np.ndarray}", "max parameter Args: image ([type]): [description] new_min ([type], optional): [description]. Defaults to 0.", "np.math.ceil(shape * scale) return shape.astype(np.float32) def rescale(image: np.ndarray, scale: float, dtype=np.float32, **kwargs) ->", "rescale_size = scale_shape(image, scale) interpolation = kwargs.pop('interpolation', cv2.INTER_CUBIC) rescaled_image = cv2.resize(image, rescale_size, interpolation=interpolation)", "factor of SCALING_FACTOR. 3. Upscale it back to the original size. The main", "** 2 + y ** 2) / (2 * sigma ** 2)) return", "normalize_kernel(kernel) def gaussian_filter( image: np.ndarray, kernel_size: int, sigma: float, dtype=np.float32, strides: int =", "cv2.IMREAD_UNCHANGED) return cv2.imread(filename, flags=mode) def image_preprocess(image: np.ndarray, SCALING_FACTOR=1 / 4) -> np.ndarray: \"\"\"", "import signal def image_normalization(image: np.ndarray, new_min=0, new_max=255) -> np.ndarray: \"\"\" Normalize the input", "int, sigma: float, dtype=np.float32) -> np.ndarray: krange = np.arange(kernel_size) x, y = np.meshgrid(krange,", "image_low = rescale(image_low, SCALING_FACTOR, method=tf.image.ResizeMethod.BICUBIC) image_low = tf.image.resize(image_low, size=image_shape(image), method=tf.image.ResizeMethod.BICUBIC) return image -", "0. new_max ([type], optional): [description]. Defaults to 255. Returns: [np.ndarray]: Normalized image \"\"\"", "= image.astype(np.float32) rescale_size = scale_shape(image, scale) interpolation = kwargs.pop('interpolation', cv2.INTER_CUBIC) rescaled_image = cv2.resize(image,", "np.ndarray, dtype=np.int32) -> np.ndarray: shape = image.shape shape = shape[:2] if len(image.shape) ==", "[description]. Defaults to 0. new_max ([type], optional): [description]. Defaults to 255. Returns: [np.ndarray]:", "2) / (2 * sigma ** 2)) return normalize_kernel(kernel) def gaussian_filter( image: np.ndarray,", "dtype=np.float32, **kwargs) -> np.ndarray: assert len(image.shape) in (3, 4), 'The tensor must be", "if len(image.shape) == 3 else shape[1:3] return shape def scale_shape(image: np.ndarray, scale: float):", "(HVS) is not sensitive to changes in the low-frequency band, and (2) image", "scale_shape(image, scale) interpolation = kwargs.pop('interpolation', cv2.INTER_CUBIC) rescaled_image = cv2.resize(image, rescale_size, interpolation=interpolation) return rescaled_image.astype(dtype)", "image = image.astype(np.float32) rescale_size = scale_shape(image, scale) interpolation = kwargs.pop('interpolation', cv2.INTER_CUBIC) rescaled_image =", "Defaults to 1. Returns: [np.ndarray]: [description] \"\"\" kernel = gaussian_kernel2d(kernel_size, sigma) if len(image.shape)", "([int], optional): [description]. Defaults to 1. 
Returns: [np.ndarray]: [description] \"\"\" kernel = gaussian_kernel2d(kernel_size,", "sensitive to changes in the low-frequency band, and (2) image distortions barely affect", "(2 * math.pi * sigma**2) * np.math.exp(-(x ** 2 + y ** 2)", "np.ndarray, kernel_size: int, sigma: float, dtype=np.float32, strides: int = 1 ) -> np.ndarray:", "#### Image Normalization The first step for DIQA is to pre-process the images.", "are (1) the Human Visual System (HVS) is not sensitive to changes in", "is not sensitive to changes in the low-frequency band, and (2) image distortions", "flags=mode) def image_preprocess(image: np.ndarray, SCALING_FACTOR=1 / 4) -> np.ndarray: \"\"\" #### Image Normalization", "it by a factor of SCALING_FACTOR. 3. Upscale it back to the original", "shape = np.math.ceil(shape * scale) return shape.astype(np.float32) def rescale(image: np.ndarray, scale: float, dtype=np.float32,", "if len(image.shape) == 3: image = image[np.newaxis, ...] image = tf.cast(image, tf.float32) image", "np.ndarray: shape = image.shape shape = shape[:2] if len(image.shape) == 3 else shape[1:3]", "to a given range set by min and max parameter Args: image ([type]):", "dtype=np.float32, strides: int = 1 ) -> np.ndarray: \"\"\" Apply convolution filter to", "-> np.ndarray: \"\"\" #### Image Normalization The first step for DIQA is to", "3: image = image[np.newaxis, ...] image = tf.cast(image, tf.float32) image = image.astype(np.float32) image", "[description] \"\"\" image = tf.cast(image, tf.float32) image = tf.image.rgb_to_grayscale(image) image_low = gaussian_filter(image, 16,", "{np.ndarray} -- [description] Returns: np.ndarray -- [description] \"\"\" image = tf.cast(image, tf.float32) image", "parameter Args: image ([type]): [description] new_min ([type], optional): [description]. Defaults to 0. new_max", "= scale_shape(image, scale) interpolation = kwargs.pop('interpolation', cv2.INTER_CUBIC) rescaled_image = cv2.resize(image, rescale_size, interpolation=interpolation) return", "the input image to a given range set by min and max parameter", "= I_{gray} - I^{low} \\end{align*} where the low-frequency image is the result of", "kernel TODO: Verify this methos with tensorflow https://stackoverflow.com/questions/48097941/strided-convolution-of-2d-in-numpy Args: image ([np.ndarray]): [description] kernel_size", "4' image = image.astype(np.float32) rescale_size = scale_shape(image, scale) interpolation = kwargs.pop('interpolation', cv2.INTER_CUBIC) rescaled_image", "import cv2 import numpy as np from scipy import signal def image_normalization(image: np.ndarray,", "/ (2 * math.pi * sigma**2) * np.math.exp(-(x ** 2 + y **", "return cv2.imread(filename, flags=mode) def image_preprocess(image: np.ndarray, SCALING_FACTOR=1 / 4) -> np.ndarray: \"\"\" ####", "== 3 else shape[1:3] return shape def scale_shape(image: np.ndarray, scale: float): shape =", "the original size. 
The main reasons for this normalization are (1) the Human", "for this normalization are (1) the Human Visual System (HVS) is not sensitive", "np.arange(kernel_size) x, y = np.meshgrid(krange, krange) constant = np.round(kernel_size / 2) x -=", "np.ndarray, new_min=0, new_max=255) -> np.ndarray: \"\"\" Normalize the input image to a given", "[np.ndarray]: [description] \"\"\" kernel = gaussian_kernel2d(kernel_size, sigma) if len(image.shape) == 3: image =", "given range set by min and max parameter Args: image ([type]): [description] new_min", "scipy import signal def image_normalization(image: np.ndarray, new_min=0, new_max=255) -> np.ndarray: \"\"\" Normalize the", "def image_normalization(image: np.ndarray, new_min=0, new_max=255) -> np.ndarray: \"\"\" Normalize the input image to", "image_shape(image: np.ndarray, dtype=np.int32) -> np.ndarray: shape = image.shape shape = shape[:2] if len(image.shape)", "\"\"\" kernel = gaussian_kernel2d(kernel_size, sigma) if len(image.shape) == 3: image = image[np.newaxis, ...]", "in (3, 4), 'The tensor must be of dimension 3 or 4' image", "normalize_kernel(kernel: np.array) -> np.ndarray: return kernel / np.sum(kernel, axis=-1) def gaussian_kernel2d(kernel_size: int, sigma:", "low-frequency component of images. Arguments: image {np.ndarray} -- [description] Returns: np.ndarray -- [description]", "filter is defined as: \\begin{align*} \\hat{I} = I_{gray} - I^{low} \\end{align*} where the", "x, y = np.meshgrid(krange, krange) constant = np.round(kernel_size / 2) x -= constant", "([type], optional): [description]. Defaults to 255. Returns: [np.ndarray]: Normalized image \"\"\" original_dtype =", "with gaussian image kernel TODO: Verify this methos with tensorflow https://stackoverflow.com/questions/48097941/strided-convolution-of-2d-in-numpy Args: image", "str, **kwargs) -> np.ndarray: mode = kwargs.pop('mode', cv2.IMREAD_UNCHANGED) return cv2.imread(filename, flags=mode) def image_preprocess(image:", "Returns: [np.ndarray]: [description] \"\"\" kernel = gaussian_kernel2d(kernel_size, sigma) if len(image.shape) == 3: image", "of SCALING_FACTOR. 3. Upscale it back to the original size. The main reasons", "np.ndarray: return kernel / np.sum(kernel, axis=-1) def gaussian_kernel2d(kernel_size: int, sigma: float, dtype=np.float32) ->", "= image_shape(image, np.float32) shape = np.math.ceil(shape * scale) return shape.astype(np.float32) def rescale(image: np.ndarray,", "be of dimension 3 or 4' image = image.astype(np.float32) rescale_size = scale_shape(image, scale)", "scale_shape(image: np.ndarray, scale: float): shape = image_shape(image, np.float32) shape = np.math.ceil(shape * scale)", "interpolation = kwargs.pop('interpolation', cv2.INTER_CUBIC) rescaled_image = cv2.resize(image, rescale_size, interpolation=interpolation) return rescaled_image.astype(dtype) def read_image(filename:", "= cv2.resize(image, rescale_size, interpolation=interpolation) return rescaled_image.astype(dtype) def read_image(filename: str, **kwargs) -> np.ndarray: mode", "min and max parameter Args: image ([type]): [description] new_min ([type], optional): [description]. 
Defaults", "return image.astype(dtype) def image_shape(image: np.ndarray, dtype=np.int32) -> np.ndarray: shape = image.shape shape =", "np.ndarray: assert len(image.shape) in (3, 4), 'The tensor must be of dimension 3", "return shape.astype(np.float32) def rescale(image: np.ndarray, scale: float, dtype=np.float32, **kwargs) -> np.ndarray: assert len(image.shape)", "np.ndarray, scale: float, dtype=np.float32, **kwargs) -> np.ndarray: assert len(image.shape) in (3, 4), 'The", "by a factor of SCALING_FACTOR. 3. Upscale it back to the original size.", "3. Upscale it back to the original size. The main reasons for this", "...] image = tf.cast(image, tf.float32) image = image.astype(np.float32) image = signal.convolve2d(image, kernel[:, :,", "np.array) -> np.ndarray: return kernel / np.sum(kernel, axis=-1) def gaussian_kernel2d(kernel_size: int, sigma: float,", "np.ndarray, SCALING_FACTOR=1 / 4) -> np.ndarray: \"\"\" #### Image Normalization The first step", "tf.float32) image = tf.image.rgb_to_grayscale(image) image_low = gaussian_filter(image, 16, 7 / 6) image_low =", "[description]. Defaults to np.float32. strides ([int], optional): [description]. Defaults to 1. Returns: [np.ndarray]:", "\"\"\" #### Image Normalization The first step for DIQA is to pre-process the", "**kwargs) -> np.ndarray: assert len(image.shape) in (3, 4), 'The tensor must be of", "as np from scipy import signal def image_normalization(image: np.ndarray, new_min=0, new_max=255) -> np.ndarray:", "len(image.shape) == 3 else shape[1:3] return shape def scale_shape(image: np.ndarray, scale: float): shape", "convolution filter to image with gaussian image kernel TODO: Verify this methos with", "np.float32) shape = np.math.ceil(shape * scale) return shape.astype(np.float32) def rescale(image: np.ndarray, scale: float,", "The main reasons for this normalization are (1) the Human Visual System (HVS)", "cv2.INTER_CUBIC) rescaled_image = cv2.resize(image, rescale_size, interpolation=interpolation) return rescaled_image.astype(dtype) def read_image(filename: str, **kwargs) ->", "Upscale it back to the original size. The main reasons for this normalization" ]
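The normalization that image_preprocess documents can be sketched end to end without TensorFlow. The snippet below is illustrative only: preprocess_sketch and its variable names are hypothetical, grayscale is taken as a plain channel mean, and the sigma of 7/6 and the 1/4 scaling factor simply mirror the values used above.

import numpy as np
from scipy import ndimage


def preprocess_sketch(rgb: np.ndarray, scaling_factor: float = 1 / 4) -> np.ndarray:
    # hat_I = I_gray - I_low, with I_low = upscale(downscale(blur(I_gray)))
    gray = rgb.mean(axis=-1)                                     # grayscale (simple mean)
    blurred = ndimage.gaussian_filter(gray, sigma=7 / 6)         # 1. blur
    small = ndimage.zoom(blurred, scaling_factor)                # 2. downscale
    low = ndimage.zoom(small, (gray.shape[0] / small.shape[0],
                               gray.shape[1] / small.shape[1]))  # 3. upscale back
    return gray - low


if __name__ == "__main__":
    demo = np.random.rand(64, 64, 3).astype(np.float32)
    print(preprocess_sketch(demo).shape)  # (64, 64)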
# <reponame>silence0201/Learn-Python
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟</a>
# author yeeku.H.lee <EMAIL>
# version 1.0
# Copyright (C), 2001-2018, yeeku.H.Lee
# This program is protected by copyright laws.
# Program Name:
# <br>Date:
#########################################################################
# Integer literals starting with 0x or 0X are hexadecimal
hex_value1 = 0x13
hex_value2 = 0XaF
print("hexValue1 is:", hex_value1)
print("hexValue2 is:", hex_value2)
# Integer literals starting with 0b or 0B are binary
bin_val = 0b111
print('bin_val is:', bin_val)
bin_val = 0B101
print('bin_val is:', bin_val)
# Integer literals starting with 0o or 0O are octal
oct_val = 0o54
print('oct_val is:', oct_val)
oct_val = 0O17
print('oct_val is:', oct_val)
# Underscores can be used as digit separators in numeric literals
one_million = 1_000_000
print(one_million)
price = 234_234_234    # price is actually 234234234
android = 1234_1234    # android is actually 12341234
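A few quick checks, not part of the original example, confirm that the prefixed literals and underscore separators above are only alternative spellings of ordinary integers, and that hex(), bin() and oct() convert back the other way:

assert 0x13 == 19 and 0XaF == 175
assert 0b111 == 7 and 0B101 == 5
assert 0o54 == 44 and 0O17 == 15
assert 1_000_000 == 1000000 and 1234_1234 == 12341234
print(hex(19), bin(7), oct(44))  # 0x13 0b111 0o54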
# repos/system_upgrade/el7toel8/actors/detectgrubconfigerror/actor.py
from leapp.actors import Actor
from leapp.libraries.actor.scanner import detect_config_error
from leapp.models import GrubConfigError
from leapp.reporting import Report
from leapp.libraries.common.reporting import report_generic
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag


class DetectGrubConfigError(Actor):
    """
    Check grub configuration for syntax error in GRUB_CMDLINE_LINUX value.
    """

    name = 'detect_grub_config_error'
    consumes = ()
    produces = (Report, GrubConfigError)
    tags = (ChecksPhaseTag, IPUWorkflowTag)

    def process(self):
        error_detected = detect_config_error('/etc/default/grub')
        if error_detected:
            report_generic(
                title='Syntax error detected in grub configuration',
                summary='Syntax error was detected in GRUB_CMDLINE_LINUX value of grub configuration. '
                        'This error is causing booting and other issues. '
                        'Error is automatically fixed by add_upgrade_boot_entry actor.',
                severity='low'
            )
        self.produce(GrubConfigError(error_detected=error_detected))
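The actual syntax check is implemented by detect_config_error in leapp.libraries.actor.scanner and is not shown here. As a rough, hypothetical stand-in, a check of this kind could flag a GRUB_CMDLINE_LINUX line whose value fails to tokenise, for example because of an unbalanced quote:

import shlex


def naive_grub_cmdline_error(path='/etc/default/grub'):
    # Hypothetical helper, much cruder than the real leapp scanner.
    with open(path) as fp:
        for line in fp:
            if line.strip().startswith('GRUB_CMDLINE_LINUX'):
                _, sep, value = line.partition('=')
                if not sep:
                    continue
                try:
                    shlex.split(value)
                except ValueError:  # e.g. "No closing quotation"
                    return True
    return False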
from django.utils.translation import ugettext_lazy as _

search = {'text': _(u'search'), 'view': 'search', 'famfam': 'zoom'}
search_advanced = {'text': _(u'advanced search'), 'view': 'search_advanced', 'famfam': 'zoom_in'}
search_again = {'text': _(u'search again'), 'view': 'search_again', 'famfam': 'arrow_undo'}
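How these link dictionaries are consumed is specific to the project that defines them; as a hedged illustration, the 'view' entry is the sort of name that Django's reverse() turns into a URL at render time (link_url below is hypothetical):

from django.urls import reverse


def link_url(link):
    # e.g. link_url(search) resolves the named 'search' view against the project URLconf.
    return reverse(link['view'])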
def decorator(func):
    def inner():
        print("Running inner()")
    return inner


@decorator
def target():
    print("Running target()")


def p_target():
    print("Running target()")


if __name__ == "__main__":
    target()             # decorator(target)()
    decorator(p_target)  # no output
    print(target)        # <function decorator.<locals>.inner at 0x000001D9A9692DC0>
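For contrast with the example above, a decorator usually calls the function it wraps and applies functools.wraps so the original name survives decoration; calling_decorator and greet below are illustrative names:

import functools


def calling_decorator(func):
    @functools.wraps(func)
    def inner():
        print("Running inner()")
        return func()
    return inner


@calling_decorator
def greet():
    print("Running greet()")


if __name__ == "__main__":
    greet()       # prints "Running inner()" then "Running greet()"
    print(greet)  # <function greet at 0x...> -- name preserved by wraps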
# Repository-scan classification script: tallies how the repositories in a scan report use
# parallelism. The first argument selects automatic (-a, MANUAL_CHECKING = False) or manual
# checking; results are written to the file named by sys.argv[3]; counters such as allfile,
# no_retrieve, use_lambda, possible_para and det_no_pattern record each category, and summary
# lines such as "RELYING ON AUTO TOOL: {} NO USE OF PARALELLISM" and "RELYING ON MANUAL
# CHECKING: {} RELEVANT TOTAL PROJECTS" are written at the end.
import sys

ofd = open(sys.argv[3], 'w')
# All files number
allfile = 0
# No retrieve result files
no_retrieve = 0
# Use Lambda function
use_lambda = 0
# Possible parallelism
possible_para = 0
# Determined to be no pattern
det_no_pattern = 0

safe_list = ["taskId = response['SynthesisTask']['TaskId']"]


def check_safe_list(string):
    for safes in safe_list:
        if safes in string:
            return True
    return False


def scan_block(lines, i, j, keyword):
    # True when any line of the block [i, j) contains the keyword.
    while i < j:
        if keyword in lines[i]:
            return True
        i += 1
    return False


def scan_block_numbers(lines, i, j, keyword):
    # Count how many lines of the block [i, j) contain the keyword.
    ret = 0
    while i < j:
        if keyword in lines[i]:
            ret += 1
        i += 1
    return ret
Enter 1 if can proceed, and enter 2 if this is", "j continue # Judge if this is a no pattern identified case #", "{}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue else: nopattern += 1 ofd.write(\"no_pattern: {}\".format(lines[k])) i", "scan_block_numbers(lines, i, j, \"Pattern identified\"): between_code += 1 if MANUAL_CHECKING: print(\"\\n\\n\\n\\n\\n\\n\") print_code(i, j,", "doing manual checking if MANUAL_CHECKING: possible_para += 1 print(\"\\n\\n\\n\\n\\n\\n\") print(lines[i]) i += 1", "case else: if not judge_code(i, j, lines): det_no_pattern += 1 i = j", "this is a no pattern identified case # If only relying on auto-tool:", "no_async += 1 ofd.write(\"no_async: {}\".format(lines[k])) i = j continue # Judge if this", "= j ofd.write(\"\\n\\n==================================================================\\n\") if not MANUAL_CHECKING: print_writeofd(\"{}, Total files searched\".format(allfile), ofd) print_writeofd(\"BEFORE MANUAL", "this is a no use of parallelism case if \"No use of parallelism\"", "{}\".format(lines[k])) exit(1) # Check if needs to prompt users on codes between start", "\"=================================================\\n\": j += 1 if j > len(lines): break # Now i and", "i < j: if \"Nodes in between start statement and while statement\" in", "2, NOT ANYTHING ELSE!\") user = input() if user == '1': print_writeofd(\"code_between (proceeds):", "j, keyword): while i < j: if keyword in lines[i]: return True i", "j, keyword): ret = 0 while i < j: if keyword in lines[i]:", "between start statement and while statement\"): # If these two numbers equal then", "to be no pattern det_no_pattern = 0 def get_all_add_up(): return no_async + noparrelism", "continue while i < j: if \"***\" in lines[i]: i_copy = i while", "prompt users on codes between start and while statement: if scan_block(lines, i, j,", "{}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue else: use_lambda += 1 ofd.write(\"use_lambda: {}\".format(lines[k])) i", "print(\"Operation missing while it's neither use lambda nor no pattern identified: {}\".format(lines[k])) exit(1)", "should be a parallelism-used case if \"NO PATTERN IDENTIFIED\" in lines[j - 1]:", "lines[j - 1]: noparrelism += 1 ofd.write(\"no_parallelism: {}\".format(lines[k])) i = j continue while", "if keyword in lines[i]: ret += 1 i += 1 return ret def", "- 1]: no_retrieve += 1 ofd.write(\"no_retrieve: {}\".format(lines[k])) i = j continue # At", "1 print_writeofd(\"no_pattern (process_exception): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue else: nopattern += 1", "i_copy = i while \"------\" not in lines[i_copy]: print(lines[i_copy]) i_copy += 1 break", "{}\".format(lines[k])) i = j continue while i < j: if \"***\" in lines[i]:", "no_retrieve += 1 ofd.write(\"no_retrieve: {}\".format(lines[k])) i = j continue # At this point", "print_writeofd(\"RELYING ON MANUAL CHECKING: {} PARALELLISM USED\".format(det_para + use_lambda), ofd) print_writeofd(\"RELYING ON MANUAL", "i while \"------\" not in lines[i_copy]: print(lines[i_copy]) i_copy += 1 break i +=", "def check_safe_list(string): for safes in safe_list: if safes in string: return True return", "github exception triggered if scan_block(lines, i, j, \"Other Github Exceptions occurred\"): github_exception +=", "+ det_no_para), ofd) print_writeofd(\"RELYING ON MANUAL CHECKING: {} PARALELLISM USED\".format(det_para + use_lambda), ofd)", "input() if user == '1': det_no_para += 1 print_writeofd(\"possible_parallelism (no_parallelism): {}\".format(lines[k].strip(\"\\n\")), 
ofd) elif", "No retrieve result\".format(no_retrieve), ofd) print_writeofd(\"{}, No pattern identified\".format(nopattern + det_no_pattern), ofd) print_writeofd(\"{}, No", "+= 1 break i_copy += 1 if i_copy == j: ofd.write(\"no_parallelism: {}\".format(lines[k])) noparrelism", "+= 1 i += 1 return ret def print_code(i, j, lines): while i", "j continue # Judge if this is a no use of parallelism case", "det_no_pattern = 0 def get_all_add_up(): return no_async + noparrelism + nopattern + github_exception", "(not scan_block(lines, i, j, \"Pattern identified\")): print(\"Operation missing while it's neither use lambda", "1 safe_list = [\"if response:\", \"job_id = response['JobId']\", \"synthesis_task = {'taskId': response['SynthesisTask']['TaskId']\", \"'taskStatus':", "is the output file from async_main_google.py ifd = open(sys.argv[2], 'r') # Third argument", "1 break i_copy += 1 if i_copy == j: ofd.write(\"no_parallelism: {}\".format(lines[k])) noparrelism +=", "ofd) print_writeofd(\"RELYING ON AUTO TOOL: {} RELEVANT TOTAL PROJECTS\".format(noparrelism + possible_para + nopattern", "response['SynthesisTask']['TaskId']\"] def check_safe_list(string): for safes in safe_list: if safes in string: return True", "USED\".format(det_para + use_lambda), ofd) print_writeofd(\"RELYING ON MANUAL CHECKING: {} RELEVANT TOTAL PROJECTS\".format(noparrelism +", "missing\", sanity check: if scan_block(lines, i, j, \"operation\") and scan_block(lines, i, j, \"missing\")", "i + 1 while j < len(lines) and lines[j] != \"=================================================\\n\": j +=", "parallelism case if \"No use of parallelism\" in lines[j - 1]: noparrelism +=", "ofd.write(\"use_lambda: {}\".format(lines[k])) i = j continue # Judge if this is a no", "response['SynthesisTask']['TaskId']\", \"'taskStatus': 'inProgress'}\", \"taskId = response['SynthesisTask']['TaskId']\"] def check_safe_list(string): for safes in safe_list: if", "github_exception + proces_exception + no_retrieve + use_lambda + possible_para + det_no_pattern def scan_block(lines,", "use lambda function case # If only relying on auto-tool: this should be", "input() if user == '1': print_writeofd(\"code_between (proceeds): {}\".format(lines[k].strip('\\n')), ofd) elif user == '2':", "lines[i]: print(lines[i]) i += 1 if i != j: print(lines[i]) print(\"Please inspect the", "if user == '1': det_no_para += 1 print_writeofd(\"no_pattern (no_parallelism): {}\".format(lines[k].strip(\"\\n\")), ofd) i =", "case, and enter 2 if this is a use-parallelism case\") user = input()", "THE SAME FILE\" in lines[i_copy]: # Only do the following if doing manual", "and while clause between_code = 0 # Determined to be no pattern det_no_pattern", "argument is whether or not to proceed with manual checking: if sys.argv[1] ==", "SAME FILE\" in lines[i_copy]: # Only do the following if doing manual checking", "i_copy == j: ofd.write(\"no_parallelism: {}\".format(lines[k])) noparrelism += 1 break i += 1 i", "\"synthesis_task = {'taskId': response['SynthesisTask']['TaskId']\", \"'taskStatus': 'inProgress'}\", \"taskId = response['SynthesisTask']['TaskId']\"] def check_safe_list(string): for safes", "= j continue # Judge if this is a use lambda function case", "not be counted towards the total count of projects if \"No retrieve result\"", "argument is the output file from async_main_google.py ifd = open(sys.argv[2], 'r') # Third", "use of async case # Such project should not be counted towards the", "print_writeofd(\"{}, Use of Lambda Function\".format(use_lambda), ofd) 
print_writeofd(\"{}, No retrieve result\".format(no_retrieve), ofd) print_writeofd(\"{}, No", "CHECKING: {} RELEVANT TOTAL PROJECTS\".format(noparrelism + det_no_para + det_para + use_lambda), ofd) ofd.close()", "Function\".format(use_lambda), ofd) print_writeofd(\"{}, No retrieve result\".format(no_retrieve), ofd) print_writeofd(\"{}, No pattern identified\".format(nopattern + det_no_pattern),", "there shouldn't be any \"operating missing\", sanity check: if scan_block(lines, i, j, \"operation\")", "this point there shouldn't be any \"operating missing\", sanity check: if scan_block(lines, i,", "exceptions\".format(github_exception), ofd) print_writeofd(\"{}, Processing exceptions\".format(proces_exception), ofd) print_writeofd(\"{}, Use of Lambda Function\".format(use_lambda), ofd) print_writeofd(\"{},", "statement: if scan_block(lines, i, j, \"Nodes in between start statement and while statement\"):", "number allfile = 0 # All occurences of found 0 files no_async =", "True return False def judge_code(i, j, lines): while i < j: if \"Nodes", "OF PARALELLISM\".format(noparrelism), ofd) print_writeofd(\"RELYING ON AUTO TOOL: {} PARALELLISM USED\".format(possible_para + nopattern +", "towards the total count of projects if \"No use of async\" in lines[j", "print_writeofd(\"use_lambda (use_lambda): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue else: use_lambda += 1 ofd.write(\"use_lambda:", "< j: if keyword in lines[i]: return True i += 1 return False", "+= 1 return False def scan_block_numbers(lines, i, j, keyword): ret = 0 while", "print(\"Please inspect the above. Enter 1 if can proceed, and enter 2 if", "break # Now i and j stores the start and end of one", "use_lambda += 1 ofd.write(\"use_lambda: {}\".format(lines[k])) i = j continue # Judge if this", "At this point there shouldn't be any \"operating missing\", sanity check: if scan_block(lines,", "det_para += 1 print_writeofd(\"code_between (parallelism): {}\".format(lines[k].strip('\\n')), ofd) i = j continue # If", "or \"operation.result\" in lines[i_copy]: return True return False i_copy += 1 return True", "retrieve result case # Such project should not be counted towards the total", "noparrelism = 0 # Determined cases of no pattern: nopattern = 0 #", "# These are for cases where the repo is actually mis-using the API", "while i < j and \"========================\" not in lines[i]: i += 1 ofd.write(\"possible_parallelism:", "{'taskId': response['SynthesisTask']['TaskId']\", \"'taskStatus': 'inProgress'}\", \"taskId = response['SynthesisTask']['TaskId']\"] def check_safe_list(string): for safes in safe_list:", "i += 1 safe_list = [\"if response:\", \"job_id = response['JobId']\", \"synthesis_task = {'taskId':", "no_async + noparrelism + nopattern + github_exception + proces_exception + no_retrieve + use_lambda", "if doing manual checking if MANUAL_CHECKING: possible_para += 1 print(\"\\n\\n\\n\\n\\n\\n\") print(lines[i]) i +=", "1 break i += 1 i = j ofd.write(\"\\n\\n==================================================================\\n\") if not MANUAL_CHECKING: print_writeofd(\"{},", "break else: i += 1 while i < j and \"========================\" not in", "INSPECTION:\", ofd) print_writeofd(\"{}, No use of Async\".format(no_async), ofd) print_writeofd(\"{}, Github search exceptions\".format(github_exception), ofd)", "between start statement and while statement\" in lines[i]: i_copy = i + 1", "if i_copy == j: ofd.write(\"no_parallelism: {}\".format(lines[k])) noparrelism += 1 break i += 1", "print_writeofd(\"{}, No use of 
parallelism\".format(noparrelism + det_no_para), ofd) print_writeofd(\"{}, Use of parallel cases\".format(det_para),", "cases - processing error proces_exception = 0 # No retrieve result files no_retrieve", "open(sys.argv[2], 'r') # Third argument is the output file for a list of", "ofd) i = j continue else: nopattern += 1 ofd.write(\"no_pattern: {}\".format(lines[k])) i =", "\"Nodes in between start statement and while statement\" in lines[i]: i_copy = i", "between_code = 0 # Determined to be no pattern det_no_pattern = 0 def", "begin = get_all_add_up() allfile += 1 j = i + 1 while j", "# Judge if this is a no use of async case # Such", "1 if j > len(lines): break # Now i and j stores the", "if scan_block(lines, i, j, \"Other Github Exceptions occurred\"): github_exception += 1 ofd.write(\"github_exception: {}\".format(lines[k]))", "Third argument is the output file for a list of all repos ofd", "need to prompt users: if scan_block_numbers(lines, i, j, \"Nodes in between start statement", "# Such project should not be counted towards the total count of projects", "j, \"Other Github Exceptions occurred\"): github_exception += 1 ofd.write(\"github_exception: {}\".format(lines[k])) i = j", "in lines[j - 1]: noparrelism += 1 ofd.write(\"no_parallelism: {}\".format(lines[k])) i = j continue", "Determined no parallelism det_no_para = 0 # Determined parallelism det_para = 0 #", "these two numbers equal then need to prompt users: if scan_block_numbers(lines, i, j,", "If only relying on auto-tool: this should be a parallelism-used case if \"NO", "checking, then just count this as a no parallelism use case else: if", "sanity check: if scan_block(lines, i, j, \"operation\") and scan_block(lines, i, j, \"missing\") and", "{} PARALELLISM USED\".format(possible_para + nopattern + det_no_pattern + use_lambda), ofd) print_writeofd(\"RELYING ON AUTO", "on auto-tool: this should be a parallelism-used case if \"Use Lambda Function\" in", "== '-m': MANUAL_CHECKING = True elif sys.argv[1] == '-a': MANUAL_CHECKING = False else:", "+= 1 if i_copy == j: ofd.write(\"no_parallelism: {}\".format(lines[k])) noparrelism += 1 break i", "print(\"PRESS 1 OR 2 OR 3, NOT ANYTHING ELSE!\") user = input() if", "statement\"): # If these two numbers equal then need to prompt users: if", "lines[i]: i += 1 ofd.write(\"possible_parallelism: {}\".format(lines[k])) possible_para += 1 break i_copy += 1", "j, lines) print(\"Please inspect the above. Enter 1 if can proceed, and enter", "checking: if sys.argv[1] == '-m': MANUAL_CHECKING = True elif sys.argv[1] == '-a': MANUAL_CHECKING", "retrieve result\".format(no_retrieve), ofd) print_writeofd(\"{}, No pattern identified\".format(nopattern + det_no_pattern), ofd) print_writeofd(\"{}, No use", "+= 1 return ret def print_code(i, j, lines): while i < j: if", "output file for a list of all repos ofd = open(sys.argv[3], 'w') #", "ofd) print_writeofd(\"{}, No retrieve result\".format(no_retrieve), ofd) print_writeofd(\"{}, No pattern identified\".format(nopattern + det_no_pattern), ofd)", "if can proceed, and enter 2 if this is a use_parallelism case\") user", "sys import re from utils.utils import print_writeofd # First argument is whether or", "False def judge_code(i, j, lines): while i < j: if \"Nodes in between", "cases where the repo is actually mis-using the API elif user == '3':", "MANUAL_CHECKING: print(\"use_lambda: {}\".format(lines[k])) print(\"Please inspect the above. 
Enter 1 if this is a", "either -m or -a, see README.md for details\") exit(1) # Second argument is", "this is a use-parallelism case\") user = input() while user != '1' and", "lines[j - 1]: if MANUAL_CHECKING: print(\"use_lambda: {}\".format(lines[k])) print(\"Please inspect the above. Enter 1", "while i < j: if keyword in lines[i]: return True i += 1", "0 # Determined cases of no pattern: nopattern = 0 # Number of", "+= 1 continue if check_safe_list(lines[i_copy]): i_copy += 1 continue if \"operation.done\" in lines[i_copy]", "+= 1 if i != j: print(lines[i]) print(\"Please inspect the above. Enter 1", "\"------\" not in lines[i_copy]: print(lines[i_copy]) i_copy += 1 break i += 1 safe_list", "between start statement and while statement\" in lines[i]: i_copy = i while \"------\"", "USE OF PARALELLISM\".format(noparrelism), ofd) print_writeofd(\"RELYING ON AUTO TOOL: {} PARALELLISM USED\".format(possible_para + nopattern", "j += 1 if j > len(lines): break # Now i and j", "1 print_writeofd(\"use_lambda (use_lambda): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue else: use_lambda += 1", "occurred\"): github_exception += 1 ofd.write(\"github_exception: {}\".format(lines[k])) i = j continue # Judge if", "Function\" in lines[j - 1]: if MANUAL_CHECKING: print(\"use_lambda: {}\".format(lines[k])) print(\"Please inspect the above.", "a no parallelism use case else: if not judge_code(i, j, lines): det_no_pattern +=", "start clause and while clause between_code = 0 # Determined to be no", "Processing exceptions\".format(proces_exception), ofd) print_writeofd(\"{}, Use of Lambda Function\".format(use_lambda), ofd) print_writeofd(\"{}, No retrieve result\".format(no_retrieve),", "ofd.write(\"no_retrieve: {}\".format(lines[k])) i = j continue # At this point there shouldn't be", "utils.utils import print_writeofd # First argument is whether or not to proceed with", "while statement\") == scan_block_numbers(lines, i, j, \"Pattern identified\"): between_code += 1 if MANUAL_CHECKING:", "use-parallelism case, and enter 3 if this shouldn't count\") user = input() while", "in string: return True return False def judge_code(i, j, lines): while i <", "!= j: print(lines[i]) print(\"Please inspect the above. 
Enter 1 if this is a", "{}\".format(lines[k])) i = j continue # Judge if there is any other exception", "1 print_writeofd(\"code_between (parallelism): {}\".format(lines[k].strip('\\n')), ofd) i = j continue # If not manual", "while statement: if scan_block(lines, i, j, \"Nodes in between start statement and while", "for safes in safe_list: if safes in string: return True return False def", "no parallelism det_no_para = 0 # Determined parallelism det_para = 0 # There", "det_no_para = 0 # Determined parallelism det_para = 0 # There exists code", "+= 1 while i < j and \"========================\" not in lines[i]: i +=", "case # If only relying on auto-tool: this should be a parallelism-used case", "IDENTIFIED\" in lines[j - 1]: if MANUAL_CHECKING: print(\"\\n\\n\\n\\n\\n\\n\") print_writeofd(\"no_pattern: {}\".format(lines[k].strip(\"\\n\")), ofd) print(\"Please inspect", "print_writeofd(\"{}, Github search exceptions\".format(github_exception), ofd) print_writeofd(\"{}, Processing exceptions\".format(proces_exception), ofd) print_writeofd(\"{}, Use of Lambda", "+= 1 i = j continue # Judge if this is a no", "parallelism\" in lines[j - 1]: noparrelism += 1 ofd.write(\"no_parallelism: {}\".format(lines[k])) i = j", "of async case # Such project should not be counted towards the total", "in lines[i]: i_copy = i while i_copy < j: if \"BOTH IDENTIFIED IN", "\"'taskStatus': 'inProgress'}\", \"taskId = response['SynthesisTask']['TaskId']\"] def check_safe_list(string): for safes in safe_list: if safes", "MANUAL_CHECKING = False else: print(\"The first argument must be either -m or -a,", "3 if this shouldn't count\") user = input() while user != '1' and", "the output file for a list of all repos ofd = open(sys.argv[3], 'w')", "of Async\".format(no_async), ofd) print_writeofd(\"{}, Github search exceptions\".format(github_exception), ofd) print_writeofd(\"{}, Processing exceptions\".format(proces_exception), ofd) print_writeofd(\"{},", "< j: if \"Nodes in between start statement and while statement\" in lines[i]:", "'2': print(\"PRESS 1 OR 2, NOT ANYTHING ELSE!\") user = input() if user", "+= 1 break i += 1 i = j ofd.write(\"\\n\\n==================================================================\\n\") if not MANUAL_CHECKING:", "+= 1 break i += 1 safe_list = [\"if response:\", \"job_id = response['JobId']\",", "print_writeofd(\"possible_parallelism (parallelism): {}\".format(lines[k].strip(\"\\n\")), ofd) break else: i += 1 while i < j", "No use of parallelism\".format(noparrelism + det_no_para), ofd) print_writeofd(\"{}, Use of parallel cases\".format(det_para), ofd)", "continue elif user == '2': use_lambda += 1 print_writeofd(\"use_lambda (use_lambda): {}\".format(lines[k].strip(\"\\n\")), ofd) i", "projects if \"No retrieve result\" in lines[j - 1]: no_retrieve += 1 ofd.write(\"no_retrieve:", "OR 2, NOT ANYTHING ELSE!\") user = input() if user == '1': print_writeofd(\"code_between", "j, \"Nodes in between start statement and while statement\") == scan_block_numbers(lines, i, j,", "'1' and user != '2' and user != '3': print(\"PRESS 1 OR 2", "possible_para = 0 # Determined no parallelism det_no_para = 0 # Determined parallelism", "identified\"): between_code += 1 if MANUAL_CHECKING: print(\"\\n\\n\\n\\n\\n\\n\") print_code(i, j, lines) print(\"Please inspect the", "be a parallelism-used case if \"Use Lambda Function\" in lines[j - 1]: if", "ofd.write(\"\\n\\n==================================================================\\n\") if not MANUAL_CHECKING: print_writeofd(\"{}, Total files 
searched\".format(allfile), ofd) print_writeofd(\"BEFORE MANUAL INSPECTION:\", ofd)", "details\") exit(1) # Second argument is the output file from async_main_google.py ifd =", "i_copy += 1 continue if \"operation.done\" in lines[i_copy] or \"operation.result\" in lines[i_copy]: return", "\"missing\") and (not scan_block(lines, i, j, \"Pattern identified\")): print(\"Operation missing while it's neither", "j: ofd.write(\"no_parallelism: {}\".format(lines[k])) noparrelism += 1 break i += 1 i = j", "i_copy = i while i_copy < j: if \"BOTH IDENTIFIED IN THE SAME", "\"Pattern identified\"): between_code += 1 if MANUAL_CHECKING: print(\"\\n\\n\\n\\n\\n\\n\") print_code(i, j, lines) print(\"Please inspect", "parallelism use case else: if not judge_code(i, j, lines): det_no_pattern += 1 i", "# Determined no parallelism det_no_para = 0 # Determined parallelism det_para = 0", "proces_exception += 1 print_writeofd(\"no_pattern (process_exception): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue else: nopattern", "MANUAL CHECKING: {} RELEVANT TOTAL PROJECTS\".format(noparrelism + det_no_para + det_para + use_lambda), ofd)", "continue # If not manual checking, then just count this as a no", "+ use_lambda + possible_para + det_no_pattern def scan_block(lines, i, j, keyword): while i", "repo is actually mis-using the API elif user == '3': proces_exception += 1", "det_para += 1 print_writeofd(\"possible_parallelism (parallelism): {}\".format(lines[k].strip(\"\\n\")), ofd) break else: i += 1 while", "j and \"========================\" not in lines[i]: i += 1 ofd.write(\"possible_parallelism: {}\".format(lines[k])) possible_para +=", "= input() while user != '1' and user != '2': print(\"PRESS 1 OR", "ofd) elif user == '2': det_para += 1 print_writeofd(\"possible_parallelism (parallelism): {}\".format(lines[k].strip(\"\\n\")), ofd) break", "j: if \"BOTH IDENTIFIED IN THE SAME FILE\" in lines[i_copy]: # Only do", "1 return False lines = ifd.readlines() i = 0 while i < len(lines):", "!= '2' and user != '3': print(\"PRESS 1 OR 2 OR 3, NOT", "i = j continue # Judge if this is a no pattern identified", "statement\" in lines[i]: i_copy = i while \"------\" not in lines[i_copy]: print(lines[i_copy]) i_copy", "if not judge_code(i, j, lines): det_no_pattern += 1 i = j continue #", "use lambda case\") user = input() while user != '1' and user !=", "cases - repo no longer exist github_exception = 0 # Number of exception", "github_exception += 1 ofd.write(\"github_exception: {}\".format(lines[k])) i = j continue # Judge if there", "= 0 # All occurences of found 0 files no_async = 0 #", "or not to proceed with manual checking: if sys.argv[1] == '-m': MANUAL_CHECKING =", "a no parallelism case, and enter 2 if this is a use-parallelism case,", "any \"operating missing\", sanity check: if scan_block(lines, i, j, \"operation\") and scan_block(lines, i,", "{}\".format(lines[k])) i = j continue # Judge if this is a no retrieve", "be no pattern det_no_pattern = 0 def get_all_add_up(): return no_async + noparrelism +", "are for cases where the repo is actually mis-using the API elif user", "j stores the start and end of one search snippet k = i", "a no use of async case # Such project should not be counted", "i = j ofd.write(\"\\n\\n==================================================================\\n\") if not MANUAL_CHECKING: print_writeofd(\"{}, Total files searched\".format(allfile), ofd) print_writeofd(\"BEFORE", "+ possible_para + nopattern + det_no_pattern + use_lambda), ofd) elif MANUAL_CHECKING: 
print_writeofd(\"\", ofd)", "j, \"Nodes in between start statement and while statement\"): # If these two", "result files no_retrieve = 0 # Use Lambda function use_lambda = 0 #", "print_writeofd(\"RELYING ON MANUAL CHECKING: {} NO USE OF PARALELLISM\".format(noparrelism + det_no_para), ofd) print_writeofd(\"RELYING", "= j continue # At this point there shouldn't be any \"operating missing\",", "auto-tool: this should be a parallelism-used case if \"NO PATTERN IDENTIFIED\" in lines[j", "!= '2': print(\"PRESS 1 OR 2, NOT ANYTHING ELSE!\") user = input() if", "= i while i_copy < j: if \"BOTH IDENTIFIED IN THE SAME FILE\"", "< len(lines) and lines[j] != \"=================================================\\n\": j += 1 if j > len(lines):", "MANUAL_CHECKING: possible_para += 1 print(\"\\n\\n\\n\\n\\n\\n\") print(lines[i]) i += 1 while i < j", "MANUAL_CHECKING: print_writeofd(\"\", ofd) print_writeofd(\"\", ofd) print_writeofd(\"After MANUAL INSPECTION:\", ofd) print_writeofd(\"{}, No use of", "print_writeofd(\"BEFORE MANUAL INSPECTION:\", ofd) print_writeofd(\"{}, No use of Async\".format(no_async), ofd) print_writeofd(\"{}, Github search", "# Judge if there is any other exception triggered if scan_block(lines, i, j,", "det_no_para += 1 print_writeofd(\"possible_parallelism (no_parallelism): {}\".format(lines[k].strip(\"\\n\")), ofd) elif user == '2': det_para +=", "exit(1) # Check if needs to prompt users on codes between start and", "not in lines[i]: print(lines[i]) i += 1 if i != j: print(lines[i]) print(\"Please", "print_writeofd # First argument is whether or not to proceed with manual checking:", "print(\"use_lambda: {}\".format(lines[k])) print(\"Please inspect the above. Enter 1 if this is a no", "print(\"The first argument must be either -m or -a, see README.md for details\")", "check: if scan_block(lines, i, j, \"operation\") and scan_block(lines, i, j, \"missing\") and (not", "NO USE OF PARALELLISM\".format(noparrelism), ofd) print_writeofd(\"RELYING ON AUTO TOOL: {} PARALELLISM USED\".format(possible_para +", "= j continue # Judge if this is a no pattern identified case", "j continue else: nopattern += 1 ofd.write(\"no_pattern: {}\".format(lines[k])) i = j continue #", "+= 1 if MANUAL_CHECKING: print(\"\\n\\n\\n\\n\\n\\n\") print_code(i, j, lines) print(\"Please inspect the above. 
Enter", "print(\"\\n\\n\\n\\n\\n\\n\") print(lines[i]) i += 1 while i < j and \"========================\" not in", "start statement and while statement\") == scan_block_numbers(lines, i, j, \"Pattern identified\"): between_code +=", "False else: print(\"The first argument must be either -m or -a, see README.md", "NO USE OF PARALELLISM\".format(noparrelism + det_no_para), ofd) print_writeofd(\"RELYING ON MANUAL CHECKING: {} PARALELLISM", "exception cases - processing error proces_exception = 0 # No retrieve result files", "in between start statement and while statement\" in lines[i]: i_copy = i +", "i_copy += 1 return True i += 1 return False lines = ifd.readlines()", "-m or -a, see README.md for details\") exit(1) # Second argument is the", "in lines[i_copy]: if lines[i_copy].isspace(): i_copy += 1 continue if check_safe_list(lines[i_copy]): i_copy += 1", "i = 0 while i < len(lines): begin = get_all_add_up() allfile += 1", "1 while i < j and \"========================\" not in lines[i]: print(lines[i]) i +=", "cases\".format(det_para), ofd) print_writeofd(\"RELYING ON MANUAL CHECKING: {} NO USE OF PARALELLISM\".format(noparrelism + det_no_para),", "if user == '1': print_writeofd(\"code_between (proceeds): {}\".format(lines[k].strip('\\n')), ofd) elif user == '2': det_para", "3, NOT ANYTHING ELSE!\") user = input() if user == '1': det_no_para +=", "0 # Number of exception cases - processing error proces_exception = 0 #", "argument is the output file for a list of all repos ofd =", "lines[i_copy]: return True return False i_copy += 1 return True i += 1", "user == '2': use_lambda += 1 print_writeofd(\"use_lambda (use_lambda): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j", "no parallelism noparrelism = 0 # Determined cases of no pattern: nopattern =", "= open(sys.argv[3], 'w') # All files number allfile = 0 # All occurences", "(parallelism): {}\".format(lines[k].strip('\\n')), ofd) i = j continue # If not manual checking, then", "and lines[j] != \"=================================================\\n\": j += 1 if j > len(lines): break #", "lines[i_copy] or \"operation.result\" in lines[i_copy]: return True return False i_copy += 1 return", "case if \"NO PATTERN IDENTIFIED\" in lines[j - 1]: if MANUAL_CHECKING: print(\"\\n\\n\\n\\n\\n\\n\") print_writeofd(\"no_pattern:", "Second argument is the output file from async_main_google.py ifd = open(sys.argv[2], 'r') #", "+= 1 print_writeofd(\"use_lambda (use_lambda): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue else: use_lambda +=", "+ det_no_pattern), ofd) print_writeofd(\"{}, No use of parallelism\".format(noparrelism + det_no_para), ofd) print_writeofd(\"{}, Use", "ifd.readlines() i = 0 while i < len(lines): begin = get_all_add_up() allfile +=", "k = i + 1 # Judge if there is any github exception", "total count of projects if \"No use of async\" in lines[j - 1]:", "print_writeofd(\"{}, No use of Async\".format(no_async), ofd) print_writeofd(\"{}, Github search exceptions\".format(github_exception), ofd) print_writeofd(\"{}, Processing", "1 continue if check_safe_list(lines[i_copy]): i_copy += 1 continue if \"operation.done\" in lines[i_copy] or", "ofd) print_writeofd(\"{}, Possible use of parallel cases\".format(possible_para), ofd) print_writeofd(\"RELYING ON AUTO TOOL: {}", "of parallelism\".format(noparrelism), ofd) print_writeofd(\"{}, Possible use of parallel cases\".format(possible_para), ofd) print_writeofd(\"RELYING ON AUTO", "If these two numbers equal then need to prompt users: if scan_block_numbers(lines, i,", "0 # 
Possible parallelism possible_para = 0 # Determined no parallelism det_no_para =", "0 # Determined cases of no parallelism noparrelism = 0 # Determined cases", "nopattern += 1 ofd.write(\"no_pattern: {}\".format(lines[k])) i = j continue # Judge if this", "= j continue else: use_lambda += 1 ofd.write(\"use_lambda: {}\".format(lines[k])) i = j continue", "be counted towards the total count of projects if \"No use of async\"", "is a no retrieve result case # Such project should not be counted", "det_no_pattern + use_lambda), ofd) elif MANUAL_CHECKING: print_writeofd(\"\", ofd) print_writeofd(\"\", ofd) print_writeofd(\"After MANUAL INSPECTION:\",", "Use of parallel cases\".format(det_para), ofd) print_writeofd(\"RELYING ON MANUAL CHECKING: {} NO USE OF", "j > len(lines): break # Now i and j stores the start and", "only relying on auto-tool: this should be a parallelism-used case if \"Use Lambda", "'1' and user != '2': print(\"PRESS 1 OR 2, NOT ANYTHING ELSE!\") user", "+ det_no_pattern), ofd) print_writeofd(\"{}, No use of parallelism\".format(noparrelism), ofd) print_writeofd(\"{}, Possible use of", "start statement and while statement\" in lines[i]: i_copy = i while \"------\" not", "a no use of parallelism case if \"No use of parallelism\" in lines[j", "any github exception triggered if scan_block(lines, i, j, \"Other Github Exceptions occurred\"): github_exception", "det_no_pattern def scan_block(lines, i, j, keyword): while i < j: if keyword in", "parallelism case, and enter 2 if this is a use-parallelism case\") user =", "= j continue # These are for cases where the repo is actually", "2 if this is a use-parallelism case\") user = input() while user !=", "use of parallelism\".format(noparrelism), ofd) print_writeofd(\"{}, Possible use of parallel cases\".format(possible_para), ofd) print_writeofd(\"RELYING ON", "user == '1': det_no_para += 1 print_writeofd(\"no_pattern (no_parallelism): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j", "if there is any github exception triggered if scan_block(lines, i, j, \"Other Github", "stores the start and end of one search snippet k = i +", "async_main_google.py ifd = open(sys.argv[2], 'r') # Third argument is the output file for", "clause and while clause between_code = 0 # Determined to be no pattern", "< j: if keyword in lines[i]: ret += 1 i += 1 return", "(parallelism): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue # These are for cases where", "while \"------\" not in lines[i_copy]: if lines[i_copy].isspace(): i_copy += 1 continue if check_safe_list(lines[i_copy]):", "1 ofd.write(\"no_async: {}\".format(lines[k])) i = j continue # Judge if this is a", "user == '2': det_para += 1 print_writeofd(\"code_between (parallelism): {}\".format(lines[k].strip('\\n')), ofd) i = j", "input() while user != '1' and user != '2': print(\"PRESS 1 OR 2,", "safes in string: return True return False def judge_code(i, j, lines): while i", "1 ofd.write(\"use_lambda: {}\".format(lines[k])) i = j continue # Judge if this is a", "Github Exceptions occurred\"): github_exception += 1 ofd.write(\"github_exception: {}\".format(lines[k])) i = j continue #", "is a no parallelism case, and enter 2 if this is a use-parallelism", "lines[j] != \"=================================================\\n\": j += 1 if j > len(lines): break # Now", "in lines[i_copy]: print(lines[i_copy]) i_copy += 1 break i += 1 safe_list = [\"if", "= response['SynthesisTask']['TaskId']\"] def check_safe_list(string): for safes in safe_list: if safes in string: return", 
"print(\"\\n\\n\\n\\n\\n\\n\") print_code(i, j, lines) print(\"Please inspect the above. Enter 1 if can proceed,", "lambda case\") user = input() while user != '1' and user != '2':", "j < len(lines) and lines[j] != \"=================================================\\n\": j += 1 if j >", "continue else: nopattern += 1 ofd.write(\"no_pattern: {}\".format(lines[k])) i = j continue # Judge", "= 0 # Use Lambda function use_lambda = 0 # Possible parallelism possible_para", "return ret def print_code(i, j, lines): while i < j: if \"Nodes in", "AUTO TOOL: {} NO USE OF PARALELLISM\".format(noparrelism), ofd) print_writeofd(\"RELYING ON AUTO TOOL: {}", "parallelism noparrelism = 0 # Determined cases of no pattern: nopattern = 0", "on codes between start and while statement: if scan_block(lines, i, j, \"Nodes in", "the output file from async_main_google.py ifd = open(sys.argv[2], 'r') # Third argument is", "{}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue elif user == '2': use_lambda += 1", "+= 1 safe_list = [\"if response:\", \"job_id = response['JobId']\", \"synthesis_task = {'taskId': response['SynthesisTask']['TaskId']\",", "for a list of all repos ofd = open(sys.argv[3], 'w') # All files", "of exception cases - processing error proces_exception = 0 # No retrieve result", "output file from async_main_google.py ifd = open(sys.argv[2], 'r') # Third argument is the", "IN THE SAME FILE\" in lines[i_copy]: # Only do the following if doing", "TOOL: {} NO USE OF PARALELLISM\".format(noparrelism), ofd) print_writeofd(\"RELYING ON AUTO TOOL: {} PARALELLISM", "not in lines[i_copy]: print(lines[i_copy]) i_copy += 1 break i += 1 safe_list =", "len(lines) and lines[j] != \"=================================================\\n\": j += 1 if j > len(lines): break", "possible_para + det_no_pattern def scan_block(lines, i, j, keyword): while i < j: if", "\"operation\") and scan_block(lines, i, j, \"missing\") and (not scan_block(lines, i, j, \"Pattern identified\")):", "= 0 # Determined parallelism det_para = 0 # There exists code in", "first argument must be either -m or -a, see README.md for details\") exit(1)", "README.md for details\") exit(1) # Second argument is the output file from async_main_google.py", "triggered if scan_block(lines, i, j, \"EXCEPTION OCCURS\"): proces_exception += 1 ofd.write(\"process_exception: {}\".format(lines[k])) i", "ANYTHING ELSE!\") user = input() if user == '1': det_no_para += 1 print_writeofd(\"use_lambda", "users: if scan_block_numbers(lines, i, j, \"Nodes in between start statement and while statement\")", "(use_lambda): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue else: use_lambda += 1 ofd.write(\"use_lambda: {}\".format(lines[k]))", "parallelism possible_para = 0 # Determined no parallelism det_no_para = 0 # Determined", "else: i += 1 while i < j and \"========================\" not in lines[i]:", "{}\".format(lines[k])) print(\"Please inspect the above. 
Enter 1 if this is a no parallelism", "argument must be either -m or -a, see README.md for details\") exit(1) #", "and while statement: if scan_block(lines, i, j, \"Nodes in between start statement and", "end of one search snippet k = i + 1 # Judge if", "= input() if user == '1': print_writeofd(\"code_between (proceeds): {}\".format(lines[k].strip('\\n')), ofd) elif user ==", "j: if \"***\" in lines[i]: i_copy = i while i_copy < j: if", "lines[j - 1]: no_async += 1 ofd.write(\"no_async: {}\".format(lines[k])) i = j continue #", "continue if check_safe_list(lines[i_copy]): i_copy += 1 continue if \"operation.done\" in lines[i_copy] or \"operation.result\"", "== '2': det_para += 1 print_writeofd(\"possible_parallelism (parallelism): {}\".format(lines[k].strip(\"\\n\")), ofd) break else: i +=", "'inProgress'}\", \"taskId = response['SynthesisTask']['TaskId']\"] def check_safe_list(string): for safes in safe_list: if safes in", "the repo is actually mis-using the API elif user == '3': proces_exception +=", "OR 2 OR 3, NOT ANYTHING ELSE!\") user = input() if user ==", "if \"Nodes in between start statement and while statement\" in lines[i]: i_copy =", "No pattern identified\".format(nopattern + det_no_pattern), ofd) print_writeofd(\"{}, No use of parallelism\".format(noparrelism + det_no_para),", "retrieve result files no_retrieve = 0 # Use Lambda function use_lambda = 0", "MANUAL_CHECKING: print_writeofd(\"{}, Total files searched\".format(allfile), ofd) print_writeofd(\"BEFORE MANUAL INSPECTION:\", ofd) print_writeofd(\"{}, No use", "all repos ofd = open(sys.argv[3], 'w') # All files number allfile = 0", "no pattern identified case # If only relying on auto-tool: this should be", "det_no_para += 1 print_writeofd(\"no_pattern (no_parallelism): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue elif user", "+ 1 while \"------\" not in lines[i_copy]: if lines[i_copy].isspace(): i_copy += 1 continue", "while user != '1' and user != '2': print(\"PRESS 1 OR 2, NOT", "= 0 while i < len(lines): begin = get_all_add_up() allfile += 1 j", "{}\".format(lines[k].strip('\\n')), ofd) i = j continue # If not manual checking, then just", "All files number allfile = 0 # All occurences of found 0 files", "between_code += 1 if MANUAL_CHECKING: print(\"\\n\\n\\n\\n\\n\\n\") print_code(i, j, lines) print(\"Please inspect the above.", "open(sys.argv[3], 'w') # All files number allfile = 0 # All occurences of", "use_lambda += 1 print_writeofd(\"use_lambda (use_lambda): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue else: use_lambda", "user == '1': print_writeofd(\"code_between (proceeds): {}\".format(lines[k].strip('\\n')), ofd) elif user == '2': det_para +=", "ofd) break else: i += 1 while i < j and \"========================\" not", "no parallelism case, and enter 2 if this is a use lambda case\")", "{} NO USE OF PARALELLISM\".format(noparrelism + det_no_para), ofd) print_writeofd(\"RELYING ON MANUAL CHECKING: {}", "of projects if \"No retrieve result\" in lines[j - 1]: no_retrieve += 1", "if scan_block(lines, i, j, \"operation\") and scan_block(lines, i, j, \"missing\") and (not scan_block(lines,", "possible_para + nopattern + det_no_pattern + use_lambda), ofd) elif MANUAL_CHECKING: print_writeofd(\"\", ofd) print_writeofd(\"\",", "or -a, see README.md for details\") exit(1) # Second argument is the output", "scan_block(lines, i, j, \"Other Github Exceptions occurred\"): github_exception += 1 ofd.write(\"github_exception: {}\".format(lines[k])) i", "elif user == '2': use_lambda += 1 
print_writeofd(\"use_lambda (use_lambda): {}\".format(lines[k].strip(\"\\n\")), ofd) i =", "+= 1 print_writeofd(\"possible_parallelism (no_parallelism): {}\".format(lines[k].strip(\"\\n\")), ofd) elif user == '2': det_para += 1", "[\"if response:\", \"job_id = response['JobId']\", \"synthesis_task = {'taskId': response['SynthesisTask']['TaskId']\", \"'taskStatus': 'inProgress'}\", \"taskId =", "start and while statement: if scan_block(lines, i, j, \"Nodes in between start statement", "ANYTHING ELSE!\") user = input() if user == '1': det_no_para += 1 print_writeofd(\"possible_parallelism", "a use lambda case\") user = input() while user != '1' and user", "project should not be counted towards the total count of projects if \"No", "j, \"EXCEPTION OCCURS\"): proces_exception += 1 ofd.write(\"process_exception: {}\".format(lines[k])) i = j continue #", "print_writeofd(\"{}, Use of parallel cases\".format(det_para), ofd) print_writeofd(\"RELYING ON MANUAL CHECKING: {} NO USE", "count\") user = input() while user != '1' and user != '2' and", "j and \"========================\" not in lines[i]: print(lines[i]) i += 1 if i !=", "Judge if this is a no use of async case # Such project", "1 return False def scan_block_numbers(lines, i, j, keyword): ret = 0 while i", "user == '1': det_no_para += 1 print_writeofd(\"use_lambda (no_parallelism): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j", "while i < len(lines): begin = get_all_add_up() allfile += 1 j = i", "TOOL: {} PARALELLISM USED\".format(possible_para + nopattern + det_no_pattern + use_lambda), ofd) print_writeofd(\"RELYING ON", "= [\"if response:\", \"job_id = response['JobId']\", \"synthesis_task = {'taskId': response['SynthesisTask']['TaskId']\", \"'taskStatus': 'inProgress'}\", \"taskId", "count of projects if \"No use of async\" in lines[j - 1]: no_async", "manual checking: if sys.argv[1] == '-m': MANUAL_CHECKING = True elif sys.argv[1] == '-a':", "lines): while i < j: if \"Nodes in between start statement and while", "= input() while user != '1' and user != '2' and user !=", "parallelism-used case if \"NO PATTERN IDENTIFIED\" in lines[j - 1]: if MANUAL_CHECKING: print(\"\\n\\n\\n\\n\\n\\n\")", "= 0 # Possible parallelism possible_para = 0 # Determined no parallelism det_no_para", "MANUAL_CHECKING: print(\"\\n\\n\\n\\n\\n\\n\") print_code(i, j, lines) print(\"Please inspect the above. 
Enter 1 if can", "i = j continue while i < j: if \"***\" in lines[i]: i_copy", "i < len(lines): begin = get_all_add_up() allfile += 1 j = i +", "# Check if needs to prompt users on codes between start and while", "len(lines): begin = get_all_add_up() allfile += 1 j = i + 1 while", "= j continue elif user == '2': use_lambda += 1 print_writeofd(\"use_lambda (use_lambda): {}\".format(lines[k].strip(\"\\n\")),", "'w') # All files number allfile = 0 # All occurences of found", "proces_exception + no_retrieve + use_lambda + possible_para + det_no_pattern def scan_block(lines, i, j,", "ofd) print_writeofd(\"RELYING ON AUTO TOOL: {} NO USE OF PARALELLISM\".format(noparrelism), ofd) print_writeofd(\"RELYING ON", "else: nopattern += 1 ofd.write(\"no_pattern: {}\".format(lines[k])) i = j continue # Judge if", "if this is a no pattern identified case # If only relying on", "= ifd.readlines() i = 0 while i < len(lines): begin = get_all_add_up() allfile", "should not be counted towards the total count of projects if \"No use", "break i += 1 safe_list = [\"if response:\", \"job_id = response['JobId']\", \"synthesis_task =", "files no_retrieve = 0 # Use Lambda function use_lambda = 0 # Possible", "continue # Judge if this is a use lambda function case # If", "parallelism case, and enter 2 if this is a use-parallelism case, and enter", "Determined cases of no pattern: nopattern = 0 # Number of exception cases", "a no pattern identified case # If only relying on auto-tool: this should", "re from utils.utils import print_writeofd # First argument is whether or not to", "statement\") == scan_block_numbers(lines, i, j, \"Pattern identified\"): between_code += 1 if MANUAL_CHECKING: print(\"\\n\\n\\n\\n\\n\\n\")", "CHECKING: {} PARALELLISM USED\".format(det_para + use_lambda), ofd) print_writeofd(\"RELYING ON MANUAL CHECKING: {} RELEVANT", "i < j: if \"***\" in lines[i]: i_copy = i while i_copy <", "USED\".format(possible_para + nopattern + det_no_pattern + use_lambda), ofd) print_writeofd(\"RELYING ON AUTO TOOL: {}", "\"Pattern identified\")): print(\"Operation missing while it's neither use lambda nor no pattern identified:", "scan_block(lines, i, j, \"Pattern identified\")): print(\"Operation missing while it's neither use lambda nor", "1 print(\"\\n\\n\\n\\n\\n\\n\") print(lines[i]) i += 1 while i < j and \"========================\" not", "lines[i_copy].isspace(): i_copy += 1 continue if check_safe_list(lines[i_copy]): i_copy += 1 continue if \"operation.done\"", "no pattern: nopattern = 0 # Number of exception cases - repo no", "in lines[i_copy]: # Only do the following if doing manual checking if MANUAL_CHECKING:", "'3': proces_exception += 1 print_writeofd(\"no_pattern (process_exception): {}\".format(lines[k].strip(\"\\n\")), ofd) i = j continue else:", "inspect the above. 
Enter 1 if can proceed, and enter 2 if this", "case, and enter 2 if this is a use lambda case\") user =", "break i_copy += 1 if i_copy == j: ofd.write(\"no_parallelism: {}\".format(lines[k])) noparrelism += 1", "= i + 1 # Judge if there is any github exception triggered", "following if doing manual checking if MANUAL_CHECKING: possible_para += 1 print(\"\\n\\n\\n\\n\\n\\n\") print(lines[i]) i", "on auto-tool: this should be a parallelism-used case if \"NO PATTERN IDENTIFIED\" in", "No pattern identified\".format(nopattern + det_no_pattern), ofd) print_writeofd(\"{}, No use of parallelism\".format(noparrelism), ofd) print_writeofd(\"{},", "= i + 1 while j < len(lines) and lines[j] != \"=================================================\\n\": j", "continue # At this point there shouldn't be any \"operating missing\", sanity check:", "i while i_copy < j: if \"BOTH IDENTIFIED IN THE SAME FILE\" in", "Determined cases of no parallelism noparrelism = 0 # Determined cases of no", "j ofd.write(\"\\n\\n==================================================================\\n\") if not MANUAL_CHECKING: print_writeofd(\"{}, Total files searched\".format(allfile), ofd) print_writeofd(\"BEFORE MANUAL INSPECTION:\",", "a use_parallelism case\") user = input() while user != '1' and user !=", "the API elif user == '3': proces_exception += 1 print_writeofd(\"no_pattern (process_exception): {}\".format(lines[k].strip(\"\\n\")), ofd)", "# All files number allfile = 0 # All occurences of found 0", "ofd) i = j continue else: use_lambda += 1 ofd.write(\"use_lambda: {}\".format(lines[k])) i =", "lines[i]: i_copy = i while i_copy < j: if \"BOTH IDENTIFIED IN THE", "cases of no pattern: nopattern = 0 # Number of exception cases -", "if scan_block_numbers(lines, i, j, \"Nodes in between start statement and while statement\") ==", "If not manual checking, then just count this as a no parallelism use", "- 1]: noparrelism += 1 ofd.write(\"no_parallelism: {}\".format(lines[k])) i = j continue while i", "0 # All occurences of found 0 files no_async = 0 # Determined", "lines[i]: i_copy = i while \"------\" not in lines[i_copy]: print(lines[i_copy]) i_copy += 1", "if \"BOTH IDENTIFIED IN THE SAME FILE\" in lines[i_copy]: # Only do the", "is a no use of parallelism case if \"No use of parallelism\" in", "while i < j: if \"Nodes in between start statement and while statement\"", "2 OR 3, NOT ANYTHING ELSE!\") user = input() if user == '1':", "1 i = j ofd.write(\"\\n\\n==================================================================\\n\") if not MANUAL_CHECKING: print_writeofd(\"{}, Total files searched\".format(allfile), ofd)", "if scan_block(lines, i, j, \"EXCEPTION OCCURS\"): proces_exception += 1 ofd.write(\"process_exception: {}\".format(lines[k])) i =", "1]: if MANUAL_CHECKING: print(\"\\n\\n\\n\\n\\n\\n\") print_writeofd(\"no_pattern: {}\".format(lines[k].strip(\"\\n\")), ofd) print(\"Please inspect the above. 
Row content: overlapping 12-token shingles ("ngram" entries) taken from a Python post-processing script for the output of async_main_google.py.

Reassembled from the shingles, the script takes -m (manual checking) or -a (automatic) as its first argument, the async_main_google.py output file as its second, and a per-repo classification file as its third (anything else exits with a pointer to README.md). It keeps per-category counters (allfile, no_async, noparrelism, nopattern, github_exception, proces_exception, no_retrieve, use_lambda, possible_para, det_no_para, det_para, between_code, det_no_pattern) and defines the helpers get_all_add_up, scan_block, scan_block_numbers, print_code, check_safe_list (matched against a safe_list of response-bookkeeping lines such as "job_id = response['JobId']"), and judge_code (which accepts a snippet only when the code between the start statement and the while statement is limited to safe lines or operation.done / operation.result calls).

The main loop walks the input in snippets delimited by "=================================================" lines and classifies each repo as github_exception, process_exception, no_async, no_retrieve, code_between, use_lambda, no_pattern, no_parallelism, or possible_parallelism, prompting the user (enter 1 / 2 / 3) for the ambiguous categories when manual checking is enabled. It ends by writing summary counts through print_writeofd (imported from utils.utils), covering both the "RELYING ON AUTO TOOL" and the "RELYING ON MANUAL CHECKING" totals.
[]
[ "is raised if that's not the case. from djmodels.contrib.gis.utils.layermapping import LayerMapping, LayerMapError #", "LayerMapping requires DJMODELS_SETTINGS_MODULE to be set, # and ImproperlyConfigured is raised if that's", "from djmodels.contrib.gis.utils.ogrinfo import ogrinfo # NOQA from djmodels.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA", "DJMODELS_SETTINGS_MODULE to be set, # and ImproperlyConfigured is raised if that's not the", "not the case. from djmodels.contrib.gis.utils.layermapping import LayerMapping, LayerMapError # NOQA except ImproperlyConfigured: pass", "# LayerMapping requires DJMODELS_SETTINGS_MODULE to be set, # and ImproperlyConfigured is raised if", "NOQA from djmodels.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA from djmodels.contrib.gis.utils.srs import add_srs_entry #", "utilities for GeoDjango. \"\"\" from djmodels.contrib.gis.utils.ogrinfo import ogrinfo # NOQA from djmodels.contrib.gis.utils.ogrinspect import", "mapping, ogrinspect # NOQA from djmodels.contrib.gis.utils.srs import add_srs_entry # NOQA from djmodels.core.exceptions import", "for GeoDjango. \"\"\" from djmodels.contrib.gis.utils.ogrinfo import ogrinfo # NOQA from djmodels.contrib.gis.utils.ogrinspect import mapping,", "that's not the case. from djmodels.contrib.gis.utils.layermapping import LayerMapping, LayerMapError # NOQA except ImproperlyConfigured:", "import add_srs_entry # NOQA from djmodels.core.exceptions import ImproperlyConfigured try: # LayerMapping requires DJMODELS_SETTINGS_MODULE", "import mapping, ogrinspect # NOQA from djmodels.contrib.gis.utils.srs import add_srs_entry # NOQA from djmodels.core.exceptions", "requires DJMODELS_SETTINGS_MODULE to be set, # and ImproperlyConfigured is raised if that's not", "raised if that's not the case. from djmodels.contrib.gis.utils.layermapping import LayerMapping, LayerMapError # NOQA", "\"\"\" This module contains useful utilities for GeoDjango. \"\"\" from djmodels.contrib.gis.utils.ogrinfo import ogrinfo", "add_srs_entry # NOQA from djmodels.core.exceptions import ImproperlyConfigured try: # LayerMapping requires DJMODELS_SETTINGS_MODULE to", "ogrinspect # NOQA from djmodels.contrib.gis.utils.srs import add_srs_entry # NOQA from djmodels.core.exceptions import ImproperlyConfigured", "and ImproperlyConfigured is raised if that's not the case. from djmodels.contrib.gis.utils.layermapping import LayerMapping,", "useful utilities for GeoDjango. \"\"\" from djmodels.contrib.gis.utils.ogrinfo import ogrinfo # NOQA from djmodels.contrib.gis.utils.ogrinspect", "be set, # and ImproperlyConfigured is raised if that's not the case. from", "# NOQA from djmodels.contrib.gis.utils.srs import add_srs_entry # NOQA from djmodels.core.exceptions import ImproperlyConfigured try:", "ImproperlyConfigured is raised if that's not the case. from djmodels.contrib.gis.utils.layermapping import LayerMapping, LayerMapError", "from djmodels.core.exceptions import ImproperlyConfigured try: # LayerMapping requires DJMODELS_SETTINGS_MODULE to be set, #", "if that's not the case. 
from djmodels.contrib.gis.utils.layermapping import LayerMapping, LayerMapError # NOQA except", "\"\"\" from djmodels.contrib.gis.utils.ogrinfo import ogrinfo # NOQA from djmodels.contrib.gis.utils.ogrinspect import mapping, ogrinspect #", "djmodels.contrib.gis.utils.ogrinfo import ogrinfo # NOQA from djmodels.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA from", "from djmodels.contrib.gis.utils.srs import add_srs_entry # NOQA from djmodels.core.exceptions import ImproperlyConfigured try: # LayerMapping", "# NOQA from djmodels.core.exceptions import ImproperlyConfigured try: # LayerMapping requires DJMODELS_SETTINGS_MODULE to be", "This module contains useful utilities for GeoDjango. \"\"\" from djmodels.contrib.gis.utils.ogrinfo import ogrinfo #", "djmodels.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA from djmodels.contrib.gis.utils.srs import add_srs_entry # NOQA from", "ogrinfo # NOQA from djmodels.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA from djmodels.contrib.gis.utils.srs import", "import ImproperlyConfigured try: # LayerMapping requires DJMODELS_SETTINGS_MODULE to be set, # and ImproperlyConfigured", "GeoDjango. \"\"\" from djmodels.contrib.gis.utils.ogrinfo import ogrinfo # NOQA from djmodels.contrib.gis.utils.ogrinspect import mapping, ogrinspect", "djmodels.contrib.gis.utils.srs import add_srs_entry # NOQA from djmodels.core.exceptions import ImproperlyConfigured try: # LayerMapping requires", "from djmodels.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA from djmodels.contrib.gis.utils.srs import add_srs_entry # NOQA", "module contains useful utilities for GeoDjango. \"\"\" from djmodels.contrib.gis.utils.ogrinfo import ogrinfo # NOQA", "djmodels.core.exceptions import ImproperlyConfigured try: # LayerMapping requires DJMODELS_SETTINGS_MODULE to be set, # and", "set, # and ImproperlyConfigured is raised if that's not the case. from djmodels.contrib.gis.utils.layermapping", "# NOQA from djmodels.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA from djmodels.contrib.gis.utils.srs import add_srs_entry", "to be set, # and ImproperlyConfigured is raised if that's not the case.", "# and ImproperlyConfigured is raised if that's not the case. from djmodels.contrib.gis.utils.layermapping import", "NOQA from djmodels.core.exceptions import ImproperlyConfigured try: # LayerMapping requires DJMODELS_SETTINGS_MODULE to be set,", "try: # LayerMapping requires DJMODELS_SETTINGS_MODULE to be set, # and ImproperlyConfigured is raised", "ImproperlyConfigured try: # LayerMapping requires DJMODELS_SETTINGS_MODULE to be set, # and ImproperlyConfigured is", "contains useful utilities for GeoDjango. \"\"\" from djmodels.contrib.gis.utils.ogrinfo import ogrinfo # NOQA from", "NOQA from djmodels.contrib.gis.utils.srs import add_srs_entry # NOQA from djmodels.core.exceptions import ImproperlyConfigured try: #", "import ogrinfo # NOQA from djmodels.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA from djmodels.contrib.gis.utils.srs" ]
[ "charactercreator_character \"\"\" CLASS_COUNT = \"\"\" SELECT (SELECT COUNT(*) FROM charactercreator_cleric) AS cleric, (SELECT", "__name__ == \"__main__\": conn = connect_to_db() curs = conn.cursor() char_count = execute_query(curs, CHARACTER_COUNT)", "connect_to_db() curs = conn.cursor() char_count = execute_query(curs, CHARACTER_COUNT) results = execute_query(curs, GET_CHARACTERS) class_count", "char_wep_count = execute_query(curs, CHAR_WEP_COUNT) avg_items = execute_query(curs, AVG_ITEMS) avg_weapons = execute_query(curs, AVG_WEAPONS) print(results[0])", "print(\"Average Number of Items Per Character:\", avg_items) print(\"Average Number of Weapons Per Character:\",", "SELECT COUNT(*) FROM charactercreator_character \"\"\" CLASS_COUNT = \"\"\" SELECT (SELECT COUNT(*) FROM charactercreator_cleric)", "SELECT COUNT(*) FROM armory_item \"\"\" WEP_COUNT = \"\"\" SELECT COUNT(*) name FROM armory_item", "\"\"\" if __name__ == \"__main__\": conn = connect_to_db() curs = conn.cursor() char_count =", "charactercreator_character_inventory.character_id ) \"\"\" if __name__ == \"__main__\": conn = connect_to_db() curs = conn.cursor()", "JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" AVG_ITEMS =", "armory_item ON charactercreator_character_inventory.item_id = armory_item.item_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" if __name__ ==", "charactercreator_character_inventory.character_id LIMIT 20 \"\"\" AVG_WEAPONS = \"\"\" SELECT AVG(num_weapons) FROM ( SELECT charactercreator_character_inventory.character_id,", "ITEM_COUNT = \"\"\" SELECT COUNT(*) FROM armory_item \"\"\" WEP_COUNT = \"\"\" SELECT COUNT(*)", "SELECT AVG(num_items) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_items FROM charactercreator_character_inventory INNER JOIN", "\"\"\" SELECT COUNT(*) FROM armory_item \"\"\" WEP_COUNT = \"\"\" SELECT COUNT(*) name FROM", "COUNT(*) FROM charactercreator_thief) AS theif \"\"\" ITEM_COUNT = \"\"\" SELECT COUNT(*) FROM armory_item", "SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_weapons FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id =", "(SELECT COUNT(*) FROM armory_weapon ) \"\"\" CHAR_ITEM_COUNT = \"\"\" SELECT character_id, COUNT(*) FROM", "FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_items FROM charactercreator_character_inventory INNER JOIN armory_item ON", "BY charactercreator_character_inventory.character_id ) \"\"\" if __name__ == \"__main__\": conn = connect_to_db() curs =", "CLASS_COUNT = \"\"\" SELECT (SELECT COUNT(*) FROM charactercreator_cleric) AS cleric, (SELECT COUNT(*) FROM", "ITEMS_NO_WEPS = \"\"\" SELECT( SELECT COUNT(*) FROM armory_item ) - (SELECT COUNT(*) FROM", "GROUP BY item_id LIMIT 20; \"\"\" CHAR_WEP_COUNT = \"\"\" SELECT charactercreator_character_inventory.character_id, COUNT(*) FROM", "avg_weapons = execute_query(curs, AVG_WEAPONS) print(results[0]) print(\"Character Count:\", char_count) print(\"Class Count (cleric, fighter, mage,", "CLASS_COUNT) item_count = execute_query(curs, ITEM_COUNT) wep_count = execute_query(curs, WEP_COUNT) items_no_weps = execute_query(curs, ITEMS_NO_WEPS)", "= \"\"\" SELECT COUNT(*) FROM armory_item \"\"\" WEP_COUNT = \"\"\" SELECT COUNT(*) name", "charactercreator_character_inventory.character_id ) \"\"\" AVG_ITEMS = \"\"\" SELECT 
AVG(num_items) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*)", "( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_weapons FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id", "= execute_query(curs, CHAR_ITEM_COUNT) char_wep_count = execute_query(curs, CHAR_WEP_COUNT) avg_items = execute_query(curs, AVG_ITEMS) avg_weapons =", "(SELECT COUNT(*) FROM charactercreator_fighter) AS fighter, (SELECT COUNT(*) FROM charactercreator_mage) AS mage, (SELECT", "ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" AVG_ITEMS = \"\"\" SELECT", "num_weapons FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id", "ID:\", char_wep_count) print(\"Average Number of Items Per Character:\", avg_items) print(\"Average Number of Weapons", "= armory_item.item_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" if __name__ == \"__main__\": conn =", "\"\"\" CHAR_ITEM_COUNT = \"\"\" SELECT character_id, COUNT(*) FROM charactercreator_character_inventory GROUP BY item_id LIMIT", "\"\"\" CHARACTER_COUNT = \"\"\" SELECT COUNT(*) FROM charactercreator_character \"\"\" CLASS_COUNT = \"\"\" SELECT", "GROUP BY charactercreator_character_inventory.character_id ) \"\"\" AVG_ITEMS = \"\"\" SELECT AVG(num_items) FROM ( SELECT", "AS fighter, (SELECT COUNT(*) FROM charactercreator_mage) AS mage, (SELECT COUNT(*) FROM charactercreator_necromancer) AS", "= execute_query(curs, CHAR_WEP_COUNT) avg_items = execute_query(curs, AVG_ITEMS) avg_weapons = execute_query(curs, AVG_WEAPONS) print(results[0]) print(\"Character", "query): cursor.execute(query) return cursor.fetchall() GET_CHARACTERS = \"\"\" SELECT * FROM charactercreator_character \"\"\" CHARACTER_COUNT", "results = execute_query(curs, GET_CHARACTERS) class_count = execute_query(curs, CLASS_COUNT) item_count = execute_query(curs, ITEM_COUNT) wep_count", "CHAR_WEP_COUNT) avg_items = execute_query(curs, AVG_ITEMS) avg_weapons = execute_query(curs, AVG_WEAPONS) print(results[0]) print(\"Character Count:\", char_count)", ") - (SELECT COUNT(*) FROM armory_weapon ) \"\"\" CHAR_ITEM_COUNT = \"\"\" SELECT character_id,", "cursor.execute(query) return cursor.fetchall() GET_CHARACTERS = \"\"\" SELECT * FROM charactercreator_character \"\"\" CHARACTER_COUNT =", "= \"\"\" SELECT( SELECT COUNT(*) FROM armory_item ) - (SELECT COUNT(*) FROM armory_weapon", "FROM charactercreator_character \"\"\" CHARACTER_COUNT = \"\"\" SELECT COUNT(*) FROM charactercreator_character \"\"\" CLASS_COUNT =", "FROM armory_item INNER JOIN armory_weapon ON armory_item.item_id = armory_weapon.item_ptr_id \"\"\" ITEMS_NO_WEPS = \"\"\"", "CHARACTER_COUNT = \"\"\" SELECT COUNT(*) FROM charactercreator_character \"\"\" CLASS_COUNT = \"\"\" SELECT (SELECT", "GET_CHARACTERS) class_count = execute_query(curs, CLASS_COUNT) item_count = execute_query(curs, ITEM_COUNT) wep_count = execute_query(curs, WEP_COUNT)", "SELECT COUNT(*) name FROM armory_item INNER JOIN armory_weapon ON armory_item.item_id = armory_weapon.item_ptr_id \"\"\"", "FROM armory_weapon ) \"\"\" CHAR_ITEM_COUNT = \"\"\" SELECT character_id, COUNT(*) FROM charactercreator_character_inventory GROUP", "= execute_query(curs, AVG_ITEMS) avg_weapons = execute_query(curs, AVG_WEAPONS) 
print(results[0]) print(\"Character Count:\", char_count) print(\"Class Count", "charactercreator_thief) AS theif \"\"\" ITEM_COUNT = \"\"\" SELECT COUNT(*) FROM armory_item \"\"\" WEP_COUNT", "\"\"\" AVG_WEAPONS = \"\"\" SELECT AVG(num_weapons) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_weapons", "\"\"\" CLASS_COUNT = \"\"\" SELECT (SELECT COUNT(*) FROM charactercreator_cleric) AS cleric, (SELECT COUNT(*)", "INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" AVG_ITEMS", "= execute_query(curs, ITEMS_NO_WEPS) char_item_count = execute_query(curs, CHAR_ITEM_COUNT) char_wep_count = execute_query(curs, CHAR_WEP_COUNT) avg_items =", "= execute_query(curs, AVG_WEAPONS) print(results[0]) print(\"Character Count:\", char_count) print(\"Class Count (cleric, fighter, mage, necromancer,", "= \"\"\" SELECT (SELECT COUNT(*) FROM charactercreator_cleric) AS cleric, (SELECT COUNT(*) FROM charactercreator_fighter)", "= \"\"\" SELECT charactercreator_character_inventory.character_id, COUNT(*) FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id =", "per character ID:\", char_item_count) print(\"Weapons per character ID:\", char_wep_count) print(\"Average Number of Items", "print(\"Items without Weapons:\", items_no_weps) print(\"Items per character ID:\", char_item_count) print(\"Weapons per character ID:\",", "cleric, (SELECT COUNT(*) FROM charactercreator_fighter) AS fighter, (SELECT COUNT(*) FROM charactercreator_mage) AS mage,", "charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" AVG_ITEMS = \"\"\" SELECT AVG(num_items)", "per character ID:\", char_wep_count) print(\"Average Number of Items Per Character:\", avg_items) print(\"Average Number", "armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" AVG_ITEMS = \"\"\" SELECT AVG(num_items) FROM (", "ITEM_COUNT) wep_count = execute_query(curs, WEP_COUNT) items_no_weps = execute_query(curs, ITEMS_NO_WEPS) char_item_count = execute_query(curs, CHAR_ITEM_COUNT)", "= \"\"\" SELECT AVG(num_items) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_items FROM charactercreator_character_inventory", "mage, necromancer, theif):\", class_count) print(\"Item Count\", item_count) print(\"Weapon Count:\", wep_count) print(\"Items without Weapons:\",", "char_wep_count) print(\"Average Number of Items Per Character:\", avg_items) print(\"Average Number of Weapons Per", "charactercreator_character_inventory.character_id, COUNT(*) FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY", "name FROM armory_item INNER JOIN armory_weapon ON armory_item.item_id = armory_weapon.item_ptr_id \"\"\" ITEMS_NO_WEPS =", "execute_query(cursor, query): cursor.execute(query) return cursor.fetchall() GET_CHARACTERS = \"\"\" SELECT * FROM charactercreator_character \"\"\"", "character ID:\", char_item_count) print(\"Weapons per character ID:\", char_wep_count) print(\"Average Number of Items Per", "20; \"\"\" CHAR_WEP_COUNT = \"\"\" SELECT charactercreator_character_inventory.character_id, COUNT(*) FROM charactercreator_character_inventory INNER JOIN armory_weapon", "SELECT( SELECT COUNT(*) FROM armory_item ) - (SELECT 
COUNT(*) FROM armory_weapon ) \"\"\"", "COUNT(*) FROM charactercreator_necromancer) AS necromancer, (SELECT COUNT(*) FROM charactercreator_thief) AS theif \"\"\" ITEM_COUNT", "def connect_to_db(db_name=\"rpg_db.sqlite3\"): return sqlite3.connect(db_name) def execute_query(cursor, query): cursor.execute(query) return cursor.fetchall() GET_CHARACTERS = \"\"\"", "ITEMS_NO_WEPS) char_item_count = execute_query(curs, CHAR_ITEM_COUNT) char_wep_count = execute_query(curs, CHAR_WEP_COUNT) avg_items = execute_query(curs, AVG_ITEMS)", "= \"\"\" SELECT COUNT(*) FROM charactercreator_character \"\"\" CLASS_COUNT = \"\"\" SELECT (SELECT COUNT(*)", "FROM charactercreator_character_inventory INNER JOIN armory_item ON charactercreator_character_inventory.item_id = armory_item.item_id GROUP BY charactercreator_character_inventory.character_id )", "ID:\", char_item_count) print(\"Weapons per character ID:\", char_wep_count) print(\"Average Number of Items Per Character:\",", "CHARACTER_COUNT) results = execute_query(curs, GET_CHARACTERS) class_count = execute_query(curs, CLASS_COUNT) item_count = execute_query(curs, ITEM_COUNT)", "\"__main__\": conn = connect_to_db() curs = conn.cursor() char_count = execute_query(curs, CHARACTER_COUNT) results =", "= connect_to_db() curs = conn.cursor() char_count = execute_query(curs, CHARACTER_COUNT) results = execute_query(curs, GET_CHARACTERS)", "SELECT AVG(num_weapons) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_weapons FROM charactercreator_character_inventory INNER JOIN", "item_count = execute_query(curs, ITEM_COUNT) wep_count = execute_query(curs, WEP_COUNT) items_no_weps = execute_query(curs, ITEMS_NO_WEPS) char_item_count", "execute_query(curs, WEP_COUNT) items_no_weps = execute_query(curs, ITEMS_NO_WEPS) char_item_count = execute_query(curs, CHAR_ITEM_COUNT) char_wep_count = execute_query(curs,", "execute_query(curs, CHAR_ITEM_COUNT) char_wep_count = execute_query(curs, CHAR_WEP_COUNT) avg_items = execute_query(curs, AVG_ITEMS) avg_weapons = execute_query(curs,", "AVG_ITEMS = \"\"\" SELECT AVG(num_items) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_items FROM", "= armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id LIMIT 20 \"\"\" AVG_WEAPONS = \"\"\" SELECT AVG(num_weapons)", "charactercreator_character_inventory.item_id = armory_item.item_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" if __name__ == \"__main__\": conn", "COUNT(*) FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id", "FROM armory_item ) - (SELECT COUNT(*) FROM armory_weapon ) \"\"\" CHAR_ITEM_COUNT = \"\"\"", "items_no_weps = execute_query(curs, ITEMS_NO_WEPS) char_item_count = execute_query(curs, CHAR_ITEM_COUNT) char_wep_count = execute_query(curs, CHAR_WEP_COUNT) avg_items", "CHAR_WEP_COUNT = \"\"\" SELECT charactercreator_character_inventory.character_id, COUNT(*) FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id", "print(\"Character Count:\", char_count) print(\"Class Count (cleric, fighter, mage, necromancer, theif):\", class_count) print(\"Item Count\",", "cursor.fetchall() GET_CHARACTERS = \"\"\" SELECT * FROM charactercreator_character \"\"\" CHARACTER_COUNT = \"\"\" SELECT", "ON charactercreator_character_inventory.item_id = armory_item.item_id 
GROUP BY charactercreator_character_inventory.character_id ) \"\"\" if __name__ == \"__main__\":", "print(\"Weapon Count:\", wep_count) print(\"Items without Weapons:\", items_no_weps) print(\"Items per character ID:\", char_item_count) print(\"Weapons", "charactercreator_character_inventory.character_id, COUNT(*) AS num_weapons FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id", "= execute_query(curs, WEP_COUNT) items_no_weps = execute_query(curs, ITEMS_NO_WEPS) char_item_count = execute_query(curs, CHAR_ITEM_COUNT) char_wep_count =", "= execute_query(curs, GET_CHARACTERS) class_count = execute_query(curs, CLASS_COUNT) item_count = execute_query(curs, ITEM_COUNT) wep_count =", "SELECT (SELECT COUNT(*) FROM charactercreator_cleric) AS cleric, (SELECT COUNT(*) FROM charactercreator_fighter) AS fighter,", "armory_item.item_id = armory_weapon.item_ptr_id \"\"\" ITEMS_NO_WEPS = \"\"\" SELECT( SELECT COUNT(*) FROM armory_item )", "\"\"\" ITEM_COUNT = \"\"\" SELECT COUNT(*) FROM armory_item \"\"\" WEP_COUNT = \"\"\" SELECT", "CHAR_ITEM_COUNT) char_wep_count = execute_query(curs, CHAR_WEP_COUNT) avg_items = execute_query(curs, AVG_ITEMS) avg_weapons = execute_query(curs, AVG_WEAPONS)", "\"\"\" AVG_ITEMS = \"\"\" SELECT AVG(num_items) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_items", "Count:\", wep_count) print(\"Items without Weapons:\", items_no_weps) print(\"Items per character ID:\", char_item_count) print(\"Weapons per", "armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id LIMIT 20 \"\"\" AVG_WEAPONS =", "charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\"", "armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id LIMIT 20 \"\"\" AVG_WEAPONS = \"\"\" SELECT AVG(num_weapons) FROM", "COUNT(*) FROM charactercreator_character_inventory GROUP BY item_id LIMIT 20; \"\"\" CHAR_WEP_COUNT = \"\"\" SELECT", "= \"\"\" SELECT character_id, COUNT(*) FROM charactercreator_character_inventory GROUP BY item_id LIMIT 20; \"\"\"", "fighter, (SELECT COUNT(*) FROM charactercreator_mage) AS mage, (SELECT COUNT(*) FROM charactercreator_necromancer) AS necromancer,", "necromancer, (SELECT COUNT(*) FROM charactercreator_thief) AS theif \"\"\" ITEM_COUNT = \"\"\" SELECT COUNT(*)", "AS necromancer, (SELECT COUNT(*) FROM charactercreator_thief) AS theif \"\"\" ITEM_COUNT = \"\"\" SELECT", "armory_weapon ON armory_item.item_id = armory_weapon.item_ptr_id \"\"\" ITEMS_NO_WEPS = \"\"\" SELECT( SELECT COUNT(*) FROM", "SELECT charactercreator_character_inventory.character_id, COUNT(*) FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP", "execute_query(curs, CHARACTER_COUNT) results = execute_query(curs, GET_CHARACTERS) class_count = execute_query(curs, CLASS_COUNT) item_count = execute_query(curs,", "execute_query(curs, CLASS_COUNT) item_count = execute_query(curs, ITEM_COUNT) wep_count = execute_query(curs, WEP_COUNT) items_no_weps = execute_query(curs,", "BY item_id LIMIT 20; \"\"\" CHAR_WEP_COUNT = \"\"\" SELECT charactercreator_character_inventory.character_id, COUNT(*) FROM charactercreator_character_inventory", 
"execute_query(curs, CHAR_WEP_COUNT) avg_items = execute_query(curs, AVG_ITEMS) avg_weapons = execute_query(curs, AVG_WEAPONS) print(results[0]) print(\"Character Count:\",", "char_item_count) print(\"Weapons per character ID:\", char_wep_count) print(\"Average Number of Items Per Character:\", avg_items)", "\"\"\" SELECT character_id, COUNT(*) FROM charactercreator_character_inventory GROUP BY item_id LIMIT 20; \"\"\" CHAR_WEP_COUNT", "ON armory_item.item_id = armory_weapon.item_ptr_id \"\"\" ITEMS_NO_WEPS = \"\"\" SELECT( SELECT COUNT(*) FROM armory_item", "execute_query(curs, ITEMS_NO_WEPS) char_item_count = execute_query(curs, CHAR_ITEM_COUNT) char_wep_count = execute_query(curs, CHAR_WEP_COUNT) avg_items = execute_query(curs,", "charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id LIMIT 20", "character ID:\", char_wep_count) print(\"Average Number of Items Per Character:\", avg_items) print(\"Average Number of", "wep_count = execute_query(curs, WEP_COUNT) items_no_weps = execute_query(curs, ITEMS_NO_WEPS) char_item_count = execute_query(curs, CHAR_ITEM_COUNT) char_wep_count", "conn.cursor() char_count = execute_query(curs, CHARACTER_COUNT) results = execute_query(curs, GET_CHARACTERS) class_count = execute_query(curs, CLASS_COUNT)", "wep_count) print(\"Items without Weapons:\", items_no_weps) print(\"Items per character ID:\", char_item_count) print(\"Weapons per character", "COUNT(*) FROM charactercreator_character \"\"\" CLASS_COUNT = \"\"\" SELECT (SELECT COUNT(*) FROM charactercreator_cleric) AS", "* FROM charactercreator_character \"\"\" CHARACTER_COUNT = \"\"\" SELECT COUNT(*) FROM charactercreator_character \"\"\" CLASS_COUNT", "= execute_query(curs, ITEM_COUNT) wep_count = execute_query(curs, WEP_COUNT) items_no_weps = execute_query(curs, ITEMS_NO_WEPS) char_item_count =", "avg_items = execute_query(curs, AVG_ITEMS) avg_weapons = execute_query(curs, AVG_WEAPONS) print(results[0]) print(\"Character Count:\", char_count) print(\"Class", "FROM charactercreator_character \"\"\" CLASS_COUNT = \"\"\" SELECT (SELECT COUNT(*) FROM charactercreator_cleric) AS cleric,", "charactercreator_character_inventory INNER JOIN armory_item ON charactercreator_character_inventory.item_id = armory_item.item_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\"", "COUNT(*) FROM charactercreator_mage) AS mage, (SELECT COUNT(*) FROM charactercreator_necromancer) AS necromancer, (SELECT COUNT(*)", "FROM charactercreator_cleric) AS cleric, (SELECT COUNT(*) FROM charactercreator_fighter) AS fighter, (SELECT COUNT(*) FROM", "\"\"\" CHAR_WEP_COUNT = \"\"\" SELECT charactercreator_character_inventory.character_id, COUNT(*) FROM charactercreator_character_inventory INNER JOIN armory_weapon ON", "armory_weapon ) \"\"\" CHAR_ITEM_COUNT = \"\"\" SELECT character_id, COUNT(*) FROM charactercreator_character_inventory GROUP BY", "armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" AVG_ITEMS = \"\"\"", "GROUP BY charactercreator_character_inventory.character_id ) \"\"\" if __name__ == \"__main__\": conn = connect_to_db() curs", "AVG_ITEMS) avg_weapons = execute_query(curs, AVG_WEAPONS) print(results[0]) print(\"Character Count:\", char_count) print(\"Class Count (cleric, fighter,", "charactercreator_cleric) AS cleric, (SELECT COUNT(*) FROM 
charactercreator_fighter) AS fighter, (SELECT COUNT(*) FROM charactercreator_mage)", "execute_query(curs, AVG_WEAPONS) print(results[0]) print(\"Character Count:\", char_count) print(\"Class Count (cleric, fighter, mage, necromancer, theif):\",", "armory_item ) - (SELECT COUNT(*) FROM armory_weapon ) \"\"\" CHAR_ITEM_COUNT = \"\"\" SELECT", "print(results[0]) print(\"Character Count:\", char_count) print(\"Class Count (cleric, fighter, mage, necromancer, theif):\", class_count) print(\"Item", "(SELECT COUNT(*) FROM charactercreator_cleric) AS cleric, (SELECT COUNT(*) FROM charactercreator_fighter) AS fighter, (SELECT", "AS theif \"\"\" ITEM_COUNT = \"\"\" SELECT COUNT(*) FROM armory_item \"\"\" WEP_COUNT =", "execute_query(curs, ITEM_COUNT) wep_count = execute_query(curs, WEP_COUNT) items_no_weps = execute_query(curs, ITEMS_NO_WEPS) char_item_count = execute_query(curs,", "char_count) print(\"Class Count (cleric, fighter, mage, necromancer, theif):\", class_count) print(\"Item Count\", item_count) print(\"Weapon", "FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id )", "FROM charactercreator_fighter) AS fighter, (SELECT COUNT(*) FROM charactercreator_mage) AS mage, (SELECT COUNT(*) FROM", "GROUP BY charactercreator_character_inventory.character_id LIMIT 20 \"\"\" AVG_WEAPONS = \"\"\" SELECT AVG(num_weapons) FROM (", "= armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" AVG_ITEMS = \"\"\" SELECT AVG(num_items) FROM", "ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id LIMIT 20 \"\"\" AVG_WEAPONS = \"\"\"", "return sqlite3.connect(db_name) def execute_query(cursor, query): cursor.execute(query) return cursor.fetchall() GET_CHARACTERS = \"\"\" SELECT *", "INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id LIMIT 20 \"\"\"", "class_count = execute_query(curs, CLASS_COUNT) item_count = execute_query(curs, ITEM_COUNT) wep_count = execute_query(curs, WEP_COUNT) items_no_weps", "LIMIT 20; \"\"\" CHAR_WEP_COUNT = \"\"\" SELECT charactercreator_character_inventory.character_id, COUNT(*) FROM charactercreator_character_inventory INNER JOIN", "AVG_WEAPONS = \"\"\" SELECT AVG(num_weapons) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_weapons FROM", "GET_CHARACTERS = \"\"\" SELECT * FROM charactercreator_character \"\"\" CHARACTER_COUNT = \"\"\" SELECT COUNT(*)", "char_item_count = execute_query(curs, CHAR_ITEM_COUNT) char_wep_count = execute_query(curs, CHAR_WEP_COUNT) avg_items = execute_query(curs, AVG_ITEMS) avg_weapons", "\"\"\" ITEMS_NO_WEPS = \"\"\" SELECT( SELECT COUNT(*) FROM armory_item ) - (SELECT COUNT(*)", "(SELECT COUNT(*) FROM charactercreator_mage) AS mage, (SELECT COUNT(*) FROM charactercreator_necromancer) AS necromancer, (SELECT", "LIMIT 20 \"\"\" AVG_WEAPONS = \"\"\" SELECT AVG(num_weapons) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*)", "return cursor.fetchall() GET_CHARACTERS = \"\"\" SELECT * FROM charactercreator_character \"\"\" CHARACTER_COUNT = \"\"\"", "\"\"\" SELECT COUNT(*) FROM charactercreator_character \"\"\" CLASS_COUNT = \"\"\" SELECT (SELECT COUNT(*) FROM", "FROM charactercreator_thief) AS theif \"\"\" ITEM_COUNT = \"\"\" 
SELECT COUNT(*) FROM armory_item \"\"\"", "COUNT(*) AS num_weapons FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP", "theif \"\"\" ITEM_COUNT = \"\"\" SELECT COUNT(*) FROM armory_item \"\"\" WEP_COUNT = \"\"\"", "\"\"\" SELECT charactercreator_character_inventory.character_id, COUNT(*) FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id", "JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id LIMIT 20 \"\"\" AVG_WEAPONS", "armory_item \"\"\" WEP_COUNT = \"\"\" SELECT COUNT(*) name FROM armory_item INNER JOIN armory_weapon", "COUNT(*) AS num_items FROM charactercreator_character_inventory INNER JOIN armory_item ON charactercreator_character_inventory.item_id = armory_item.item_id GROUP", "- (SELECT COUNT(*) FROM armory_weapon ) \"\"\" CHAR_ITEM_COUNT = \"\"\" SELECT character_id, COUNT(*)", "mage, (SELECT COUNT(*) FROM charactercreator_necromancer) AS necromancer, (SELECT COUNT(*) FROM charactercreator_thief) AS theif", "charactercreator_necromancer) AS necromancer, (SELECT COUNT(*) FROM charactercreator_thief) AS theif \"\"\" ITEM_COUNT = \"\"\"", "if __name__ == \"__main__\": conn = connect_to_db() curs = conn.cursor() char_count = execute_query(curs,", "\"\"\" SELECT AVG(num_items) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_items FROM charactercreator_character_inventory INNER", "= \"\"\" SELECT AVG(num_weapons) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_weapons FROM charactercreator_character_inventory", "= conn.cursor() char_count = execute_query(curs, CHARACTER_COUNT) results = execute_query(curs, GET_CHARACTERS) class_count = execute_query(curs,", "print(\"Items per character ID:\", char_item_count) print(\"Weapons per character ID:\", char_wep_count) print(\"Average Number of", "FROM charactercreator_character_inventory GROUP BY item_id LIMIT 20; \"\"\" CHAR_WEP_COUNT = \"\"\" SELECT charactercreator_character_inventory.character_id,", "SELECT character_id, COUNT(*) FROM charactercreator_character_inventory GROUP BY item_id LIMIT 20; \"\"\" CHAR_WEP_COUNT =", "COUNT(*) FROM charactercreator_cleric) AS cleric, (SELECT COUNT(*) FROM charactercreator_fighter) AS fighter, (SELECT COUNT(*)", "(cleric, fighter, mage, necromancer, theif):\", class_count) print(\"Item Count\", item_count) print(\"Weapon Count:\", wep_count) print(\"Items", "Count:\", char_count) print(\"Class Count (cleric, fighter, mage, necromancer, theif):\", class_count) print(\"Item Count\", item_count)", "COUNT(*) FROM charactercreator_fighter) AS fighter, (SELECT COUNT(*) FROM charactercreator_mage) AS mage, (SELECT COUNT(*)", "sqlite3 def connect_to_db(db_name=\"rpg_db.sqlite3\"): return sqlite3.connect(db_name) def execute_query(cursor, query): cursor.execute(query) return cursor.fetchall() GET_CHARACTERS =", "armory_item INNER JOIN armory_weapon ON armory_item.item_id = armory_weapon.item_ptr_id \"\"\" ITEMS_NO_WEPS = \"\"\" SELECT(", "AVG_WEAPONS) print(results[0]) print(\"Character Count:\", char_count) print(\"Class Count (cleric, fighter, mage, necromancer, theif):\", class_count)", "WEP_COUNT) items_no_weps = execute_query(curs, ITEMS_NO_WEPS) char_item_count = execute_query(curs, CHAR_ITEM_COUNT) char_wep_count = execute_query(curs, 
CHAR_WEP_COUNT)", "necromancer, theif):\", class_count) print(\"Item Count\", item_count) print(\"Weapon Count:\", wep_count) print(\"Items without Weapons:\", items_no_weps)", "AVG(num_weapons) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_weapons FROM charactercreator_character_inventory INNER JOIN armory_weapon", "\"\"\" SELECT (SELECT COUNT(*) FROM charactercreator_cleric) AS cleric, (SELECT COUNT(*) FROM charactercreator_fighter) AS", "armory_weapon.item_ptr_id \"\"\" ITEMS_NO_WEPS = \"\"\" SELECT( SELECT COUNT(*) FROM armory_item ) - (SELECT", "items_no_weps) print(\"Items per character ID:\", char_item_count) print(\"Weapons per character ID:\", char_wep_count) print(\"Average Number", "FROM armory_item \"\"\" WEP_COUNT = \"\"\" SELECT COUNT(*) name FROM armory_item INNER JOIN", "= \"\"\" SELECT * FROM charactercreator_character \"\"\" CHARACTER_COUNT = \"\"\" SELECT COUNT(*) FROM", "= execute_query(curs, CHARACTER_COUNT) results = execute_query(curs, GET_CHARACTERS) class_count = execute_query(curs, CLASS_COUNT) item_count =", "JOIN armory_item ON charactercreator_character_inventory.item_id = armory_item.item_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" if __name__", "== \"__main__\": conn = connect_to_db() curs = conn.cursor() char_count = execute_query(curs, CHARACTER_COUNT) results", "FROM charactercreator_necromancer) AS necromancer, (SELECT COUNT(*) FROM charactercreator_thief) AS theif \"\"\" ITEM_COUNT =", "print(\"Item Count\", item_count) print(\"Weapon Count:\", wep_count) print(\"Items without Weapons:\", items_no_weps) print(\"Items per character", "FROM charactercreator_mage) AS mage, (SELECT COUNT(*) FROM charactercreator_necromancer) AS necromancer, (SELECT COUNT(*) FROM", "\"\"\" SELECT * FROM charactercreator_character \"\"\" CHARACTER_COUNT = \"\"\" SELECT COUNT(*) FROM charactercreator_character", "= \"\"\" SELECT COUNT(*) name FROM armory_item INNER JOIN armory_weapon ON armory_item.item_id =", "COUNT(*) name FROM armory_item INNER JOIN armory_weapon ON armory_item.item_id = armory_weapon.item_ptr_id \"\"\" ITEMS_NO_WEPS", "COUNT(*) FROM armory_item ) - (SELECT COUNT(*) FROM armory_weapon ) \"\"\" CHAR_ITEM_COUNT =", "curs = conn.cursor() char_count = execute_query(curs, CHARACTER_COUNT) results = execute_query(curs, GET_CHARACTERS) class_count =", "= execute_query(curs, CLASS_COUNT) item_count = execute_query(curs, ITEM_COUNT) wep_count = execute_query(curs, WEP_COUNT) items_no_weps =", "without Weapons:\", items_no_weps) print(\"Items per character ID:\", char_item_count) print(\"Weapons per character ID:\", char_wep_count)", "\"\"\" SELECT COUNT(*) name FROM armory_item INNER JOIN armory_weapon ON armory_item.item_id = armory_weapon.item_ptr_id", "charactercreator_character_inventory GROUP BY item_id LIMIT 20; \"\"\" CHAR_WEP_COUNT = \"\"\" SELECT charactercreator_character_inventory.character_id, COUNT(*)", "sqlite3.connect(db_name) def execute_query(cursor, query): cursor.execute(query) return cursor.fetchall() GET_CHARACTERS = \"\"\" SELECT * FROM", "num_items FROM charactercreator_character_inventory INNER JOIN armory_item ON charactercreator_character_inventory.item_id = armory_item.item_id GROUP BY charactercreator_character_inventory.character_id", "COUNT(*) FROM armory_item \"\"\" WEP_COUNT = \"\"\" SELECT COUNT(*) name FROM armory_item INNER", "print(\"Class Count (cleric, fighter, mage, necromancer, theif):\", class_count) print(\"Item Count\", item_count) print(\"Weapon Count:\",", 
"Weapons:\", items_no_weps) print(\"Items per character ID:\", char_item_count) print(\"Weapons per character ID:\", char_wep_count) print(\"Average", "SELECT * FROM charactercreator_character \"\"\" CHARACTER_COUNT = \"\"\" SELECT COUNT(*) FROM charactercreator_character \"\"\"", "20 \"\"\" AVG_WEAPONS = \"\"\" SELECT AVG(num_weapons) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS", "SELECT COUNT(*) FROM armory_item ) - (SELECT COUNT(*) FROM armory_weapon ) \"\"\" CHAR_ITEM_COUNT", "Count\", item_count) print(\"Weapon Count:\", wep_count) print(\"Items without Weapons:\", items_no_weps) print(\"Items per character ID:\",", "\"\"\" SELECT( SELECT COUNT(*) FROM armory_item ) - (SELECT COUNT(*) FROM armory_weapon )", "BY charactercreator_character_inventory.character_id LIMIT 20 \"\"\" AVG_WEAPONS = \"\"\" SELECT AVG(num_weapons) FROM ( SELECT", "connect_to_db(db_name=\"rpg_db.sqlite3\"): return sqlite3.connect(db_name) def execute_query(cursor, query): cursor.execute(query) return cursor.fetchall() GET_CHARACTERS = \"\"\" SELECT", "BY charactercreator_character_inventory.character_id ) \"\"\" AVG_ITEMS = \"\"\" SELECT AVG(num_items) FROM ( SELECT charactercreator_character_inventory.character_id,", "armory_item.item_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" if __name__ == \"__main__\": conn = connect_to_db()", "INNER JOIN armory_weapon ON armory_item.item_id = armory_weapon.item_ptr_id \"\"\" ITEMS_NO_WEPS = \"\"\" SELECT( SELECT", "print(\"Weapons per character ID:\", char_wep_count) print(\"Average Number of Items Per Character:\", avg_items) print(\"Average", "item_id LIMIT 20; \"\"\" CHAR_WEP_COUNT = \"\"\" SELECT charactercreator_character_inventory.character_id, COUNT(*) FROM charactercreator_character_inventory INNER", "execute_query(curs, GET_CHARACTERS) class_count = execute_query(curs, CLASS_COUNT) item_count = execute_query(curs, ITEM_COUNT) wep_count = execute_query(curs,", "AS num_items FROM charactercreator_character_inventory INNER JOIN armory_item ON charactercreator_character_inventory.item_id = armory_item.item_id GROUP BY", "AVG(num_items) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_items FROM charactercreator_character_inventory INNER JOIN armory_item", "SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_items FROM charactercreator_character_inventory INNER JOIN armory_item ON charactercreator_character_inventory.item_id =", "execute_query(curs, AVG_ITEMS) avg_weapons = execute_query(curs, AVG_WEAPONS) print(results[0]) print(\"Character Count:\", char_count) print(\"Class Count (cleric,", "FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id LIMIT", "item_count) print(\"Weapon Count:\", wep_count) print(\"Items without Weapons:\", items_no_weps) print(\"Items per character ID:\", char_item_count)", "class_count) print(\"Item Count\", item_count) print(\"Weapon Count:\", wep_count) print(\"Items without Weapons:\", items_no_weps) print(\"Items per", "AS num_weapons FROM charactercreator_character_inventory INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY", "Count (cleric, fighter, mage, necromancer, theif):\", class_count) print(\"Item Count\", item_count) print(\"Weapon Count:\", wep_count)", ") \"\"\" CHAR_ITEM_COUNT = \"\"\" SELECT character_id, 
COUNT(*) FROM charactercreator_character_inventory GROUP BY item_id", "fighter, mage, necromancer, theif):\", class_count) print(\"Item Count\", item_count) print(\"Weapon Count:\", wep_count) print(\"Items without", "(SELECT COUNT(*) FROM charactercreator_necromancer) AS necromancer, (SELECT COUNT(*) FROM charactercreator_thief) AS theif \"\"\"", "charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id GROUP BY charactercreator_character_inventory.character_id LIMIT 20 \"\"\" AVG_WEAPONS = \"\"\" SELECT", "JOIN armory_weapon ON armory_item.item_id = armory_weapon.item_ptr_id \"\"\" ITEMS_NO_WEPS = \"\"\" SELECT( SELECT COUNT(*)", "theif):\", class_count) print(\"Item Count\", item_count) print(\"Weapon Count:\", wep_count) print(\"Items without Weapons:\", items_no_weps) print(\"Items", "AS cleric, (SELECT COUNT(*) FROM charactercreator_fighter) AS fighter, (SELECT COUNT(*) FROM charactercreator_mage) AS", "Number of Items Per Character:\", avg_items) print(\"Average Number of Weapons Per Character:\", avg_weapons)", "import sqlite3 def connect_to_db(db_name=\"rpg_db.sqlite3\"): return sqlite3.connect(db_name) def execute_query(cursor, query): cursor.execute(query) return cursor.fetchall() GET_CHARACTERS", "charactercreator_character \"\"\" CHARACTER_COUNT = \"\"\" SELECT COUNT(*) FROM charactercreator_character \"\"\" CLASS_COUNT = \"\"\"", "charactercreator_character_inventory.character_id, COUNT(*) AS num_items FROM charactercreator_character_inventory INNER JOIN armory_item ON charactercreator_character_inventory.item_id = armory_item.item_id", "FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_weapons FROM charactercreator_character_inventory INNER JOIN armory_weapon ON", "INNER JOIN armory_item ON charactercreator_character_inventory.item_id = armory_item.item_id GROUP BY charactercreator_character_inventory.character_id ) \"\"\" if", "( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_items FROM charactercreator_character_inventory INNER JOIN armory_item ON charactercreator_character_inventory.item_id", "\"\"\" WEP_COUNT = \"\"\" SELECT COUNT(*) name FROM armory_item INNER JOIN armory_weapon ON", "char_count = execute_query(curs, CHARACTER_COUNT) results = execute_query(curs, GET_CHARACTERS) class_count = execute_query(curs, CLASS_COUNT) item_count", "charactercreator_fighter) AS fighter, (SELECT COUNT(*) FROM charactercreator_mage) AS mage, (SELECT COUNT(*) FROM charactercreator_necromancer)", "(SELECT COUNT(*) FROM charactercreator_thief) AS theif \"\"\" ITEM_COUNT = \"\"\" SELECT COUNT(*) FROM", "def execute_query(cursor, query): cursor.execute(query) return cursor.fetchall() GET_CHARACTERS = \"\"\" SELECT * FROM charactercreator_character", "AS mage, (SELECT COUNT(*) FROM charactercreator_necromancer) AS necromancer, (SELECT COUNT(*) FROM charactercreator_thief) AS", "character_id, COUNT(*) FROM charactercreator_character_inventory GROUP BY item_id LIMIT 20; \"\"\" CHAR_WEP_COUNT = \"\"\"", ") \"\"\" if __name__ == \"__main__\": conn = connect_to_db() curs = conn.cursor() char_count", "WEP_COUNT = \"\"\" SELECT COUNT(*) name FROM armory_item INNER JOIN armory_weapon ON armory_item.item_id", "charactercreator_mage) AS mage, (SELECT COUNT(*) FROM charactercreator_necromancer) AS necromancer, (SELECT COUNT(*) FROM charactercreator_thief)", "CHAR_ITEM_COUNT = \"\"\" SELECT character_id, COUNT(*) FROM charactercreator_character_inventory GROUP BY item_id LIMIT 20;", "\"\"\" SELECT AVG(num_weapons) FROM ( SELECT 
charactercreator_character_inventory.character_id, COUNT(*) AS num_weapons FROM charactercreator_character_inventory INNER", "conn = connect_to_db() curs = conn.cursor() char_count = execute_query(curs, CHARACTER_COUNT) results = execute_query(curs,", "= armory_weapon.item_ptr_id \"\"\" ITEMS_NO_WEPS = \"\"\" SELECT( SELECT COUNT(*) FROM armory_item ) -", ") \"\"\" AVG_ITEMS = \"\"\" SELECT AVG(num_items) FROM ( SELECT charactercreator_character_inventory.character_id, COUNT(*) AS", "COUNT(*) FROM armory_weapon ) \"\"\" CHAR_ITEM_COUNT = \"\"\" SELECT character_id, COUNT(*) FROM charactercreator_character_inventory" ]
[ "See LICENSE.txt in the project root for license information. # ------------------------------------------------------------------------------------------------------ import torch.nn", "Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for", "License. See LICENSE.txt in the project root for license information. # ------------------------------------------------------------------------------------------------------ import", "# ------------------------------------------------------------------------------------------------------ import torch.nn as nn class LinearRegressionModel(nn.Module): def __init__(self, input_dim, output_dim): super(LinearRegressionModel,", "# ------------------------------------------------------------------------------------------------------ # Copyright (c) <NAME>. All rights reserved. # Licensed under the", "license information. # ------------------------------------------------------------------------------------------------------ import torch.nn as nn class LinearRegressionModel(nn.Module): def __init__(self, input_dim,", "self.linear = nn.Linear(input_dim, output_dim) def forward(self, x): #pylint: disable=arguments-differ prediction = self.linear(x) return", "------------------------------------------------------------------------------------------------------ # Copyright (c) <NAME>. All rights reserved. # Licensed under the BSD", "3-Clause License. See LICENSE.txt in the project root for license information. # ------------------------------------------------------------------------------------------------------", "output_dim): super(LinearRegressionModel, self).__init__() self.linear = nn.Linear(input_dim, output_dim) def forward(self, x): #pylint: disable=arguments-differ prediction", "= nn.Linear(input_dim, output_dim) def forward(self, x): #pylint: disable=arguments-differ prediction = self.linear(x) return prediction", "as nn class LinearRegressionModel(nn.Module): def __init__(self, input_dim, output_dim): super(LinearRegressionModel, self).__init__() self.linear = nn.Linear(input_dim,", "<filename>bugprediction/linear_regression_model.py # ------------------------------------------------------------------------------------------------------ # Copyright (c) <NAME>. All rights reserved. # Licensed under", "class LinearRegressionModel(nn.Module): def __init__(self, input_dim, output_dim): super(LinearRegressionModel, self).__init__() self.linear = nn.Linear(input_dim, output_dim) def", "BSD 3-Clause License. See LICENSE.txt in the project root for license information. #", "torch.nn as nn class LinearRegressionModel(nn.Module): def __init__(self, input_dim, output_dim): super(LinearRegressionModel, self).__init__() self.linear =", "LICENSE.txt in the project root for license information. # ------------------------------------------------------------------------------------------------------ import torch.nn as", "for license information. # ------------------------------------------------------------------------------------------------------ import torch.nn as nn class LinearRegressionModel(nn.Module): def __init__(self,", "under the BSD 3-Clause License. See LICENSE.txt in the project root for license", "the project root for license information. 
# ------------------------------------------------------------------------------------------------------ import torch.nn as nn class", "------------------------------------------------------------------------------------------------------ import torch.nn as nn class LinearRegressionModel(nn.Module): def __init__(self, input_dim, output_dim): super(LinearRegressionModel, self).__init__()", "All rights reserved. # Licensed under the BSD 3-Clause License. See LICENSE.txt in", "LinearRegressionModel(nn.Module): def __init__(self, input_dim, output_dim): super(LinearRegressionModel, self).__init__() self.linear = nn.Linear(input_dim, output_dim) def forward(self,", "def __init__(self, input_dim, output_dim): super(LinearRegressionModel, self).__init__() self.linear = nn.Linear(input_dim, output_dim) def forward(self, x):", "# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root", "in the project root for license information. # ------------------------------------------------------------------------------------------------------ import torch.nn as nn", "input_dim, output_dim): super(LinearRegressionModel, self).__init__() self.linear = nn.Linear(input_dim, output_dim) def forward(self, x): #pylint: disable=arguments-differ", "Copyright (c) <NAME>. All rights reserved. # Licensed under the BSD 3-Clause License.", "import torch.nn as nn class LinearRegressionModel(nn.Module): def __init__(self, input_dim, output_dim): super(LinearRegressionModel, self).__init__() self.linear", "super(LinearRegressionModel, self).__init__() self.linear = nn.Linear(input_dim, output_dim) def forward(self, x): #pylint: disable=arguments-differ prediction =", "rights reserved. # Licensed under the BSD 3-Clause License. See LICENSE.txt in the", "root for license information. # ------------------------------------------------------------------------------------------------------ import torch.nn as nn class LinearRegressionModel(nn.Module): def", "(c) <NAME>. All rights reserved. # Licensed under the BSD 3-Clause License. See", "nn class LinearRegressionModel(nn.Module): def __init__(self, input_dim, output_dim): super(LinearRegressionModel, self).__init__() self.linear = nn.Linear(input_dim, output_dim)", "project root for license information. # ------------------------------------------------------------------------------------------------------ import torch.nn as nn class LinearRegressionModel(nn.Module):", "<NAME>. All rights reserved. # Licensed under the BSD 3-Clause License. See LICENSE.txt", "self).__init__() self.linear = nn.Linear(input_dim, output_dim) def forward(self, x): #pylint: disable=arguments-differ prediction = self.linear(x)", "__init__(self, input_dim, output_dim): super(LinearRegressionModel, self).__init__() self.linear = nn.Linear(input_dim, output_dim) def forward(self, x): #pylint:", "reserved. # Licensed under the BSD 3-Clause License. See LICENSE.txt in the project", "information. # ------------------------------------------------------------------------------------------------------ import torch.nn as nn class LinearRegressionModel(nn.Module): def __init__(self, input_dim, output_dim):", "the BSD 3-Clause License. See LICENSE.txt in the project root for license information.", "# Copyright (c) <NAME>. All rights reserved. # Licensed under the BSD 3-Clause" ]
[ "the models print(\"Feature extraction for train\") train_features = Features(train_preprocessed_headlines, train_preprocessed_bodies, train_headlines, train_bodies) #", "+ \"/\" + \"training_bodies.p\") else: train_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"training_headlines.p\") train_preprocessed_bodies", "base_preprocess_path + \"/\" + \"training_headlines.p\") output_file(train_preprocessed_bodies, base_preprocess_path + \"/\" + \"training_bodies.p\") else: train_preprocessed_headlines", "for the report. ''' t2 = time.time() print(\"Time for the total is:\", t2", "for train\") train_features = Features(train_preprocessed_headlines, train_preprocessed_bodies, train_headlines, train_bodies) # TF-IDF weight extraction train_tfidf_weights,", "TF-IDF weight extraction train_tfidf_weights, validation_tfidf_weights, test_tfidf_weights = train_features.tfidf_extraction( validation_headlines, validation_bodies, test_headlines, test_bodies) #", "os.path.exists( base_preprocess_path + \"/\" + \"training_bodies.p\")): preprocessed_train_data = Preprocess(headline=train_stances, body=train.articleBody, preprocess_type=\"lemma\") train_preprocessed_headlines, train_preprocessed_bodies", "validation_headlines, validation_bodies) # Sentence weighting for validation validation_sentence_weights = validation_features.sentence_weighting() print(\"Feature extraction for", "the test print(\"Start of pre-processing for test\") if not (os.path.exists(base_preprocess_path + \"/\" +", "= time.time() # Importing the data train = FakeNewsData(trainStancePath, trainBodyPath) test = FakeNewsData(testStancePath,", "Data importing, spliting, pre processing, feature transformation, modelling and visualization. Check README for", "Preprocess the validation print(\"Start of pre-processing for validation\") if not (os.path.exists(base_preprocess_path + \"/\"", "+ \"/\" + \"train_features.p\") final_validation_features = sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A output_file(final_validation_features, base_feature_path + \"/\" +", "train_features = Features(train_preprocessed_headlines, train_preprocessed_bodies, train_headlines, train_bodies) # TF-IDF weight extraction train_tfidf_weights, validation_tfidf_weights, test_tfidf_weights", "Sentence weighting for test test_sentence_weights = test_features.sentence_weighting() # Combine the features to prepare", "headlines, bodies ''' This files combine all the data mining part starting from", "test_bodies = headlines_bodies(test.headlineInstances, test.articleBody) if not (os.path.exists(base_feature_path + \"/\" + \"train_features.p\") and os.path.exists(", "for i in range(len(stances)): labels.append(LABELS.index(stances[i][stance])) return labels def headlines_bodies(temp_headline, temp_body): headlines = []", "# Target variables train_target_labels = target_labels(train_stances) validation_target_labels = target_labels(validation_stances) test_target_labels = target_labels(test.headlineInstances) #", "Features(test_preprocessed_headlines, test_preprocessed_bodies, test_headlines, test_bodies) # Sentence weighting for test test_sentence_weights = test_features.sentence_weighting() #", "+ \"validation_headlines.p\") validation_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"validation_bodies.p\") # Preprocess the test", "\"/\" + \"test_bodies.p\")): preprocessed_test_data = Preprocess(headline=test.headlineInstances, body=test.articleBody, preprocess_type=\"lemma\") test_preprocessed_headlines, 
test_preprocessed_bodies = preprocessed_test_data.get_clean_headlines_and_bodies() output_file(test_preprocessed_headlines,", "''' This files combine all the data mining part starting from Data importing,", "src.preprocess import Preprocess from src.feature_extraction import Features from src.models import Models from src.score", "primary_id = \"Body ID\" stance = \"Stance\" body = \"articleBody\" headline = \"Headline\"", "final_validation_features = sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A output_file(final_validation_features, base_feature_path + \"/\" + \"validation_features.p\") final_test_features = sp.bmat([[test_tfidf_weights,", "def headlines_bodies(temp_headline, temp_body): headlines = [] bodies = [] for i in range(len(temp_headline)):", "import Features from src.models import Models from src.score import LABELS from src.utils import", "+ \"test_bodies.p\") else: test_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"test_headlines.p\") test_preprocessed_bodies = input_file(base_preprocess_path", "output_file(final_train_features, base_feature_path + \"/\" + \"train_features.p\") final_validation_features = sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A output_file(final_validation_features, base_feature_path +", "+ \"validation_features.p\") and os.path.exists( base_feature_path + \"/\" + \"test_features.p\")): # Feature extraction and", "+ \"test_features.p\")): # Feature extraction and combining them for the models print(\"Feature extraction", "else: print(\"Feature Extraction\") final_train_features = input_file(base_feature_path + \"/\" + \"train_features.p\") final_validation_features = input_file(base_feature_path", "inout for the models final_train_features = sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A output_file(final_train_features, base_feature_path + \"/\" +", "else: test_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"test_headlines.p\") test_preprocessed_bodies = input_file(base_preprocess_path + \"/\"", "+ \"/\" + \"training_headlines.p\") train_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"training_bodies.p\") # Preprocess", "src.models import Models from src.score import LABELS from src.utils import input_file, output_file import", "\"/\" + \"test_headlines.p\") output_file(test_preprocessed_bodies, base_preprocess_path + \"/\" + \"test_bodies.p\") else: test_preprocessed_headlines = input_file(base_preprocess_path", "= train_features.tfidf_extraction( validation_headlines, validation_bodies, test_headlines, test_bodies) # Sentence weighting for train train_sentence_weights =", "+ \"/\" + \"validation_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"validation_bodies.p\")): preprocessed_validation_data =", "input_file(base_feature_path + \"/\" + \"train_features.p\") final_validation_features = input_file(base_feature_path + \"/\" + \"validation_features.p\") final_test_features", "from src.train_validation_split import DataSplit from src.preprocess import Preprocess from src.feature_extraction import Features from", "\"training_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"training_bodies.p\")): preprocessed_train_data = Preprocess(headline=train_stances, body=train.articleBody, preprocess_type=\"lemma\")", "news Challenge Authors: <NAME>(z5222766), <NAME> (z5058240), <NAME>(z5173917), <NAME> (z5113901) main.py: Main file for", "i in range(len(temp_headline)): 
bodies.append(temp_body[int(temp_headline[i][primary_id])]) headlines.append(temp_headline[i][headline]) return headlines, bodies ''' This files combine all", "+ \"training_bodies.p\") # Preprocess the validation print(\"Start of pre-processing for validation\") if not", "= input_file(base_preprocess_path + \"/\" + \"validation_headlines.p\") validation_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"validation_bodies.p\")", "= \"data/train_stances.csv\" testStancePath = \"data/competition_test_stances.csv\" trainBodyPath = \"data/train_bodies.csv\" testBodyPath = \"data/competition_test_bodies.csv\" # header", "= target_labels(test.headlineInstances) # Modelling the features print(\"Start of Modelling\") models = Models(final_train_features, final_validation_features,", "validation_preprocessed_bodies, validation_headlines, validation_bodies) # Sentence weighting for validation validation_sentence_weights = validation_features.sentence_weighting() print(\"Feature extraction", "= Preprocess(headline=validation_stances, body=train.articleBody, preprocess_type=\"lemma\") validation_preprocessed_headlines, validation_preprocessed_bodies = preprocessed_validation_data.get_clean_headlines_and_bodies() output_file(validation_preprocessed_headlines, base_preprocess_path + \"/\" +", "report. ''' t2 = time.time() print(\"Time for the total is:\", t2 - t0)", "= \"output\" def target_labels(stances): labels = [] for i in range(len(stances)): labels.append(LABELS.index(stances[i][stance])) return", "preprocessed_test_data.get_clean_headlines_and_bodies() output_file(test_preprocessed_headlines, base_preprocess_path + \"/\" + \"test_headlines.p\") output_file(test_preprocessed_bodies, base_preprocess_path + \"/\" + \"test_bodies.p\")", "\"Headline\" base_preprocess_path = \"preprocessed_data\" base_feature_path = \"final_features\" output = \"output\" def target_labels(stances): labels", "them as an inout for the models final_train_features = sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A output_file(final_train_features, base_feature_path", "(os.path.exists(base_preprocess_path + \"/\" + \"test_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"test_bodies.p\")): preprocessed_test_data", "os.path.exists( base_feature_path + \"/\" + \"validation_features.p\") and os.path.exists( base_feature_path + \"/\" + \"test_features.p\")):", "- t0) # Target variables train_target_labels = target_labels(train_stances) validation_target_labels = target_labels(validation_stances) test_target_labels =", "= \"data/train_bodies.csv\" testBodyPath = \"data/competition_test_bodies.csv\" # header attributes primary_id = \"Body ID\" stance", "= Features(test_preprocessed_headlines, test_preprocessed_bodies, test_headlines, test_bodies) # Sentence weighting for test test_sentence_weights = test_features.sentence_weighting()", "# Preprocess the test print(\"Start of pre-processing for test\") if not (os.path.exists(base_preprocess_path +", "models print(\"Feature extraction for train\") train_features = Features(train_preprocessed_headlines, train_preprocessed_bodies, train_headlines, train_bodies) # TF-IDF", "+ \"test_bodies.p\")): preprocessed_test_data = Preprocess(headline=test.headlineInstances, body=test.articleBody, preprocess_type=\"lemma\") test_preprocessed_headlines, test_preprocessed_bodies = preprocessed_test_data.get_clean_headlines_and_bodies() output_file(test_preprocessed_headlines, base_preprocess_path", "weight extraction train_tfidf_weights, 
validation_tfidf_weights, test_tfidf_weights = train_features.tfidf_extraction( validation_headlines, validation_bodies, test_headlines, test_bodies) # Sentence", "for train\") if not (os.path.exists(base_preprocess_path + \"/\" + \"training_headlines.p\") and os.path.exists( base_preprocess_path +", "\"training_headlines.p\") output_file(train_preprocessed_bodies, base_preprocess_path + \"/\" + \"training_bodies.p\") else: train_preprocessed_headlines = input_file(base_preprocess_path + \"/\"", "body=test.articleBody, preprocess_type=\"lemma\") test_preprocessed_headlines, test_preprocessed_bodies = preprocessed_test_data.get_clean_headlines_and_bodies() output_file(test_preprocessed_headlines, base_preprocess_path + \"/\" + \"test_headlines.p\") output_file(test_preprocessed_bodies,", "models final_train_features = sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A output_file(final_train_features, base_feature_path + \"/\" + \"train_features.p\") final_validation_features =", "from src.data_import import FakeNewsData from src.train_validation_split import DataSplit from src.preprocess import Preprocess from", "= \"articleBody\" headline = \"Headline\" base_preprocess_path = \"preprocessed_data\" base_feature_path = \"final_features\" output =", "is:\", t1 - t0) # Target variables train_target_labels = target_labels(train_stances) validation_target_labels = target_labels(validation_stances)", "<NAME> (z5113901) main.py: Main file for program execution \"\"\" from src.data_import import FakeNewsData", "of Modelling\") models = Models(final_train_features, final_validation_features, final_test_features, train_target_labels, validation_target_labels, test_target_labels) # Calling the", "output_file(train_preprocessed_bodies, base_preprocess_path + \"/\" + \"training_bodies.p\") else: train_preprocessed_headlines = input_file(base_preprocess_path + \"/\" +", "part starting from Data importing, spliting, pre processing, feature transformation, modelling and visualization.", "Target variables train_target_labels = target_labels(train_stances) validation_target_labels = target_labels(validation_stances) test_target_labels = target_labels(test.headlineInstances) # Modelling", "== \"__main__\": t0 = time.time() # Importing the data train = FakeNewsData(trainStancePath, trainBodyPath)", "predicted labels to produce the correctness visualizations graphs for the report. 
''' t2", "= headlines_bodies(test.headlineInstances, test.articleBody) if not (os.path.exists(base_feature_path + \"/\" + \"train_features.p\") and os.path.exists( base_feature_path", "<NAME>(z5222766), <NAME> (z5058240), <NAME>(z5173917), <NAME> (z5113901) main.py: Main file for program execution \"\"\"", "\"validation_headlines.p\") validation_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"validation_bodies.p\") # Preprocess the test print(\"Start", "body = \"articleBody\" headline = \"Headline\" base_preprocess_path = \"preprocessed_data\" base_feature_path = \"final_features\" output", "as sp import os import time # Global Variables trainStancePath = \"data/train_stances.csv\" testStancePath", "to know the actual labels and the predicted labels to produce the correctness", "base_preprocess_path + \"/\" + \"training_bodies.p\")): preprocessed_train_data = Preprocess(headline=train_stances, body=train.articleBody, preprocess_type=\"lemma\") train_preprocessed_headlines, train_preprocessed_bodies =", "\"/\" + \"test_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"test_bodies.p\")): preprocessed_test_data = Preprocess(headline=test.headlineInstances,", "test_features = Features(test_preprocessed_headlines, test_preprocessed_bodies, test_headlines, test_bodies) # Sentence weighting for test test_sentence_weights =", "labels to produce the correctness visualizations graphs for the report. ''' t2 =", "\"Body ID\" stance = \"Stance\" body = \"articleBody\" headline = \"Headline\" base_preprocess_path =", "and the predicted labels to produce the correctness visualizations graphs for the report.", "time.time() # Importing the data train = FakeNewsData(trainStancePath, trainBodyPath) test = FakeNewsData(testStancePath, testBodyPath)", "train print(\"Start of pre-processing for train\") if not (os.path.exists(base_preprocess_path + \"/\" + \"training_headlines.p\")", "base_feature_path = \"final_features\" output = \"output\" def target_labels(stances): labels = [] for i", "train, validation and test train_headlines, train_bodies = headlines_bodies(train_stances, train.articleBody) validation_headlines, validation_bodies = headlines_bodies(validation_stances,", "= input_file(base_preprocess_path + \"/\" + \"test_headlines.p\") test_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"test_bodies.p\")", "\"__main__\": t0 = time.time() # Importing the data train = FakeNewsData(trainStancePath, trainBodyPath) test", "final_validation_features = input_file(base_feature_path + \"/\" + \"validation_features.p\") final_test_features = input_file(base_feature_path + \"/\" +", "models.get_lr() models.get_dt() models.get_nb() models.get_rf() ''' Used read_from_csv in utils to know the actual", "base_preprocess_path + \"/\" + \"validation_bodies.p\") else: validation_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"validation_headlines.p\")", "# Extracting IDs for data splitting ids = list(train.articleBody.keys()) # The DataSplit generates", "Models from src.score import LABELS from src.utils import input_file, output_file import scipy.sparse as", "target_labels(validation_stances) test_target_labels = target_labels(test.headlineInstances) # Modelling the features print(\"Start of Modelling\") models =", "\"Stance\" body = \"articleBody\" headline = \"Headline\" base_preprocess_path = \"preprocessed_data\" base_feature_path = \"final_features\"", "\"test_bodies.p\") else: test_preprocessed_headlines = input_file(base_preprocess_path + 
\"/\" + \"test_headlines.p\") test_preprocessed_bodies = input_file(base_preprocess_path +", "target_labels(test.headlineInstances) # Modelling the features print(\"Start of Modelling\") models = Models(final_train_features, final_validation_features, final_test_features,", "test_target_labels) # Calling the 4 models models.get_lr() models.get_dt() models.get_nb() models.get_rf() ''' Used read_from_csv", "importing, spliting, pre processing, feature transformation, modelling and visualization. Check README for clear", "feature transformation, modelling and visualization. Check README for clear understanding of what is", "<NAME>(z5173917), <NAME> (z5113901) main.py: Main file for program execution \"\"\" from src.data_import import", "import LABELS from src.utils import input_file, output_file import scipy.sparse as sp import os", "the data mining part starting from Data importing, spliting, pre processing, feature transformation,", "<filename>main.py \"\"\" COMP9417 Assignment: Fake news Challenge Authors: <NAME>(z5222766), <NAME> (z5058240), <NAME>(z5173917), <NAME>", "the features to prepare them as an inout for the models final_train_features =", "validation_preprocessed_bodies = preprocessed_validation_data.get_clean_headlines_and_bodies() output_file(validation_preprocessed_headlines, base_preprocess_path + \"/\" + \"validation_headlines.p\") output_file(validation_preprocessed_bodies, base_preprocess_path + \"/\"", "extraction train_tfidf_weights, validation_tfidf_weights, test_tfidf_weights = train_features.tfidf_extraction( validation_headlines, validation_bodies, test_headlines, test_bodies) # Sentence weighting", "body=train.articleBody, preprocess_type=\"lemma\") train_preprocessed_headlines, train_preprocessed_bodies = preprocessed_train_data.get_clean_headlines_and_bodies() output_file(train_preprocessed_headlines, base_preprocess_path + \"/\" + \"training_headlines.p\") output_file(train_preprocessed_bodies,", "to prepare them as an inout for the models final_train_features = sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A", "\"validation_features.p\") and os.path.exists( base_feature_path + \"/\" + \"test_features.p\")): # Feature extraction and combining", "def target_labels(stances): labels = [] for i in range(len(stances)): labels.append(LABELS.index(stances[i][stance])) return labels def", "(z5058240), <NAME>(z5173917), <NAME> (z5113901) main.py: Main file for program execution \"\"\" from src.data_import", "preprocessed_train_data.get_clean_headlines_and_bodies() output_file(train_preprocessed_headlines, base_preprocess_path + \"/\" + \"training_headlines.p\") output_file(train_preprocessed_bodies, base_preprocess_path + \"/\" + \"training_bodies.p\")", "else: validation_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"validation_headlines.p\") validation_preprocessed_bodies = input_file(base_preprocess_path + \"/\"", "This files combine all the data mining part starting from Data importing, spliting,", "if not (os.path.exists(base_preprocess_path + \"/\" + \"training_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" +", "test_sentence_weights.T]]).A output_file(final_test_features, base_feature_path + \"/\" + \"test_features.p\") else: print(\"Feature Extraction\") final_train_features = input_file(base_feature_path", "combine all the data mining part starting from Data importing, spliting, pre processing,", "and os.path.exists( base_preprocess_path + \"/\" + \"test_bodies.p\")): preprocessed_test_data = 
Preprocess(headline=test.headlineInstances, body=test.articleBody, preprocess_type=\"lemma\") test_preprocessed_headlines,", "\"/\" + \"validation_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"validation_bodies.p\")): preprocessed_validation_data = Preprocess(headline=validation_stances,", "models = Models(final_train_features, final_validation_features, final_test_features, train_target_labels, validation_target_labels, test_target_labels) # Calling the 4 models", "= Models(final_train_features, final_validation_features, final_test_features, train_target_labels, validation_target_labels, test_target_labels) # Calling the 4 models models.get_lr()", "in utils to know the actual labels and the predicted labels to produce", "print(\"Start of Modelling\") models = Models(final_train_features, final_validation_features, final_test_features, train_target_labels, validation_target_labels, test_target_labels) # Calling", "utils to know the actual labels and the predicted labels to produce the", "transformation, modelling and visualization. Check README for clear understanding of what is happening.", "+ \"training_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"training_bodies.p\")): preprocessed_train_data = Preprocess(headline=train_stances, body=train.articleBody,", "output_file(validation_preprocessed_headlines, base_preprocess_path + \"/\" + \"validation_headlines.p\") output_file(validation_preprocessed_bodies, base_preprocess_path + \"/\" + \"validation_bodies.p\") else:", "base_preprocess_path + \"/\" + \"test_bodies.p\") else: test_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"test_headlines.p\")", "the predicted labels to produce the correctness visualizations graphs for the report. 
'''", "prepare them as an inout for the models final_train_features = sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A output_file(final_train_features,", "+ \"test_features.p\") t1 = time.time() print(\"Time for feature extraction is:\", t1 - t0)", "os.path.exists( base_preprocess_path + \"/\" + \"test_bodies.p\")): preprocessed_test_data = Preprocess(headline=test.headlineInstances, body=test.articleBody, preprocess_type=\"lemma\") test_preprocessed_headlines, test_preprocessed_bodies", "execution \"\"\" from src.data_import import FakeNewsData from src.train_validation_split import DataSplit from src.preprocess import", "= [] for i in range(len(temp_headline)): bodies.append(temp_body[int(temp_headline[i][primary_id])]) headlines.append(temp_headline[i][headline]) return headlines, bodies ''' This", "= headlines_bodies(validation_stances, train.articleBody) test_headlines, test_bodies = headlines_bodies(test.headlineInstances, test.articleBody) if not (os.path.exists(base_feature_path + \"/\"", "sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A output_file(final_validation_features, base_feature_path + \"/\" + \"validation_features.p\") final_test_features = sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A output_file(final_test_features,", "# Sentence weighting for train train_sentence_weights = train_features.sentence_weighting() print(\"Feature extraction for validation\") validation_features", "= input_file(base_preprocess_path + \"/\" + \"validation_bodies.p\") # Preprocess the test print(\"Start of pre-processing", "of pre-processing for train\") if not (os.path.exists(base_preprocess_path + \"/\" + \"training_headlines.p\") and os.path.exists(", "test_headlines, test_bodies) # Sentence weighting for test test_sentence_weights = test_features.sentence_weighting() # Combine the", "# Combine the features to prepare them as an inout for the models", "= validation_features.sentence_weighting() print(\"Feature extraction for test\") test_features = Features(test_preprocessed_headlines, test_preprocessed_bodies, test_headlines, test_bodies) #", "= headlines_bodies(train_stances, train.articleBody) validation_headlines, validation_bodies = headlines_bodies(validation_stances, train.articleBody) test_headlines, test_bodies = headlines_bodies(test.headlineInstances, test.articleBody)", "# The DataSplit generates the train and validation splits according to our split", "validation and test train_headlines, train_bodies = headlines_bodies(train_stances, train.articleBody) validation_headlines, validation_bodies = headlines_bodies(validation_stances, train.articleBody)", "attributes primary_id = \"Body ID\" stance = \"Stance\" body = \"articleBody\" headline =", "+ \"test_bodies.p\") # Split headlines and bodies for train, validation and test train_headlines,", "base_preprocess_path = \"preprocessed_data\" base_feature_path = \"final_features\" output = \"output\" def target_labels(stances): labels =", "Used read_from_csv in utils to know the actual labels and the predicted labels", "+ \"training_headlines.p\") output_file(train_preprocessed_bodies, base_preprocess_path + \"/\" + \"training_bodies.p\") else: train_preprocessed_headlines = input_file(base_preprocess_path +", "final_train_features = input_file(base_feature_path + \"/\" + \"train_features.p\") final_validation_features = input_file(base_feature_path + \"/\" +", "[] bodies = [] for i in range(len(temp_headline)): bodies.append(temp_body[int(temp_headline[i][primary_id])]) 
headlines.append(temp_headline[i][headline]) return headlines, bodies", "what is happening. ''' if __name__ == \"__main__\": t0 = time.time() # Importing", "train_bodies = headlines_bodies(train_stances, train.articleBody) validation_headlines, validation_bodies = headlines_bodies(validation_stances, train.articleBody) test_headlines, test_bodies = headlines_bodies(test.headlineInstances,", "\"data/competition_test_bodies.csv\" # header attributes primary_id = \"Body ID\" stance = \"Stance\" body =", "ID\" stance = \"Stance\" body = \"articleBody\" headline = \"Headline\" base_preprocess_path = \"preprocessed_data\"", "\"test_headlines.p\") test_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"test_bodies.p\") # Split headlines and bodies", "\"\"\" from src.data_import import FakeNewsData from src.train_validation_split import DataSplit from src.preprocess import Preprocess", "= train_validation_split.split() # Preprocess the train print(\"Start of pre-processing for train\") if not", "output_file import scipy.sparse as sp import os import time # Global Variables trainStancePath", "= input_file(base_feature_path + \"/\" + \"test_features.p\") t1 = time.time() print(\"Time for feature extraction", "is happening. ''' if __name__ == \"__main__\": t0 = time.time() # Importing the", "headline=train.headlineInstances, split_size=0.8) train_stances, validation_stances = train_validation_split.split() # Preprocess the train print(\"Start of pre-processing", "and validation splits according to our split size print(\"Data Splitting\") train_validation_split = DataSplit(ids=ids,", "t0) # Target variables train_target_labels = target_labels(train_stances) validation_target_labels = target_labels(validation_stances) test_target_labels = target_labels(test.headlineInstances)", "\"validation_headlines.p\") output_file(validation_preprocessed_bodies, base_preprocess_path + \"/\" + \"validation_bodies.p\") else: validation_preprocessed_headlines = input_file(base_preprocess_path + \"/\"", "and bodies for train, validation and test train_headlines, train_bodies = headlines_bodies(train_stances, train.articleBody) validation_headlines,", "DataSplit from src.preprocess import Preprocess from src.feature_extraction import Features from src.models import Models", "# Importing the data train = FakeNewsData(trainStancePath, trainBodyPath) test = FakeNewsData(testStancePath, testBodyPath) #", "final_test_features, train_target_labels, validation_target_labels, test_target_labels) # Calling the 4 models models.get_lr() models.get_dt() models.get_nb() models.get_rf()", "validation print(\"Start of pre-processing for validation\") if not (os.path.exists(base_preprocess_path + \"/\" + \"validation_headlines.p\")", "Feature extraction and combining them for the models print(\"Feature extraction for train\") train_features", "\"/\" + \"training_headlines.p\") train_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"training_bodies.p\") # Preprocess the", "features print(\"Start of Modelling\") models = Models(final_train_features, final_validation_features, final_test_features, train_target_labels, validation_target_labels, test_target_labels) #", "\"validation_bodies.p\")): preprocessed_validation_data = Preprocess(headline=validation_stances, body=train.articleBody, preprocess_type=\"lemma\") validation_preprocessed_headlines, validation_preprocessed_bodies = preprocessed_validation_data.get_clean_headlines_and_bodies() output_file(validation_preprocessed_headlines, base_preprocess_path 
+", "print(\"Feature extraction for train\") train_features = Features(train_preprocessed_headlines, train_preprocessed_bodies, train_headlines, train_bodies) # TF-IDF weight", "for test test_sentence_weights = test_features.sentence_weighting() # Combine the features to prepare them as", "the correctness visualizations graphs for the report. ''' t2 = time.time() print(\"Time for", "if not (os.path.exists(base_preprocess_path + \"/\" + \"test_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" +", "\"data/train_bodies.csv\" testBodyPath = \"data/competition_test_bodies.csv\" # header attributes primary_id = \"Body ID\" stance =", "output_file(final_test_features, base_feature_path + \"/\" + \"test_features.p\") else: print(\"Feature Extraction\") final_train_features = input_file(base_feature_path +", "train = FakeNewsData(trainStancePath, trainBodyPath) test = FakeNewsData(testStancePath, testBodyPath) # Extracting IDs for data", "(os.path.exists(base_preprocess_path + \"/\" + \"training_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"training_bodies.p\")): preprocessed_train_data", "Challenge Authors: <NAME>(z5222766), <NAME> (z5058240), <NAME>(z5173917), <NAME> (z5113901) main.py: Main file for program", "base_feature_path + \"/\" + \"test_features.p\") else: print(\"Feature Extraction\") final_train_features = input_file(base_feature_path + \"/\"", "= \"Headline\" base_preprocess_path = \"preprocessed_data\" base_feature_path = \"final_features\" output = \"output\" def target_labels(stances):", "validation_headlines, validation_bodies = headlines_bodies(validation_stances, train.articleBody) test_headlines, test_bodies = headlines_bodies(test.headlineInstances, test.articleBody) if not (os.path.exists(base_feature_path", "\"validation_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"validation_bodies.p\")): preprocessed_validation_data = Preprocess(headline=validation_stances, body=train.articleBody, preprocess_type=\"lemma\")", "+ \"/\" + \"training_bodies.p\") # Preprocess the validation print(\"Start of pre-processing for validation\")", "src.feature_extraction import Features from src.models import Models from src.score import LABELS from src.utils", "+ \"test_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"test_bodies.p\")): preprocessed_test_data = Preprocess(headline=test.headlineInstances, body=test.articleBody,", "Calling the 4 models models.get_lr() models.get_dt() models.get_nb() models.get_rf() ''' Used read_from_csv in utils", "Fake news Challenge Authors: <NAME>(z5222766), <NAME> (z5058240), <NAME>(z5173917), <NAME> (z5113901) main.py: Main file", "os import time # Global Variables trainStancePath = \"data/train_stances.csv\" testStancePath = \"data/competition_test_stances.csv\" trainBodyPath", "input_file(base_feature_path + \"/\" + \"test_features.p\") t1 = time.time() print(\"Time for feature extraction is:\",", "validation validation_sentence_weights = validation_features.sentence_weighting() print(\"Feature extraction for test\") test_features = Features(test_preprocessed_headlines, test_preprocessed_bodies, test_headlines,", "Models(final_train_features, final_validation_features, final_test_features, train_target_labels, validation_target_labels, test_target_labels) # Calling the 4 models models.get_lr() models.get_dt()", "= Preprocess(headline=test.headlineInstances, body=test.articleBody, preprocess_type=\"lemma\") test_preprocessed_headlines, test_preprocessed_bodies = 
preprocessed_test_data.get_clean_headlines_and_bodies() output_file(test_preprocessed_headlines, base_preprocess_path + \"/\" +", "+ \"validation_headlines.p\") output_file(validation_preprocessed_bodies, base_preprocess_path + \"/\" + \"validation_bodies.p\") else: validation_preprocessed_headlines = input_file(base_preprocess_path +", "trainBodyPath = \"data/train_bodies.csv\" testBodyPath = \"data/competition_test_bodies.csv\" # header attributes primary_id = \"Body ID\"", "train\") if not (os.path.exists(base_preprocess_path + \"/\" + \"training_headlines.p\") and os.path.exists( base_preprocess_path + \"/\"", "input_file(base_feature_path + \"/\" + \"validation_features.p\") final_test_features = input_file(base_feature_path + \"/\" + \"test_features.p\") t1", "t2 = time.time() print(\"Time for the total is:\", t2 - t0) print(\"\\nEnd of", "mining part starting from Data importing, spliting, pre processing, feature transformation, modelling and", "validation splits according to our split size print(\"Data Splitting\") train_validation_split = DataSplit(ids=ids, headline=train.headlineInstances,", "starting from Data importing, spliting, pre processing, feature transformation, modelling and visualization. Check", "visualizations graphs for the report. ''' t2 = time.time() print(\"Time for the total", "validation_features = Features(validation_preprocessed_headlines, validation_preprocessed_bodies, validation_headlines, validation_bodies) # Sentence weighting for validation validation_sentence_weights =", "\"/\" + \"train_features.p\") final_validation_features = input_file(base_feature_path + \"/\" + \"validation_features.p\") final_test_features = input_file(base_feature_path", "base_preprocess_path + \"/\" + \"validation_headlines.p\") output_file(validation_preprocessed_bodies, base_preprocess_path + \"/\" + \"validation_bodies.p\") else: validation_preprocessed_headlines", "for clear understanding of what is happening. 
''' if __name__ == \"__main__\": t0", "Preprocess(headline=train_stances, body=train.articleBody, preprocess_type=\"lemma\") train_preprocessed_headlines, train_preprocessed_bodies = preprocessed_train_data.get_clean_headlines_and_bodies() output_file(train_preprocessed_headlines, base_preprocess_path + \"/\" + \"training_headlines.p\")", "for validation validation_sentence_weights = validation_features.sentence_weighting() print(\"Feature extraction for test\") test_features = Features(test_preprocessed_headlines, test_preprocessed_bodies,", "\"test_headlines.p\") output_file(test_preprocessed_bodies, base_preprocess_path + \"/\" + \"test_bodies.p\") else: test_preprocessed_headlines = input_file(base_preprocess_path + \"/\"", "and os.path.exists( base_feature_path + \"/\" + \"test_features.p\")): # Feature extraction and combining them", "+ \"/\" + \"validation_bodies.p\") else: validation_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"validation_headlines.p\") validation_preprocessed_bodies", "= test_features.sentence_weighting() # Combine the features to prepare them as an inout for", "import time # Global Variables trainStancePath = \"data/train_stances.csv\" testStancePath = \"data/competition_test_stances.csv\" trainBodyPath =", "base_preprocess_path + \"/\" + \"test_bodies.p\")): preprocessed_test_data = Preprocess(headline=test.headlineInstances, body=test.articleBody, preprocess_type=\"lemma\") test_preprocessed_headlines, test_preprocessed_bodies =", "Features(train_preprocessed_headlines, train_preprocessed_bodies, train_headlines, train_bodies) # TF-IDF weight extraction train_tfidf_weights, validation_tfidf_weights, test_tfidf_weights = train_features.tfidf_extraction(", "print(\"Feature extraction for validation\") validation_features = Features(validation_preprocessed_headlines, validation_preprocessed_bodies, validation_headlines, validation_bodies) # Sentence weighting", "+ \"training_bodies.p\") else: train_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"training_headlines.p\") train_preprocessed_bodies = input_file(base_preprocess_path", "Authors: <NAME>(z5222766), <NAME> (z5058240), <NAME>(z5173917), <NAME> (z5113901) main.py: Main file for program execution", "= [] bodies = [] for i in range(len(temp_headline)): bodies.append(temp_body[int(temp_headline[i][primary_id])]) headlines.append(temp_headline[i][headline]) return headlines,", "\"/\" + \"train_features.p\") final_validation_features = sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A output_file(final_validation_features, base_feature_path + \"/\" + \"validation_features.p\")", "labels and the predicted labels to produce the correctness visualizations graphs for the", "= time.time() print(\"Time for the total is:\", t2 - t0) print(\"\\nEnd of tests\\n\")", "for program execution \"\"\" from src.data_import import FakeNewsData from src.train_validation_split import DataSplit from", "README for clear understanding of what is happening. 
''' if __name__ == \"__main__\":", "the data train = FakeNewsData(trainStancePath, trainBodyPath) test = FakeNewsData(testStancePath, testBodyPath) # Extracting IDs", "os.path.exists( base_feature_path + \"/\" + \"test_features.p\")): # Feature extraction and combining them for", "+ \"/\" + \"validation_features.p\") final_test_features = input_file(base_feature_path + \"/\" + \"test_features.p\") t1 =", "# Modelling the features print(\"Start of Modelling\") models = Models(final_train_features, final_validation_features, final_test_features, train_target_labels,", "output = \"output\" def target_labels(stances): labels = [] for i in range(len(stances)): labels.append(LABELS.index(stances[i][stance]))", "trainStancePath = \"data/train_stances.csv\" testStancePath = \"data/competition_test_stances.csv\" trainBodyPath = \"data/train_bodies.csv\" testBodyPath = \"data/competition_test_bodies.csv\" #", "__name__ == \"__main__\": t0 = time.time() # Importing the data train = FakeNewsData(trainStancePath,", "train_headlines, train_bodies = headlines_bodies(train_stances, train.articleBody) validation_headlines, validation_bodies = headlines_bodies(validation_stances, train.articleBody) test_headlines, test_bodies =", "# Calling the 4 models models.get_lr() models.get_dt() models.get_nb() models.get_rf() ''' Used read_from_csv in", "generates the train and validation splits according to our split size print(\"Data Splitting\")", "validation_preprocessed_headlines, validation_preprocessed_bodies = preprocessed_validation_data.get_clean_headlines_and_bodies() output_file(validation_preprocessed_headlines, base_preprocess_path + \"/\" + \"validation_headlines.p\") output_file(validation_preprocessed_bodies, base_preprocess_path +", "models.get_nb() models.get_rf() ''' Used read_from_csv in utils to know the actual labels and", "\"train_features.p\") and os.path.exists( base_feature_path + \"/\" + \"validation_features.p\") and os.path.exists( base_feature_path + \"/\"", "\"/\" + \"validation_bodies.p\") else: validation_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"validation_headlines.p\") validation_preprocessed_bodies =", "\"validation_bodies.p\") else: validation_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"validation_headlines.p\") validation_preprocessed_bodies = input_file(base_preprocess_path +", "final_test_features = sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A output_file(final_test_features, base_feature_path + \"/\" + \"test_features.p\") else: print(\"Feature Extraction\")", "= FakeNewsData(trainStancePath, trainBodyPath) test = FakeNewsData(testStancePath, testBodyPath) # Extracting IDs for data splitting", "preprocessed_train_data = Preprocess(headline=train_stances, body=train.articleBody, preprocess_type=\"lemma\") train_preprocessed_headlines, train_preprocessed_bodies = preprocessed_train_data.get_clean_headlines_and_bodies() output_file(train_preprocessed_headlines, base_preprocess_path + \"/\"", "main.py: Main file for program execution \"\"\" from src.data_import import FakeNewsData from src.train_validation_split", "splits according to our split size print(\"Data Splitting\") train_validation_split = DataSplit(ids=ids, headline=train.headlineInstances, split_size=0.8)", "data train = FakeNewsData(trainStancePath, trainBodyPath) test = FakeNewsData(testStancePath, testBodyPath) # Extracting IDs for", "Variables trainStancePath = \"data/train_stances.csv\" testStancePath = \"data/competition_test_stances.csv\" 
trainBodyPath = \"data/train_bodies.csv\" testBodyPath = \"data/competition_test_bodies.csv\"", "test_preprocessed_headlines, test_preprocessed_bodies = preprocessed_test_data.get_clean_headlines_and_bodies() output_file(test_preprocessed_headlines, base_preprocess_path + \"/\" + \"test_headlines.p\") output_file(test_preprocessed_bodies, base_preprocess_path +", "weighting for test test_sentence_weights = test_features.sentence_weighting() # Combine the features to prepare them", "final_test_features = input_file(base_feature_path + \"/\" + \"test_features.p\") t1 = time.time() print(\"Time for feature", "weighting for validation validation_sentence_weights = validation_features.sentence_weighting() print(\"Feature extraction for test\") test_features = Features(test_preprocessed_headlines,", "import FakeNewsData from src.train_validation_split import DataSplit from src.preprocess import Preprocess from src.feature_extraction import", "output_file(train_preprocessed_headlines, base_preprocess_path + \"/\" + \"training_headlines.p\") output_file(train_preprocessed_bodies, base_preprocess_path + \"/\" + \"training_bodies.p\") else:", "\"validation_bodies.p\") # Preprocess the test print(\"Start of pre-processing for test\") if not (os.path.exists(base_preprocess_path", "os.path.exists( base_preprocess_path + \"/\" + \"validation_bodies.p\")): preprocessed_validation_data = Preprocess(headline=validation_stances, body=train.articleBody, preprocess_type=\"lemma\") validation_preprocessed_headlines, validation_preprocessed_bodies", "stance = \"Stance\" body = \"articleBody\" headline = \"Headline\" base_preprocess_path = \"preprocessed_data\" base_feature_path", "+ \"/\" + \"train_features.p\") and os.path.exists( base_feature_path + \"/\" + \"validation_features.p\") and os.path.exists(", "\"validation_features.p\") final_test_features = sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A output_file(final_test_features, base_feature_path + \"/\" + \"test_features.p\") else: print(\"Feature", "test_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"test_headlines.p\") test_preprocessed_bodies = input_file(base_preprocess_path + \"/\" +", "for validation\") if not (os.path.exists(base_preprocess_path + \"/\" + \"validation_headlines.p\") and os.path.exists( base_preprocess_path +", "headlines_bodies(validation_stances, train.articleBody) test_headlines, test_bodies = headlines_bodies(test.headlineInstances, test.articleBody) if not (os.path.exists(base_feature_path + \"/\" +", "print(\"Feature Extraction\") final_train_features = input_file(base_feature_path + \"/\" + \"train_features.p\") final_validation_features = input_file(base_feature_path +", "= target_labels(validation_stances) test_target_labels = target_labels(test.headlineInstances) # Modelling the features print(\"Start of Modelling\") models", "labels def headlines_bodies(temp_headline, temp_body): headlines = [] bodies = [] for i in", "of pre-processing for validation\") if not (os.path.exists(base_preprocess_path + \"/\" + \"validation_headlines.p\") and os.path.exists(", "Main file for program execution \"\"\" from src.data_import import FakeNewsData from src.train_validation_split import", "output_file(test_preprocessed_headlines, base_preprocess_path + \"/\" + \"test_headlines.p\") output_file(test_preprocessed_bodies, base_preprocess_path + \"/\" + \"test_bodies.p\") else:", "validation\") if not (os.path.exists(base_preprocess_path + \"/\" + \"validation_headlines.p\") and os.path.exists( 
base_preprocess_path + \"/\"", "\"training_bodies.p\") # Preprocess the validation print(\"Start of pre-processing for validation\") if not (os.path.exists(base_preprocess_path", "\"/\" + \"training_bodies.p\")): preprocessed_train_data = Preprocess(headline=train_stances, body=train.articleBody, preprocess_type=\"lemma\") train_preprocessed_headlines, train_preprocessed_bodies = preprocessed_train_data.get_clean_headlines_and_bodies() output_file(train_preprocessed_headlines,", "extraction and combining them for the models print(\"Feature extraction for train\") train_features =", "(os.path.exists(base_preprocess_path + \"/\" + \"validation_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"validation_bodies.p\")): preprocessed_validation_data", "if not (os.path.exists(base_preprocess_path + \"/\" + \"validation_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" +", "+ \"train_features.p\") and os.path.exists( base_feature_path + \"/\" + \"validation_features.p\") and os.path.exists( base_feature_path +", "''' Used read_from_csv in utils to know the actual labels and the predicted", "not (os.path.exists(base_preprocess_path + \"/\" + \"test_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"test_bodies.p\")):", "\"/\" + \"training_bodies.p\") else: train_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"training_headlines.p\") train_preprocessed_bodies =", "validation\") validation_features = Features(validation_preprocessed_headlines, validation_preprocessed_bodies, validation_headlines, validation_bodies) # Sentence weighting for validation validation_sentence_weights", "\"train_features.p\") final_validation_features = sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A output_file(final_validation_features, base_feature_path + \"/\" + \"validation_features.p\") final_test_features =", "input_file(base_preprocess_path + \"/\" + \"test_headlines.p\") test_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"test_bodies.p\") #", "print(\"Start of pre-processing for train\") if not (os.path.exists(base_preprocess_path + \"/\" + \"training_headlines.p\") and", "import scipy.sparse as sp import os import time # Global Variables trainStancePath =", "t1 - t0) # Target variables train_target_labels = target_labels(train_stances) validation_target_labels = target_labels(validation_stances) test_target_labels", "print(\"Data Splitting\") train_validation_split = DataSplit(ids=ids, headline=train.headlineInstances, split_size=0.8) train_stances, validation_stances = train_validation_split.split() # Preprocess", "\"/\" + \"train_features.p\") and os.path.exists( base_feature_path + \"/\" + \"validation_features.p\") and os.path.exists( base_feature_path", "+ \"/\" + \"test_features.p\") t1 = time.time() print(\"Time for feature extraction is:\", t1", "the train print(\"Start of pre-processing for train\") if not (os.path.exists(base_preprocess_path + \"/\" +", "+ \"train_features.p\") final_validation_features = input_file(base_feature_path + \"/\" + \"validation_features.p\") final_test_features = input_file(base_feature_path +", "the train and validation splits according to our split size print(\"Data Splitting\") train_validation_split", "else: train_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"training_headlines.p\") train_preprocessed_bodies = input_file(base_preprocess_path + \"/\"", "range(len(temp_headline)): bodies.append(temp_body[int(temp_headline[i][primary_id])]) 
headlines.append(temp_headline[i][headline]) return headlines, bodies ''' This files combine all the data", "models models.get_lr() models.get_dt() models.get_nb() models.get_rf() ''' Used read_from_csv in utils to know the", "+ \"/\" + \"training_headlines.p\") output_file(train_preprocessed_bodies, base_preprocess_path + \"/\" + \"training_bodies.p\") else: train_preprocessed_headlines =", "+ \"/\" + \"test_bodies.p\") else: test_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"test_headlines.p\") test_preprocessed_bodies", "variables train_target_labels = target_labels(train_stances) validation_target_labels = target_labels(validation_stances) test_target_labels = target_labels(test.headlineInstances) # Modelling the", "for test\") test_features = Features(test_preprocessed_headlines, test_preprocessed_bodies, test_headlines, test_bodies) # Sentence weighting for test", "import Models from src.score import LABELS from src.utils import input_file, output_file import scipy.sparse", "graphs for the report. ''' t2 = time.time() print(\"Time for the total is:\",", "+ \"/\" + \"test_bodies.p\") # Split headlines and bodies for train, validation and", "if not (os.path.exists(base_feature_path + \"/\" + \"train_features.p\") and os.path.exists( base_feature_path + \"/\" +", "\"validation_features.p\") final_test_features = input_file(base_feature_path + \"/\" + \"test_features.p\") t1 = time.time() print(\"Time for", "validation_features.sentence_weighting() print(\"Feature extraction for test\") test_features = Features(test_preprocessed_headlines, test_preprocessed_bodies, test_headlines, test_bodies) # Sentence", "Sentence weighting for train train_sentence_weights = train_features.sentence_weighting() print(\"Feature extraction for validation\") validation_features =", "Preprocess the test print(\"Start of pre-processing for test\") if not (os.path.exists(base_preprocess_path + \"/\"", "import DataSplit from src.preprocess import Preprocess from src.feature_extraction import Features from src.models import", "processing, feature transformation, modelling and visualization. Check README for clear understanding of what", "base_preprocess_path + \"/\" + \"test_headlines.p\") output_file(test_preprocessed_bodies, base_preprocess_path + \"/\" + \"test_bodies.p\") else: test_preprocessed_headlines", "target_labels(train_stances) validation_target_labels = target_labels(validation_stances) test_target_labels = target_labels(test.headlineInstances) # Modelling the features print(\"Start of", "spliting, pre processing, feature transformation, modelling and visualization. 
Check README for clear understanding", "actual labels and the predicted labels to produce the correctness visualizations graphs for", "output_file(validation_preprocessed_bodies, base_preprocess_path + \"/\" + \"validation_bodies.p\") else: validation_preprocessed_headlines = input_file(base_preprocess_path + \"/\" +", "test_preprocessed_bodies = preprocessed_test_data.get_clean_headlines_and_bodies() output_file(test_preprocessed_headlines, base_preprocess_path + \"/\" + \"test_headlines.p\") output_file(test_preprocessed_bodies, base_preprocess_path + \"/\"", "combining them for the models print(\"Feature extraction for train\") train_features = Features(train_preprocessed_headlines, train_preprocessed_bodies,", "Preprocess(headline=validation_stances, body=train.articleBody, preprocess_type=\"lemma\") validation_preprocessed_headlines, validation_preprocessed_bodies = preprocessed_validation_data.get_clean_headlines_and_bodies() output_file(validation_preprocessed_headlines, base_preprocess_path + \"/\" + \"validation_headlines.p\")", "time.time() print(\"Time for feature extraction is:\", t1 - t0) # Target variables train_target_labels", "models.get_rf() ''' Used read_from_csv in utils to know the actual labels and the", "# Split headlines and bodies for train, validation and test train_headlines, train_bodies =", "train_preprocessed_headlines, train_preprocessed_bodies = preprocessed_train_data.get_clean_headlines_and_bodies() output_file(train_preprocessed_headlines, base_preprocess_path + \"/\" + \"training_headlines.p\") output_file(train_preprocessed_bodies, base_preprocess_path +", "our split size print(\"Data Splitting\") train_validation_split = DataSplit(ids=ids, headline=train.headlineInstances, split_size=0.8) train_stances, validation_stances =", "= preprocessed_train_data.get_clean_headlines_and_bodies() output_file(train_preprocessed_headlines, base_preprocess_path + \"/\" + \"training_headlines.p\") output_file(train_preprocessed_bodies, base_preprocess_path + \"/\" +", "bodies.append(temp_body[int(temp_headline[i][primary_id])]) headlines.append(temp_headline[i][headline]) return headlines, bodies ''' This files combine all the data mining", "understanding of what is happening. 
''' if __name__ == \"__main__\": t0 = time.time()", "test_bodies) # Sentence weighting for train train_sentence_weights = train_features.sentence_weighting() print(\"Feature extraction for validation\")", "for test\") if not (os.path.exists(base_preprocess_path + \"/\" + \"test_headlines.p\") and os.path.exists( base_preprocess_path +", "\"/\" + \"validation_headlines.p\") output_file(validation_preprocessed_bodies, base_preprocess_path + \"/\" + \"validation_bodies.p\") else: validation_preprocessed_headlines = input_file(base_preprocess_path", "the 4 models models.get_lr() models.get_dt() models.get_nb() models.get_rf() ''' Used read_from_csv in utils to", "testBodyPath) # Extracting IDs for data splitting ids = list(train.articleBody.keys()) # The DataSplit", "+ \"/\" + \"training_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"training_bodies.p\")): preprocessed_train_data =", "+ \"validation_bodies.p\")): preprocessed_validation_data = Preprocess(headline=validation_stances, body=train.articleBody, preprocess_type=\"lemma\") validation_preprocessed_headlines, validation_preprocessed_bodies = preprocessed_validation_data.get_clean_headlines_and_bodies() output_file(validation_preprocessed_headlines, base_preprocess_path", "train_stances, validation_stances = train_validation_split.split() # Preprocess the train print(\"Start of pre-processing for train\")", "final_train_features = sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A output_file(final_train_features, base_feature_path + \"/\" + \"train_features.p\") final_validation_features = sp.bmat([[validation_tfidf_weights,", "preprocessed_test_data = Preprocess(headline=test.headlineInstances, body=test.articleBody, preprocess_type=\"lemma\") test_preprocessed_headlines, test_preprocessed_bodies = preprocessed_test_data.get_clean_headlines_and_bodies() output_file(test_preprocessed_headlines, base_preprocess_path + \"/\"", "\"training_bodies.p\") else: train_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"training_headlines.p\") train_preprocessed_bodies = input_file(base_preprocess_path +", "\"/\" + \"validation_bodies.p\")): preprocessed_validation_data = Preprocess(headline=validation_stances, body=train.articleBody, preprocess_type=\"lemma\") validation_preprocessed_headlines, validation_preprocessed_bodies = preprocessed_validation_data.get_clean_headlines_and_bodies() output_file(validation_preprocessed_headlines,", "Preprocess the train print(\"Start of pre-processing for train\") if not (os.path.exists(base_preprocess_path + \"/\"", "an inout for the models final_train_features = sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A output_file(final_train_features, base_feature_path + \"/\"", "train_features.sentence_weighting() print(\"Feature extraction for validation\") validation_features = Features(validation_preprocessed_headlines, validation_preprocessed_bodies, validation_headlines, validation_bodies) # Sentence", "= input_file(base_feature_path + \"/\" + \"train_features.p\") final_validation_features = input_file(base_feature_path + \"/\" + \"validation_features.p\")", "Split headlines and bodies for train, validation and test train_headlines, train_bodies = headlines_bodies(train_stances,", "models.get_dt() models.get_nb() models.get_rf() ''' Used read_from_csv in utils to know the actual labels", "\"test_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"test_bodies.p\")): preprocessed_test_data = 
Preprocess(headline=test.headlineInstances, body=test.articleBody, preprocess_type=\"lemma\")", "\"/\" + \"test_features.p\") t1 = time.time() print(\"Time for feature extraction is:\", t1 -", "test_tfidf_weights = train_features.tfidf_extraction( validation_headlines, validation_bodies, test_headlines, test_bodies) # Sentence weighting for train train_sentence_weights", "print(\"Time for feature extraction is:\", t1 - t0) # Target variables train_target_labels =", "return headlines, bodies ''' This files combine all the data mining part starting", "t0 = time.time() # Importing the data train = FakeNewsData(trainStancePath, trainBodyPath) test =", "headlines.append(temp_headline[i][headline]) return headlines, bodies ''' This files combine all the data mining part", "+ \"validation_bodies.p\") # Preprocess the test print(\"Start of pre-processing for test\") if not", "\"test_features.p\")): # Feature extraction and combining them for the models print(\"Feature extraction for", "range(len(stances)): labels.append(LABELS.index(stances[i][stance])) return labels def headlines_bodies(temp_headline, temp_body): headlines = [] bodies = []", "\"\"\" COMP9417 Assignment: Fake news Challenge Authors: <NAME>(z5222766), <NAME> (z5058240), <NAME>(z5173917), <NAME> (z5113901)", "base_preprocess_path + \"/\" + \"training_bodies.p\") else: train_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"training_headlines.p\")", "train.articleBody) test_headlines, test_bodies = headlines_bodies(test.headlineInstances, test.articleBody) if not (os.path.exists(base_feature_path + \"/\" + \"train_features.p\")", "Preprocess from src.feature_extraction import Features from src.models import Models from src.score import LABELS", "= train_features.sentence_weighting() print(\"Feature extraction for validation\") validation_features = Features(validation_preprocessed_headlines, validation_preprocessed_bodies, validation_headlines, validation_bodies) #", "= sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A output_file(final_train_features, base_feature_path + \"/\" + \"train_features.p\") final_validation_features = sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A", "print(\"Start of pre-processing for test\") if not (os.path.exists(base_preprocess_path + \"/\" + \"test_headlines.p\") and", "\"training_bodies.p\")): preprocessed_train_data = Preprocess(headline=train_stances, body=train.articleBody, preprocess_type=\"lemma\") train_preprocessed_headlines, train_preprocessed_bodies = preprocessed_train_data.get_clean_headlines_and_bodies() output_file(train_preprocessed_headlines, base_preprocess_path +", "src.train_validation_split import DataSplit from src.preprocess import Preprocess from src.feature_extraction import Features from src.models", "\"preprocessed_data\" base_feature_path = \"final_features\" output = \"output\" def target_labels(stances): labels = [] for", "= input_file(base_preprocess_path + \"/\" + \"training_bodies.p\") # Preprocess the validation print(\"Start of pre-processing", "input_file, output_file import scipy.sparse as sp import os import time # Global Variables", "return labels def headlines_bodies(temp_headline, temp_body): headlines = [] bodies = [] for i", "clear understanding of what is happening. 
''' if __name__ == \"__main__\": t0 =", "\"/\" + \"test_bodies.p\") else: test_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"test_headlines.p\") test_preprocessed_bodies =", "test_target_labels = target_labels(test.headlineInstances) # Modelling the features print(\"Start of Modelling\") models = Models(final_train_features,", "sp import os import time # Global Variables trainStancePath = \"data/train_stances.csv\" testStancePath =", "train_validation_split.split() # Preprocess the train print(\"Start of pre-processing for train\") if not (os.path.exists(base_preprocess_path", "labels.append(LABELS.index(stances[i][stance])) return labels def headlines_bodies(temp_headline, temp_body): headlines = [] bodies = [] for", "visualization. Check README for clear understanding of what is happening. ''' if __name__", "+ \"training_headlines.p\") train_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"training_bodies.p\") # Preprocess the validation", "Modelling the features print(\"Start of Modelling\") models = Models(final_train_features, final_validation_features, final_test_features, train_target_labels, validation_target_labels,", "\"/\" + \"test_bodies.p\") # Split headlines and bodies for train, validation and test", "train train_sentence_weights = train_features.sentence_weighting() print(\"Feature extraction for validation\") validation_features = Features(validation_preprocessed_headlines, validation_preprocessed_bodies, validation_headlines,", "test_bodies) # Sentence weighting for test test_sentence_weights = test_features.sentence_weighting() # Combine the features", "the validation print(\"Start of pre-processing for validation\") if not (os.path.exists(base_preprocess_path + \"/\" +", "feature extraction is:\", t1 - t0) # Target variables train_target_labels = target_labels(train_stances) validation_target_labels", "train_preprocessed_bodies, train_headlines, train_bodies) # TF-IDF weight extraction train_tfidf_weights, validation_tfidf_weights, test_tfidf_weights = train_features.tfidf_extraction( validation_headlines,", "not (os.path.exists(base_preprocess_path + \"/\" + \"validation_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"validation_bodies.p\")):", "= sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A output_file(final_test_features, base_feature_path + \"/\" + \"test_features.p\") else: print(\"Feature Extraction\") final_train_features", "<NAME> (z5058240), <NAME>(z5173917), <NAME> (z5113901) main.py: Main file for program execution \"\"\" from", "headlines = [] bodies = [] for i in range(len(temp_headline)): bodies.append(temp_body[int(temp_headline[i][primary_id])]) headlines.append(temp_headline[i][headline]) return", "DataSplit(ids=ids, headline=train.headlineInstances, split_size=0.8) train_stances, validation_stances = train_validation_split.split() # Preprocess the train print(\"Start of", "\"/\" + \"training_headlines.p\") output_file(train_preprocessed_bodies, base_preprocess_path + \"/\" + \"training_bodies.p\") else: train_preprocessed_headlines = input_file(base_preprocess_path", "# Global Variables trainStancePath = \"data/train_stances.csv\" testStancePath = \"data/competition_test_stances.csv\" trainBodyPath = \"data/train_bodies.csv\" testBodyPath", "the features print(\"Start of Modelling\") models = Models(final_train_features, final_validation_features, final_test_features, train_target_labels, validation_target_labels, test_target_labels)", "headlines_bodies(train_stances, 
train.articleBody) validation_headlines, validation_bodies = headlines_bodies(validation_stances, train.articleBody) test_headlines, test_bodies = headlines_bodies(test.headlineInstances, test.articleBody) if", "\"/\" + \"training_bodies.p\") # Preprocess the validation print(\"Start of pre-processing for validation\") if", "\"test_features.p\") else: print(\"Feature Extraction\") final_train_features = input_file(base_feature_path + \"/\" + \"train_features.p\") final_validation_features =", "headlines_bodies(test.headlineInstances, test.articleBody) if not (os.path.exists(base_feature_path + \"/\" + \"train_features.p\") and os.path.exists( base_feature_path +", "src.data_import import FakeNewsData from src.train_validation_split import DataSplit from src.preprocess import Preprocess from src.feature_extraction", "train\") train_features = Features(train_preprocessed_headlines, train_preprocessed_bodies, train_headlines, train_bodies) # TF-IDF weight extraction train_tfidf_weights, validation_tfidf_weights,", "+ \"/\" + \"train_features.p\") final_validation_features = input_file(base_feature_path + \"/\" + \"validation_features.p\") final_test_features =", "= [] for i in range(len(stances)): labels.append(LABELS.index(stances[i][stance])) return labels def headlines_bodies(temp_headline, temp_body): headlines", "target_labels(stances): labels = [] for i in range(len(stances)): labels.append(LABELS.index(stances[i][stance])) return labels def headlines_bodies(temp_headline,", "train_headlines, train_bodies) # TF-IDF weight extraction train_tfidf_weights, validation_tfidf_weights, test_tfidf_weights = train_features.tfidf_extraction( validation_headlines, validation_bodies,", "list(train.articleBody.keys()) # The DataSplit generates the train and validation splits according to our", "t1 = time.time() print(\"Time for feature extraction is:\", t1 - t0) # Target", "import input_file, output_file import scipy.sparse as sp import os import time # Global", "\"final_features\" output = \"output\" def target_labels(stances): labels = [] for i in range(len(stances)):", "read_from_csv in utils to know the actual labels and the predicted labels to", "headlines and bodies for train, validation and test train_headlines, train_bodies = headlines_bodies(train_stances, train.articleBody)", "\"/\" + \"validation_features.p\") and os.path.exists( base_feature_path + \"/\" + \"test_features.p\")): # Feature extraction", "train_bodies) # TF-IDF weight extraction train_tfidf_weights, validation_tfidf_weights, test_tfidf_weights = train_features.tfidf_extraction( validation_headlines, validation_bodies, test_headlines,", "preprocess_type=\"lemma\") validation_preprocessed_headlines, validation_preprocessed_bodies = preprocessed_validation_data.get_clean_headlines_and_bodies() output_file(validation_preprocessed_headlines, base_preprocess_path + \"/\" + \"validation_headlines.p\") output_file(validation_preprocessed_bodies, base_preprocess_path", "train_target_labels, validation_target_labels, test_target_labels) # Calling the 4 models models.get_lr() models.get_dt() models.get_nb() models.get_rf() '''", "= Features(validation_preprocessed_headlines, validation_preprocessed_bodies, validation_headlines, validation_bodies) # Sentence weighting for validation validation_sentence_weights = validation_features.sentence_weighting()", "train and validation splits according to our split size print(\"Data Splitting\") train_validation_split =", "output_file(final_validation_features, base_feature_path + \"/\" + 
\"validation_features.p\") final_test_features = sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A output_file(final_test_features, base_feature_path +", "= Preprocess(headline=train_stances, body=train.articleBody, preprocess_type=\"lemma\") train_preprocessed_headlines, train_preprocessed_bodies = preprocessed_train_data.get_clean_headlines_and_bodies() output_file(train_preprocessed_headlines, base_preprocess_path + \"/\" +", "= input_file(base_feature_path + \"/\" + \"validation_features.p\") final_test_features = input_file(base_feature_path + \"/\" + \"test_features.p\")", "for the models print(\"Feature extraction for train\") train_features = Features(train_preprocessed_headlines, train_preprocessed_bodies, train_headlines, train_bodies)", "+ \"/\" + \"validation_bodies.p\") # Preprocess the test print(\"Start of pre-processing for test\")", "= preprocessed_validation_data.get_clean_headlines_and_bodies() output_file(validation_preprocessed_headlines, base_preprocess_path + \"/\" + \"validation_headlines.p\") output_file(validation_preprocessed_bodies, base_preprocess_path + \"/\" +", "''' if __name__ == \"__main__\": t0 = time.time() # Importing the data train", "according to our split size print(\"Data Splitting\") train_validation_split = DataSplit(ids=ids, headline=train.headlineInstances, split_size=0.8) train_stances,", "and combining them for the models print(\"Feature extraction for train\") train_features = Features(train_preprocessed_headlines,", "train_preprocessed_bodies = preprocessed_train_data.get_clean_headlines_and_bodies() output_file(train_preprocessed_headlines, base_preprocess_path + \"/\" + \"training_headlines.p\") output_file(train_preprocessed_bodies, base_preprocess_path + \"/\"", "pre-processing for validation\") if not (os.path.exists(base_preprocess_path + \"/\" + \"validation_headlines.p\") and os.path.exists( base_preprocess_path", "of what is happening. 
''' if __name__ == \"__main__\": t0 = time.time() #", "# Preprocess the train print(\"Start of pre-processing for train\") if not (os.path.exists(base_preprocess_path +", "trainBodyPath) test = FakeNewsData(testStancePath, testBodyPath) # Extracting IDs for data splitting ids =", "in range(len(stances)): labels.append(LABELS.index(stances[i][stance])) return labels def headlines_bodies(temp_headline, temp_body): headlines = [] bodies =", "Modelling\") models = Models(final_train_features, final_validation_features, final_test_features, train_target_labels, validation_target_labels, test_target_labels) # Calling the 4", "extraction for test\") test_features = Features(test_preprocessed_headlines, test_preprocessed_bodies, test_headlines, test_bodies) # Sentence weighting for", "final_validation_features, final_test_features, train_target_labels, validation_target_labels, test_target_labels) # Calling the 4 models models.get_lr() models.get_dt() models.get_nb()", "= input_file(base_preprocess_path + \"/\" + \"training_headlines.p\") train_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"training_bodies.p\")", "train_sentence_weights = train_features.sentence_weighting() print(\"Feature extraction for validation\") validation_features = Features(validation_preprocessed_headlines, validation_preprocessed_bodies, validation_headlines, validation_bodies)", "\"/\" + \"validation_features.p\") final_test_features = sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A output_file(final_test_features, base_feature_path + \"/\" + \"test_features.p\")", "+ \"test_features.p\") else: print(\"Feature Extraction\") final_train_features = input_file(base_feature_path + \"/\" + \"train_features.p\") final_validation_features", "+ \"training_bodies.p\")): preprocessed_train_data = Preprocess(headline=train_stances, body=train.articleBody, preprocess_type=\"lemma\") train_preprocessed_headlines, train_preprocessed_bodies = preprocessed_train_data.get_clean_headlines_and_bodies() output_file(train_preprocessed_headlines, base_preprocess_path", "for train, validation and test train_headlines, train_bodies = headlines_bodies(train_stances, train.articleBody) validation_headlines, validation_bodies =", "Assignment: Fake news Challenge Authors: <NAME>(z5222766), <NAME> (z5058240), <NAME>(z5173917), <NAME> (z5113901) main.py: Main", "# Sentence weighting for test test_sentence_weights = test_features.sentence_weighting() # Combine the features to", "COMP9417 Assignment: Fake news Challenge Authors: <NAME>(z5222766), <NAME> (z5058240), <NAME>(z5173917), <NAME> (z5113901) main.py:", "+ \"/\" + \"test_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"test_bodies.p\")): preprocessed_test_data =", "(os.path.exists(base_feature_path + \"/\" + \"train_features.p\") and os.path.exists( base_feature_path + \"/\" + \"validation_features.p\") and", "body=train.articleBody, preprocess_type=\"lemma\") validation_preprocessed_headlines, validation_preprocessed_bodies = preprocessed_validation_data.get_clean_headlines_and_bodies() output_file(validation_preprocessed_headlines, base_preprocess_path + \"/\" + \"validation_headlines.p\") output_file(validation_preprocessed_bodies,", "ids = list(train.articleBody.keys()) # The DataSplit generates the train and validation splits according", "Global Variables trainStancePath = \"data/train_stances.csv\" testStancePath = \"data/competition_test_stances.csv\" trainBodyPath = \"data/train_bodies.csv\" testBodyPath =", "= 
FakeNewsData(testStancePath, testBodyPath) # Extracting IDs for data splitting ids = list(train.articleBody.keys()) #", "preprocessed_validation_data.get_clean_headlines_and_bodies() output_file(validation_preprocessed_headlines, base_preprocess_path + \"/\" + \"validation_headlines.p\") output_file(validation_preprocessed_bodies, base_preprocess_path + \"/\" + \"validation_bodies.p\")", "data splitting ids = list(train.articleBody.keys()) # The DataSplit generates the train and validation", "+ \"/\" + \"validation_headlines.p\") validation_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"validation_bodies.p\") # Preprocess", "+ \"validation_features.p\") final_test_features = sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A output_file(final_test_features, base_feature_path + \"/\" + \"test_features.p\") else:", "FakeNewsData from src.train_validation_split import DataSplit from src.preprocess import Preprocess from src.feature_extraction import Features", "\"test_bodies.p\")): preprocessed_test_data = Preprocess(headline=test.headlineInstances, body=test.articleBody, preprocess_type=\"lemma\") test_preprocessed_headlines, test_preprocessed_bodies = preprocessed_test_data.get_clean_headlines_and_bodies() output_file(test_preprocessed_headlines, base_preprocess_path +", "scipy.sparse as sp import os import time # Global Variables trainStancePath = \"data/train_stances.csv\"", "i in range(len(stances)): labels.append(LABELS.index(stances[i][stance])) return labels def headlines_bodies(temp_headline, temp_body): headlines = [] bodies", "splitting ids = list(train.articleBody.keys()) # The DataSplit generates the train and validation splits", "test = FakeNewsData(testStancePath, testBodyPath) # Extracting IDs for data splitting ids = list(train.articleBody.keys())", "from src.preprocess import Preprocess from src.feature_extraction import Features from src.models import Models from", "+ \"/\" + \"test_bodies.p\")): preprocessed_test_data = Preprocess(headline=test.headlineInstances, body=test.articleBody, preprocess_type=\"lemma\") test_preprocessed_headlines, test_preprocessed_bodies = preprocessed_test_data.get_clean_headlines_and_bodies()", "headlines_bodies(temp_headline, temp_body): headlines = [] bodies = [] for i in range(len(temp_headline)): bodies.append(temp_body[int(temp_headline[i][primary_id])])", "input_file(base_preprocess_path + \"/\" + \"validation_headlines.p\") validation_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"validation_bodies.p\") #", "input_file(base_preprocess_path + \"/\" + \"validation_bodies.p\") # Preprocess the test print(\"Start of pre-processing for", "Importing the data train = FakeNewsData(trainStancePath, trainBodyPath) test = FakeNewsData(testStancePath, testBodyPath) # Extracting", "\"/\" + \"validation_features.p\") final_test_features = input_file(base_feature_path + \"/\" + \"test_features.p\") t1 = time.time()", "input_file(base_preprocess_path + \"/\" + \"training_bodies.p\") # Preprocess the validation print(\"Start of pre-processing for", "data mining part starting from Data importing, spliting, pre processing, feature transformation, modelling", "extraction for train\") train_features = Features(train_preprocessed_headlines, train_preprocessed_bodies, train_headlines, train_bodies) # TF-IDF weight extraction", "train_validation_split = DataSplit(ids=ids, headline=train.headlineInstances, split_size=0.8) train_stances, validation_stances = train_validation_split.split() # Preprocess the train", 
"output_file(test_preprocessed_bodies, base_preprocess_path + \"/\" + \"test_bodies.p\") else: test_preprocessed_headlines = input_file(base_preprocess_path + \"/\" +", "files combine all the data mining part starting from Data importing, spliting, pre", "+ \"train_features.p\") final_validation_features = sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A output_file(final_validation_features, base_feature_path + \"/\" + \"validation_features.p\") final_test_features", "+ \"/\" + \"validation_features.p\") and os.path.exists( base_feature_path + \"/\" + \"test_features.p\")): # Feature", "# Feature extraction and combining them for the models print(\"Feature extraction for train\")", "IDs for data splitting ids = list(train.articleBody.keys()) # The DataSplit generates the train", "print(\"Feature extraction for test\") test_features = Features(test_preprocessed_headlines, test_preprocessed_bodies, test_headlines, test_bodies) # Sentence weighting", "validation_bodies = headlines_bodies(validation_stances, train.articleBody) test_headlines, test_bodies = headlines_bodies(test.headlineInstances, test.articleBody) if not (os.path.exists(base_feature_path +", "temp_body): headlines = [] bodies = [] for i in range(len(temp_headline)): bodies.append(temp_body[int(temp_headline[i][primary_id])]) headlines.append(temp_headline[i][headline])", "and visualization. Check README for clear understanding of what is happening. ''' if", "\"data/train_stances.csv\" testStancePath = \"data/competition_test_stances.csv\" trainBodyPath = \"data/train_bodies.csv\" testBodyPath = \"data/competition_test_bodies.csv\" # header attributes", "not (os.path.exists(base_feature_path + \"/\" + \"train_features.p\") and os.path.exists( base_feature_path + \"/\" + \"validation_features.p\")", "from src.score import LABELS from src.utils import input_file, output_file import scipy.sparse as sp", "\"/\" + \"training_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"training_bodies.p\")): preprocessed_train_data = Preprocess(headline=train_stances,", "= preprocessed_test_data.get_clean_headlines_and_bodies() output_file(test_preprocessed_headlines, base_preprocess_path + \"/\" + \"test_headlines.p\") output_file(test_preprocessed_bodies, base_preprocess_path + \"/\" +", "time # Global Variables trainStancePath = \"data/train_stances.csv\" testStancePath = \"data/competition_test_stances.csv\" trainBodyPath = \"data/train_bodies.csv\"", "test\") test_features = Features(test_preprocessed_headlines, test_preprocessed_bodies, test_headlines, test_bodies) # Sentence weighting for test test_sentence_weights", "labels = [] for i in range(len(stances)): labels.append(LABELS.index(stances[i][stance])) return labels def headlines_bodies(temp_headline, temp_body):", "test\") if not (os.path.exists(base_preprocess_path + \"/\" + \"test_headlines.p\") and os.path.exists( base_preprocess_path + \"/\"", "= DataSplit(ids=ids, headline=train.headlineInstances, split_size=0.8) train_stances, validation_stances = train_validation_split.split() # Preprocess the train print(\"Start", "from src.feature_extraction import Features from src.models import Models from src.score import LABELS from", "base_feature_path + \"/\" + \"test_features.p\")): # Feature extraction and combining them for the", "in range(len(temp_headline)): bodies.append(temp_body[int(temp_headline[i][primary_id])]) headlines.append(temp_headline[i][headline]) return headlines, bodies ''' This files combine all the", "import Preprocess 
from src.feature_extraction import Features from src.models import Models from src.score import", "Splitting\") train_validation_split = DataSplit(ids=ids, headline=train.headlineInstances, split_size=0.8) train_stances, validation_stances = train_validation_split.split() # Preprocess the", "test_headlines, test_bodies = headlines_bodies(test.headlineInstances, test.articleBody) if not (os.path.exists(base_feature_path + \"/\" + \"train_features.p\") and", "the report. ''' t2 = time.time() print(\"Time for the total is:\", t2 -", "test_headlines, test_bodies) # Sentence weighting for train train_sentence_weights = train_features.sentence_weighting() print(\"Feature extraction for", "+ \"/\" + \"validation_features.p\") final_test_features = sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A output_file(final_test_features, base_feature_path + \"/\" +", "for data splitting ids = list(train.articleBody.keys()) # The DataSplit generates the train and", "test_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"test_bodies.p\") # Split headlines and bodies for", "Check README for clear understanding of what is happening. ''' if __name__ ==", "validation_headlines, validation_bodies, test_headlines, test_bodies) # Sentence weighting for train train_sentence_weights = train_features.sentence_weighting() print(\"Feature", "4 models models.get_lr() models.get_dt() models.get_nb() models.get_rf() ''' Used read_from_csv in utils to know", "all the data mining part starting from Data importing, spliting, pre processing, feature", "Combine the features to prepare them as an inout for the models final_train_features", "bodies for train, validation and test train_headlines, train_bodies = headlines_bodies(train_stances, train.articleBody) validation_headlines, validation_bodies", "+ \"test_headlines.p\") test_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"test_bodies.p\") # Split headlines and", "and test train_headlines, train_bodies = headlines_bodies(train_stances, train.articleBody) validation_headlines, validation_bodies = headlines_bodies(validation_stances, train.articleBody) test_headlines,", "for feature extraction is:\", t1 - t0) # Target variables train_target_labels = target_labels(train_stances)", "test_features.sentence_weighting() # Combine the features to prepare them as an inout for the", "happening. 
''' if __name__ == \"__main__\": t0 = time.time() # Importing the data", "sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A output_file(final_test_features, base_feature_path + \"/\" + \"test_features.p\") else: print(\"Feature Extraction\") final_train_features =", "test test_sentence_weights = test_features.sentence_weighting() # Combine the features to prepare them as an", "preprocessed_validation_data = Preprocess(headline=validation_stances, body=train.articleBody, preprocess_type=\"lemma\") validation_preprocessed_headlines, validation_preprocessed_bodies = preprocessed_validation_data.get_clean_headlines_and_bodies() output_file(validation_preprocessed_headlines, base_preprocess_path + \"/\"", "headline = \"Headline\" base_preprocess_path = \"preprocessed_data\" base_feature_path = \"final_features\" output = \"output\" def", "the actual labels and the predicted labels to produce the correctness visualizations graphs", "pre-processing for test\") if not (os.path.exists(base_preprocess_path + \"/\" + \"test_headlines.p\") and os.path.exists( base_preprocess_path", "+ \"test_headlines.p\") output_file(test_preprocessed_bodies, base_preprocess_path + \"/\" + \"test_bodies.p\") else: test_preprocessed_headlines = input_file(base_preprocess_path +", "\"/\" + \"validation_headlines.p\") validation_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"validation_bodies.p\") # Preprocess the", "+ \"/\" + \"test_headlines.p\") test_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"test_bodies.p\") # Split", "+ \"/\" + \"validation_bodies.p\")): preprocessed_validation_data = Preprocess(headline=validation_stances, body=train.articleBody, preprocess_type=\"lemma\") validation_preprocessed_headlines, validation_preprocessed_bodies = preprocessed_validation_data.get_clean_headlines_and_bodies()", "test_preprocessed_bodies, test_headlines, test_bodies) # Sentence weighting for test test_sentence_weights = test_features.sentence_weighting() # Combine", "input_file(base_preprocess_path + \"/\" + \"test_bodies.p\") # Split headlines and bodies for train, validation", "src.score import LABELS from src.utils import input_file, output_file import scipy.sparse as sp import", "+ \"validation_bodies.p\") else: validation_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"validation_headlines.p\") validation_preprocessed_bodies = input_file(base_preprocess_path", "produce the correctness visualizations graphs for the report. ''' t2 = time.time() print(\"Time", "# TF-IDF weight extraction train_tfidf_weights, validation_tfidf_weights, test_tfidf_weights = train_features.tfidf_extraction( validation_headlines, validation_bodies, test_headlines, test_bodies)", "\"/\" + \"validation_bodies.p\") # Preprocess the test print(\"Start of pre-processing for test\") if", "from Data importing, spliting, pre processing, feature transformation, modelling and visualization. 
Check README", "+ \"/\" + \"validation_headlines.p\") output_file(validation_preprocessed_bodies, base_preprocess_path + \"/\" + \"validation_bodies.p\") else: validation_preprocessed_headlines =", "them for the models print(\"Feature extraction for train\") train_features = Features(train_preprocessed_headlines, train_preprocessed_bodies, train_headlines,", "and os.path.exists( base_feature_path + \"/\" + \"validation_features.p\") and os.path.exists( base_feature_path + \"/\" +", "\"/\" + \"test_headlines.p\") test_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"test_bodies.p\") # Split headlines", "if __name__ == \"__main__\": t0 = time.time() # Importing the data train =", "validation_target_labels, test_target_labels) # Calling the 4 models models.get_lr() models.get_dt() models.get_nb() models.get_rf() ''' Used", "train_target_labels = target_labels(train_stances) validation_target_labels = target_labels(validation_stances) test_target_labels = target_labels(test.headlineInstances) # Modelling the features", "bodies = [] for i in range(len(temp_headline)): bodies.append(temp_body[int(temp_headline[i][primary_id])]) headlines.append(temp_headline[i][headline]) return headlines, bodies '''", "to produce the correctness visualizations graphs for the report. ''' t2 = time.time()", "import os import time # Global Variables trainStancePath = \"data/train_stances.csv\" testStancePath = \"data/competition_test_stances.csv\"", "\"train_features.p\") final_validation_features = input_file(base_feature_path + \"/\" + \"validation_features.p\") final_test_features = input_file(base_feature_path + \"/\"", "# header attributes primary_id = \"Body ID\" stance = \"Stance\" body = \"articleBody\"", "Extracting IDs for data splitting ids = list(train.articleBody.keys()) # The DataSplit generates the", "train_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"training_headlines.p\") train_preprocessed_bodies = input_file(base_preprocess_path + \"/\" +", "= \"data/competition_test_stances.csv\" trainBodyPath = \"data/train_bodies.csv\" testBodyPath = \"data/competition_test_bodies.csv\" # header attributes primary_id =", "extraction for validation\") validation_features = Features(validation_preprocessed_headlines, validation_preprocessed_bodies, validation_headlines, validation_bodies) # Sentence weighting for", "from src.utils import input_file, output_file import scipy.sparse as sp import os import time", "Sentence weighting for validation validation_sentence_weights = validation_features.sentence_weighting() print(\"Feature extraction for test\") test_features =", "train_features.tfidf_extraction( validation_headlines, validation_bodies, test_headlines, test_bodies) # Sentence weighting for train train_sentence_weights = train_features.sentence_weighting()", "to our split size print(\"Data Splitting\") train_validation_split = DataSplit(ids=ids, headline=train.headlineInstances, split_size=0.8) train_stances, validation_stances", "+ \"/\" + \"training_bodies.p\")): preprocessed_train_data = Preprocess(headline=train_stances, body=train.articleBody, preprocess_type=\"lemma\") train_preprocessed_headlines, train_preprocessed_bodies = preprocessed_train_data.get_clean_headlines_and_bodies()", "testBodyPath = \"data/competition_test_bodies.csv\" # header attributes primary_id = \"Body ID\" stance = \"Stance\"", "features to prepare them as an inout for the models final_train_features = sp.bmat([[train_tfidf_weights,", "validation_tfidf_weights, test_tfidf_weights = 
train_features.tfidf_extraction( validation_headlines, validation_bodies, test_headlines, test_bodies) # Sentence weighting for train", "input_file(base_preprocess_path + \"/\" + \"training_headlines.p\") train_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"training_bodies.p\") #", "preprocess_type=\"lemma\") test_preprocessed_headlines, test_preprocessed_bodies = preprocessed_test_data.get_clean_headlines_and_bodies() output_file(test_preprocessed_headlines, base_preprocess_path + \"/\" + \"test_headlines.p\") output_file(test_preprocessed_bodies, base_preprocess_path", "= sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A output_file(final_validation_features, base_feature_path + \"/\" + \"validation_features.p\") final_test_features = sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A", "base_feature_path + \"/\" + \"validation_features.p\") final_test_features = sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A output_file(final_test_features, base_feature_path + \"/\"", "\"articleBody\" headline = \"Headline\" base_preprocess_path = \"preprocessed_data\" base_feature_path = \"final_features\" output = \"output\"", "[] for i in range(len(stances)): labels.append(LABELS.index(stances[i][stance])) return labels def headlines_bodies(temp_headline, temp_body): headlines =", "\"data/competition_test_stances.csv\" trainBodyPath = \"data/train_bodies.csv\" testBodyPath = \"data/competition_test_bodies.csv\" # header attributes primary_id = \"Body", "# Sentence weighting for validation validation_sentence_weights = validation_features.sentence_weighting() print(\"Feature extraction for test\") test_features", "(z5113901) main.py: Main file for program execution \"\"\" from src.data_import import FakeNewsData from", "pre-processing for train\") if not (os.path.exists(base_preprocess_path + \"/\" + \"training_headlines.p\") and os.path.exists( base_preprocess_path", "+ \"validation_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"validation_bodies.p\")): preprocessed_validation_data = Preprocess(headline=validation_stances, body=train.articleBody,", "base_feature_path + \"/\" + \"validation_features.p\") and os.path.exists( base_feature_path + \"/\" + \"test_features.p\")): #", "know the actual labels and the predicted labels to produce the correctness visualizations", "split_size=0.8) train_stances, validation_stances = train_validation_split.split() # Preprocess the train print(\"Start of pre-processing for", "= \"final_features\" output = \"output\" def target_labels(stances): labels = [] for i in", "test print(\"Start of pre-processing for test\") if not (os.path.exists(base_preprocess_path + \"/\" + \"test_headlines.p\")", "of pre-processing for test\") if not (os.path.exists(base_preprocess_path + \"/\" + \"test_headlines.p\") and os.path.exists(", "split size print(\"Data Splitting\") train_validation_split = DataSplit(ids=ids, headline=train.headlineInstances, split_size=0.8) train_stances, validation_stances = train_validation_split.split()", "train_tfidf_weights, validation_tfidf_weights, test_tfidf_weights = train_features.tfidf_extraction( validation_headlines, validation_bodies, test_headlines, test_bodies) # Sentence weighting for", "for the models final_train_features = sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A output_file(final_train_features, base_feature_path + \"/\" + \"train_features.p\")", "= target_labels(train_stances) validation_target_labels = target_labels(validation_stances) 
test_target_labels = target_labels(test.headlineInstances) # Modelling the features print(\"Start", "from src.models import Models from src.score import LABELS from src.utils import input_file, output_file", "base_preprocess_path + \"/\" + \"validation_bodies.p\")): preprocessed_validation_data = Preprocess(headline=validation_stances, body=train.articleBody, preprocess_type=\"lemma\") validation_preprocessed_headlines, validation_preprocessed_bodies =", "\"training_headlines.p\") train_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"training_bodies.p\") # Preprocess the validation print(\"Start", "test train_headlines, train_bodies = headlines_bodies(train_stances, train.articleBody) validation_headlines, validation_bodies = headlines_bodies(validation_stances, train.articleBody) test_headlines, test_bodies", "for train train_sentence_weights = train_features.sentence_weighting() print(\"Feature extraction for validation\") validation_features = Features(validation_preprocessed_headlines, validation_preprocessed_bodies,", "FakeNewsData(testStancePath, testBodyPath) # Extracting IDs for data splitting ids = list(train.articleBody.keys()) # The", "modelling and visualization. Check README for clear understanding of what is happening. '''", "\"test_bodies.p\") # Split headlines and bodies for train, validation and test train_headlines, train_bodies", "train.articleBody) validation_headlines, validation_bodies = headlines_bodies(validation_stances, train.articleBody) test_headlines, test_bodies = headlines_bodies(test.headlineInstances, test.articleBody) if not", "validation_preprocessed_headlines = input_file(base_preprocess_path + \"/\" + \"validation_headlines.p\") validation_preprocessed_bodies = input_file(base_preprocess_path + \"/\" +", "Features(validation_preprocessed_headlines, validation_preprocessed_bodies, validation_headlines, validation_bodies) # Sentence weighting for validation validation_sentence_weights = validation_features.sentence_weighting() print(\"Feature", "as an inout for the models final_train_features = sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A output_file(final_train_features, base_feature_path +", "[] for i in range(len(temp_headline)): bodies.append(temp_body[int(temp_headline[i][primary_id])]) headlines.append(temp_headline[i][headline]) return headlines, bodies ''' This files", "size print(\"Data Splitting\") train_validation_split = DataSplit(ids=ids, headline=train.headlineInstances, split_size=0.8) train_stances, validation_stances = train_validation_split.split() #", "train_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"training_bodies.p\") # Preprocess the validation print(\"Start of", "validation_sentence_weights = validation_features.sentence_weighting() print(\"Feature extraction for test\") test_features = Features(test_preprocessed_headlines, test_preprocessed_bodies, test_headlines, test_bodies)", "= \"data/competition_test_bodies.csv\" # header attributes primary_id = \"Body ID\" stance = \"Stance\" body", "\"output\" def target_labels(stances): labels = [] for i in range(len(stances)): labels.append(LABELS.index(stances[i][stance])) return labels", "sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A output_file(final_train_features, base_feature_path + \"/\" + \"train_features.p\") final_validation_features = sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A output_file(final_validation_features,", "base_feature_path + \"/\" + \"train_features.p\") 
final_validation_features = sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A output_file(final_validation_features, base_feature_path + \"/\"", "print(\"Start of pre-processing for validation\") if not (os.path.exists(base_preprocess_path + \"/\" + \"validation_headlines.p\") and", "= Features(train_preprocessed_headlines, train_preprocessed_bodies, train_headlines, train_bodies) # TF-IDF weight extraction train_tfidf_weights, validation_tfidf_weights, test_tfidf_weights =", "= \"preprocessed_data\" base_feature_path = \"final_features\" output = \"output\" def target_labels(stances): labels = []", "''' t2 = time.time() print(\"Time for the total is:\", t2 - t0) print(\"\\nEnd", "Preprocess(headline=test.headlineInstances, body=test.articleBody, preprocess_type=\"lemma\") test_preprocessed_headlines, test_preprocessed_bodies = preprocessed_test_data.get_clean_headlines_and_bodies() output_file(test_preprocessed_headlines, base_preprocess_path + \"/\" + \"test_headlines.p\")", "and os.path.exists( base_preprocess_path + \"/\" + \"validation_bodies.p\")): preprocessed_validation_data = Preprocess(headline=validation_stances, body=train.articleBody, preprocess_type=\"lemma\") validation_preprocessed_headlines,", "Extraction\") final_train_features = input_file(base_feature_path + \"/\" + \"train_features.p\") final_validation_features = input_file(base_feature_path + \"/\"", "+ \"/\" + \"test_features.p\")): # Feature extraction and combining them for the models", "for validation\") validation_features = Features(validation_preprocessed_headlines, validation_preprocessed_bodies, validation_headlines, validation_bodies) # Sentence weighting for validation", "+ \"/\" + \"test_headlines.p\") output_file(test_preprocessed_bodies, base_preprocess_path + \"/\" + \"test_bodies.p\") else: test_preprocessed_headlines =", "Features from src.models import Models from src.score import LABELS from src.utils import input_file,", "preprocess_type=\"lemma\") train_preprocessed_headlines, train_preprocessed_bodies = preprocessed_train_data.get_clean_headlines_and_bodies() output_file(train_preprocessed_headlines, base_preprocess_path + \"/\" + \"training_headlines.p\") output_file(train_preprocessed_bodies, base_preprocess_path", "# Preprocess the validation print(\"Start of pre-processing for validation\") if not (os.path.exists(base_preprocess_path +", "validation_preprocessed_bodies = input_file(base_preprocess_path + \"/\" + \"validation_bodies.p\") # Preprocess the test print(\"Start of", "+ \"validation_features.p\") final_test_features = input_file(base_feature_path + \"/\" + \"test_features.p\") t1 = time.time() print(\"Time", "\"test_features.p\") t1 = time.time() print(\"Time for feature extraction is:\", t1 - t0) #", "and os.path.exists( base_preprocess_path + \"/\" + \"training_bodies.p\")): preprocessed_train_data = Preprocess(headline=train_stances, body=train.articleBody, preprocess_type=\"lemma\") train_preprocessed_headlines,", "testStancePath = \"data/competition_test_stances.csv\" trainBodyPath = \"data/train_bodies.csv\" testBodyPath = \"data/competition_test_bodies.csv\" # header attributes primary_id", "validation_bodies, test_headlines, test_bodies) # Sentence weighting for train train_sentence_weights = train_features.sentence_weighting() print(\"Feature extraction", "the models final_train_features = sp.bmat([[train_tfidf_weights, train_sentence_weights.T]]).A output_file(final_train_features, base_feature_path + \"/\" + \"train_features.p\") 
final_validation_features", "LABELS from src.utils import input_file, output_file import scipy.sparse as sp import os import", "header attributes primary_id = \"Body ID\" stance = \"Stance\" body = \"articleBody\" headline", "\"/\" + \"test_features.p\") else: print(\"Feature Extraction\") final_train_features = input_file(base_feature_path + \"/\" + \"train_features.p\")", "= time.time() print(\"Time for feature extraction is:\", t1 - t0) # Target variables", "validation_stances = train_validation_split.split() # Preprocess the train print(\"Start of pre-processing for train\") if", "test_sentence_weights = test_features.sentence_weighting() # Combine the features to prepare them as an inout", "= input_file(base_preprocess_path + \"/\" + \"test_bodies.p\") # Split headlines and bodies for train,", "+ \"/\" + \"test_features.p\") else: print(\"Feature Extraction\") final_train_features = input_file(base_feature_path + \"/\" +", "file for program execution \"\"\" from src.data_import import FakeNewsData from src.train_validation_split import DataSplit", "weighting for train train_sentence_weights = train_features.sentence_weighting() print(\"Feature extraction for validation\") validation_features = Features(validation_preprocessed_headlines,", "bodies ''' This files combine all the data mining part starting from Data", "extraction is:\", t1 - t0) # Target variables train_target_labels = target_labels(train_stances) validation_target_labels =", "correctness visualizations graphs for the report. ''' t2 = time.time() print(\"Time for the", "= \"Body ID\" stance = \"Stance\" body = \"articleBody\" headline = \"Headline\" base_preprocess_path", "pre processing, feature transformation, modelling and visualization. Check README for clear understanding of", "= list(train.articleBody.keys()) # The DataSplit generates the train and validation splits according to", "validation_bodies) # Sentence weighting for validation validation_sentence_weights = validation_features.sentence_weighting() print(\"Feature extraction for test\")", "not (os.path.exists(base_preprocess_path + \"/\" + \"training_headlines.p\") and os.path.exists( base_preprocess_path + \"/\" + \"training_bodies.p\")):", "validation_sentence_weights.T]]).A output_file(final_validation_features, base_feature_path + \"/\" + \"validation_features.p\") final_test_features = sp.bmat([[test_tfidf_weights, test_sentence_weights.T]]).A output_file(final_test_features, base_feature_path", "src.utils import input_file, output_file import scipy.sparse as sp import os import time #", "for i in range(len(temp_headline)): bodies.append(temp_body[int(temp_headline[i][primary_id])]) headlines.append(temp_headline[i][headline]) return headlines, bodies ''' This files combine", "validation_target_labels = target_labels(validation_stances) test_target_labels = target_labels(test.headlineInstances) # Modelling the features print(\"Start of Modelling\")", "The DataSplit generates the train and validation splits according to our split size", "= \"Stance\" body = \"articleBody\" headline = \"Headline\" base_preprocess_path = \"preprocessed_data\" base_feature_path =", "test.articleBody) if not (os.path.exists(base_feature_path + \"/\" + \"train_features.p\") and os.path.exists( base_feature_path + \"/\"", "train_sentence_weights.T]]).A output_file(final_train_features, base_feature_path + \"/\" + \"train_features.p\") final_validation_features = sp.bmat([[validation_tfidf_weights, validation_sentence_weights.T]]).A output_file(final_validation_features, 
base_feature_path", "\"/\" + \"test_features.p\")): # Feature extraction and combining them for the models print(\"Feature", "program execution \"\"\" from src.data_import import FakeNewsData from src.train_validation_split import DataSplit from src.preprocess", "DataSplit generates the train and validation splits according to our split size print(\"Data", "FakeNewsData(trainStancePath, trainBodyPath) test = FakeNewsData(testStancePath, testBodyPath) # Extracting IDs for data splitting ids" ]
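The feature-combination step above relies on scipy.sparse.bmat, which assembles a block matrix from a grid of blocks; with a single row of blocks it simply stacks the TF-IDF matrix and the transposed sentence-weight matrix side by side, and .A densifies the result. Below is a small self-contained illustration with toy matrices; X_tfidf and w_sent are made-up names for this sketch, not the project's real features.

# Minimal sketch of the sp.bmat([[A, B.T]]).A combination used above,
# using tiny made-up inputs rather than the project's real feature matrices.
import numpy as np
import scipy.sparse as sp

X_tfidf = sp.csr_matrix(np.array([[0.1, 0.0], [0.0, 0.3]]))  # 2 samples x 2 TF-IDF features
w_sent = sp.csr_matrix(np.array([[0.5, 0.7]]))               # 1 x 2; transposed to 2 x 1 below

# bmat builds a block matrix from a grid of blocks; one row of blocks = horizontal stacking.
combined = sp.bmat([[X_tfidf, w_sent.T]]).A                   # dense 2 x 3 array
print(combined.shape)  # (2, 3)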
[ "on 2021-12-18 01:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "), ] operations = [ migrations.RemoveField( model_name=\"faculty\", name=\"department\", ), migrations.AddField( model_name=\"faculty\", name=\"department\", field=models.ManyToManyField(to=\"past_questions.Department\"),", "01:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ( \"past_questions\",", "( \"past_questions\", \"0003_alter_department_level_alter_faculty_department_and_more\", ), ] operations = [ migrations.RemoveField( model_name=\"faculty\", name=\"department\", ), migrations.AddField(", "2021-12-18 01:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (", "[ ( \"past_questions\", \"0003_alter_department_level_alter_faculty_department_and_more\", ), ] operations = [ migrations.RemoveField( model_name=\"faculty\", name=\"department\", ),", "models class Migration(migrations.Migration): dependencies = [ ( \"past_questions\", \"0003_alter_department_level_alter_faculty_department_and_more\", ), ] operations =", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ( \"past_questions\", \"0003_alter_department_level_alter_faculty_department_and_more\",", "<gh_stars>1-10 # Generated by Django 4.0 on 2021-12-18 01:46 from django.db import migrations,", "= [ ( \"past_questions\", \"0003_alter_department_level_alter_faculty_department_and_more\", ), ] operations = [ migrations.RemoveField( model_name=\"faculty\", name=\"department\",", "operations = [ migrations.RemoveField( model_name=\"faculty\", name=\"department\", ), migrations.AddField( model_name=\"faculty\", name=\"department\", field=models.ManyToManyField(to=\"past_questions.Department\"), ), ]", "# Generated by Django 4.0 on 2021-12-18 01:46 from django.db import migrations, models", "Generated by Django 4.0 on 2021-12-18 01:46 from django.db import migrations, models class", "dependencies = [ ( \"past_questions\", \"0003_alter_department_level_alter_faculty_department_and_more\", ), ] operations = [ migrations.RemoveField( model_name=\"faculty\",", "by Django 4.0 on 2021-12-18 01:46 from django.db import migrations, models class Migration(migrations.Migration):", "migrations, models class Migration(migrations.Migration): dependencies = [ ( \"past_questions\", \"0003_alter_department_level_alter_faculty_department_and_more\", ), ] operations", "\"0003_alter_department_level_alter_faculty_department_and_more\", ), ] operations = [ migrations.RemoveField( model_name=\"faculty\", name=\"department\", ), migrations.AddField( model_name=\"faculty\", name=\"department\",", "4.0 on 2021-12-18 01:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ( \"past_questions\", \"0003_alter_department_level_alter_faculty_department_and_more\", ),", "] operations = [ migrations.RemoveField( model_name=\"faculty\", name=\"department\", ), migrations.AddField( model_name=\"faculty\", name=\"department\", field=models.ManyToManyField(to=\"past_questions.Department\"), ),", "Django 4.0 on 2021-12-18 01:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "import migrations, models class Migration(migrations.Migration): dependencies = [ ( \"past_questions\", 
\"0003_alter_department_level_alter_faculty_department_and_more\", ), ]", "\"past_questions\", \"0003_alter_department_level_alter_faculty_department_and_more\", ), ] operations = [ migrations.RemoveField( model_name=\"faculty\", name=\"department\", ), migrations.AddField( model_name=\"faculty\",", "class Migration(migrations.Migration): dependencies = [ ( \"past_questions\", \"0003_alter_department_level_alter_faculty_department_and_more\", ), ] operations = [", "Migration(migrations.Migration): dependencies = [ ( \"past_questions\", \"0003_alter_department_level_alter_faculty_department_and_more\", ), ] operations = [ migrations.RemoveField(" ]
[ "spaces in front of \"print\" guess = input() guess = int(guess) if guess", "is too low.') #Eight spaces in front of \"print\" if guess > number:", "+ ' guesses!') if guess != number: number = str(number) print('Nope. The number", "number, silly! Sorry, but that counts as a guess.') continue if guess ==", "is too high') if guess == number: break except: print('That\\'s not a number,", "\"print\" if guess > number: print('Your guess is too high') if guess ==", "a guess.') continue if guess == number: guessesTaken = str(guessesTaken + 1) print('Good", "counts as a guess.') continue if guess == number: guessesTaken = str(guessesTaken +", "== number: guessesTaken = str(guessesTaken + 1) print('Good job, ' + myName +", "print('Hello! What is your name?') myName = input() number = random.randint(1, 20) print('Well,", "range(6): try: print('Take a guess.') #Four spaces in front of \"print\" guess =", "if guess == number: break except: print('That\\'s not a number, silly! Sorry, but", "= int(guess) if guess < number: #\"if\" statement part of for statement body.", "number between 1 and 20. Can you guess it in six tries? :)')", "Sorry, but that counts as a guess.') continue if guess == number: guessesTaken", "input() number = random.randint(1, 20) print('Well, ' + myName + ', I am", "in ' + guessesTaken + ' guesses!') if guess != number: number =", "guess != number: number = str(number) print('Nope. The number I was thinking of", "#\"if\" statement part of for statement body. print('Your guess is too low.') #Eight", "guessesTaken = 0 print('Hello! What is your name?') myName = input() number =", "am thinking of a number between 1 and 20. Can you guess it", "the Number game. import random guessesTaken = 0 print('Hello! What is your name?')", "guessesTaken + ' guesses!') if guess != number: number = str(number) print('Nope. The", "Guess the Number game. import random guessesTaken = 0 print('Hello! What is your", "name?') myName = input() number = random.randint(1, 20) print('Well, ' + myName +", "part of for statement body. print('Your guess is too low.') #Eight spaces in", "str(guessesTaken + 1) print('Good job, ' + myName + '! You guessed my", "What is your name?') myName = input() number = random.randint(1, 20) print('Well, '", "= random.randint(1, 20) print('Well, ' + myName + ', I am thinking of", "guess > number: print('Your guess is too high') if guess == number: break", "' + myName + '! You guessed my number in ' + guessesTaken", "guess = int(guess) if guess < number: #\"if\" statement part of for statement", "<reponame>AnacondaDontWantNone/gotTheBuns<gh_stars>0 #This is a Guess the Number game. import random guessesTaken = 0", "20) print('Well, ' + myName + ', I am thinking of a number", "of a number between 1 and 20. Can you guess it in six", ":)') for guessesTaken in range(6): try: print('Take a guess.') #Four spaces in front", "silly! Sorry, but that counts as a guess.') continue if guess == number:", "+ myName + ', I am thinking of a number between 1 and", "int(guess) if guess < number: #\"if\" statement part of for statement body. print('Your", "number in ' + guessesTaken + ' guesses!') if guess != number: number", "not a number, silly! Sorry, but that counts as a guess.') continue if", "number: #\"if\" statement part of for statement body. print('Your guess is too low.')", "= str(guessesTaken + 1) print('Good job, ' + myName + '! You guessed", "= input() guess = int(guess) if guess < number: #\"if\" statement part of", "it in six tries? 
:)') for guessesTaken in range(6): try: print('Take a guess.')", "!= number: number = str(number) print('Nope. The number I was thinking of was", "myName + '! You guessed my number in ' + guessesTaken + '", "and 20. Can you guess it in six tries? :)') for guessesTaken in", "body. print('Your guess is too low.') #Eight spaces in front of \"print\" if", "I am thinking of a number between 1 and 20. Can you guess", "job, ' + myName + '! You guessed my number in ' +", "guessesTaken = str(guessesTaken + 1) print('Good job, ' + myName + '! You", "You guessed my number in ' + guessesTaken + ' guesses!') if guess", "for statement body. print('Your guess is too low.') #Eight spaces in front of", "in six tries? :)') for guessesTaken in range(6): try: print('Take a guess.') #Four", "', I am thinking of a number between 1 and 20. Can you", "tries? :)') for guessesTaken in range(6): try: print('Take a guess.') #Four spaces in", "'! You guessed my number in ' + guessesTaken + ' guesses!') if", "front of \"print\" guess = input() guess = int(guess) if guess < number:", "front of \"print\" if guess > number: print('Your guess is too high') if", "guessed my number in ' + guessesTaken + ' guesses!') if guess !=", "1 and 20. Can you guess it in six tries? :)') for guessesTaken", "of for statement body. print('Your guess is too low.') #Eight spaces in front", "print('Your guess is too high') if guess == number: break except: print('That\\'s not", "\"print\" guess = input() guess = int(guess) if guess < number: #\"if\" statement", "six tries? :)') for guessesTaken in range(6): try: print('Take a guess.') #Four spaces", "20. Can you guess it in six tries? :)') for guessesTaken in range(6):", "a Guess the Number game. import random guessesTaken = 0 print('Hello! What is", "try: print('Take a guess.') #Four spaces in front of \"print\" guess = input()", "thinking of a number between 1 and 20. Can you guess it in", "guess == number: break except: print('That\\'s not a number, silly! Sorry, but that", "a number between 1 and 20. Can you guess it in six tries?", "is a Guess the Number game. import random guessesTaken = 0 print('Hello! What", "str(number) print('Nope. The number I was thinking of was ' + number +", "if guess > number: print('Your guess is too high') if guess == number:", "guess == number: guessesTaken = str(guessesTaken + 1) print('Good job, ' + myName", "game. import random guessesTaken = 0 print('Hello! What is your name?') myName =", "print('Your guess is too low.') #Eight spaces in front of \"print\" if guess", "is your name?') myName = input() number = random.randint(1, 20) print('Well, ' +", "+ guessesTaken + ' guesses!') if guess != number: number = str(number) print('Nope.", "number = random.randint(1, 20) print('Well, ' + myName + ', I am thinking", "Number game. import random guessesTaken = 0 print('Hello! What is your name?') myName", "myName = input() number = random.randint(1, 20) print('Well, ' + myName + ',", "myName + ', I am thinking of a number between 1 and 20.", "' guesses!') if guess != number: number = str(number) print('Nope. The number I", "' + myName + ', I am thinking of a number between 1", "> number: print('Your guess is too high') if guess == number: break except:", "= str(number) print('Nope. The number I was thinking of was ' + number", "guesses!') if guess != number: number = str(number) print('Nope. The number I was", "+ ', I am thinking of a number between 1 and 20. 
Can", "guessesTaken in range(6): try: print('Take a guess.') #Four spaces in front of \"print\"", "== number: break except: print('That\\'s not a number, silly! Sorry, but that counts", "as a guess.') continue if guess == number: guessesTaken = str(guessesTaken + 1)", "if guess != number: number = str(number) print('Nope. The number I was thinking", "guess = input() guess = int(guess) if guess < number: #\"if\" statement part", "0 print('Hello! What is your name?') myName = input() number = random.randint(1, 20)", "guess.') continue if guess == number: guessesTaken = str(guessesTaken + 1) print('Good job,", "in front of \"print\" if guess > number: print('Your guess is too high')", "number: print('Your guess is too high') if guess == number: break except: print('That\\'s", "number: break except: print('That\\'s not a number, silly! Sorry, but that counts as", "print('Well, ' + myName + ', I am thinking of a number between", "too low.') #Eight spaces in front of \"print\" if guess > number: print('Your", "of \"print\" guess = input() guess = int(guess) if guess < number: #\"if\"", "random guessesTaken = 0 print('Hello! What is your name?') myName = input() number", "#This is a Guess the Number game. import random guessesTaken = 0 print('Hello!", "< number: #\"if\" statement part of for statement body. print('Your guess is too", "break except: print('That\\'s not a number, silly! Sorry, but that counts as a", "statement part of for statement body. print('Your guess is too low.') #Eight spaces", "but that counts as a guess.') continue if guess == number: guessesTaken =", "in front of \"print\" guess = input() guess = int(guess) if guess <", "+ 1) print('Good job, ' + myName + '! You guessed my number", "number = str(number) print('Nope. The number I was thinking of was ' +", "for guessesTaken in range(6): try: print('Take a guess.') #Four spaces in front of", "#Eight spaces in front of \"print\" if guess > number: print('Your guess is", "if guess < number: #\"if\" statement part of for statement body. print('Your guess", "= 0 print('Hello! What is your name?') myName = input() number = random.randint(1,", "low.') #Eight spaces in front of \"print\" if guess > number: print('Your guess", "high') if guess == number: break except: print('That\\'s not a number, silly! Sorry,", "that counts as a guess.') continue if guess == number: guessesTaken = str(guessesTaken", "print('Good job, ' + myName + '! You guessed my number in '", "of \"print\" if guess > number: print('Your guess is too high') if guess", "print('Take a guess.') #Four spaces in front of \"print\" guess = input() guess", "' + guessesTaken + ' guesses!') if guess != number: number = str(number)", "1) print('Good job, ' + myName + '! You guessed my number in", "you guess it in six tries? :)') for guessesTaken in range(6): try: print('Take", "your name?') myName = input() number = random.randint(1, 20) print('Well, ' + myName", "random.randint(1, 20) print('Well, ' + myName + ', I am thinking of a", "a number, silly! Sorry, but that counts as a guess.') continue if guess", "guess is too low.') #Eight spaces in front of \"print\" if guess >", "number: number = str(number) print('Nope. The number I was thinking of was '", "continue if guess == number: guessesTaken = str(guessesTaken + 1) print('Good job, '", "except: print('That\\'s not a number, silly! Sorry, but that counts as a guess.')", "print('That\\'s not a number, silly! 
Sorry, but that counts as a guess.') continue", "number: guessesTaken = str(guessesTaken + 1) print('Good job, ' + myName + '!", "import random guessesTaken = 0 print('Hello! What is your name?') myName = input()", "#Four spaces in front of \"print\" guess = input() guess = int(guess) if", "spaces in front of \"print\" if guess > number: print('Your guess is too", "guess it in six tries? :)') for guessesTaken in range(6): try: print('Take a", "my number in ' + guessesTaken + ' guesses!') if guess != number:", "print('Nope. The number I was thinking of was ' + number + '.')", "guess < number: #\"if\" statement part of for statement body. print('Your guess is", "a guess.') #Four spaces in front of \"print\" guess = input() guess =", "between 1 and 20. Can you guess it in six tries? :)') for", "= input() number = random.randint(1, 20) print('Well, ' + myName + ', I", "too high') if guess == number: break except: print('That\\'s not a number, silly!", "+ '! You guessed my number in ' + guessesTaken + ' guesses!')", "+ myName + '! You guessed my number in ' + guessesTaken +", "statement body. print('Your guess is too low.') #Eight spaces in front of \"print\"", "Can you guess it in six tries? :)') for guessesTaken in range(6): try:", "if guess == number: guessesTaken = str(guessesTaken + 1) print('Good job, ' +", "in range(6): try: print('Take a guess.') #Four spaces in front of \"print\" guess", "guess is too high') if guess == number: break except: print('That\\'s not a", "input() guess = int(guess) if guess < number: #\"if\" statement part of for", "guess.') #Four spaces in front of \"print\" guess = input() guess = int(guess)" ]
[ "import unittest from letter_capitalize import LetterCapitalize class TestWordCapitalize(unittest.TestCase): def test_word_capitalize(self): self.assertEqual(LetterCapitalize(\"hello world\"), \"Hello", "from letter_capitalize import LetterCapitalize class TestWordCapitalize(unittest.TestCase): def test_word_capitalize(self): self.assertEqual(LetterCapitalize(\"hello world\"), \"Hello World\") if", "letter_capitalize import LetterCapitalize class TestWordCapitalize(unittest.TestCase): def test_word_capitalize(self): self.assertEqual(LetterCapitalize(\"hello world\"), \"Hello World\") if __name__", "import LetterCapitalize class TestWordCapitalize(unittest.TestCase): def test_word_capitalize(self): self.assertEqual(LetterCapitalize(\"hello world\"), \"Hello World\") if __name__ ==", "class TestWordCapitalize(unittest.TestCase): def test_word_capitalize(self): self.assertEqual(LetterCapitalize(\"hello world\"), \"Hello World\") if __name__ == '__main__': unittest.main()", "unittest from letter_capitalize import LetterCapitalize class TestWordCapitalize(unittest.TestCase): def test_word_capitalize(self): self.assertEqual(LetterCapitalize(\"hello world\"), \"Hello World\")", "LetterCapitalize class TestWordCapitalize(unittest.TestCase): def test_word_capitalize(self): self.assertEqual(LetterCapitalize(\"hello world\"), \"Hello World\") if __name__ == '__main__':" ]
[ "and the following disclaimer in the documentation # and/or other materials provided with", "ISO date string and compare it to the expected value. \"\"\" if expectation", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES", "15), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_MINUTE, \"1985-04-12T10:15\", ), ( \"1985102T1015Z\", dt.datetime(1985, 4, 12,", "[ ( \"19850412T1015\", dt.datetime(1985, 4, 12, 10, 15), DATE_BAS_COMPLETE + \"T\" + TIME_BAS_MINUTE,", "format, output): \"\"\" Parse an ISO date string and compare it to the", "without # modification, are permitted provided that the following conditions are met: #", "in binary form must reproduce the above copyright notice, # this list of", "TORT ############################################################################## \"\"\" Test cases for the isodatetime module. \"\"\" import datetime as", "\".%f\" + TZ_BAS, \"20110410T101225.123000Z\", ), ( \"2012-10-12T08:29:46.069178Z\", dt.datetime(2012, 10, 12, 8, 29, 46,", "All rights reserved. # # Redistribution and use in source and binary forms,", "\".%f\" + TZ_BAS, \"2012-10-12T08:29:46.069178Z\", ), ( \"2012-10-12T08:29:46.691780Z\", dt.datetime(2012, 10, 12, 8, 29, 46,", "# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", "expectation @pytest.mark.parametrize(\"datetime_string, expectation, format, output\", TEST_CASES) def test_format(datetime_string, expectation, format, output): \"\"\" Take", "691780, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.691780Z\", ),", "retain the above copyright notice, # this list of conditions and the following", "None means an ISO8601Error # is expected. TEST_CASES = [ ( \"19850412T1015\", dt.datetime(1985,", "PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR", "THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "8, 55, 22, 123457, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" +", "PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS", "dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_BAS_WEEK_COMPLETE + \"T\" + TIME_BAS_MINUTE", "+ TIME_BAS_MINUTE + TZ_BAS, \"1985W155T1015+0400\", ), ( \"1985-W15-5T10:15+04\", dt.datetime(1985, 4, 12, 10, 15,", "NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY", "DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.069178Z\", ), ( \"2012-10-12T08:29:46.691780Z\",", "authors nor the names of its contributors # may be used to endorse", "DATE_EXT_WEEK_COMPLETE, TIME_BAS_COMPLETE, TIME_BAS_MINUTE, TIME_EXT_COMPLETE, TIME_EXT_MINUTE, TZ_BAS, TZ_EXT, TZ_HOUR, UTC, FixedOffset, ISO8601Error, datetime_isoformat, parse_datetime,", "TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123456Z\", ), ( \"2014-08-18 14:55:22.123456Z\", None, DATE_EXT_COMPLETE +", "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #", "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED", "\"2012-10-12T08:29:46.069178Z\", ), ( \"2012-10-12T08:29:46.691780Z\", dt.datetime(2012, 10, 12, 8, 29, 46, 691780, tzinfo=UTC), DATE_EXT_COMPLETE", "CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #", "the name of the authors nor the names of its contributors # may", "4, 12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE +", "# and/or other materials provided with the distribution. # * Neither the name", "of conditions and the following disclaimer. # * Redistributions in binary form must", "\"19850412T1015\", dt.datetime(1985, 4, 12, 10, 15), DATE_BAS_COMPLETE + \"T\" + TIME_BAS_MINUTE, \"19850412T1015\", ),", "BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF", "# CONTRACT, STRICT LIABILITY, OR TORT ############################################################################## \"\"\" Test cases for the isodatetime", "\"\"\" Parse an ISO date string and compare it to the expected value.", "), ( \"1985-W15-5T10:15+04\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_EXT_WEEK_COMPLETE +", "SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #", "+ TZ_BAS, \"2012-10-12T08:29:46.069178Z\", ), ( \"2012-10-12T08:29:46.691780Z\", dt.datetime(2012, 10, 12, 8, 29, 46, 691780,", "its contributors # may be used to endorse or promote products derived from", "LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", ") # the following list contains tuples of ISO datetime strings and the", "\"+0400\")), DATE_BAS_WEEK_COMPLETE + \"T\" + TIME_BAS_MINUTE + TZ_BAS, \"1985W155T1015+0400\", ), ( \"1985-W15-5T10:15+04\", dt.datetime(1985,", "This is the reverse test to test_parse. 
\"\"\" if expectation is None: with", "COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,", "* Redistributions in binary form must reproduce the above copyright notice, # this", "\"T\" + TIME_BAS_MINUTE + TZ_BAS, \"1985W155T1015+0400\", ), ( \"1985-W15-5T10:15+04\", dt.datetime(1985, 4, 12, 10,", "name of the authors nor the names of its contributors # may be", "( \"2012-10-12T08:29:46.069178Z\", dt.datetime(2012, 10, 12, 8, 29, 46, 69178, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\"", "2009, <NAME> # All rights reserved. # # Redistribution and use in source", "the distribution. # * Neither the name of the authors nor the names", "29, 46, 69178, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS,", "+ \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.691780Z\", ), ( \"2012-10-30T08:55:22.1234567Z\", dt.datetime(2012,", "+ \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123456Z\", ), ( \"2014-08-18 14:55:22.123456Z\", None, DATE_EXT_COMPLETE + \"T\"", "CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "the expected value. \"\"\" if expectation is None: with pytest.raises(ISO8601Error): parse_datetime(datetime_string) else: result", "\"1985102T1015Z\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=UTC), DATE_BAS_ORD_COMPLETE + \"T\" + TIME_BAS_MINUTE +", "DATE_BAS_WEEK_COMPLETE, DATE_EXT_COMPLETE, DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK_COMPLETE, TIME_BAS_COMPLETE, TIME_BAS_MINUTE, TIME_EXT_COMPLETE, TIME_EXT_MINUTE, TZ_BAS, TZ_EXT, TZ_HOUR, UTC, FixedOffset,", "to the expected value. \"\"\" if expectation is None: with pytest.raises(ISO8601Error): parse_datetime(datetime_string) else:", "this list of conditions and the following disclaimer. # * Redistributions in binary", "0, \"+0400\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_HOUR, \"1985-W15-5T10:15+04\", ), ( \"1985-W15-5T10:15-0430\",", "CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR", "BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN", "(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS", "and create ISO string from it. This is the reverse test to test_parse.", "4, 12, 10, 15, tzinfo=UTC), DATE_EXT_ORD_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_EXT, \"1985-102T10:15Z\",", "TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR", "with or without # modification, are permitted provided that the following conditions are", "WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT ############################################################################## \"\"\" Test cases for", "tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123456Z\", ), (", "conditions and the following disclaimer. # * Redistributions in binary form must reproduce", "+ TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123457Z\", ), ( \"2012-10-30T08:55:22.1234561Z\", dt.datetime(2012, 10, 30,", "TEST_CASES) def test_parse(datetime_string, expectation, format, output): \"\"\" Parse an ISO date string and", "means an ISO8601Error # is expected. 
TEST_CASES = [ ( \"19850412T1015\", dt.datetime(1985, 4,", "EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #", "\"20110410T101225.123000Z\", ), ( \"2012-10-12T08:29:46.069178Z\", dt.datetime(2012, 10, 12, 8, 29, 46, 69178, tzinfo=UTC), DATE_EXT_COMPLETE", "BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL", "), ( \"1985W155T1015+0400\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_BAS_WEEK_COMPLETE +", "it. This is the reverse test to test_parse. \"\"\" if expectation is None:", "# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS", "Take date object and create ISO string from it. This is the reverse", "dt.datetime(1985, 4, 12, 10, 15), DATE_BAS_COMPLETE + \"T\" + TIME_BAS_MINUTE, \"19850412T1015\", ), (", "following disclaimer. # * Redistributions in binary form must reproduce the above copyright", "+ \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.069178Z\", ), ( \"2012-10-12T08:29:46.691780Z\", dt.datetime(2012,", "+ \"T\" + TIME_BAS_MINUTE, \"19850412T1015\", ), ( \"1985-04-12T10:15\", dt.datetime(1985, 4, 12, 10, 15),", "TIME_EXT_MINUTE + TZ_EXT, \"1985-102T10:15Z\", ), ( \"1985W155T1015+0400\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4,", "OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT ############################################################################## \"\"\" Test", "), ( \"2012-10-30T08:55:22.1234567Z\", dt.datetime(2012, 10, 30, 8, 55, 22, 123457, tzinfo=UTC), DATE_EXT_COMPLETE +", "names of its contributors # may be used to endorse or promote products", "# the following list contains tuples of ISO datetime strings and the expected", "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO", "OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF", "NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A", "def test_format(datetime_string, expectation, format, output): \"\"\" Take date object and create ISO string", "15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_HOUR, \"1985-W15-5T10:15+04\", ),", "list of conditions and the following disclaimer. # * Redistributions in binary form", "used to endorse or promote products derived from this software # without specific", "output\", TEST_CASES) def test_format(datetime_string, expectation, format, output): \"\"\" Take date object and create", "rights reserved. # # Redistribution and use in source and binary forms, with", "an ISO8601Error # is expected. TEST_CASES = [ ( \"19850412T1015\", dt.datetime(1985, 4, 12,", "+ TIME_EXT_MINUTE + TZ_EXT, \"1985-102T10:15Z\", ), ( \"1985W155T1015+0400\", dt.datetime(1985, 4, 12, 10, 15,", "( \"2012-10-30T08:55:22.1234561Z\", dt.datetime(2012, 10, 30, 8, 55, 22, 123456, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\"", "DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2014-08-18T14:55:22.123456Z\", ), ] @pytest.mark.parametrize(\"datetime_string,", "\"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123456Z\", ), ( \"2014-08-18 14:55:22.123456Z\", None,", "or promote products derived from this software # without specific prior written permission.", "permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS", "datetime as dt import pytest from isodate import ( DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE, DATE_BAS_WEEK_COMPLETE, DATE_EXT_COMPLETE,", "the following conditions are met: # # * Redistributions of source code must", "25, 123000, tzinfo=UTC), DATE_BAS_COMPLETE + \"T\" + TIME_BAS_COMPLETE + \".%f\" + TZ_BAS, \"20110410T101225.123000Z\",", "), ( \"2012-10-12T08:29:46.691780Z\", dt.datetime(2012, 10, 12, 8, 29, 46, 691780, tzinfo=UTC), DATE_EXT_COMPLETE +", "that the following conditions are met: # # * Redistributions of source code", "reproduce the above copyright notice, # this list of conditions and the following", "the documentation # and/or other materials provided with the distribution. # * Neither", "STRICT LIABILITY, OR TORT ############################################################################## \"\"\" Test cases for the isodatetime module. \"\"\"", "promote products derived from this software # without specific prior written permission. #", "+ \"T\" + TIME_EXT_MINUTE + TZ_EXT, \"1985-W15-5T10:15+04:45\", ), ( \"20110410T101225.123000Z\", dt.datetime(2011, 4, 10,", "+ TIME_EXT_MINUTE + TZ_BAS, \"1985-W15-5T10:15-0430\", ), ( \"1985-W15-5T10:15+04:45\", dt.datetime(1985, 4, 12, 10, 15,", "the expected # result from the parse_datetime method. A result of None means", "ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT", "dt.datetime(2012, 10, 30, 8, 55, 22, 123456, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE", "HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "of source code must retain the above copyright notice, # this list of", "TIME_BAS_MINUTE + TZ_BAS, \"1985W155T1015+0400\", ), ( \"1985-W15-5T10:15+04\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4,", "TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE", "parse_datetime(datetime_string) assert result == expectation @pytest.mark.parametrize(\"datetime_string, expectation, format, output\", TEST_CASES) def test_format(datetime_string, expectation,", "+ \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123456Z\", ), ( \"2014-08-18 14:55:22.123456Z\",", "TIME_EXT_MINUTE, TZ_BAS, TZ_EXT, TZ_HOUR, UTC, FixedOffset, ISO8601Error, datetime_isoformat, parse_datetime, ) # the following", "Redistributions of source code must retain the above copyright notice, # this list", "DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY", "IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY", "( \"1985W155T1015+0400\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_BAS_WEEK_COMPLETE + \"T\"", "A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER", "( \"2012-10-30T08:55:22.1234567Z\", dt.datetime(2012, 10, 30, 8, 55, 22, 123457, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\"", "Parse an ISO date string and compare it to the expected value. \"\"\"", "of ISO datetime strings and the expected # result from the parse_datetime method.", "or without # modification, are permitted provided that the following conditions are met:", "dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(-4, -30, \"-0430\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE", "without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE", "datetime strings and the expected # result from the parse_datetime method. 
A result", "\"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.069178Z\", ), ( \"2012-10-12T08:29:46.691780Z\", dt.datetime(2012, 10,", "Test cases for the isodatetime module. \"\"\" import datetime as dt import pytest", "( \"2012-10-12T08:29:46.691780Z\", dt.datetime(2012, 10, 12, 8, 29, 46, 691780, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\"", "46, 69178, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.069178Z\",", "documentation # and/or other materials provided with the distribution. # * Neither the", "INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,", "\"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123457Z\", ), ( \"2012-10-30T08:55:22.1234561Z\", dt.datetime(2012, 10,", "( \"19850412T1015\", dt.datetime(1985, 4, 12, 10, 15), DATE_BAS_COMPLETE + \"T\" + TIME_BAS_MINUTE, \"19850412T1015\",", "value. \"\"\" if expectation is None: with pytest.raises(ISO8601Error): parse_datetime(datetime_string) else: result = parse_datetime(datetime_string)", "an ISO date string and compare it to the expected value. \"\"\" if", "\"\"\" if expectation is None: with pytest.raises(ISO8601Error): parse_datetime(datetime_string) else: result = parse_datetime(datetime_string) assert", "copyright notice, # this list of conditions and the following disclaimer in the", "def test_parse(datetime_string, expectation, format, output): \"\"\" Parse an ISO date string and compare", "from it. This is the reverse test to test_parse. \"\"\" if expectation is", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN", "result == expectation @pytest.mark.parametrize(\"datetime_string, expectation, format, output\", TEST_CASES) def test_format(datetime_string, expectation, format, output):", "of its contributors # may be used to endorse or promote products derived", "string and compare it to the expected value. \"\"\" if expectation is None:", "tzinfo=FixedOffset(4, 45, \"+04:45\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_EXT, \"1985-W15-5T10:15+04:45\", ), (", "\"\"\" Take date object and create ISO string from it. 
This is the", "TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.069178Z\", ), ( \"2012-10-12T08:29:46.691780Z\", dt.datetime(2012, 10, 12, 8,", "TIME_EXT_MINUTE + TZ_HOUR, \"1985-W15-5T10:15+04\", ), ( \"1985-W15-5T10:15-0430\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(-4,", "LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING,", "( \"20110410T101225.123000Z\", dt.datetime(2011, 4, 10, 10, 12, 25, 123000, tzinfo=UTC), DATE_BAS_COMPLETE + \"T\"", "AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR", "PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;", "= parse_datetime(datetime_string) assert result == expectation @pytest.mark.parametrize(\"datetime_string, expectation, format, output\", TEST_CASES) def test_format(datetime_string,", "this list of conditions and the following disclaimer in the documentation # and/or", "of conditions and the following disclaimer in the documentation # and/or other materials", "8, 29, 46, 691780, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" +", "are permitted provided that the following conditions are met: # # * Redistributions", "\"2012-10-30T08:55:22.123457Z\", ), ( \"2012-10-30T08:55:22.1234561Z\", dt.datetime(2012, 10, 30, 8, 55, 22, 123456, tzinfo=UTC), DATE_EXT_COMPLETE", "+ TZ_BAS, \"2012-10-30T08:55:22.123457Z\", ), ( \"2012-10-30T08:55:22.1234561Z\", dt.datetime(2012, 10, 30, 8, 55, 22, 123456,", "as dt import pytest from isodate import ( DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE, DATE_BAS_WEEK_COMPLETE, DATE_EXT_COMPLETE, DATE_EXT_ORD_COMPLETE,", "\"2012-10-30T08:55:22.1234567Z\", dt.datetime(2012, 10, 30, 8, 55, 22, 123457, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" +", "10, 12, 8, 29, 46, 691780, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE +", "list of conditions and the following disclaimer in the documentation # and/or other", "15, tzinfo=FixedOffset(-4, -30, \"-0430\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_BAS, \"1985-W15-5T10:15-0430\", ),", "+ TZ_BAS, \"1985-W15-5T10:15-0430\", ), ( \"1985-W15-5T10:15+04:45\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 45,", "and use in source and binary forms, with or without # modification, are", "# Copyright 2009, <NAME> # All rights reserved. # # Redistribution and use", "dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE", "distribution. 
# * Neither the name of the authors nor the names of", "with pytest.raises(AttributeError): datetime_isoformat(expectation, format) else: result = datetime_isoformat(expectation, format) assert result == output", "import datetime as dt import pytest from isodate import ( DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE, DATE_BAS_WEEK_COMPLETE,", "# this list of conditions and the following disclaimer in the documentation #", "DATE_EXT_COMPLETE, DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK_COMPLETE, TIME_BAS_COMPLETE, TIME_BAS_MINUTE, TIME_EXT_COMPLETE, TIME_EXT_MINUTE, TZ_BAS, TZ_EXT, TZ_HOUR, UTC, FixedOffset, ISO8601Error,", "provided that the following conditions are met: # # * Redistributions of source", "55, 22, 123456, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS,", "10, 10, 12, 25, 123000, tzinfo=UTC), DATE_BAS_COMPLETE + \"T\" + TIME_BAS_COMPLETE + \".%f\"", "list contains tuples of ISO datetime strings and the expected # result from", "dt.datetime(2012, 10, 12, 8, 29, 46, 69178, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE", "( \"1985102T1015Z\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=UTC), DATE_BAS_ORD_COMPLETE + \"T\" + TIME_BAS_MINUTE", "output\", TEST_CASES) def test_parse(datetime_string, expectation, format, output): \"\"\" Parse an ISO date string", "the following list contains tuples of ISO datetime strings and the expected #", "), ( \"2012-10-12T08:29:46.069178Z\", dt.datetime(2012, 10, 12, 8, 29, 46, 69178, tzinfo=UTC), DATE_EXT_COMPLETE +", "expectation, format, output\", TEST_CASES) def test_format(datetime_string, expectation, format, output): \"\"\" Take date object", "<gh_stars>1-10 ############################################################################## # Copyright 2009, <NAME> # All rights reserved. # # Redistribution", "\"-0430\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_BAS, \"1985-W15-5T10:15-0430\", ), ( \"1985-W15-5T10:15+04:45\", dt.datetime(1985,", "ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT", "# * Redistributions of source code must retain the above copyright notice, #", "OR TORT ############################################################################## \"\"\" Test cases for the isodatetime module. \"\"\" import datetime", "+ \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123457Z\", ), ( \"2012-10-30T08:55:22.1234561Z\", dt.datetime(2012, 10, 30, 8, 55,", "dt.datetime(1985, 4, 12, 10, 15), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_MINUTE, \"1985-04-12T10:15\", ), (", "CONTRACT, STRICT LIABILITY, OR TORT ############################################################################## \"\"\" Test cases for the isodatetime module.", "HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT", "12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_BAS_WEEK_COMPLETE + \"T\" + TIME_BAS_MINUTE + TZ_BAS,", "expectation is None: with pytest.raises(AttributeError): datetime_isoformat(expectation, format) else: result = datetime_isoformat(expectation, format) assert", "+ TZ_BAS, \"1985W155T1015+0400\", ), ( \"1985-W15-5T10:15+04\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0,", "10, 12, 8, 29, 46, 69178, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE +", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT", "TIME_BAS_COMPLETE + \".%f\" + TZ_BAS, \"20110410T101225.123000Z\", ), ( \"2012-10-12T08:29:46.069178Z\", dt.datetime(2012, 10, 12, 8,", "string from it. 
This is the reverse test to test_parse. \"\"\" if expectation", "10, 15, tzinfo=FixedOffset(4, 45, \"+04:45\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_EXT, \"1985-W15-5T10:15+04:45\",", "\"T\" + TIME_EXT_MINUTE + TZ_EXT, \"1985-W15-5T10:15+04:45\", ), ( \"20110410T101225.123000Z\", dt.datetime(2011, 4, 10, 10,", "123456, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123456Z\", ),", "10, 15, tzinfo=UTC), DATE_BAS_ORD_COMPLETE + \"T\" + TIME_BAS_MINUTE + TZ_BAS, \"1985102T1015Z\", ), (", "tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_BAS_WEEK_COMPLETE + \"T\" + TIME_BAS_MINUTE + TZ_BAS, \"1985W155T1015+0400\", ), (", "tuples of ISO datetime strings and the expected # result from the parse_datetime", "12, 10, 15, tzinfo=FixedOffset(4, 45, \"+04:45\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_EXT,", "<NAME> # All rights reserved. # # Redistribution and use in source and", "\"\"\" Test cases for the isodatetime module. \"\"\" import datetime as dt import", "30, 8, 55, 22, 123456, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\"", "INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT,", "assert result == expectation @pytest.mark.parametrize(\"datetime_string, expectation, format, output\", TEST_CASES) def test_format(datetime_string, expectation, format,", "Redistribution and use in source and binary forms, with or without # modification,", "source and binary forms, with or without # modification, are permitted provided that", "BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR", "SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED", "FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT", "NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,", "test to test_parse. \"\"\" if expectation is None: with pytest.raises(AttributeError): datetime_isoformat(expectation, format) else:", "22, 123456, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123456Z\",", "from isodate import ( DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE, DATE_BAS_WEEK_COMPLETE, DATE_EXT_COMPLETE, DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK_COMPLETE, TIME_BAS_COMPLETE, TIME_BAS_MINUTE, TIME_EXT_COMPLETE,", "-30, \"-0430\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_BAS, \"1985-W15-5T10:15-0430\", ), ( \"1985-W15-5T10:15+04:45\",", "TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2014-08-18T14:55:22.123456Z\", ), ] @pytest.mark.parametrize(\"datetime_string, expectation, format, output\", TEST_CASES)", "12, 10, 15), DATE_BAS_COMPLETE + \"T\" + TIME_BAS_MINUTE, \"19850412T1015\", ), ( \"1985-04-12T10:15\", dt.datetime(1985,", "PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,", "############################################################################## \"\"\" Test cases for the isodatetime module. \"\"\" import datetime as dt", "DATE_BAS_WEEK_COMPLETE + \"T\" + TIME_BAS_MINUTE + TZ_BAS, \"1985W155T1015+0400\", ), ( \"1985-W15-5T10:15+04\", dt.datetime(1985, 4,", "and compare it to the expected value. \"\"\" if expectation is None: with", "from this software # without specific prior written permission. 
# # THIS SOFTWARE", "TZ_HOUR, \"1985-W15-5T10:15+04\", ), ( \"1985-W15-5T10:15-0430\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(-4, -30, \"-0430\")),", "DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK_COMPLETE, TIME_BAS_COMPLETE, TIME_BAS_MINUTE, TIME_EXT_COMPLETE, TIME_EXT_MINUTE, TZ_BAS, TZ_EXT, TZ_HOUR, UTC, FixedOffset, ISO8601Error, datetime_isoformat,", "10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_HOUR, \"1985-W15-5T10:15+04\",", "if expectation is None: with pytest.raises(AttributeError): datetime_isoformat(expectation, format) else: result = datetime_isoformat(expectation, format)", "4, 10, 10, 12, 25, 123000, tzinfo=UTC), DATE_BAS_COMPLETE + \"T\" + TIME_BAS_COMPLETE +", "parse_datetime, ) # the following list contains tuples of ISO datetime strings and", "ISO string from it. This is the reverse test to test_parse. \"\"\" if", "permitted provided that the following conditions are met: # # * Redistributions of", "dt.datetime(2012, 10, 30, 8, 55, 22, 123457, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE", "expectation, format, output): \"\"\" Parse an ISO date string and compare it to", "module. \"\"\" import datetime as dt import pytest from isodate import ( DATE_BAS_COMPLETE,", "COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES,", "may be used to endorse or promote products derived from this software #", "software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED", "123000, tzinfo=UTC), DATE_BAS_COMPLETE + \"T\" + TIME_BAS_COMPLETE + \".%f\" + TZ_BAS, \"20110410T101225.123000Z\", ),", "\".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123457Z\", ), ( \"2012-10-30T08:55:22.1234561Z\", dt.datetime(2012, 10, 30, 8, 55, 22,", "ISO8601Error, datetime_isoformat, parse_datetime, ) # the following list contains tuples of ISO datetime", "forms, with or without # modification, are permitted provided that the following conditions", "\"T\" + TIME_EXT_MINUTE + TZ_EXT, \"1985-102T10:15Z\", ), ( \"1985W155T1015+0400\", dt.datetime(1985, 4, 12, 10,", "TZ_BAS, \"2012-10-30T08:55:22.123457Z\", ), ( \"2012-10-30T08:55:22.1234561Z\", dt.datetime(2012, 10, 30, 8, 55, 22, 123456, tzinfo=UTC),", "contains tuples of ISO datetime strings and the expected # result from the", "# # * Redistributions of source code must retain the above copyright notice,", "above copyright notice, # this list of conditions and the following disclaimer in", "\"+04:45\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_EXT, \"1985-W15-5T10:15+04:45\", ), ( \"20110410T101225.123000Z\", dt.datetime(2011,", "), ( \"1985-W15-5T10:15+04:45\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 45, \"+04:45\")), DATE_EXT_WEEK_COMPLETE +", "DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123456Z\", ), ( \"2014-08-18", "in source and binary forms, with or without # modification, are permitted provided", "output): \"\"\" Parse an ISO date string and compare it to the expected", "29, 46, 691780, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS,", "+ TIME_BAS_MINUTE, \"19850412T1015\", ), ( \"1985-04-12T10:15\", dt.datetime(1985, 4, 12, 10, 15), DATE_EXT_COMPLETE +", "the following disclaimer in the documentation # and/or other materials provided with the", "tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123457Z\", ), (", "# # Redistribution and use in source and binary forms, with or 
without", "notice, # this list of conditions and the following disclaimer. # * Redistributions", "( \"1985-W15-5T10:15+04\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_EXT_WEEK_COMPLETE + \"T\"", "method. A result of None means an ISO8601Error # is expected. TEST_CASES =", "15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_BAS_WEEK_COMPLETE + \"T\" + TIME_BAS_MINUTE + TZ_BAS, \"1985W155T1015+0400\", ),", "TZ_BAS, \"1985-W15-5T10:15-0430\", ), ( \"1985-W15-5T10:15+04:45\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 45, \"+04:45\")),", "55, 22, 123457, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS,", "compare it to the expected value. \"\"\" if expectation is None: with pytest.raises(ISO8601Error):", "TZ_BAS, \"2012-10-12T08:29:46.691780Z\", ), ( \"2012-10-30T08:55:22.1234567Z\", dt.datetime(2012, 10, 30, 8, 55, 22, 123457, tzinfo=UTC),", "import pytest from isodate import ( DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE, DATE_BAS_WEEK_COMPLETE, DATE_EXT_COMPLETE, DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK_COMPLETE, TIME_BAS_COMPLETE,", "15), DATE_BAS_COMPLETE + \"T\" + TIME_BAS_MINUTE, \"19850412T1015\", ), ( \"1985-04-12T10:15\", dt.datetime(1985, 4, 12,", "), ( \"1985-102T10:15Z\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=UTC), DATE_EXT_ORD_COMPLETE + \"T\" +", "TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.691780Z\", ), ( \"2012-10-30T08:55:22.1234567Z\", dt.datetime(2012, 10, 30, 8,", "\"1985-W15-5T10:15-0430\", ), ( \"1985-W15-5T10:15+04:45\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 45, \"+04:45\")), DATE_EXT_WEEK_COMPLETE", "DATE_BAS_COMPLETE + \"T\" + TIME_BAS_MINUTE, \"19850412T1015\", ), ( \"1985-04-12T10:15\", dt.datetime(1985, 4, 12, 10,", "tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.691780Z\", ), (", "parse_datetime(datetime_string) else: result = parse_datetime(datetime_string) assert result == expectation @pytest.mark.parametrize(\"datetime_string, expectation, format, output\",", "TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123457Z\", ), ( \"2012-10-30T08:55:22.1234561Z\", dt.datetime(2012, 10, 30, 8,", "None, DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2014-08-18T14:55:22.123456Z\", ), ]", "+ TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2014-08-18T14:55:22.123456Z\", ), ] @pytest.mark.parametrize(\"datetime_string, expectation, format, output\",", "strings and the expected # result from the parse_datetime method. A result of", "DATE_BAS_ORD_COMPLETE, DATE_BAS_WEEK_COMPLETE, DATE_EXT_COMPLETE, DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK_COMPLETE, TIME_BAS_COMPLETE, TIME_BAS_MINUTE, TIME_EXT_COMPLETE, TIME_EXT_MINUTE, TZ_BAS, TZ_EXT, TZ_HOUR, UTC,", "+ \"T\" + TIME_EXT_MINUTE, \"1985-04-12T10:15\", ), ( \"1985102T1015Z\", dt.datetime(1985, 4, 12, 10, 15,", "tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.069178Z\", ), (", "TZ_BAS, \"1985102T1015Z\", ), ( \"1985-102T10:15Z\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=UTC), DATE_EXT_ORD_COMPLETE +", "), ( \"2014-08-18 14:55:22.123456Z\", None, DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" +", "prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS", "DATE_BAS_COMPLETE + \"T\" + TIME_BAS_COMPLETE + \".%f\" + TZ_BAS, \"20110410T101225.123000Z\", ), ( \"2012-10-12T08:29:46.069178Z\",", "TZ_EXT, \"1985-W15-5T10:15+04:45\", ), ( \"20110410T101225.123000Z\", dt.datetime(2011, 4, 10, 10, 12, 25, 123000, tzinfo=UTC),", "DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE, DATE_BAS_WEEK_COMPLETE, DATE_EXT_COMPLETE, DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK_COMPLETE, TIME_BAS_COMPLETE, TIME_BAS_MINUTE, TIME_EXT_COMPLETE, TIME_EXT_MINUTE, TZ_BAS, TZ_EXT, TZ_HOUR,", "tzinfo=FixedOffset(-4, -30, \"-0430\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_BAS, \"1985-W15-5T10:15-0430\", ), (", "IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR", "\"1985-W15-5T10:15+04:45\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 45, \"+04:45\")), DATE_EXT_WEEK_COMPLETE + \"T\" +", "12, 10, 15, tzinfo=FixedOffset(-4, -30, \"-0430\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_BAS,", "4, 12, 10, 15, tzinfo=UTC), DATE_BAS_ORD_COMPLETE + \"T\" + TIME_BAS_MINUTE + TZ_BAS, \"1985102T1015Z\",", "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED.", "OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,", "4, 12, 10, 15, tzinfo=FixedOffset(-4, -30, \"-0430\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE +", "+ TIME_EXT_MINUTE + TZ_HOUR, \"1985-W15-5T10:15+04\", ), ( \"1985-W15-5T10:15-0430\", dt.datetime(1985, 4, 12, 10, 15,", "to endorse or promote products derived from this software # without specific prior", "UTC, FixedOffset, ISO8601Error, datetime_isoformat, parse_datetime, ) # the following list contains tuples of", "TZ_BAS, \"2012-10-12T08:29:46.069178Z\", ), ( \"2012-10-12T08:29:46.691780Z\", dt.datetime(2012, 10, 12, 8, 29, 46, 691780, tzinfo=UTC),", "TZ_HOUR, UTC, FixedOffset, ISO8601Error, datetime_isoformat, parse_datetime, ) # the following list contains tuples", "OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE", "OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR", "derived from this software # without specific prior written permission. # # THIS", "\"1985-102T10:15Z\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=UTC), DATE_EXT_ORD_COMPLETE + \"T\" + TIME_EXT_MINUTE +", "the names of its contributors # may be used to endorse or promote", "LIABILITY, OR TORT ############################################################################## \"\"\" Test cases for the isodatetime module. 
\"\"\" import", "INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT", "use in source and binary forms, with or without # modification, are permitted", "SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT,", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" #", "result = parse_datetime(datetime_string) assert result == expectation @pytest.mark.parametrize(\"datetime_string, expectation, format, output\", TEST_CASES) def", "\"T\" + TIME_BAS_COMPLETE + \".%f\" + TZ_BAS, \"20110410T101225.123000Z\", ), ( \"2012-10-12T08:29:46.069178Z\", dt.datetime(2012, 10,", "Neither the name of the authors nor the names of its contributors #", "15, tzinfo=FixedOffset(4, 45, \"+04:45\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_EXT, \"1985-W15-5T10:15+04:45\", ),", "must reproduce the above copyright notice, # this list of conditions and the", "10, 30, 8, 55, 22, 123457, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE +", "# All rights reserved. # # Redistribution and use in source and binary", "expected value. \"\"\" if expectation is None: with pytest.raises(ISO8601Error): parse_datetime(datetime_string) else: result =", "following conditions are met: # # * Redistributions of source code must retain", "12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_HOUR,", "DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE", "be used to endorse or promote products derived from this software # without", "disclaimer in the documentation # and/or other materials provided with the distribution. #", "# this list of conditions and the following disclaimer. # * Redistributions in", "\"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.691780Z\", ), ( \"2012-10-30T08:55:22.1234567Z\", dt.datetime(2012, 10,", "the parse_datetime method. A result of None means an ISO8601Error # is expected.", "A result of None means an ISO8601Error # is expected. TEST_CASES = [", "\"19850412T1015\", ), ( \"1985-04-12T10:15\", dt.datetime(1985, 4, 12, 10, 15), DATE_EXT_COMPLETE + \"T\" +", "( \"2014-08-18 14:55:22.123456Z\", None, DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS,", "\"T\" + TIME_EXT_MINUTE + TZ_BAS, \"1985-W15-5T10:15-0430\", ), ( \"1985-W15-5T10:15+04:45\", dt.datetime(1985, 4, 12, 10,", "\"2012-10-30T08:55:22.123456Z\", ), ( \"2014-08-18 14:55:22.123456Z\", None, DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\"", "DATE_EXT_ORD_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_EXT, \"1985-102T10:15Z\", ), ( \"1985W155T1015+0400\", dt.datetime(1985, 4,", "and the expected # result from the parse_datetime method. 
A result of None", "\"1985W155T1015+0400\", ), ( \"1985-W15-5T10:15+04\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_EXT_WEEK_COMPLETE", "following list contains tuples of ISO datetime strings and the expected # result", "+ TIME_EXT_MINUTE, \"1985-04-12T10:15\", ), ( \"1985102T1015Z\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=UTC), DATE_BAS_ORD_COMPLETE", "( \"1985-W15-5T10:15+04:45\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 45, \"+04:45\")), DATE_EXT_WEEK_COMPLETE + \"T\"", "None: with pytest.raises(ISO8601Error): parse_datetime(datetime_string) else: result = parse_datetime(datetime_string) assert result == expectation @pytest.mark.parametrize(\"datetime_string,", "8, 29, 46, 69178, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" +", "DATE_EXT_COMPLETE + \"T\" + TIME_EXT_MINUTE, \"1985-04-12T10:15\", ), ( \"1985102T1015Z\", dt.datetime(1985, 4, 12, 10,", "and/or other materials provided with the distribution. # * Neither the name of", "* Neither the name of the authors nor the names of its contributors", "ISO datetime strings and the expected # result from the parse_datetime method. A", "disclaimer. # * Redistributions in binary form must reproduce the above copyright notice,", "SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF", "12, 8, 29, 46, 691780, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\"", "GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION)", "+ TZ_EXT, \"1985-W15-5T10:15+04:45\", ), ( \"20110410T101225.123000Z\", dt.datetime(2011, 4, 10, 10, 12, 25, 123000,", "+ TZ_BAS, \"2014-08-18T14:55:22.123456Z\", ), ] @pytest.mark.parametrize(\"datetime_string, expectation, format, output\", TEST_CASES) def test_parse(datetime_string, expectation,", "expected. TEST_CASES = [ ( \"19850412T1015\", dt.datetime(1985, 4, 12, 10, 15), DATE_BAS_COMPLETE +", "AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT", "other materials provided with the distribution. # * Neither the name of the", "Copyright 2009, <NAME> # All rights reserved. # # Redistribution and use in", "and the following disclaimer. # * Redistributions in binary form must reproduce the", "# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #", "\".%f\" + TZ_BAS, \"2014-08-18T14:55:22.123456Z\", ), ] @pytest.mark.parametrize(\"datetime_string, expectation, format, output\", TEST_CASES) def test_parse(datetime_string,", "TEST_CASES = [ ( \"19850412T1015\", dt.datetime(1985, 4, 12, 10, 15), DATE_BAS_COMPLETE + \"T\"", "the above copyright notice, # this list of conditions and the following disclaimer", "\"1985-W15-5T10:15-0430\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(-4, -30, \"-0430\")), DATE_EXT_WEEK_COMPLETE + \"T\" +", "DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.691780Z\", ), ( \"2012-10-30T08:55:22.1234567Z\",", "dt.datetime(1985, 4, 12, 10, 15, tzinfo=UTC), DATE_BAS_ORD_COMPLETE + \"T\" + TIME_BAS_MINUTE + TZ_BAS,", "\"T\" + TIME_EXT_MINUTE + TZ_HOUR, \"1985-W15-5T10:15+04\", ), ( \"1985-W15-5T10:15-0430\", dt.datetime(1985, 4, 12, 10,", "12, 8, 29, 46, 69178, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\"", "output): \"\"\" Take date object and create ISO string from it. 
This is", "TZ_BAS, \"2014-08-18T14:55:22.123456Z\", ), ] @pytest.mark.parametrize(\"datetime_string, expectation, format, output\", TEST_CASES) def test_parse(datetime_string, expectation, format,", "TZ_BAS, TZ_EXT, TZ_HOUR, UTC, FixedOffset, ISO8601Error, datetime_isoformat, parse_datetime, ) # the following list", "@pytest.mark.parametrize(\"datetime_string, expectation, format, output\", TEST_CASES) def test_parse(datetime_string, expectation, format, output): \"\"\" Parse an", "# is expected. TEST_CASES = [ ( \"19850412T1015\", dt.datetime(1985, 4, 12, 10, 15),", "products derived from this software # without specific prior written permission. # #", "+ TIME_BAS_MINUTE + TZ_BAS, \"1985102T1015Z\", ), ( \"1985-102T10:15Z\", dt.datetime(1985, 4, 12, 10, 15,", "69178, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-12T08:29:46.069178Z\", ),", "date object and create ISO string from it. This is the reverse test", "must retain the above copyright notice, # this list of conditions and the", "IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "pytest from isodate import ( DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE, DATE_BAS_WEEK_COMPLETE, DATE_EXT_COMPLETE, DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK_COMPLETE, TIME_BAS_COMPLETE, TIME_BAS_MINUTE,", "+ \"T\" + TIME_BAS_MINUTE + TZ_BAS, \"1985W155T1015+0400\", ), ( \"1985-W15-5T10:15+04\", dt.datetime(1985, 4, 12,", "TZ_BAS, \"2012-10-30T08:55:22.123456Z\", ), ( \"2014-08-18 14:55:22.123456Z\", None, DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE +", "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY", "+ \"T\" + TIME_EXT_MINUTE + TZ_EXT, \"1985-102T10:15Z\", ), ( \"1985W155T1015+0400\", dt.datetime(1985, 4, 12,", "# without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY", "is None: with pytest.raises(ISO8601Error): parse_datetime(datetime_string) else: result = parse_datetime(datetime_string) assert result == expectation", "# result from the parse_datetime method. A result of None means an ISO8601Error", "\"T\" + TIME_BAS_MINUTE, \"19850412T1015\", ), ( \"1985-04-12T10:15\", dt.datetime(1985, 4, 12, 10, 15), DATE_EXT_COMPLETE", "it to the expected value. \"\"\" if expectation is None: with pytest.raises(ISO8601Error): parse_datetime(datetime_string)", "object and create ISO string from it. This is the reverse test to", "AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL", "TIME_EXT_MINUTE + TZ_EXT, \"1985-W15-5T10:15+04:45\", ), ( \"20110410T101225.123000Z\", dt.datetime(2011, 4, 10, 10, 12, 25,", "+ \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2014-08-18T14:55:22.123456Z\", ), ] @pytest.mark.parametrize(\"datetime_string, expectation,", "\"2014-08-18 14:55:22.123456Z\", None, DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2014-08-18T14:55:22.123456Z\",", "\".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123456Z\", ), ( \"2014-08-18 14:55:22.123456Z\", None, DATE_EXT_COMPLETE + \"T\" +", "test_format(datetime_string, expectation, format, output): \"\"\" Take date object and create ISO string from", "dt.datetime(2012, 10, 12, 8, 29, 46, 691780, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE", "TZ_EXT, \"1985-102T10:15Z\", ), ( \"1985W155T1015+0400\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")),", "the above copyright notice, # this list of conditions and the following disclaimer.", "of the authors nor the names of its contributors # may be used", "to test_parse. \"\"\" if expectation is None: with pytest.raises(AttributeError): datetime_isoformat(expectation, format) else: result", "and binary forms, with or without # modification, are permitted provided that the", "conditions and the following disclaimer in the documentation # and/or other materials provided", "tzinfo=UTC), DATE_BAS_ORD_COMPLETE + \"T\" + TIME_BAS_MINUTE + TZ_BAS, \"1985102T1015Z\", ), ( \"1985-102T10:15Z\", dt.datetime(1985,", "TIME_BAS_MINUTE + TZ_BAS, \"1985102T1015Z\", ), ( \"1985-102T10:15Z\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=UTC),", "DATE_EXT_COMPLETE + \"T\" + TIME_EXT_COMPLETE + \".%f\" + TZ_BAS, \"2012-10-30T08:55:22.123457Z\", ), ( \"2012-10-30T08:55:22.1234561Z\",", "45, \"+04:45\")), DATE_EXT_WEEK_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_EXT, \"1985-W15-5T10:15+04:45\", ), ( \"20110410T101225.123000Z\",", "4, 12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")), DATE_BAS_WEEK_COMPLETE + \"T\" + TIME_BAS_MINUTE +", "+ \".%f\" + TZ_BAS, \"2014-08-18T14:55:22.123456Z\", ), ] @pytest.mark.parametrize(\"datetime_string, expectation, format, output\", TEST_CASES) def", "SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND", "\"1985-W15-5T10:15+04\", ), ( \"1985-W15-5T10:15-0430\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(-4, -30, \"-0430\")), DATE_EXT_WEEK_COMPLETE", "\"2012-10-12T08:29:46.691780Z\", dt.datetime(2012, 10, 12, 8, 29, 46, 691780, tzinfo=UTC), DATE_EXT_COMPLETE + \"T\" +", "\"T\" + TIME_BAS_MINUTE + TZ_BAS, \"1985102T1015Z\", ), ( \"1985-102T10:15Z\", dt.datetime(1985, 4, 12, 10,", "TZ_BAS, \"1985W155T1015+0400\", ), ( \"1985-W15-5T10:15+04\", dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, \"+0400\")),", "\"\"\" import datetime as dt import pytest from isodate import ( DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE,", "10, 15), DATE_EXT_COMPLETE + \"T\" + TIME_EXT_MINUTE, \"1985-04-12T10:15\", ), ( \"1985102T1015Z\", dt.datetime(1985, 4,", "import ( DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE, DATE_BAS_WEEK_COMPLETE, DATE_EXT_COMPLETE, DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK_COMPLETE, TIME_BAS_COMPLETE, TIME_BAS_MINUTE, TIME_EXT_COMPLETE, TIME_EXT_MINUTE, TZ_BAS,", "DATE_BAS_ORD_COMPLETE + \"T\" + TIME_BAS_MINUTE + TZ_BAS, \"1985102T1015Z\", ), ( \"1985-102T10:15Z\", dt.datetime(1985, 4,", "12, 10, 15, tzinfo=UTC), DATE_EXT_ORD_COMPLETE + \"T\" + TIME_EXT_MINUTE + TZ_EXT, \"1985-102T10:15Z\", ),", "TEST_CASES) def test_format(datetime_string, expectation, 
##############################################################################
# Copyright 2009, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#  * Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#  * Neither the name of the authors nor the names of its contributors
#    may be used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT
##############################################################################
"""
Test cases for the isodatetime module.
"""
import datetime as dt

import pytest

from isodate import (
    DATE_BAS_COMPLETE,
    DATE_BAS_ORD_COMPLETE,
    DATE_BAS_WEEK_COMPLETE,
    DATE_EXT_COMPLETE,
    DATE_EXT_ORD_COMPLETE,
    DATE_EXT_WEEK_COMPLETE,
    TIME_BAS_COMPLETE,
    TIME_BAS_MINUTE,
    TIME_EXT_COMPLETE,
    TIME_EXT_MINUTE,
    TZ_BAS,
    TZ_EXT,
    TZ_HOUR,
    UTC,
    FixedOffset,
    ISO8601Error,
    datetime_isoformat,
    parse_datetime,
)

# The following list contains tuples of ISO datetime strings and the expected
# result from the parse_datetime method. A result of None means an
# ISO8601Error is expected.
TEST_CASES = [
    ("19850412T1015", dt.datetime(1985, 4, 12, 10, 15),
     DATE_BAS_COMPLETE + "T" + TIME_BAS_MINUTE, "19850412T1015"),
    ("1985-04-12T10:15", dt.datetime(1985, 4, 12, 10, 15),
     DATE_EXT_COMPLETE + "T" + TIME_EXT_MINUTE, "1985-04-12T10:15"),
    ("1985102T1015Z", dt.datetime(1985, 4, 12, 10, 15, tzinfo=UTC),
     DATE_BAS_ORD_COMPLETE + "T" + TIME_BAS_MINUTE + TZ_BAS, "1985102T1015Z"),
    ("1985-102T10:15Z", dt.datetime(1985, 4, 12, 10, 15, tzinfo=UTC),
     DATE_EXT_ORD_COMPLETE + "T" + TIME_EXT_MINUTE + TZ_EXT, "1985-102T10:15Z"),
    ("1985W155T1015+0400",
     dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, "+0400")),
     DATE_BAS_WEEK_COMPLETE + "T" + TIME_BAS_MINUTE + TZ_BAS, "1985W155T1015+0400"),
    ("1985-W15-5T10:15+04",
     dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, "+0400")),
     DATE_EXT_WEEK_COMPLETE + "T" + TIME_EXT_MINUTE + TZ_HOUR, "1985-W15-5T10:15+04"),
    ("1985-W15-5T10:15-0430",
     dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(-4, -30, "-0430")),
     DATE_EXT_WEEK_COMPLETE + "T" + TIME_EXT_MINUTE + TZ_BAS, "1985-W15-5T10:15-0430"),
    ("1985-W15-5T10:15+04:45",
     dt.datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 45, "+04:45")),
     DATE_EXT_WEEK_COMPLETE + "T" + TIME_EXT_MINUTE + TZ_EXT, "1985-W15-5T10:15+04:45"),
    ("20110410T101225.123000Z",
     dt.datetime(2011, 4, 10, 10, 12, 25, 123000, tzinfo=UTC),
     DATE_BAS_COMPLETE + "T" + TIME_BAS_COMPLETE + ".%f" + TZ_BAS,
     "20110410T101225.123000Z"),
    ("2012-10-12T08:29:46.069178Z",
     dt.datetime(2012, 10, 12, 8, 29, 46, 69178, tzinfo=UTC),
     DATE_EXT_COMPLETE + "T" + TIME_EXT_COMPLETE + ".%f" + TZ_BAS,
     "2012-10-12T08:29:46.069178Z"),
    ("2012-10-12T08:29:46.691780Z",
     dt.datetime(2012, 10, 12, 8, 29, 46, 691780, tzinfo=UTC),
     DATE_EXT_COMPLETE + "T" + TIME_EXT_COMPLETE + ".%f" + TZ_BAS,
     "2012-10-12T08:29:46.691780Z"),
    ("2012-10-30T08:55:22.1234567Z",
     dt.datetime(2012, 10, 30, 8, 55, 22, 123457, tzinfo=UTC),
     DATE_EXT_COMPLETE + "T" + TIME_EXT_COMPLETE + ".%f" + TZ_BAS,
     "2012-10-30T08:55:22.123457Z"),
    ("2012-10-30T08:55:22.1234561Z",
     dt.datetime(2012, 10, 30, 8, 55, 22, 123456, tzinfo=UTC),
     DATE_EXT_COMPLETE + "T" + TIME_EXT_COMPLETE + ".%f" + TZ_BAS,
     "2012-10-30T08:55:22.123456Z"),
    ("2014-08-18 14:55:22.123456Z", None,
     DATE_EXT_COMPLETE + "T" + TIME_EXT_COMPLETE + ".%f" + TZ_BAS,
     "2014-08-18T14:55:22.123456Z"),
]


@pytest.mark.parametrize("datetime_string, expectation, format, output", TEST_CASES)
def test_parse(datetime_string, expectation, format, output):
    """
    Parse an ISO datetime string and compare it to the expected value.
    """
    if expectation is None:
        with pytest.raises(ISO8601Error):
            parse_datetime(datetime_string)
    else:
        result = parse_datetime(datetime_string)
        assert result == expectation


@pytest.mark.parametrize("datetime_string, expectation, format, output", TEST_CASES)
def test_format(datetime_string, expectation, format, output):
    """
    Take a datetime object and create an ISO string from it.

    This is the reverse test to test_parse.
    """
    if expectation is None:
        with pytest.raises(AttributeError):
            datetime_isoformat(expectation, format)
    else:
        result = datetime_isoformat(expectation, format)
        assert result == output
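# A minimal usage sketch, not part of the original test suite: it exercises the
# same parse_datetime / datetime_isoformat API and the format constants imported
# above, mirroring the second TEST_CASES entry. Run this file directly to see a
# simple round trip outside of pytest.
if __name__ == "__main__":
    # string -> datetime
    parsed = parse_datetime("1985-04-12T10:15")
    assert parsed == dt.datetime(1985, 4, 12, 10, 15)
    # datetime -> string, using an explicit extended-format template
    rendered = datetime_isoformat(parsed, DATE_EXT_COMPLETE + "T" + TIME_EXT_MINUTE)
    assert rendered == "1985-04-12T10:15"
    print("round trip OK:", rendered)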
#!/usr/bin/python3
"""
PyTorch Variational Autoencoder Network Implementation
"""
from itertools import chain
import time
import json
import pickle

import numpy as np
import torch
from torch.autograd import Variable
from torch import nn
from torch import optim
from torch.nn import functional as F
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score


class Encoder(nn.Module):
    """
    Probabilistic Encoder

    Returns the mean and the variance of z ~ q(z|x). The prior over the
    latent z is assumed to be normal(0, I).

    Arguments:
        input_dim {int} -- number of features

    Returns:
        (tensor, tensor) -- mean and variance of the latent variable
        output from the forward propagation
    """
    def __init__(self, input_dim, config):
        super(Encoder, self).__init__()
        config_encoder = json.loads(config.get("encoder"))
        config_read_mu = json.loads(config.get("read_mu"))
        config_read_logvar = json.loads(config.get("read_sigma"))
        config_encoder[0]['in_features'] = input_dim

        # Build the encoder body from the layer descriptions in the config.
        encoder_network = []
        for layer in config_encoder:
            if layer['type'] == 'linear':
                encoder_network.append(nn.Linear(layer['in_features'], layer['out_features']))
            elif layer['type'] == 'relu':
                encoder_network.append(nn.ReLU())
            elif layer['type'] == 'tanh':
                encoder_network.append(nn.Tanh())
            elif layer['type'] == 'dropout':
                encoder_network.append(nn.Dropout(layer['rate']))
            elif layer['type'] == 'batch_norm':
                encoder_network.append(nn.BatchNorm1d(layer['num_features']))
        self.encoder_network = nn.Sequential(*encoder_network)

        # Heads that read out the mean and log-variance of q(z|x).
        self.read_mu = nn.Linear(config_read_mu['in_features'], config.getint('latent_dim'))
        self.read_logvar = nn.Linear(config_read_logvar['in_features'], config.getint('latent_dim'))
        self.initialize_parameters()

    def initialize_parameters(self):
        """ Xavier initialization """
        for layer in self.modules():
            if isinstance(layer, nn.Linear):
                bound = 1 / np.sqrt(layer.in_features)
                layer.weight.data.uniform_(-bound, bound)
                layer.bias.data.zero_()

    def forward(self, inputs):
        """ Forward propagation """
        hidden_state = self.encoder_network(inputs)
        mean = self.read_mu(hidden_state)
        logvar = self.read_logvar(hidden_state)
        return mean, logvar


class Decoder(nn.Module):
    """ Decoder """
    def __init__(self, input_dim, config):
        super(Decoder, self).__init__()
        config_decoder = json.loads(config.get("decoder"))
        self._distr = config['distribution']

        # Build the decoder from the layer descriptions in the config.
        decoder_network = []
        for layer in config_decoder:
            if layer['type'] == 'linear':
                decoder_network.append(nn.Linear(layer['in_features'], layer['out_features']))
            elif layer['type'] == 'relu':
                decoder_network.append(nn.ReLU())
            elif layer['type'] == 'relu6':
                decoder_network.append(nn.ReLU6())
            elif layer['type'] == 'tanh':
                decoder_network.append(nn.Tanh())
            elif layer['type'] == 'sigmoid':
                decoder_network.append(nn.Sigmoid())
            elif layer['type'] == 'dropout':
                decoder_network.append(nn.Dropout(layer['rate']))
            elif layer['type'] == 'batch_norm':
                decoder_network.append(nn.BatchNorm1d(layer['num_features']))
            elif layer['type'] == 'read_x':
                decoder_network.append(nn.Linear(layer['in_features'], input_dim))
        self.decoder = nn.Sequential(*decoder_network)

        if self._distr == 'poisson':
            self.read_alpha = nn.Sequential(
                nn.Linear(config.getint('latent_dim'), input_dim),
                nn.ReLU6()
            )
        self.initialize_parameters()

    def initialize_parameters(self):
        for layer in self.modules():
            if isinstance(layer, nn.Linear):
                bound = 1 / np.sqrt(layer.in_features)
                layer.weight.data.uniform_(-bound, bound)
                layer.bias.data.zero_()

    def forward(self, z):
        if self._distr == 'poisson':
            alpha = 0.5 * self.read_alpha(z)
            return alpha * self.decoder(z)
        else:
            return self.decoder(z)


class VAE(nn.Module):
    """
    VAE, x --> mu, log_sigma_sq --> N(mu, log_sigma_sq) --> z --> x
    """
    def __init__(self, input_dim, config, checkpoint_directory):
        super(VAE, self).__init__()
        self.config = config
        self.model_name = '{}{}'.format(config['model']['name'], config['model']['config_id'])
        self.checkpoint_directory = checkpoint_directory
        self._distr = config['model']['distribution']
        self._device = config['model']['device']
        self._encoder = Encoder(input_dim, config['model'])
        self._decoder = Decoder(input_dim, config['model'])
        self.num_epochs = config.getint('training', 'n_epochs')
        self._optim = optim.Adam(
            self.parameters(),
            lr=config.getfloat('training', 'lr'),
            betas=json.loads(config['training']['betas'])
        )
        self.mu = None
        self.logvar = None
        self.precentile_threshold = config.getfloat('model', 'threshold')
        self.threshold = None
        self.cur_epoch = 0
        self._save_every = config.getint('model', 'save_every')

    def parameters(self):
        return chain(self._encoder.parameters(), self._decoder.parameters())

    def _sample_z(self, mu, logvar):
        # Reparameterization trick: z = mu + sigma * epsilon, epsilon ~ N(0, I)
        epsilon = torch.randn(mu.size())
        epsilon = Variable(epsilon, requires_grad=False).type(torch.FloatTensor).to(self._device)
        sigma = torch.exp(logvar / 2)
        return mu + sigma * epsilon

    def forward(self, inputs):
        """ Forward propagation """
        self.mu, self.logvar = self._encoder(inputs)
        latent = self._sample_z(self.mu, self.logvar)
        theta = self._decoder(latent)
        return theta

    def _to_numpy(self, tensor):
        return tensor.data.cpu().numpy()

    def poisson_cross_entropy(self, logtheta, inputs):
        return - inputs * logtheta + torch.exp(logtheta)

    def loglikelihood(self, reduction):
        """ Return the log-likelihood loss for the configured distribution """
        if self._distr == 'poisson':
            return nn.PoissonNLLLoss(reduction=reduction)
        elif self._distr == 'bernoulli':
            return nn.BCELoss(reduction=reduction)
        else:
            raise ValueError('{} is not a valid distribution'.format(self._distr))

    def fit(self, trainloader, print_every=1):
        """ Train the neural network """
        start_time = time.time()
        storage = {
            'loss': [], 'kldiv': [], '-logp(x|z)': [], 'precision': [],
            'recall': [], 'log_densities': None, 'params': None
        }
        for epoch in range(self.cur_epoch, self.cur_epoch + self.num_epochs):
            self.cur_epoch += 1

            # temporary storage
            losses, kldivs, neglogliks = [], [], []
            for inputs, _ in trainloader:
                self.train()
                inputs = inputs.to(self._device)
                logtheta = self.forward(inputs)
                loglikelihood = -self.loglikelihood(reduction='sum')(logtheta, inputs) / inputs.shape[0]
                assert not torch.isnan(loglikelihood).any()
                kl_div = -0.5 * torch.sum(1 + self.logvar - self.mu.pow(2) - self.logvar.exp()) / inputs.shape[0]
                loss = -loglikelihood + kl_div
                loss.backward()
                self._optim.step()
                self._optim.zero_grad()
                losses.append(self._to_numpy(loss))
                kldivs.append(self._to_numpy(kl_div))
                neglogliks.append(self._to_numpy(-loglikelihood))

            storage['loss'].append(np.mean(losses))
            storage['kldiv'].append(np.mean(kldivs))
            storage['-logp(x|z)'].append(np.mean(neglogliks))

            if (epoch + 1) % print_every == 0:
                epoch_time = self._get_time(start_time, time.time())
                f1, acc, prec, recall, _, _ = self.evaluate(trainloader)
                storage['precision'].append(prec)
                storage['recall'].append(recall)
                print('epoch: {} | loss: {:.3f} | -logp(x|z): {:.3f} | kldiv: {:.3f} | time: {}'.format(
                    epoch + 1, storage['loss'][-1], storage['-logp(x|z)'][-1],
                    storage['kldiv'][-1], epoch_time))
                print('F1. {:.3f} | acc. {:.3f} | prec.: {:.3f} | rec. {:.3f}'.format(
                    f1, acc, prec, recall))

            if (epoch + 1) % self._save_every == 0:
                f1, acc, prec, recall, _, _ = self.evaluate(trainloader)
                self.save_checkpoint(f1)

        storage['log_densities'] = self._get_densities(trainloader)
        storage['params'] = self._get_parameters(trainloader)
        with open('./results/{}.pkl'.format(self.model_name), 'wb') as _f:
            pickle.dump(storage, _f, pickle.HIGHEST_PROTOCOL)

    def _get_time(self, starting_time, current_time):
        total_time = current_time - starting_time
        minutes = round(total_time // 60)
        seconds = round(total_time % 60)
        return '{} min., {} sec.'.format(minutes, seconds)

    def _get_parameters(self, dataloader):
        self.eval()
        parameters = []
        for inputs, _ in dataloader:
            inputs = inputs.to(self._device)
            logtheta = self._to_numpy(self.forward(inputs))
            parameters.extend(logtheta)
        if self._distr == 'poisson':
            parameters = np.exp(np.array(parameters))
        else:
            parameters = np.array(parameters)
        return parameters

    def _get_densities(self, dataloader):
        all_log_densities = []
        for inputs, _ in dataloader:
            mini_batch_log_densities = self._evaluate_probability(inputs)
            all_log_densities.extend(mini_batch_log_densities)
        all_log_densities = np.array(all_log_densities)
        return all_log_densities

    def _evaluate_probability(self, inputs):
        self.eval()
        with torch.no_grad():
            inputs = inputs.to(self._device)
            logtheta = self.forward(inputs)
            log_likelihood = -self.loglikelihood(reduction='none')(logtheta, inputs)
            log_likelihood = torch.sum(log_likelihood, 1)
            assert inputs.shape[0] == log_likelihood.shape[0]
            return self._to_numpy(log_likelihood)

    def _find_threshold(self, dataloader):
        log_densities = self._get_densities(dataloader)
        lowest_density = np.argmin(log_densities)
        self.threshold = np.nanpercentile(log_densities, self.precentile_threshold)
        return lowest_density

    def evaluate(self, dataloader):
        """ Evaluate accuracy. """
        self._find_threshold(dataloader)
        predictions = []
        ground_truth = []
        log_densities = []
        for inputs, targets in dataloader:
            pred, mini_batch_log_densities = self.predict(inputs)
            predictions.extend(pred)
            ground_truth.extend(list(self._to_numpy(targets)))
            log_densities.extend(mini_batch_log_densities)
        log_densities = np.array(log_densities)
        if np.isnan(log_densities).any():
            print(np.where(np.isnan(log_densities)))
        f1 = f1_score(ground_truth, predictions)
        accuracy = accuracy_score(ground_truth, predictions)
        precision = precision_score(ground_truth, predictions)
        recall = recall_score(ground_truth, predictions)
        return f1, accuracy, precision, recall, log_densities, ground_truth

    def predict(self, inputs):
        """ Predict the class of the inputs """
        log_density = self._evaluate_probability(inputs)
        predictions = np.zeros_like(log_density).astype(int)
        predictions[log_density < self.threshold] = 1
        return list(predictions), log_density

    def save_checkpoint(self, f1_score):
        """ Save model parameters under config['model_path'] """
        model_path = '{}/epoch_{}-f1_{}.pt'.format(
            self.checkpoint_directory, self.cur_epoch, f1_score)
        checkpoint = {
            'model_state_dict': self.state_dict(),
            'optimizer_state_dict': self._optim.state_dict()
        }
        torch.save(checkpoint, model_path)

    def restore_model(self, filename, epoch):
        """ Restore the model parameters """
        model_path = '{}{}/{}.pt'.format(
            self.config['paths']['checkpoints_directory'], self.model_name, filename)
        checkpoint = torch.load(model_path)
        self.load_state_dict(checkpoint['model_state_dict'])
        self._optim.load_state_dict(checkpoint['optimizer_state_dict'])
        self.cur_epoch = epoch
self.checkpoint_directory, self.cur_epoch, f1_score) checkpoint = { 'model_state_dict': self.state_dict(), 'optimizer_state_dict': self._optim.state_dict()", "storage losses, kldivs, neglogliks = [], [], [] for inputs, _ in trainloader:", "for inputs, _ in trainloader: self.train() inputs = inputs.to(self._device) logtheta = self.forward(inputs) loglikelihood", "= None self.cur_epoch = 0 self._save_every = config.getint('model', 'save_every') def parameters(self): return chain(self._encoder.parameters(),", "input_dim, config): super(Decoder, self).__init__() config_decoder = json.loads(config.get(\"decoder\")) self._distr = config['distribution'] decoder_network = []", "--> N(mu, log_sigma_sq) --> z --> x \"\"\" def __init__(self, input_dim, config, checkpoint_directory):", "epoch_time)) print('F1. {:.3f} | acc. {:.3f} | prec.: {:.3f} | rec. {:.3f}'.format(f1, acc,", "\"\"\" hidden_state = self.encoder_network(inputs) mean = self.read_mu(hidden_state) logvar = self.read_logvar(hidden_state) return mean, logvar", "mini_batch_log_densities = self.predict(inputs) predictions.extend(pred) ground_truth.extend(list(self._to_numpy(targets))) log_densities.extend(mini_batch_log_densities) log_densities = np.array(log_densities) if np.isnan(log_densities).any(): print(np.where(np.isnan(log_densities))) f1", "nn.Sequential(*decoder_network) if self._distr == 'poisson': self.read_alpha = nn.Sequential( nn.Linear(config.getint('latent_dim'), input_dim), nn.ReLU6() ) self.initialize_parameters()", "variance of the latent variable output from the forward propagation \"\"\" def __init__(self,", "temporary storage losses, kldivs, neglogliks = [], [], [] for inputs, _ in", "config_decoder = json.loads(config.get(\"decoder\")) self._distr = config['distribution'] decoder_network = [] for layer in config_decoder:", "inputs.shape[0] loss = -loglikelihood + kl_div loss.backward() self._optim.step() self._optim.zero_grad() losses.append(self._to_numpy(loss)) kldivs.append(self._to_numpy(kl_div)) neglogliks.append(self._to_numpy(-loglikelihood)) storage['loss'].append(np.mean(losses))", "_, _ = self.evaluate(trainloader) storage['precision'].append(prec) storage['recall'].append(recall) print('epoch: {} | loss: {:.3f} | -logp(x|z):", "- starting_time minutes = round(total_time // 60) seconds = round(total_time % 60) return", "with open('./results/{}.pkl'.format(self.model_name), 'wb') as _f: pickle.dump(storage, _f, pickle.HIGHEST_PROTOCOL) def _get_time(self, starting_time, current_time): total_time", "f1 = f1_score(ground_truth, predictions) accuracy = accuracy_score(ground_truth, predictions) precision = precision_score(ground_truth, predictions) recall", "* self.read_alpha(z) return alpha * self.decoder(z) else: return self.decoder(z) class VAE(nn.Module): \"\"\" VAE,", "layer['type'] == 'batch_norm': decoder_network.append(nn.BatchNorm1d(layer['num_features'])) elif layer['type'] == 'read_x': decoder_network.append(nn.Linear(layer['in_features'], input_dim)) self.decoder = nn.Sequential(*decoder_network)", "for inputs, _ in dataloader: inputs = inputs.to(self._device) logtheta = self._to_numpy(self.forward(inputs)) parameters.extend(logtheta) if", "| time: {}'.format( epoch + 1, storage['loss'][-1], storage['-logp(x|z)'][-1], storage['kldiv'][-1], epoch_time)) print('F1. 
{:.3f} |", "self._get_densities(dataloader) lowest_density = np.argmin(log_densities) self.threshold = np.nanpercentile(log_densities, self.precentile_threshold) return lowest_density def evaluate(self, dataloader):", "ValueError('{} is not a valid distribution'.format(self._distr)) def fit(self, trainloader, print_every=1): \"\"\" Train the", "for layer in config_decoder: if layer['type'] == 'linear': decoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif layer['type'] ==", "self.eval() with torch.no_grad(): inputs = inputs.to(self._device) logtheta = self.forward(inputs) log_likelihood = -self.loglikelihood(reduction='none')(logtheta, inputs)", "F from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score class Encoder(nn.Module): \"\"\" Probabilistic Encoder", "x is assume to be normal(0, I). Arguments: input_dim {int} -- number of", "layer['type'] == 'relu': decoder_network.append(nn.ReLU()) elif layer['type'] == 'relu6': decoder_network.append(nn.ReLU6()) elif layer['type'] == 'tanh':", "storage['precision'].append(prec) storage['recall'].append(recall) print('epoch: {} | loss: {:.3f} | -logp(x|z): {:.3f} | kldiv: {:.3f}", "None self.cur_epoch = 0 self._save_every = config.getint('model', 'save_every') def parameters(self): return chain(self._encoder.parameters(), self._decoder.parameters())", "itertools import chain import time import json import pickle import numpy as np", "storage['-logp(x|z)'].append(np.mean(neglogliks)) if (epoch + 1) % print_every == 0: epoch_time = self._get_time(start_time, time.time())", "propagation \"\"\" def __init__(self, input_dim, config): super(Encoder, self).__init__() config_encoder = json.loads(config.get(\"encoder\")) config_read_mu =", "poisson_cross_entropy(self, logtheta, inputs): return - inputs * logtheta + torch.exp(logtheta) def loglikelihood(self, reduction):", "# print(logtheta[index,:]) log_likelihood = torch.sum(log_likelihood, 1) assert inputs.shape[0] == log_likelihood.shape[0] return self._to_numpy(log_likelihood) def", "def fit(self, trainloader, print_every=1): \"\"\" Train the neural network \"\"\" start_time = time.time()", "lr=self.lr, betas=(0.5, 0.999)) # return data def _get_parameters(self, dataloader): self.eval() parameters = []", "self.state_dict(), 'optimizer_state_dict': self._optim.state_dict() } torch.save(checkpoint, model_path) def restore_model(self, filename, epoch): \"\"\" Retore the", "import json import pickle import numpy as np import torch from torch.autograd import", "layer['type'] == 'batch_norm': encoder_network.append(nn.BatchNorm1d(layer['num_features'])) self.encoder_network = nn.Sequential(*encoder_network) self.read_mu = nn.Linear(config_read_mu['in_features'], config.getint('latent_dim')) self.read_logvar =", "print('epoch: {} | loss: {:.3f} | -logp(x|z): {:.3f} | kldiv: {:.3f} | time:", "f1, acc, prec, recall, _, _ = self.evaluate(trainloader) self.save_checkpoint(f1) storage['log_densities'] = self._get_densities(trainloader) storage['params']", "self._encoder.initialize_parameters() # self._decoder.initialize_parameters() # self._optim = optim.Adam(self.parameters(), lr=self.lr, betas=(0.5, 0.999)) # return data", "print(logtheta[index,:]) log_likelihood = torch.sum(log_likelihood, 1) assert inputs.shape[0] == log_likelihood.shape[0] return self._to_numpy(log_likelihood) def _find_threshold(self,", "\"\"\" for layer in self.modules(): if isinstance(layer, nn.Linear): bound = 1 / np.sqrt(layer.in_features)", "self.cur_epoch, f1_score) checkpoint 
= { 'model_state_dict': self.state_dict(), 'optimizer_state_dict': self._optim.state_dict() } torch.save(checkpoint, model_path) def", "logtheta = self._to_numpy(self.forward(inputs)) parameters.extend(logtheta) if self._distr == 'poisson': parameters = np.exp(np.array(parameters)) else: parameters", "== 'relu': encoder_network.append(nn.ReLU()) elif layer['type'] == 'tanh': encoder_network.append(nn.Tanh()) elif layer['type'] == 'dropout': encoder_network.append(nn.Dropout(layer['rate']))", "= optim.Adam(self.parameters(), lr=self.lr, betas=(0.5, 0.999)) # return data def _get_parameters(self, dataloader): self.eval() parameters", "= 1 / np.sqrt(layer.in_features) layer.weight.data.uniform_(-bound, bound) layer.bias.data.zero_() def forward(self, inputs): \"\"\" Forward propagation", "alpha = 0.5 * self.read_alpha(z) return alpha * self.decoder(z) else: return self.decoder(z) class", "x --> mu, log_sigma_sq --> N(mu, log_sigma_sq) --> z --> x \"\"\" def", "[] log_densities = [] for inputs, targets in dataloader: pred, mini_batch_log_densities = self.predict(inputs)", "if np.isnan(log_densities).any(): print(np.where(np.isnan(log_densities))) f1 = f1_score(ground_truth, predictions) accuracy = accuracy_score(ground_truth, predictions) precision =", "= inputs.to(self._device) logtheta = self.forward(inputs) log_likelihood = -self.loglikelihood(reduction='none')(logtheta, inputs) #if np.isnan(log_likelihood).any(): # index", "forward propagation \"\"\" def __init__(self, input_dim, config): super(Encoder, self).__init__() config_encoder = json.loads(config.get(\"encoder\")) config_read_mu", "'dropout': decoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm': decoder_network.append(nn.BatchNorm1d(layer['num_features'])) elif layer['type'] == 'read_x': decoder_network.append(nn.Linear(layer['in_features'], input_dim))", "filename, epoch): \"\"\" Retore the model parameters \"\"\" model_path = '{}{}/{}.pt'.format( self.config['paths']['checkpoints_directory'], self.model_name,", "+ 1, storage['loss'][-1], storage['-logp(x|z)'][-1], storage['kldiv'][-1], epoch_time)) print('F1. {:.3f} | acc. {:.3f} | prec.:", "= self._get_time(start_time, time.time()) f1, acc, prec, recall, _, _ = self.evaluate(trainloader) storage['precision'].append(prec) storage['recall'].append(recall)", "elif layer['type'] == 'tanh': encoder_network.append(nn.Tanh()) elif layer['type'] == 'dropout': encoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] ==", "= config['distribution'] decoder_network = [] for layer in config_decoder: if layer['type'] == 'linear':", "json.loads(config.get(\"decoder\")) self._distr = config['distribution'] decoder_network = [] for layer in config_decoder: if layer['type']", "decoder_network = [] for layer in config_decoder: if layer['type'] == 'linear': decoder_network.append(nn.Linear(layer['in_features'], layer['out_features']))", "config_encoder: if layer['type'] == 'linear': encoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif layer['type'] == 'relu': encoder_network.append(nn.ReLU()) elif", "_get_densities(self, dataloader): all_log_densities = [] for inputs, _ in dataloader: mini_batch_log_densities = self._evaluate_probability(inputs)", "prior of x is assume to be normal(0, I). 
Arguments: input_dim {int} --", "print(inputs[index,:]) # print(logtheta[index,:]) log_likelihood = torch.sum(log_likelihood, 1) assert inputs.shape[0] == log_likelihood.shape[0] return self._to_numpy(log_likelihood)", "_get_parameters(self, dataloader): self.eval() parameters = [] for inputs, _ in dataloader: inputs =", "recall = recall_score(ground_truth, predictions) return f1, accuracy, precision, recall, log_densities, ground_truth def predict(self,", "np.isnan(log_density).any(): # print(inputs[np.where(np.isnan(log_density))]) #print(self.threshold) return list(predictions), log_density def save_checkpoint(self, f1_score): \"\"\"Save model paramers", "= Variable(epsilon, requires_grad=False).type(torch.FloatTensor).to(self._device) sigma = torch.exp(logvar / 2) return mu + sigma *", "and the variance of z ~ q(z|x). The prior of x is assume", "evaluate(self, dataloader): \"\"\" Evaluate accuracy. \"\"\" self._find_threshold(dataloader) predictions = [] ground_truth = []", "the forward propagation \"\"\" def __init__(self, input_dim, config): super(Encoder, self).__init__() config_encoder = json.loads(config.get(\"encoder\"))", "#print(self.threshold) return list(predictions), log_density def save_checkpoint(self, f1_score): \"\"\"Save model paramers under config['model_path']\"\"\" model_path", "= config.getfloat('model', 'threshold') self.threshold = None self.cur_epoch = 0 self._save_every = config.getint('model', 'save_every')", "from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score class Encoder(nn.Module): \"\"\" Probabilistic Encoder Return", "torch from torch.autograd import Variable from torch import nn from torch import optim", "= '{}/epoch_{}-f1_{}.pt'.format( self.checkpoint_directory, self.cur_epoch, f1_score) checkpoint = { 'model_state_dict': self.state_dict(), 'optimizer_state_dict': self._optim.state_dict() }", "if (epoch + 1) % print_every == 0: epoch_time = self._get_time(start_time, time.time()) f1,", "_f, pickle.HIGHEST_PROTOCOL) def _get_time(self, starting_time, current_time): total_time = current_time - starting_time minutes =", "seconds = round(total_time % 60) return '{} min., {} sec.'.format(minutes, seconds) # def", "#if np.isnan(log_density).any(): # print(inputs[np.where(np.isnan(log_density))]) #print(self.threshold) return list(predictions), log_density def save_checkpoint(self, f1_score): \"\"\"Save model", "pickle.dump(storage, _f, pickle.HIGHEST_PROTOCOL) def _get_time(self, starting_time, current_time): total_time = current_time - starting_time minutes", "= np.zeros_like(log_density).astype(int) predictions[log_density < self.threshold] = 1 #if np.isnan(log_density).any(): # print(inputs[np.where(np.isnan(log_density))]) #print(self.threshold) return", "loss: {:.3f} | -logp(x|z): {:.3f} | kldiv: {:.3f} | time: {}'.format( epoch +", "#print('logvar: {}'.format(self.logvar[index])) #print(latent[index]) #input() return theta def _to_numpy(self, tensor): return tensor.data.cpu().numpy() def poisson_cross_entropy(self,", "return - inputs * logtheta + torch.exp(logtheta) def loglikelihood(self, reduction): \"\"\" Return the", "kl_div = -0.5 * torch.sum(1 + self.logvar - self.mu.pow(2) - self.logvar.exp()) / inputs.shape[0]", "self.cur_epoch = 0 self._save_every = config.getint('model', 'save_every') def parameters(self): return chain(self._encoder.parameters(), self._decoder.parameters()) def", "_ = self.evaluate(trainloader) self.save_checkpoint(f1) storage['log_densities'] = self._get_densities(trainloader) 
storage['params'] = self._get_parameters(trainloader) with open('./results/{}.pkl'.format(self.model_name), 'wb')", "return self.poisson_cross_entropy return nn.PoissonNLLLoss(reduction=reduction) elif self._distr == 'bernoulli': return nn.BCELoss(reduction=reduction) else: raise ValueError('{}", "torch.nn import functional as F from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score class", "= self._evaluate_probability(inputs) predictions = np.zeros_like(log_density).astype(int) predictions[log_density < self.threshold] = 1 #if np.isnan(log_density).any(): #", "self._evaluate_probability(inputs) predictions = np.zeros_like(log_density).astype(int) predictions[log_density < self.threshold] = 1 #if np.isnan(log_density).any(): # print(inputs[np.where(np.isnan(log_density))])", "'params': None } for epoch in range(self.cur_epoch, self.cur_epoch + self.num_epochs): self.cur_epoch += 1", "'poisson': parameters = np.exp(np.array(parameters)) else: parameters = np.array(parameters) return parameters def _get_densities(self, dataloader):", "self._encoder(inputs) latent = self._sample_z(self.mu, self.logvar) theta = self._decoder(latent) #if torch.isnan(theta).any(): #index = torch.where(torch.isnan(theta))[0][0]", "elif layer['type'] == 'read_x': decoder_network.append(nn.Linear(layer['in_features'], input_dim)) self.decoder = nn.Sequential(*decoder_network) if self._distr == 'poisson':", "N(mu, log_sigma_sq) --> z --> x \"\"\" def __init__(self, input_dim, config, checkpoint_directory): super(VAE,", "def loglikelihood(self, reduction): \"\"\" Return the log-likelihood \"\"\" if self._distr == 'poisson': #if", "import nn from torch import optim from torch.nn import functional as F from", "def initialize_parameters(self): for layer in self.modules(): if isinstance(layer, nn.Linear): bound = 1 /", "= self.forward(inputs) log_likelihood = -self.loglikelihood(reduction='none')(logtheta, inputs) #if np.isnan(log_likelihood).any(): # index = np.where(np.isnan(log_likelihood)) #", "mini_batch_log_densities = self._evaluate_probability(inputs) all_log_densities.extend(mini_batch_log_densities) all_log_densities = np.array(all_log_densities) return all_log_densities def _evaluate_probability(self, inputs): self.eval()", "'poisson': self.read_alpha = nn.Sequential( nn.Linear(config.getint('latent_dim'), input_dim), nn.ReLU6() ) self.initialize_parameters() def initialize_parameters(self): for layer", "= None self.precentile_threshold = config.getfloat('model', 'threshold') self.threshold = None self.cur_epoch = 0 self._save_every", "\"\"\" VAE, x --> mu, log_sigma_sq --> N(mu, log_sigma_sq) --> z --> x", "-- mean and variance of the latent variable output from the forward propagation", "def _find_threshold(self, dataloader): log_densities = self._get_densities(dataloader) lowest_density = np.argmin(log_densities) self.threshold = np.nanpercentile(log_densities, self.precentile_threshold)", "self.precentile_threshold = config.getfloat('model', 'threshold') self.threshold = None self.cur_epoch = 0 self._save_every = config.getint('model',", "logtheta = self.forward(inputs) loglikelihood = -self.loglikelihood(reduction='sum')(logtheta, inputs) / inputs.shape[0] assert not torch.isnan(loglikelihood).any() kl_div", "= np.array(all_log_densities) return all_log_densities def _evaluate_probability(self, inputs): self.eval() with torch.no_grad(): inputs = inputs.to(self._device)", "np.sqrt(layer.in_features) layer.weight.data.uniform_(-bound, bound) layer.bias.data.zero_() def 
forward(self, z): if self._distr == 'poisson': alpha =", "nn.ReLU6() ) self.initialize_parameters() def initialize_parameters(self): for layer in self.modules(): if isinstance(layer, nn.Linear): bound", "= time.time() storage = { 'loss': [], 'kldiv': [], '-logp(x|z)': [], 'precision': [],", "losses, kldivs, neglogliks = [], [], [] for inputs, _ in trainloader: self.train()", "f1, accuracy, precision, recall, log_densities, ground_truth def predict(self, inputs): \"\"\" Predict the class", "input_dim), nn.ReLU6() ) self.initialize_parameters() def initialize_parameters(self): for layer in self.modules(): if isinstance(layer, nn.Linear):", "<filename>vae/vae.py #!/usr/bin/python3 \"\"\" Pytorch Variational Autoendoder Network Implementation \"\"\" from itertools import chain", "inputs): return - inputs * logtheta + torch.exp(logtheta) def loglikelihood(self, reduction): \"\"\" Return", "np.argmin(log_densities) self.threshold = np.nanpercentile(log_densities, self.precentile_threshold) return lowest_density def evaluate(self, dataloader): \"\"\" Evaluate accuracy.", "/ 2) return mu + sigma * epsilon def forward(self, inputs): \"\"\" Forward", "} torch.save(checkpoint, model_path) def restore_model(self, filename, epoch): \"\"\" Retore the model parameters \"\"\"", "dataloader: pred, mini_batch_log_densities = self.predict(inputs) predictions.extend(pred) ground_truth.extend(list(self._to_numpy(targets))) log_densities.extend(mini_batch_log_densities) log_densities = np.array(log_densities) if np.isnan(log_densities).any():", "== 'linear': decoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif layer['type'] == 'relu': decoder_network.append(nn.ReLU()) elif layer['type'] == 'relu6':", "self.logvar) theta = self._decoder(latent) #if torch.isnan(theta).any(): #index = torch.where(torch.isnan(theta))[0][0] #print(index) #print(inputs[index]) #print('mu: {}'.format(self.mu[index]))", "isinstance(layer, nn.Linear): bound = 1 / np.sqrt(layer.in_features) layer.weight.data.uniform_(-bound, bound) layer.bias.data.zero_() def forward(self, z):", "{ 'loss': [], 'kldiv': [], '-logp(x|z)': [], 'precision': [], 'recall': [], 'log_densities': None,", "[] for layer in config_encoder: if layer['type'] == 'linear': encoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif layer['type']", "in range(self.cur_epoch, self.cur_epoch + self.num_epochs): self.cur_epoch += 1 # temporary storage losses, kldivs,", "--> mu, log_sigma_sq --> N(mu, log_sigma_sq) --> z --> x \"\"\" def __init__(self,", "{:.3f}'.format(f1, acc, prec, recall)) if (epoch + 1) % self._save_every == 0: f1,", "{} | loss: {:.3f} | -logp(x|z): {:.3f} | kldiv: {:.3f} | time: {}'.format(", "log_density def save_checkpoint(self, f1_score): \"\"\"Save model paramers under config['model_path']\"\"\" model_path = '{}/epoch_{}-f1_{}.pt'.format( self.checkpoint_directory,", "log_sigma_sq --> N(mu, log_sigma_sq) --> z --> x \"\"\" def __init__(self, input_dim, config,", "checkpoint_directory): super(VAE, self).__init__() self.config = config self.model_name = '{}{}'.format(config['model']['name'], config['model']['config_id']) self.checkpoint_directory = checkpoint_directory", "theta def _to_numpy(self, tensor): return tensor.data.cpu().numpy() def poisson_cross_entropy(self, logtheta, inputs): return - inputs", "is assume to be normal(0, I). 
Arguments: input_dim {int} -- number of features", "distribution'.format(self._distr)) def fit(self, trainloader, print_every=1): \"\"\" Train the neural network \"\"\" start_time =", "all_log_densities = [] for inputs, _ in dataloader: mini_batch_log_densities = self._evaluate_probability(inputs) all_log_densities.extend(mini_batch_log_densities) all_log_densities", "torch.autograd import Variable from torch import nn from torch import optim from torch.nn", "Returns: (tensor, tensor) -- mean and variance of the latent variable output from", "== 'dropout': encoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm': encoder_network.append(nn.BatchNorm1d(layer['num_features'])) self.encoder_network = nn.Sequential(*encoder_network) self.read_mu =", "acc, prec, recall, _, _ = self.evaluate(trainloader) storage['precision'].append(prec) storage['recall'].append(recall) print('epoch: {} | loss:", "self._distr == 'poisson': alpha = 0.5 * self.read_alpha(z) return alpha * self.decoder(z) else:", "def restore_model(self, filename, epoch): \"\"\" Retore the model parameters \"\"\" model_path = '{}{}/{}.pt'.format(", "from torch.nn import functional as F from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score", "latent = self._sample_z(self.mu, self.logvar) theta = self._decoder(latent) #if torch.isnan(theta).any(): #index = torch.where(torch.isnan(theta))[0][0] #print(index)", "[] for inputs, _ in trainloader: self.train() inputs = inputs.to(self._device) logtheta = self.forward(inputs)", "= -loglikelihood + kl_div loss.backward() self._optim.step() self._optim.zero_grad() losses.append(self._to_numpy(loss)) kldivs.append(self._to_numpy(kl_div)) neglogliks.append(self._to_numpy(-loglikelihood)) storage['loss'].append(np.mean(losses)) storage['kldiv'].append(np.mean(kldivs)) storage['-logp(x|z)'].append(np.mean(neglogliks))", "= '{}{}'.format(config['model']['name'], config['model']['config_id']) self.checkpoint_directory = checkpoint_directory self._distr = config['model']['distribution'] self._device = config['model']['device'] self._encoder", "model paramers under config['model_path']\"\"\" model_path = '{}/epoch_{}-f1_{}.pt'.format( self.checkpoint_directory, self.cur_epoch, f1_score) checkpoint = {", "self._distr == 'poisson': parameters = np.exp(np.array(parameters)) else: parameters = np.array(parameters) return parameters def", "kl_div loss.backward() self._optim.step() self._optim.zero_grad() losses.append(self._to_numpy(loss)) kldivs.append(self._to_numpy(kl_div)) neglogliks.append(self._to_numpy(-loglikelihood)) storage['loss'].append(np.mean(losses)) storage['kldiv'].append(np.mean(kldivs)) storage['-logp(x|z)'].append(np.mean(neglogliks)) if (epoch +", "mean and variance of the latent variable output from the forward propagation \"\"\"", "_find_threshold(self, dataloader): log_densities = self._get_densities(dataloader) lowest_density = np.argmin(log_densities) self.threshold = np.nanpercentile(log_densities, self.precentile_threshold) return", "self.encoder_network(inputs) mean = self.read_mu(hidden_state) logvar = self.read_logvar(hidden_state) return mean, logvar class Decoder(nn.Module): \"\"\"", "'log_densities': None, 'params': None } for epoch in range(self.cur_epoch, self.cur_epoch + self.num_epochs): self.cur_epoch", "= { 'loss': [], 'kldiv': [], '-logp(x|z)': [], 'precision': [], 'recall': [], 'log_densities':", "dataloader, data): # idx_to_remove = self._find_threshold(dataloader) # data.pop(idx_to_remove) # 
self._encoder.initialize_parameters() # self._decoder.initialize_parameters() #", "decoder_network.append(nn.Linear(layer['in_features'], input_dim)) self.decoder = nn.Sequential(*decoder_network) if self._distr == 'poisson': self.read_alpha = nn.Sequential( nn.Linear(config.getint('latent_dim'),", "with torch.no_grad(): inputs = inputs.to(self._device) logtheta = self.forward(inputs) log_likelihood = -self.loglikelihood(reduction='none')(logtheta, inputs) #if", "'tanh': encoder_network.append(nn.Tanh()) elif layer['type'] == 'dropout': encoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm': encoder_network.append(nn.BatchNorm1d(layer['num_features'])) self.encoder_network", "def initialize_parameters(self): \"\"\" Xavier initialization \"\"\" for layer in self.modules(): if isinstance(layer, nn.Linear):", "# self._encoder.initialize_parameters() # self._decoder.initialize_parameters() # self._optim = optim.Adam(self.parameters(), lr=self.lr, betas=(0.5, 0.999)) # return", "Probabilistic Encoder Return the mean and the variance of z ~ q(z|x). The", "precision_score(ground_truth, predictions) recall = recall_score(ground_truth, predictions) return f1, accuracy, precision, recall, log_densities, ground_truth", "variable output from the forward propagation \"\"\" def __init__(self, input_dim, config): super(Encoder, self).__init__()", "inputs): \"\"\" Predict the class of the inputs \"\"\" log_density = self._evaluate_probability(inputs) predictions", "self.threshold = None self.cur_epoch = 0 self._save_every = config.getint('model', 'save_every') def parameters(self): return", "config): super(Encoder, self).__init__() config_encoder = json.loads(config.get(\"encoder\")) config_read_mu = json.loads(config.get(\"read_mu\")) config_read_logvar = json.loads(config.get(\"read_sigma\")) config_encoder[0]['in_features']", "'optimizer_state_dict': self._optim.state_dict() } torch.save(checkpoint, model_path) def restore_model(self, filename, epoch): \"\"\" Retore the model", "layer['type'] == 'sigmoid': decoder_network.append(nn.Sigmoid()) elif layer['type'] == 'dropout': decoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm':", "None, 'params': None } for epoch in range(self.cur_epoch, self.cur_epoch + self.num_epochs): self.cur_epoch +=", "/ inputs.shape[0] assert not torch.isnan(loglikelihood).any() kl_div = -0.5 * torch.sum(1 + self.logvar -", "+ self.logvar - self.mu.pow(2) - self.logvar.exp()) / inputs.shape[0] loss = -loglikelihood + kl_div", "return all_log_densities def _evaluate_probability(self, inputs): self.eval() with torch.no_grad(): inputs = inputs.to(self._device) logtheta =", "index = np.where(np.isnan(log_likelihood)) # print(index) # index = index[0][0] # print(inputs[index,:]) # print(logtheta[index,:])", "torch.sum(1 + self.logvar - self.mu.pow(2) - self.logvar.exp()) / inputs.shape[0] loss = -loglikelihood +", "= self._get_densities(trainloader) storage['params'] = self._get_parameters(trainloader) with open('./results/{}.pkl'.format(self.model_name), 'wb') as _f: pickle.dump(storage, _f, pickle.HIGHEST_PROTOCOL)", "self.mu.pow(2) - self.logvar.exp()) / inputs.shape[0] loss = -loglikelihood + kl_div loss.backward() self._optim.step() self._optim.zero_grad()", "layer['type'] == 'relu': encoder_network.append(nn.ReLU()) elif layer['type'] == 'tanh': encoder_network.append(nn.Tanh()) elif layer['type'] == 'dropout':", "= [] for layer in config_decoder: if layer['type'] == 'linear': 
decoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif", "predictions.extend(pred) ground_truth.extend(list(self._to_numpy(targets))) log_densities.extend(mini_batch_log_densities) log_densities = np.array(log_densities) if np.isnan(log_densities).any(): print(np.where(np.isnan(log_densities))) f1 = f1_score(ground_truth, predictions)", "None self.logvar = None self.precentile_threshold = config.getfloat('model', 'threshold') self.threshold = None self.cur_epoch =", "all_log_densities.extend(mini_batch_log_densities) all_log_densities = np.array(all_log_densities) return all_log_densities def _evaluate_probability(self, inputs): self.eval() with torch.no_grad(): inputs", "config_read_mu = json.loads(config.get(\"read_mu\")) config_read_logvar = json.loads(config.get(\"read_sigma\")) config_encoder[0]['in_features'] = input_dim encoder_network = [] for", "sec.'.format(minutes, seconds) # def _remove_spam(self, dataloader, data): # idx_to_remove = self._find_threshold(dataloader) # data.pop(idx_to_remove)", "dataloader: inputs = inputs.to(self._device) logtheta = self._to_numpy(self.forward(inputs)) parameters.extend(logtheta) if self._distr == 'poisson': parameters", "= nn.Linear(config_read_logvar['in_features'], config.getint('latent_dim')) self.initialize_parameters() def initialize_parameters(self): \"\"\" Xavier initialization \"\"\" for layer in", "2) return mu + sigma * epsilon def forward(self, inputs): \"\"\" Forward propagation", "self._get_densities(trainloader) storage['params'] = self._get_parameters(trainloader) with open('./results/{}.pkl'.format(self.model_name), 'wb') as _f: pickle.dump(storage, _f, pickle.HIGHEST_PROTOCOL) def", "\"\"\" Return the log-likelihood \"\"\" if self._distr == 'poisson': #if reduction == 'none':", "print('F1. {:.3f} | acc. {:.3f} | prec.: {:.3f} | rec. 
{:.3f}'.format(f1, acc, prec,", "'dropout': encoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm': encoder_network.append(nn.BatchNorm1d(layer['num_features'])) self.encoder_network = nn.Sequential(*encoder_network) self.read_mu = nn.Linear(config_read_mu['in_features'],", "config['distribution'] decoder_network = [] for layer in config_decoder: if layer['type'] == 'linear': decoder_network.append(nn.Linear(layer['in_features'],", "{int} -- number of features Returns: (tensor, tensor) -- mean and variance of", "config_encoder = json.loads(config.get(\"encoder\")) config_read_mu = json.loads(config.get(\"read_mu\")) config_read_logvar = json.loads(config.get(\"read_sigma\")) config_encoder[0]['in_features'] = input_dim encoder_network", "inputs) / inputs.shape[0] assert not torch.isnan(loglikelihood).any() kl_div = -0.5 * torch.sum(1 + self.logvar", "self._decoder.parameters()) def _sample_z(self, mu, logvar): epsilon = torch.randn(mu.size()) epsilon = Variable(epsilon, requires_grad=False).type(torch.FloatTensor).to(self._device) sigma", "the inputs \"\"\" log_density = self._evaluate_probability(inputs) predictions = np.zeros_like(log_density).astype(int) predictions[log_density < self.threshold] =", "== 'read_x': decoder_network.append(nn.Linear(layer['in_features'], input_dim)) self.decoder = nn.Sequential(*decoder_network) if self._distr == 'poisson': self.read_alpha =", "config.getint('latent_dim')) self.initialize_parameters() def initialize_parameters(self): \"\"\" Xavier initialization \"\"\" for layer in self.modules(): if", "layer['type'] == 'dropout': encoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm': encoder_network.append(nn.BatchNorm1d(layer['num_features'])) self.encoder_network = nn.Sequential(*encoder_network) self.read_mu", "self.read_mu(hidden_state) logvar = self.read_logvar(hidden_state) return mean, logvar class Decoder(nn.Module): \"\"\" Decoder \"\"\" def", "mu + sigma * epsilon def forward(self, inputs): \"\"\" Forward propagation \"\"\" self.mu,", "forward(self, inputs): \"\"\" Forward propagation \"\"\" self.mu, self.logvar = self._encoder(inputs) latent = self._sample_z(self.mu,", "config.getfloat('model', 'threshold') self.threshold = None self.cur_epoch = 0 self._save_every = config.getint('model', 'save_every') def", "tensor.data.cpu().numpy() def poisson_cross_entropy(self, logtheta, inputs): return - inputs * logtheta + torch.exp(logtheta) def", "logtheta + torch.exp(logtheta) def loglikelihood(self, reduction): \"\"\" Return the log-likelihood \"\"\" if self._distr", "for inputs, targets in dataloader: pred, mini_batch_log_densities = self.predict(inputs) predictions.extend(pred) ground_truth.extend(list(self._to_numpy(targets))) log_densities.extend(mini_batch_log_densities) log_densities", "'recall': [], 'log_densities': None, 'params': None } for epoch in range(self.cur_epoch, self.cur_epoch +", "return tensor.data.cpu().numpy() def poisson_cross_entropy(self, logtheta, inputs): return - inputs * logtheta + torch.exp(logtheta)", "config, checkpoint_directory): super(VAE, self).__init__() self.config = config self.model_name = '{}{}'.format(config['model']['name'], config['model']['config_id']) self.checkpoint_directory =", "inputs = inputs.to(self._device) logtheta = self.forward(inputs) log_likelihood = -self.loglikelihood(reduction='none')(logtheta, inputs) #if np.isnan(log_likelihood).any(): #", "'tanh': decoder_network.append(nn.Tanh()) elif layer['type'] == 'sigmoid': 
decoder_network.append(nn.Sigmoid()) elif layer['type'] == 'dropout': decoder_network.append(nn.Dropout(layer['rate'])) elif", "0 self._save_every = config.getint('model', 'save_every') def parameters(self): return chain(self._encoder.parameters(), self._decoder.parameters()) def _sample_z(self, mu,", "__init__(self, input_dim, config, checkpoint_directory): super(VAE, self).__init__() self.config = config self.model_name = '{}{}'.format(config['model']['name'], config['model']['config_id'])", "to be normal(0, I). Arguments: input_dim {int} -- number of features Returns: (tensor,", "loss = -loglikelihood + kl_div loss.backward() self._optim.step() self._optim.zero_grad() losses.append(self._to_numpy(loss)) kldivs.append(self._to_numpy(kl_div)) neglogliks.append(self._to_numpy(-loglikelihood)) storage['loss'].append(np.mean(losses)) storage['kldiv'].append(np.mean(kldivs))", "np.exp(np.array(parameters)) else: parameters = np.array(parameters) return parameters def _get_densities(self, dataloader): all_log_densities = []", "mean, logvar class Decoder(nn.Module): \"\"\" Decoder \"\"\" def __init__(self, input_dim, config): super(Decoder, self).__init__()", "tensor) -- mean and variance of the latent variable output from the forward", "import numpy as np import torch from torch.autograd import Variable from torch import", "lr=config.getfloat('training', 'lr'), betas=json.loads(config['training']['betas']) ) self.mu = None self.logvar = None self.precentile_threshold = config.getfloat('model',", "self.forward(inputs) loglikelihood = -self.loglikelihood(reduction='sum')(logtheta, inputs) / inputs.shape[0] assert not torch.isnan(loglikelihood).any() kl_div = -0.5", "optim.Adam(self.parameters(), lr=self.lr, betas=(0.5, 0.999)) # return data def _get_parameters(self, dataloader): self.eval() parameters =", "parameters = np.exp(np.array(parameters)) else: parameters = np.array(parameters) return parameters def _get_densities(self, dataloader): all_log_densities", "layer in config_encoder: if layer['type'] == 'linear': encoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif layer['type'] == 'relu':", "0.5 * self.read_alpha(z) return alpha * self.decoder(z) else: return self.decoder(z) class VAE(nn.Module): \"\"\"", "else: parameters = np.array(parameters) return parameters def _get_densities(self, dataloader): all_log_densities = [] for", "== 0: f1, acc, prec, recall, _, _ = self.evaluate(trainloader) self.save_checkpoint(f1) storage['log_densities'] =", "inputs \"\"\" log_density = self._evaluate_probability(inputs) predictions = np.zeros_like(log_density).astype(int) predictions[log_density < self.threshold] = 1", "Network Implementation \"\"\" from itertools import chain import time import json import pickle", "Decoder(nn.Module): \"\"\" Decoder \"\"\" def __init__(self, input_dim, config): super(Decoder, self).__init__() config_decoder = json.loads(config.get(\"decoder\"))", "layer['type'] == 'linear': decoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif layer['type'] == 'relu': decoder_network.append(nn.ReLU()) elif layer['type'] ==", "'threshold') self.threshold = None self.cur_epoch = 0 self._save_every = config.getint('model', 'save_every') def parameters(self):", "self._optim.zero_grad() losses.append(self._to_numpy(loss)) kldivs.append(self._to_numpy(kl_div)) neglogliks.append(self._to_numpy(-loglikelihood)) storage['loss'].append(np.mean(losses)) storage['kldiv'].append(np.mean(kldivs)) 
storage['-logp(x|z)'].append(np.mean(neglogliks)) if (epoch + 1) % print_every", "0.999)) # return data def _get_parameters(self, dataloader): self.eval() parameters = [] for inputs,", "Encoder(input_dim, config['model']) self._decoder = Decoder(input_dim, config['model']) self.num_epochs = config.getint('training', 'n_epochs') self._optim = optim.Adam(", "encoder_network = [] for layer in config_encoder: if layer['type'] == 'linear': encoder_network.append(nn.Linear(layer['in_features'], layer['out_features']))", "def poisson_cross_entropy(self, logtheta, inputs): return - inputs * logtheta + torch.exp(logtheta) def loglikelihood(self,", "= 0.5 * self.read_alpha(z) return alpha * self.decoder(z) else: return self.decoder(z) class VAE(nn.Module):", "nn.BCELoss(reduction=reduction) else: raise ValueError('{} is not a valid distribution'.format(self._distr)) def fit(self, trainloader, print_every=1):", "self.logvar.exp()) / inputs.shape[0] loss = -loglikelihood + kl_div loss.backward() self._optim.step() self._optim.zero_grad() losses.append(self._to_numpy(loss)) kldivs.append(self._to_numpy(kl_div))", "as _f: pickle.dump(storage, _f, pickle.HIGHEST_PROTOCOL) def _get_time(self, starting_time, current_time): total_time = current_time -", "_evaluate_probability(self, inputs): self.eval() with torch.no_grad(): inputs = inputs.to(self._device) logtheta = self.forward(inputs) log_likelihood =", "'{}/epoch_{}-f1_{}.pt'.format( self.checkpoint_directory, self.cur_epoch, f1_score) checkpoint = { 'model_state_dict': self.state_dict(), 'optimizer_state_dict': self._optim.state_dict() } torch.save(checkpoint,", "\"\"\" Probabilistic Encoder Return the mean and the variance of z ~ q(z|x).", "time: {}'.format( epoch + 1, storage['loss'][-1], storage['-logp(x|z)'][-1], storage['kldiv'][-1], epoch_time)) print('F1. 
{:.3f} | acc.", "'batch_norm': encoder_network.append(nn.BatchNorm1d(layer['num_features'])) self.encoder_network = nn.Sequential(*encoder_network) self.read_mu = nn.Linear(config_read_mu['in_features'], config.getint('latent_dim')) self.read_logvar = nn.Linear(config_read_logvar['in_features'], config.getint('latent_dim'))", "'{} min., {} sec.'.format(minutes, seconds) # def _remove_spam(self, dataloader, data): # idx_to_remove =", "layer.bias.data.zero_() def forward(self, z): if self._distr == 'poisson': alpha = 0.5 * self.read_alpha(z)", "targets in dataloader: pred, mini_batch_log_densities = self.predict(inputs) predictions.extend(pred) ground_truth.extend(list(self._to_numpy(targets))) log_densities.extend(mini_batch_log_densities) log_densities = np.array(log_densities)", "torch.sum(log_likelihood, 1) assert inputs.shape[0] == log_likelihood.shape[0] return self._to_numpy(log_likelihood) def _find_threshold(self, dataloader): log_densities =", "% 60) return '{} min., {} sec.'.format(minutes, seconds) # def _remove_spam(self, dataloader, data):", "decoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm': decoder_network.append(nn.BatchNorm1d(layer['num_features'])) elif layer['type'] == 'read_x': decoder_network.append(nn.Linear(layer['in_features'], input_dim)) self.decoder", "[], 'log_densities': None, 'params': None } for epoch in range(self.cur_epoch, self.cur_epoch + self.num_epochs):", "kldivs, neglogliks = [], [], [] for inputs, _ in trainloader: self.train() inputs", "config.getint('latent_dim')) self.read_logvar = nn.Linear(config_read_logvar['in_features'], config.getint('latent_dim')) self.initialize_parameters() def initialize_parameters(self): \"\"\" Xavier initialization \"\"\" for", "+= 1 # temporary storage losses, kldivs, neglogliks = [], [], [] for", "forward(self, inputs): \"\"\" Forward propagation \"\"\" hidden_state = self.encoder_network(inputs) mean = self.read_mu(hidden_state) logvar", "{:.3f} | acc. {:.3f} | prec.: {:.3f} | rec. {:.3f}'.format(f1, acc, prec, recall))", "self.model_name = '{}{}'.format(config['model']['name'], config['model']['config_id']) self.checkpoint_directory = checkpoint_directory self._distr = config['model']['distribution'] self._device = config['model']['device']", "else: return self.decoder(z) class VAE(nn.Module): \"\"\" VAE, x --> mu, log_sigma_sq --> N(mu,", "* epsilon def forward(self, inputs): \"\"\" Forward propagation \"\"\" self.mu, self.logvar = self._encoder(inputs)", "checkpoint_directory self._distr = config['model']['distribution'] self._device = config['model']['device'] self._encoder = Encoder(input_dim, config['model']) self._decoder =", "Return the log-likelihood \"\"\" if self._distr == 'poisson': #if reduction == 'none': #", "{:.3f} | time: {}'.format( epoch + 1, storage['loss'][-1], storage['-logp(x|z)'][-1], storage['kldiv'][-1], epoch_time)) print('F1. 
{:.3f}", "# data.pop(idx_to_remove) # self._encoder.initialize_parameters() # self._decoder.initialize_parameters() # self._optim = optim.Adam(self.parameters(), lr=self.lr, betas=(0.5, 0.999))", "{} sec.'.format(minutes, seconds) # def _remove_spam(self, dataloader, data): # idx_to_remove = self._find_threshold(dataloader) #", "model parameters \"\"\" model_path = '{}{}/{}.pt'.format( self.config['paths']['checkpoints_directory'], self.model_name, filename) checkpoint = torch.load(model_path) self.load_state_dict(checkpoint['model_state_dict'])", "layer['type'] == 'dropout': decoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm': decoder_network.append(nn.BatchNorm1d(layer['num_features'])) elif layer['type'] == 'read_x':", "60) seconds = round(total_time % 60) return '{} min., {} sec.'.format(minutes, seconds) #", "+ kl_div loss.backward() self._optim.step() self._optim.zero_grad() losses.append(self._to_numpy(loss)) kldivs.append(self._to_numpy(kl_div)) neglogliks.append(self._to_numpy(-loglikelihood)) storage['loss'].append(np.mean(losses)) storage['kldiv'].append(np.mean(kldivs)) storage['-logp(x|z)'].append(np.mean(neglogliks)) if (epoch", "return chain(self._encoder.parameters(), self._decoder.parameters()) def _sample_z(self, mu, logvar): epsilon = torch.randn(mu.size()) epsilon = Variable(epsilon,", "decoder_network.append(nn.ReLU()) elif layer['type'] == 'relu6': decoder_network.append(nn.ReLU6()) elif layer['type'] == 'tanh': decoder_network.append(nn.Tanh()) elif layer['type']", "requires_grad=False).type(torch.FloatTensor).to(self._device) sigma = torch.exp(logvar / 2) return mu + sigma * epsilon def", "// 60) seconds = round(total_time % 60) return '{} min., {} sec.'.format(minutes, seconds)", "= 1 #if np.isnan(log_density).any(): # print(inputs[np.where(np.isnan(log_density))]) #print(self.threshold) return list(predictions), log_density def save_checkpoint(self, f1_score):", "~ q(z|x). The prior of x is assume to be normal(0, I). 
Arguments:", "% self._save_every == 0: f1, acc, prec, recall, _, _ = self.evaluate(trainloader) self.save_checkpoint(f1)", "from torch import optim from torch.nn import functional as F from sklearn.metrics import", "config['model']) self.num_epochs = config.getint('training', 'n_epochs') self._optim = optim.Adam( self.parameters(), lr=config.getfloat('training', 'lr'), betas=json.loads(config['training']['betas']) )", "print_every == 0: epoch_time = self._get_time(start_time, time.time()) f1, acc, prec, recall, _, _", "loglikelihood(self, reduction): \"\"\" Return the log-likelihood \"\"\" if self._distr == 'poisson': #if reduction", "self._distr == 'poisson': self.read_alpha = nn.Sequential( nn.Linear(config.getint('latent_dim'), input_dim), nn.ReLU6() ) self.initialize_parameters() def initialize_parameters(self):", "log-likelihood \"\"\" if self._distr == 'poisson': #if reduction == 'none': # return self.poisson_cross_entropy", "= self._find_threshold(dataloader) # data.pop(idx_to_remove) # self._encoder.initialize_parameters() # self._decoder.initialize_parameters() # self._optim = optim.Adam(self.parameters(), lr=self.lr,", "self.num_epochs): self.cur_epoch += 1 # temporary storage losses, kldivs, neglogliks = [], [],", "x \"\"\" def __init__(self, input_dim, config, checkpoint_directory): super(VAE, self).__init__() self.config = config self.model_name", "\"\"\" Train the neural network \"\"\" start_time = time.time() storage = { 'loss':", "propagation \"\"\" self.mu, self.logvar = self._encoder(inputs) latent = self._sample_z(self.mu, self.logvar) theta = self._decoder(latent)", "The prior of x is assume to be normal(0, I). Arguments: input_dim {int}", "self._distr == 'bernoulli': return nn.BCELoss(reduction=reduction) else: raise ValueError('{} is not a valid distribution'.format(self._distr))", "inputs * logtheta + torch.exp(logtheta) def loglikelihood(self, reduction): \"\"\" Return the log-likelihood \"\"\"", "= config['model']['device'] self._encoder = Encoder(input_dim, config['model']) self._decoder = Decoder(input_dim, config['model']) self.num_epochs = config.getint('training',", "pickle import numpy as np import torch from torch.autograd import Variable from torch", "kldivs.append(self._to_numpy(kl_div)) neglogliks.append(self._to_numpy(-loglikelihood)) storage['loss'].append(np.mean(losses)) storage['kldiv'].append(np.mean(kldivs)) storage['-logp(x|z)'].append(np.mean(neglogliks)) if (epoch + 1) % print_every == 0:", "log_likelihood.shape[0] return self._to_numpy(log_likelihood) def _find_threshold(self, dataloader): log_densities = self._get_densities(dataloader) lowest_density = np.argmin(log_densities) self.threshold", "from itertools import chain import time import json import pickle import numpy as", "data.pop(idx_to_remove) # self._encoder.initialize_parameters() # self._decoder.initialize_parameters() # self._optim = optim.Adam(self.parameters(), lr=self.lr, betas=(0.5, 0.999)) #", "import pickle import numpy as np import torch from torch.autograd import Variable from", "parameters(self): return chain(self._encoder.parameters(), self._decoder.parameters()) def _sample_z(self, mu, logvar): epsilon = torch.randn(mu.size()) epsilon =", "Return the mean and the variance of z ~ q(z|x). 
The prior of", "= nn.Sequential(*encoder_network) self.read_mu = nn.Linear(config_read_mu['in_features'], config.getint('latent_dim')) self.read_logvar = nn.Linear(config_read_logvar['in_features'], config.getint('latent_dim')) self.initialize_parameters() def initialize_parameters(self):", "= current_time - starting_time minutes = round(total_time // 60) seconds = round(total_time %", "_ in dataloader: mini_batch_log_densities = self._evaluate_probability(inputs) all_log_densities.extend(mini_batch_log_densities) all_log_densities = np.array(all_log_densities) return all_log_densities def", "predictions) return f1, accuracy, precision, recall, log_densities, ground_truth def predict(self, inputs): \"\"\" Predict", "config['model_path']\"\"\" model_path = '{}/epoch_{}-f1_{}.pt'.format( self.checkpoint_directory, self.cur_epoch, f1_score) checkpoint = { 'model_state_dict': self.state_dict(), 'optimizer_state_dict':", "def _to_numpy(self, tensor): return tensor.data.cpu().numpy() def poisson_cross_entropy(self, logtheta, inputs): return - inputs *", "_f: pickle.dump(storage, _f, pickle.HIGHEST_PROTOCOL) def _get_time(self, starting_time, current_time): total_time = current_time - starting_time", "[] for inputs, _ in dataloader: mini_batch_log_densities = self._evaluate_probability(inputs) all_log_densities.extend(mini_batch_log_densities) all_log_densities = np.array(all_log_densities)", "\"\"\" Decoder \"\"\" def __init__(self, input_dim, config): super(Decoder, self).__init__() config_decoder = json.loads(config.get(\"decoder\")) self._distr", "self.read_mu = nn.Linear(config_read_mu['in_features'], config.getint('latent_dim')) self.read_logvar = nn.Linear(config_read_logvar['in_features'], config.getint('latent_dim')) self.initialize_parameters() def initialize_parameters(self): \"\"\" Xavier", "return self.decoder(z) class VAE(nn.Module): \"\"\" VAE, x --> mu, log_sigma_sq --> N(mu, log_sigma_sq)", "def forward(self, z): if self._distr == 'poisson': alpha = 0.5 * self.read_alpha(z) return", "# print(inputs[index,:]) # print(logtheta[index,:]) log_likelihood = torch.sum(log_likelihood, 1) assert inputs.shape[0] == log_likelihood.shape[0] return", "Arguments: input_dim {int} -- number of features Returns: (tensor, tensor) -- mean and", "== 'tanh': encoder_network.append(nn.Tanh()) elif layer['type'] == 'dropout': encoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm': encoder_network.append(nn.BatchNorm1d(layer['num_features']))", "else: raise ValueError('{} is not a valid distribution'.format(self._distr)) def fit(self, trainloader, print_every=1): \"\"\"", "0: epoch_time = self._get_time(start_time, time.time()) f1, acc, prec, recall, _, _ = self.evaluate(trainloader)", "min., {} sec.'.format(minutes, seconds) # def _remove_spam(self, dataloader, data): # idx_to_remove = self._find_threshold(dataloader)", "[] for inputs, _ in dataloader: inputs = inputs.to(self._device) logtheta = self._to_numpy(self.forward(inputs)) parameters.extend(logtheta)", "if isinstance(layer, nn.Linear): bound = 1 / np.sqrt(layer.in_features) layer.weight.data.uniform_(-bound, bound) layer.bias.data.zero_() def forward(self,", "f1_score) checkpoint = { 'model_state_dict': self.state_dict(), 'optimizer_state_dict': self._optim.state_dict() } torch.save(checkpoint, model_path) def restore_model(self,", "accuracy = accuracy_score(ground_truth, predictions) precision = precision_score(ground_truth, predictions) recall = recall_score(ground_truth, predictions) return", "inputs, _ in 
dataloader: inputs = inputs.to(self._device) logtheta = self._to_numpy(self.forward(inputs)) parameters.extend(logtheta) if self._distr", "\"\"\" self.mu, self.logvar = self._encoder(inputs) latent = self._sample_z(self.mu, self.logvar) theta = self._decoder(latent) #if", "\"\"\" start_time = time.time() storage = { 'loss': [], 'kldiv': [], '-logp(x|z)': [],", "valid distribution'.format(self._distr)) def fit(self, trainloader, print_every=1): \"\"\" Train the neural network \"\"\" start_time", "Xavier initialization \"\"\" for layer in self.modules(): if isinstance(layer, nn.Linear): bound = 1", "\"\"\" if self._distr == 'poisson': #if reduction == 'none': # return self.poisson_cross_entropy return", "the log-likelihood \"\"\" if self._distr == 'poisson': #if reduction == 'none': # return", "self._save_every == 0: f1, acc, prec, recall, _, _ = self.evaluate(trainloader) self.save_checkpoint(f1) storage['log_densities']", "\"\"\" def __init__(self, input_dim, config): super(Encoder, self).__init__() config_encoder = json.loads(config.get(\"encoder\")) config_read_mu = json.loads(config.get(\"read_mu\"))", "the neural network \"\"\" start_time = time.time() storage = { 'loss': [], 'kldiv':", "= precision_score(ground_truth, predictions) recall = recall_score(ground_truth, predictions) return f1, accuracy, precision, recall, log_densities,", "== 'tanh': decoder_network.append(nn.Tanh()) elif layer['type'] == 'sigmoid': decoder_network.append(nn.Sigmoid()) elif layer['type'] == 'dropout': decoder_network.append(nn.Dropout(layer['rate']))", "= [] ground_truth = [] log_densities = [] for inputs, targets in dataloader:", "self._distr == 'poisson': #if reduction == 'none': # return self.poisson_cross_entropy return nn.PoissonNLLLoss(reduction=reduction) elif", "Variational Autoendoder Network Implementation \"\"\" from itertools import chain import time import json", "layer['out_features'])) elif layer['type'] == 'relu': decoder_network.append(nn.ReLU()) elif layer['type'] == 'relu6': decoder_network.append(nn.ReLU6()) elif layer['type']", "z): if self._distr == 'poisson': alpha = 0.5 * self.read_alpha(z) return alpha *", "in dataloader: mini_batch_log_densities = self._evaluate_probability(inputs) all_log_densities.extend(mini_batch_log_densities) all_log_densities = np.array(all_log_densities) return all_log_densities def _evaluate_probability(self,", "logtheta = self.forward(inputs) log_likelihood = -self.loglikelihood(reduction='none')(logtheta, inputs) #if np.isnan(log_likelihood).any(): # index = np.where(np.isnan(log_likelihood))", "% print_every == 0: epoch_time = self._get_time(start_time, time.time()) f1, acc, prec, recall, _,", "dataloader): all_log_densities = [] for inputs, _ in dataloader: mini_batch_log_densities = self._evaluate_probability(inputs) all_log_densities.extend(mini_batch_log_densities)", "propagation \"\"\" hidden_state = self.encoder_network(inputs) mean = self.read_mu(hidden_state) logvar = self.read_logvar(hidden_state) return mean,", "precision_score, recall_score class Encoder(nn.Module): \"\"\" Probabilistic Encoder Return the mean and the variance", "self.initialize_parameters() def initialize_parameters(self): for layer in self.modules(): if isinstance(layer, nn.Linear): bound = 1", "= self.read_mu(hidden_state) logvar = self.read_logvar(hidden_state) return mean, logvar class Decoder(nn.Module): \"\"\" Decoder \"\"\"", "config['model']['distribution'] self._device = config['model']['device'] self._encoder = Encoder(input_dim, config['model']) self._decoder = 
Decoder(input_dim, config['model']) self.num_epochs", "torch import optim from torch.nn import functional as F from sklearn.metrics import f1_score,", "= Decoder(input_dim, config['model']) self.num_epochs = config.getint('training', 'n_epochs') self._optim = optim.Adam( self.parameters(), lr=config.getfloat('training', 'lr'),", "torch.where(torch.isnan(theta))[0][0] #print(index) #print(inputs[index]) #print('mu: {}'.format(self.mu[index])) #print('logvar: {}'.format(self.logvar[index])) #print(latent[index]) #input() return theta def _to_numpy(self,", "def predict(self, inputs): \"\"\" Predict the class of the inputs \"\"\" log_density =", "sigma * epsilon def forward(self, inputs): \"\"\" Forward propagation \"\"\" self.mu, self.logvar =", "elif layer['type'] == 'dropout': decoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm': decoder_network.append(nn.BatchNorm1d(layer['num_features'])) elif layer['type'] ==", "# index = index[0][0] # print(inputs[index,:]) # print(logtheta[index,:]) log_likelihood = torch.sum(log_likelihood, 1) assert", "return f1, accuracy, precision, recall, log_densities, ground_truth def predict(self, inputs): \"\"\" Predict the", "inputs = inputs.to(self._device) logtheta = self._to_numpy(self.forward(inputs)) parameters.extend(logtheta) if self._distr == 'poisson': parameters =", "predictions) recall = recall_score(ground_truth, predictions) return f1, accuracy, precision, recall, log_densities, ground_truth def", "= self.encoder_network(inputs) mean = self.read_mu(hidden_state) logvar = self.read_logvar(hidden_state) return mean, logvar class Decoder(nn.Module):", "== 'batch_norm': decoder_network.append(nn.BatchNorm1d(layer['num_features'])) elif layer['type'] == 'read_x': decoder_network.append(nn.Linear(layer['in_features'], input_dim)) self.decoder = nn.Sequential(*decoder_network) if", "of the inputs \"\"\" log_density = self._evaluate_probability(inputs) predictions = np.zeros_like(log_density).astype(int) predictions[log_density < self.threshold]", "save_checkpoint(self, f1_score): \"\"\"Save model paramers under config['model_path']\"\"\" model_path = '{}/epoch_{}-f1_{}.pt'.format( self.checkpoint_directory, self.cur_epoch, f1_score)", "logvar class Decoder(nn.Module): \"\"\" Decoder \"\"\" def __init__(self, input_dim, config): super(Decoder, self).__init__() config_decoder", "network \"\"\" start_time = time.time() storage = { 'loss': [], 'kldiv': [], '-logp(x|z)':", "elif layer['type'] == 'batch_norm': encoder_network.append(nn.BatchNorm1d(layer['num_features'])) self.encoder_network = nn.Sequential(*encoder_network) self.read_mu = nn.Linear(config_read_mu['in_features'], config.getint('latent_dim')) self.read_logvar", "= json.loads(config.get(\"read_mu\")) config_read_logvar = json.loads(config.get(\"read_sigma\")) config_encoder[0]['in_features'] = input_dim encoder_network = [] for layer", "epoch): \"\"\" Retore the model parameters \"\"\" model_path = '{}{}/{}.pt'.format( self.config['paths']['checkpoints_directory'], self.model_name, filename)", "all_log_densities = np.array(all_log_densities) return all_log_densities def _evaluate_probability(self, inputs): self.eval() with torch.no_grad(): inputs =", "--> x \"\"\" def __init__(self, input_dim, config, checkpoint_directory): super(VAE, self).__init__() self.config = config", "accuracy, precision, recall, log_densities, ground_truth def predict(self, inputs): \"\"\" Predict the class of", "prec, recall, _, _ = self.evaluate(trainloader) self.save_checkpoint(f1) 
storage['log_densities'] = self._get_densities(trainloader) storage['params'] = self._get_parameters(trainloader)", "encoder_network.append(nn.ReLU()) elif layer['type'] == 'tanh': encoder_network.append(nn.Tanh()) elif layer['type'] == 'dropout': encoder_network.append(nn.Dropout(layer['rate'])) elif layer['type']", "current_time - starting_time minutes = round(total_time // 60) seconds = round(total_time % 60)", "self.read_logvar(hidden_state) return mean, logvar class Decoder(nn.Module): \"\"\" Decoder \"\"\" def __init__(self, input_dim, config):", "epsilon def forward(self, inputs): \"\"\" Forward propagation \"\"\" self.mu, self.logvar = self._encoder(inputs) latent", "all_log_densities def _evaluate_probability(self, inputs): self.eval() with torch.no_grad(): inputs = inputs.to(self._device) logtheta = self.forward(inputs)", "VAE(nn.Module): \"\"\" VAE, x --> mu, log_sigma_sq --> N(mu, log_sigma_sq) --> z -->", "[] for layer in config_decoder: if layer['type'] == 'linear': decoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif layer['type']", "= torch.sum(log_likelihood, 1) assert inputs.shape[0] == log_likelihood.shape[0] return self._to_numpy(log_likelihood) def _find_threshold(self, dataloader): log_densities", "numpy as np import torch from torch.autograd import Variable from torch import nn", "storage['loss'][-1], storage['-logp(x|z)'][-1], storage['kldiv'][-1], epoch_time)) print('F1. {:.3f} | acc. {:.3f} | prec.: {:.3f} |", "\"\"\" log_density = self._evaluate_probability(inputs) predictions = np.zeros_like(log_density).astype(int) predictions[log_density < self.threshold] = 1 #if", "\"\"\" def __init__(self, input_dim, config, checkpoint_directory): super(VAE, self).__init__() self.config = config self.model_name =", "\"\"\" Forward propagation \"\"\" hidden_state = self.encoder_network(inputs) mean = self.read_mu(hidden_state) logvar = self.read_logvar(hidden_state)", "of the latent variable output from the forward propagation \"\"\" def __init__(self, input_dim,", "the mean and the variance of z ~ q(z|x). The prior of x", "inputs, _ in trainloader: self.train() inputs = inputs.to(self._device) logtheta = self.forward(inputs) loglikelihood =", "input_dim)) self.decoder = nn.Sequential(*decoder_network) if self._distr == 'poisson': self.read_alpha = nn.Sequential( nn.Linear(config.getint('latent_dim'), input_dim),", "lowest_density def evaluate(self, dataloader): \"\"\" Evaluate accuracy. \"\"\" self._find_threshold(dataloader) predictions = [] ground_truth", "{:.3f} | prec.: {:.3f} | rec. 
{:.3f}'.format(f1, acc, prec, recall)) if (epoch +", "= accuracy_score(ground_truth, predictions) precision = precision_score(ground_truth, predictions) recall = recall_score(ground_truth, predictions) return f1,", "self.threshold] = 1 #if np.isnan(log_density).any(): # print(inputs[np.where(np.isnan(log_density))]) #print(self.threshold) return list(predictions), log_density def save_checkpoint(self,", "= index[0][0] # print(inputs[index,:]) # print(logtheta[index,:]) log_likelihood = torch.sum(log_likelihood, 1) assert inputs.shape[0] ==", "storage['recall'].append(recall) print('epoch: {} | loss: {:.3f} | -logp(x|z): {:.3f} | kldiv: {:.3f} |", "theta = self._decoder(latent) #if torch.isnan(theta).any(): #index = torch.where(torch.isnan(theta))[0][0] #print(index) #print(inputs[index]) #print('mu: {}'.format(self.mu[index])) #print('logvar:", "f1_score(ground_truth, predictions) accuracy = accuracy_score(ground_truth, predictions) precision = precision_score(ground_truth, predictions) recall = recall_score(ground_truth,", "torch.isnan(theta).any(): #index = torch.where(torch.isnan(theta))[0][0] #print(index) #print(inputs[index]) #print('mu: {}'.format(self.mu[index])) #print('logvar: {}'.format(self.logvar[index])) #print(latent[index]) #input() return", "== 0: epoch_time = self._get_time(start_time, time.time()) f1, acc, prec, recall, _, _ =", "= inputs.to(self._device) logtheta = self._to_numpy(self.forward(inputs)) parameters.extend(logtheta) if self._distr == 'poisson': parameters = np.exp(np.array(parameters))", "self.checkpoint_directory = checkpoint_directory self._distr = config['model']['distribution'] self._device = config['model']['device'] self._encoder = Encoder(input_dim, config['model'])", "self._sample_z(self.mu, self.logvar) theta = self._decoder(latent) #if torch.isnan(theta).any(): #index = torch.where(torch.isnan(theta))[0][0] #print(index) #print(inputs[index]) #print('mu:", "== 'sigmoid': decoder_network.append(nn.Sigmoid()) elif layer['type'] == 'dropout': decoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm': decoder_network.append(nn.BatchNorm1d(layer['num_features']))", "= self._encoder(inputs) latent = self._sample_z(self.mu, self.logvar) theta = self._decoder(latent) #if torch.isnan(theta).any(): #index =", "f1_score, accuracy_score, precision_score, recall_score class Encoder(nn.Module): \"\"\" Probabilistic Encoder Return the mean and", "range(self.cur_epoch, self.cur_epoch + self.num_epochs): self.cur_epoch += 1 # temporary storage losses, kldivs, neglogliks", "self.num_epochs = config.getint('training', 'n_epochs') self._optim = optim.Adam( self.parameters(), lr=config.getfloat('training', 'lr'), betas=json.loads(config['training']['betas']) ) self.mu", "trainloader, print_every=1): \"\"\" Train the neural network \"\"\" start_time = time.time() storage =", "q(z|x). The prior of x is assume to be normal(0, I). 
Arguments: input_dim", "= { 'model_state_dict': self.state_dict(), 'optimizer_state_dict': self._optim.state_dict() } torch.save(checkpoint, model_path) def restore_model(self, filename, epoch):", "\"\"\" Xavier initialization \"\"\" for layer in self.modules(): if isinstance(layer, nn.Linear): bound =", "self.decoder(z) class VAE(nn.Module): \"\"\" VAE, x --> mu, log_sigma_sq --> N(mu, log_sigma_sq) -->", "logtheta, inputs): return - inputs * logtheta + torch.exp(logtheta) def loglikelihood(self, reduction): \"\"\"", "\"\"\" Pytorch Variational Autoendoder Network Implementation \"\"\" from itertools import chain import time", "super(Decoder, self).__init__() config_decoder = json.loads(config.get(\"decoder\")) self._distr = config['distribution'] decoder_network = [] for layer", "layer['type'] == 'relu6': decoder_network.append(nn.ReLU6()) elif layer['type'] == 'tanh': decoder_network.append(nn.Tanh()) elif layer['type'] == 'sigmoid':", "--> z --> x \"\"\" def __init__(self, input_dim, config, checkpoint_directory): super(VAE, self).__init__() self.config", "log_densities = self._get_densities(dataloader) lowest_density = np.argmin(log_densities) self.threshold = np.nanpercentile(log_densities, self.precentile_threshold) return lowest_density def", "/ inputs.shape[0] loss = -loglikelihood + kl_div loss.backward() self._optim.step() self._optim.zero_grad() losses.append(self._to_numpy(loss)) kldivs.append(self._to_numpy(kl_div)) neglogliks.append(self._to_numpy(-loglikelihood))", "in trainloader: self.train() inputs = inputs.to(self._device) logtheta = self.forward(inputs) loglikelihood = -self.loglikelihood(reduction='sum')(logtheta, inputs)", "= optim.Adam( self.parameters(), lr=config.getfloat('training', 'lr'), betas=json.loads(config['training']['betas']) ) self.mu = None self.logvar = None", "and variance of the latent variable output from the forward propagation \"\"\" def", "parameters \"\"\" model_path = '{}{}/{}.pt'.format( self.config['paths']['checkpoints_directory'], self.model_name, filename) checkpoint = torch.load(model_path) self.load_state_dict(checkpoint['model_state_dict']) self._optim.load_state_dict(checkpoint['optimizer_state_dict'])", "initialize_parameters(self): for layer in self.modules(): if isinstance(layer, nn.Linear): bound = 1 / np.sqrt(layer.in_features)", "= config['model']['distribution'] self._device = config['model']['device'] self._encoder = Encoder(input_dim, config['model']) self._decoder = Decoder(input_dim, config['model'])", "layer.weight.data.uniform_(-bound, bound) layer.bias.data.zero_() def forward(self, z): if self._distr == 'poisson': alpha = 0.5", "#input() return theta def _to_numpy(self, tensor): return tensor.data.cpu().numpy() def poisson_cross_entropy(self, logtheta, inputs): return", "elif layer['type'] == 'sigmoid': decoder_network.append(nn.Sigmoid()) elif layer['type'] == 'dropout': decoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] ==", "initialize_parameters(self): \"\"\" Xavier initialization \"\"\" for layer in self.modules(): if isinstance(layer, nn.Linear): bound", "self._get_parameters(trainloader) with open('./results/{}.pkl'.format(self.model_name), 'wb') as _f: pickle.dump(storage, _f, pickle.HIGHEST_PROTOCOL) def _get_time(self, starting_time, current_time):", "_sample_z(self, mu, logvar): epsilon = torch.randn(mu.size()) epsilon = Variable(epsilon, requires_grad=False).type(torch.FloatTensor).to(self._device) sigma = torch.exp(logvar", "_get_time(self, starting_time, current_time): total_time = 
current_time - starting_time minutes = round(total_time // 60)", "= '{}{}/{}.pt'.format( self.config['paths']['checkpoints_directory'], self.model_name, filename) checkpoint = torch.load(model_path) self.load_state_dict(checkpoint['model_state_dict']) self._optim.load_state_dict(checkpoint['optimizer_state_dict']) self.cur_epoch = epoch", "print_every=1): \"\"\" Train the neural network \"\"\" start_time = time.time() storage = {", "self._device = config['model']['device'] self._encoder = Encoder(input_dim, config['model']) self._decoder = Decoder(input_dim, config['model']) self.num_epochs =", "from torch import nn from torch import optim from torch.nn import functional as", "import torch from torch.autograd import Variable from torch import nn from torch import", "decoder_network.append(nn.Tanh()) elif layer['type'] == 'sigmoid': decoder_network.append(nn.Sigmoid()) elif layer['type'] == 'dropout': decoder_network.append(nn.Dropout(layer['rate'])) elif layer['type']", "betas=json.loads(config['training']['betas']) ) self.mu = None self.logvar = None self.precentile_threshold = config.getfloat('model', 'threshold') self.threshold", "storage['kldiv'].append(np.mean(kldivs)) storage['-logp(x|z)'].append(np.mean(neglogliks)) if (epoch + 1) % print_every == 0: epoch_time = self._get_time(start_time,", "elif layer['type'] == 'dropout': encoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm': encoder_network.append(nn.BatchNorm1d(layer['num_features'])) self.encoder_network = nn.Sequential(*encoder_network)", "inputs.to(self._device) logtheta = self._to_numpy(self.forward(inputs)) parameters.extend(logtheta) if self._distr == 'poisson': parameters = np.exp(np.array(parameters)) else:", "chain import time import json import pickle import numpy as np import torch", "nn.Sequential(*encoder_network) self.read_mu = nn.Linear(config_read_mu['in_features'], config.getint('latent_dim')) self.read_logvar = nn.Linear(config_read_logvar['in_features'], config.getint('latent_dim')) self.initialize_parameters() def initialize_parameters(self): \"\"\"", "-0.5 * torch.sum(1 + self.logvar - self.mu.pow(2) - self.logvar.exp()) / inputs.shape[0] loss =", "# idx_to_remove = self._find_threshold(dataloader) # data.pop(idx_to_remove) # self._encoder.initialize_parameters() # self._decoder.initialize_parameters() # self._optim =", "= round(total_time % 60) return '{} min., {} sec.'.format(minutes, seconds) # def _remove_spam(self,", "self.eval() parameters = [] for inputs, _ in dataloader: inputs = inputs.to(self._device) logtheta", "dataloader): log_densities = self._get_densities(dataloader) lowest_density = np.argmin(log_densities) self.threshold = np.nanpercentile(log_densities, self.precentile_threshold) return lowest_density", "= [], [], [] for inputs, _ in trainloader: self.train() inputs = inputs.to(self._device)", "round(total_time % 60) return '{} min., {} sec.'.format(minutes, seconds) # def _remove_spam(self, dataloader,", "== 'relu6': decoder_network.append(nn.ReLU6()) elif layer['type'] == 'tanh': decoder_network.append(nn.Tanh()) elif layer['type'] == 'sigmoid': decoder_network.append(nn.Sigmoid())", "json.loads(config.get(\"encoder\")) config_read_mu = json.loads(config.get(\"read_mu\")) config_read_logvar = json.loads(config.get(\"read_sigma\")) config_encoder[0]['in_features'] = input_dim encoder_network = []", "{:.3f} | -logp(x|z): {:.3f} | kldiv: {:.3f} | time: {}'.format( epoch + 1,", "self._encoder = Encoder(input_dim, config['model']) self._decoder = 
Decoder(input_dim, config['model']) self.num_epochs = config.getint('training', 'n_epochs') self._optim", "VAE, x --> mu, log_sigma_sq --> N(mu, log_sigma_sq) --> z --> x \"\"\"", "def _sample_z(self, mu, logvar): epsilon = torch.randn(mu.size()) epsilon = Variable(epsilon, requires_grad=False).type(torch.FloatTensor).to(self._device) sigma =", "tensor): return tensor.data.cpu().numpy() def poisson_cross_entropy(self, logtheta, inputs): return - inputs * logtheta +", "time.time() storage = { 'loss': [], 'kldiv': [], '-logp(x|z)': [], 'precision': [], 'recall':", "bound = 1 / np.sqrt(layer.in_features) layer.weight.data.uniform_(-bound, bound) layer.bias.data.zero_() def forward(self, inputs): \"\"\" Forward", "f1, acc, prec, recall, _, _ = self.evaluate(trainloader) storage['precision'].append(prec) storage['recall'].append(recall) print('epoch: {} |", "storage['log_densities'] = self._get_densities(trainloader) storage['params'] = self._get_parameters(trainloader) with open('./results/{}.pkl'.format(self.model_name), 'wb') as _f: pickle.dump(storage, _f,", "\"\"\" Evaluate accuracy. \"\"\" self._find_threshold(dataloader) predictions = [] ground_truth = [] log_densities =", "np.isnan(log_densities).any(): print(np.where(np.isnan(log_densities))) f1 = f1_score(ground_truth, predictions) accuracy = accuracy_score(ground_truth, predictions) precision = precision_score(ground_truth,", "reduction == 'none': # return self.poisson_cross_entropy return nn.PoissonNLLLoss(reduction=reduction) elif self._distr == 'bernoulli': return", "idx_to_remove = self._find_threshold(dataloader) # data.pop(idx_to_remove) # self._encoder.initialize_parameters() # self._decoder.initialize_parameters() # self._optim = optim.Adam(self.parameters(),", "# print(inputs[np.where(np.isnan(log_density))]) #print(self.threshold) return list(predictions), log_density def save_checkpoint(self, f1_score): \"\"\"Save model paramers under", "class VAE(nn.Module): \"\"\" VAE, x --> mu, log_sigma_sq --> N(mu, log_sigma_sq) --> z", "= self.predict(inputs) predictions.extend(pred) ground_truth.extend(list(self._to_numpy(targets))) log_densities.extend(mini_batch_log_densities) log_densities = np.array(log_densities) if np.isnan(log_densities).any(): print(np.where(np.isnan(log_densities))) f1 =", "rec. 
{:.3f}'.format(f1, acc, prec, recall)) if (epoch + 1) % self._save_every == 0:", "torch.exp(logtheta) def loglikelihood(self, reduction): \"\"\" Return the log-likelihood \"\"\" if self._distr == 'poisson':", "decoder_network.append(nn.BatchNorm1d(layer['num_features'])) elif layer['type'] == 'read_x': decoder_network.append(nn.Linear(layer['in_features'], input_dim)) self.decoder = nn.Sequential(*decoder_network) if self._distr ==", "of features Returns: (tensor, tensor) -- mean and variance of the latent variable", "= np.array(log_densities) if np.isnan(log_densities).any(): print(np.where(np.isnan(log_densities))) f1 = f1_score(ground_truth, predictions) accuracy = accuracy_score(ground_truth, predictions)", "raise ValueError('{} is not a valid distribution'.format(self._distr)) def fit(self, trainloader, print_every=1): \"\"\" Train", "neglogliks = [], [], [] for inputs, _ in trainloader: self.train() inputs =", "from the forward propagation \"\"\" def __init__(self, input_dim, config): super(Encoder, self).__init__() config_encoder =", "'relu': encoder_network.append(nn.ReLU()) elif layer['type'] == 'tanh': encoder_network.append(nn.Tanh()) elif layer['type'] == 'dropout': encoder_network.append(nn.Dropout(layer['rate'])) elif", "normal(0, I). Arguments: input_dim {int} -- number of features Returns: (tensor, tensor) --", "bound) layer.bias.data.zero_() def forward(self, inputs): \"\"\" Forward propagation \"\"\" hidden_state = self.encoder_network(inputs) mean", "log_likelihood = -self.loglikelihood(reduction='none')(logtheta, inputs) #if np.isnan(log_likelihood).any(): # index = np.where(np.isnan(log_likelihood)) # print(index) #", "prec, recall, _, _ = self.evaluate(trainloader) storage['precision'].append(prec) storage['recall'].append(recall) print('epoch: {} | loss: {:.3f}", "== 'none': # return self.poisson_cross_entropy return nn.PoissonNLLLoss(reduction=reduction) elif self._distr == 'bernoulli': return nn.BCELoss(reduction=reduction)", "Encoder Return the mean and the variance of z ~ q(z|x). The prior", "[], 'recall': [], 'log_densities': None, 'params': None } for epoch in range(self.cur_epoch, self.cur_epoch", "'wb') as _f: pickle.dump(storage, _f, pickle.HIGHEST_PROTOCOL) def _get_time(self, starting_time, current_time): total_time = current_time", "of z ~ q(z|x). 
The prior of x is assume to be normal(0,", "import chain import time import json import pickle import numpy as np import", "== 'batch_norm': encoder_network.append(nn.BatchNorm1d(layer['num_features'])) self.encoder_network = nn.Sequential(*encoder_network) self.read_mu = nn.Linear(config_read_mu['in_features'], config.getint('latent_dim')) self.read_logvar = nn.Linear(config_read_logvar['in_features'],", "time import json import pickle import numpy as np import torch from torch.autograd", "input_dim, config, checkpoint_directory): super(VAE, self).__init__() self.config = config self.model_name = '{}{}'.format(config['model']['name'], config['model']['config_id']) self.checkpoint_directory", "self._optim = optim.Adam( self.parameters(), lr=config.getfloat('training', 'lr'), betas=json.loads(config['training']['betas']) ) self.mu = None self.logvar =", "'precision': [], 'recall': [], 'log_densities': None, 'params': None } for epoch in range(self.cur_epoch,", "# print(index) # index = index[0][0] # print(inputs[index,:]) # print(logtheta[index,:]) log_likelihood = torch.sum(log_likelihood,", "= self.evaluate(trainloader) self.save_checkpoint(f1) storage['log_densities'] = self._get_densities(trainloader) storage['params'] = self._get_parameters(trainloader) with open('./results/{}.pkl'.format(self.model_name), 'wb') as", "inputs = inputs.to(self._device) logtheta = self.forward(inputs) loglikelihood = -self.loglikelihood(reduction='sum')(logtheta, inputs) / inputs.shape[0] assert", "= nn.Linear(config_read_mu['in_features'], config.getint('latent_dim')) self.read_logvar = nn.Linear(config_read_logvar['in_features'], config.getint('latent_dim')) self.initialize_parameters() def initialize_parameters(self): \"\"\" Xavier initialization", "elif layer['type'] == 'relu': encoder_network.append(nn.ReLU()) elif layer['type'] == 'tanh': encoder_network.append(nn.Tanh()) elif layer['type'] ==", "- self.logvar.exp()) / inputs.shape[0] loss = -loglikelihood + kl_div loss.backward() self._optim.step() self._optim.zero_grad() losses.append(self._to_numpy(loss))", "return mu + sigma * epsilon def forward(self, inputs): \"\"\" Forward propagation \"\"\"", "is not a valid distribution'.format(self._distr)) def fit(self, trainloader, print_every=1): \"\"\" Train the neural", "accuracy_score(ground_truth, predictions) precision = precision_score(ground_truth, predictions) recall = recall_score(ground_truth, predictions) return f1, accuracy,", "predictions) precision = precision_score(ground_truth, predictions) recall = recall_score(ground_truth, predictions) return f1, accuracy, precision,", "reduction): \"\"\" Return the log-likelihood \"\"\" if self._distr == 'poisson': #if reduction ==", "Retore the model parameters \"\"\" model_path = '{}{}/{}.pt'.format( self.config['paths']['checkpoints_directory'], self.model_name, filename) checkpoint =", "nn.Linear): bound = 1 / np.sqrt(layer.in_features) layer.weight.data.uniform_(-bound, bound) layer.bias.data.zero_() def forward(self, z): if", "#if reduction == 'none': # return self.poisson_cross_entropy return nn.PoissonNLLLoss(reduction=reduction) elif self._distr == 'bernoulli':", "return theta def _to_numpy(self, tensor): return tensor.data.cpu().numpy() def poisson_cross_entropy(self, logtheta, inputs): return -", "[] ground_truth = [] log_densities = [] for inputs, targets in dataloader: pred,", "{ 'model_state_dict': self.state_dict(), 'optimizer_state_dict': self._optim.state_dict() } torch.save(checkpoint, model_path) def restore_model(self, filename, 
epoch): \"\"\"", "decoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif layer['type'] == 'relu': decoder_network.append(nn.ReLU()) elif layer['type'] == 'relu6': decoder_network.append(nn.ReLU6()) elif", "- inputs * logtheta + torch.exp(logtheta) def loglikelihood(self, reduction): \"\"\" Return the log-likelihood", "np import torch from torch.autograd import Variable from torch import nn from torch", "pickle.HIGHEST_PROTOCOL) def _get_time(self, starting_time, current_time): total_time = current_time - starting_time minutes = round(total_time", "config_read_logvar = json.loads(config.get(\"read_sigma\")) config_encoder[0]['in_features'] = input_dim encoder_network = [] for layer in config_encoder:", "input_dim encoder_network = [] for layer in config_encoder: if layer['type'] == 'linear': encoder_network.append(nn.Linear(layer['in_features'],", "self).__init__() self.config = config self.model_name = '{}{}'.format(config['model']['name'], config['model']['config_id']) self.checkpoint_directory = checkpoint_directory self._distr =", "= self._to_numpy(self.forward(inputs)) parameters.extend(logtheta) if self._distr == 'poisson': parameters = np.exp(np.array(parameters)) else: parameters =", "= self._get_densities(dataloader) lowest_density = np.argmin(log_densities) self.threshold = np.nanpercentile(log_densities, self.precentile_threshold) return lowest_density def evaluate(self,", "Variable from torch import nn from torch import optim from torch.nn import functional", "assume to be normal(0, I). Arguments: input_dim {int} -- number of features Returns:", "alpha * self.decoder(z) else: return self.decoder(z) class VAE(nn.Module): \"\"\" VAE, x --> mu,", "super(VAE, self).__init__() self.config = config self.model_name = '{}{}'.format(config['model']['name'], config['model']['config_id']) self.checkpoint_directory = checkpoint_directory self._distr", "parameters = [] for inputs, _ in dataloader: inputs = inputs.to(self._device) logtheta =", "* torch.sum(1 + self.logvar - self.mu.pow(2) - self.logvar.exp()) / inputs.shape[0] loss = -loglikelihood", "import functional as F from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score class Encoder(nn.Module):", "= [] for inputs, _ in dataloader: inputs = inputs.to(self._device) logtheta = self._to_numpy(self.forward(inputs))", "index = index[0][0] # print(inputs[index,:]) # print(logtheta[index,:]) log_likelihood = torch.sum(log_likelihood, 1) assert inputs.shape[0]", "self.read_alpha = nn.Sequential( nn.Linear(config.getint('latent_dim'), input_dim), nn.ReLU6() ) self.initialize_parameters() def initialize_parameters(self): for layer in", ") self.initialize_parameters() def initialize_parameters(self): for layer in self.modules(): if isinstance(layer, nn.Linear): bound =", "parameters def _get_densities(self, dataloader): all_log_densities = [] for inputs, _ in dataloader: mini_batch_log_densities", "number of features Returns: (tensor, tensor) -- mean and variance of the latent", "def _get_time(self, starting_time, current_time): total_time = current_time - starting_time minutes = round(total_time //", "'poisson': #if reduction == 'none': # return self.poisson_cross_entropy return nn.PoissonNLLLoss(reduction=reduction) elif self._distr ==", "[], [], [] for inputs, _ in trainloader: self.train() inputs = inputs.to(self._device) logtheta", "decoder_network.append(nn.ReLU6()) elif layer['type'] == 'tanh': decoder_network.append(nn.Tanh()) elif layer['type'] == 'sigmoid': 
decoder_network.append(nn.Sigmoid()) elif layer['type']", "sigma = torch.exp(logvar / 2) return mu + sigma * epsilon def forward(self,", "np.where(np.isnan(log_likelihood)) # print(index) # index = index[0][0] # print(inputs[index,:]) # print(logtheta[index,:]) log_likelihood =", "== 'poisson': self.read_alpha = nn.Sequential( nn.Linear(config.getint('latent_dim'), input_dim), nn.ReLU6() ) self.initialize_parameters() def initialize_parameters(self): for", "self.train() inputs = inputs.to(self._device) logtheta = self.forward(inputs) loglikelihood = -self.loglikelihood(reduction='sum')(logtheta, inputs) / inputs.shape[0]", "return list(predictions), log_density def save_checkpoint(self, f1_score): \"\"\"Save model paramers under config['model_path']\"\"\" model_path =", "| prec.: {:.3f} | rec. {:.3f}'.format(f1, acc, prec, recall)) if (epoch + 1)", "== 'relu': decoder_network.append(nn.ReLU()) elif layer['type'] == 'relu6': decoder_network.append(nn.ReLU6()) elif layer['type'] == 'tanh': decoder_network.append(nn.Tanh())", "+ sigma * epsilon def forward(self, inputs): \"\"\" Forward propagation \"\"\" self.mu, self.logvar", "= [] for inputs, _ in dataloader: mini_batch_log_densities = self._evaluate_probability(inputs) all_log_densities.extend(mini_batch_log_densities) all_log_densities =", "inputs.to(self._device) logtheta = self.forward(inputs) log_likelihood = -self.loglikelihood(reduction='none')(logtheta, inputs) #if np.isnan(log_likelihood).any(): # index =", "= input_dim encoder_network = [] for layer in config_encoder: if layer['type'] == 'linear':", "starting_time minutes = round(total_time // 60) seconds = round(total_time % 60) return '{}", "-self.loglikelihood(reduction='sum')(logtheta, inputs) / inputs.shape[0] assert not torch.isnan(loglikelihood).any() kl_div = -0.5 * torch.sum(1 +", "self.decoder = nn.Sequential(*decoder_network) if self._distr == 'poisson': self.read_alpha = nn.Sequential( nn.Linear(config.getint('latent_dim'), input_dim), nn.ReLU6()", "recall_score(ground_truth, predictions) return f1, accuracy, precision, recall, log_densities, ground_truth def predict(self, inputs): \"\"\"", "def evaluate(self, dataloader): \"\"\" Evaluate accuracy. 
\"\"\" self._find_threshold(dataloader) predictions = [] ground_truth =", "recall, _, _ = self.evaluate(trainloader) storage['precision'].append(prec) storage['recall'].append(recall) print('epoch: {} | loss: {:.3f} |", "Pytorch Variational Autoendoder Network Implementation \"\"\" from itertools import chain import time import", "np.array(all_log_densities) return all_log_densities def _evaluate_probability(self, inputs): self.eval() with torch.no_grad(): inputs = inputs.to(self._device) logtheta", "chain(self._encoder.parameters(), self._decoder.parameters()) def _sample_z(self, mu, logvar): epsilon = torch.randn(mu.size()) epsilon = Variable(epsilon, requires_grad=False).type(torch.FloatTensor).to(self._device)", "== 'linear': encoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif layer['type'] == 'relu': encoder_network.append(nn.ReLU()) elif layer['type'] == 'tanh':", "elif layer['type'] == 'tanh': decoder_network.append(nn.Tanh()) elif layer['type'] == 'sigmoid': decoder_network.append(nn.Sigmoid()) elif layer['type'] ==", "if (epoch + 1) % self._save_every == 0: f1, acc, prec, recall, _,", "1 #if np.isnan(log_density).any(): # print(inputs[np.where(np.isnan(log_density))]) #print(self.threshold) return list(predictions), log_density def save_checkpoint(self, f1_score): \"\"\"Save", "'relu': decoder_network.append(nn.ReLU()) elif layer['type'] == 'relu6': decoder_network.append(nn.ReLU6()) elif layer['type'] == 'tanh': decoder_network.append(nn.Tanh()) elif", "0: f1, acc, prec, recall, _, _ = self.evaluate(trainloader) self.save_checkpoint(f1) storage['log_densities'] = self._get_densities(trainloader)", "self._distr = config['distribution'] decoder_network = [] for layer in config_decoder: if layer['type'] ==", "config.getint('model', 'save_every') def parameters(self): return chain(self._encoder.parameters(), self._decoder.parameters()) def _sample_z(self, mu, logvar): epsilon =", "start_time = time.time() storage = { 'loss': [], 'kldiv': [], '-logp(x|z)': [], 'precision':", "accuracy. 
\"\"\" self._find_threshold(dataloader) predictions = [] ground_truth = [] log_densities = [] for", "self.cur_epoch += 1 # temporary storage losses, kldivs, neglogliks = [], [], []", "= self._evaluate_probability(inputs) all_log_densities.extend(mini_batch_log_densities) all_log_densities = np.array(all_log_densities) return all_log_densities def _evaluate_probability(self, inputs): self.eval() with", "trainloader: self.train() inputs = inputs.to(self._device) logtheta = self.forward(inputs) loglikelihood = -self.loglikelihood(reduction='sum')(logtheta, inputs) /", "-logp(x|z): {:.3f} | kldiv: {:.3f} | time: {}'.format( epoch + 1, storage['loss'][-1], storage['-logp(x|z)'][-1],", "in config_encoder: if layer['type'] == 'linear': encoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif layer['type'] == 'relu': encoder_network.append(nn.ReLU())", "layer in self.modules(): if isinstance(layer, nn.Linear): bound = 1 / np.sqrt(layer.in_features) layer.weight.data.uniform_(-bound, bound)", "Decoder \"\"\" def __init__(self, input_dim, config): super(Decoder, self).__init__() config_decoder = json.loads(config.get(\"decoder\")) self._distr =", "self._optim.step() self._optim.zero_grad() losses.append(self._to_numpy(loss)) kldivs.append(self._to_numpy(kl_div)) neglogliks.append(self._to_numpy(-loglikelihood)) storage['loss'].append(np.mean(losses)) storage['kldiv'].append(np.mean(kldivs)) storage['-logp(x|z)'].append(np.mean(neglogliks)) if (epoch + 1) %", "+ torch.exp(logtheta) def loglikelihood(self, reduction): \"\"\" Return the log-likelihood \"\"\" if self._distr ==", "as np import torch from torch.autograd import Variable from torch import nn from", "latent variable output from the forward propagation \"\"\" def __init__(self, input_dim, config): super(Encoder,", "np.nanpercentile(log_densities, self.precentile_threshold) return lowest_density def evaluate(self, dataloader): \"\"\" Evaluate accuracy. 
\"\"\" self._find_threshold(dataloader) predictions", "output from the forward propagation \"\"\" def __init__(self, input_dim, config): super(Encoder, self).__init__() config_encoder", "1) % print_every == 0: epoch_time = self._get_time(start_time, time.time()) f1, acc, prec, recall,", "if self._distr == 'poisson': parameters = np.exp(np.array(parameters)) else: parameters = np.array(parameters) return parameters", "predictions[log_density < self.threshold] = 1 #if np.isnan(log_density).any(): # print(inputs[np.where(np.isnan(log_density))]) #print(self.threshold) return list(predictions), log_density", "for inputs, _ in dataloader: mini_batch_log_densities = self._evaluate_probability(inputs) all_log_densities.extend(mini_batch_log_densities) all_log_densities = np.array(all_log_densities) return", "Forward propagation \"\"\" self.mu, self.logvar = self._encoder(inputs) latent = self._sample_z(self.mu, self.logvar) theta =", "def __init__(self, input_dim, config): super(Decoder, self).__init__() config_decoder = json.loads(config.get(\"decoder\")) self._distr = config['distribution'] decoder_network", "decoder_network.append(nn.Sigmoid()) elif layer['type'] == 'dropout': decoder_network.append(nn.Dropout(layer['rate'])) elif layer['type'] == 'batch_norm': decoder_network.append(nn.BatchNorm1d(layer['num_features'])) elif layer['type']", "'save_every') def parameters(self): return chain(self._encoder.parameters(), self._decoder.parameters()) def _sample_z(self, mu, logvar): epsilon = torch.randn(mu.size())", "kldiv: {:.3f} | time: {}'.format( epoch + 1, storage['loss'][-1], storage['-logp(x|z)'][-1], storage['kldiv'][-1], epoch_time)) print('F1.", "precision = precision_score(ground_truth, predictions) recall = recall_score(ground_truth, predictions) return f1, accuracy, precision, recall,", "neural network \"\"\" start_time = time.time() storage = { 'loss': [], 'kldiv': [],", "#if torch.isnan(theta).any(): #index = torch.where(torch.isnan(theta))[0][0] #print(index) #print(inputs[index]) #print('mu: {}'.format(self.mu[index])) #print('logvar: {}'.format(self.logvar[index])) #print(latent[index]) #input()", "\"\"\" self._find_threshold(dataloader) predictions = [] ground_truth = [] log_densities = [] for inputs,", "self.read_alpha(z) return alpha * self.decoder(z) else: return self.decoder(z) class VAE(nn.Module): \"\"\" VAE, x", "encoder_network.append(nn.BatchNorm1d(layer['num_features'])) self.encoder_network = nn.Sequential(*encoder_network) self.read_mu = nn.Linear(config_read_mu['in_features'], config.getint('latent_dim')) self.read_logvar = nn.Linear(config_read_logvar['in_features'], config.getint('latent_dim')) self.initialize_parameters()", "config['model']) self._decoder = Decoder(input_dim, config['model']) self.num_epochs = config.getint('training', 'n_epochs') self._optim = optim.Adam( self.parameters(),", "isinstance(layer, nn.Linear): bound = 1 / np.sqrt(layer.in_features) layer.weight.data.uniform_(-bound, bound) layer.bias.data.zero_() def forward(self, inputs):", "round(total_time // 60) seconds = round(total_time % 60) return '{} min., {} sec.'.format(minutes,", "_ in trainloader: self.train() inputs = inputs.to(self._device) logtheta = self.forward(inputs) loglikelihood = -self.loglikelihood(reduction='sum')(logtheta,", "I). 
Arguments: input_dim {int} -- number of features Returns: (tensor, tensor) -- mean", "mu, log_sigma_sq --> N(mu, log_sigma_sq) --> z --> x \"\"\" def __init__(self, input_dim,", "logvar): epsilon = torch.randn(mu.size()) epsilon = Variable(epsilon, requires_grad=False).type(torch.FloatTensor).to(self._device) sigma = torch.exp(logvar / 2)", "f1_score): \"\"\"Save model paramers under config['model_path']\"\"\" model_path = '{}/epoch_{}-f1_{}.pt'.format( self.checkpoint_directory, self.cur_epoch, f1_score) checkpoint", "import f1_score, accuracy_score, precision_score, recall_score class Encoder(nn.Module): \"\"\" Probabilistic Encoder Return the mean", "log_densities, ground_truth def predict(self, inputs): \"\"\" Predict the class of the inputs \"\"\"", "storage = { 'loss': [], 'kldiv': [], '-logp(x|z)': [], 'precision': [], 'recall': [],", "log_densities = np.array(log_densities) if np.isnan(log_densities).any(): print(np.where(np.isnan(log_densities))) f1 = f1_score(ground_truth, predictions) accuracy = accuracy_score(ground_truth,", "if layer['type'] == 'linear': decoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif layer['type'] == 'relu': decoder_network.append(nn.ReLU()) elif layer['type']", "parameters = np.array(parameters) return parameters def _get_densities(self, dataloader): all_log_densities = [] for inputs,", "config.getint('training', 'n_epochs') self._optim = optim.Adam( self.parameters(), lr=config.getfloat('training', 'lr'), betas=json.loads(config['training']['betas']) ) self.mu = None", "paramers under config['model_path']\"\"\" model_path = '{}/epoch_{}-f1_{}.pt'.format( self.checkpoint_directory, self.cur_epoch, f1_score) checkpoint = { 'model_state_dict':", "= json.loads(config.get(\"decoder\")) self._distr = config['distribution'] decoder_network = [] for layer in config_decoder: if", "self.initialize_parameters() def initialize_parameters(self): \"\"\" Xavier initialization \"\"\" for layer in self.modules(): if isinstance(layer,", "config): super(Decoder, self).__init__() config_decoder = json.loads(config.get(\"decoder\")) self._distr = config['distribution'] decoder_network = [] for", "'-logp(x|z)': [], 'precision': [], 'recall': [], 'log_densities': None, 'params': None } for epoch", "epoch + 1, storage['loss'][-1], storage['-logp(x|z)'][-1], storage['kldiv'][-1], epoch_time)) print('F1. {:.3f} | acc. {:.3f} |", "'batch_norm': decoder_network.append(nn.BatchNorm1d(layer['num_features'])) elif layer['type'] == 'read_x': decoder_network.append(nn.Linear(layer['in_features'], input_dim)) self.decoder = nn.Sequential(*decoder_network) if self._distr", "+ 1) % self._save_every == 0: f1, acc, prec, recall, _, _ =", "json.loads(config.get(\"read_mu\")) config_read_logvar = json.loads(config.get(\"read_sigma\")) config_encoder[0]['in_features'] = input_dim encoder_network = [] for layer in", "np.zeros_like(log_density).astype(int) predictions[log_density < self.threshold] = 1 #if np.isnan(log_density).any(): # print(inputs[np.where(np.isnan(log_density))]) #print(self.threshold) return list(predictions),", "nn.Sequential( nn.Linear(config.getint('latent_dim'), input_dim), nn.ReLU6() ) self.initialize_parameters() def initialize_parameters(self): for layer in self.modules(): if", "be normal(0, I). 
Arguments: input_dim {int} -- number of features Returns: (tensor, tensor)", "print(inputs[np.where(np.isnan(log_density))]) #print(self.threshold) return list(predictions), log_density def save_checkpoint(self, f1_score): \"\"\"Save model paramers under config['model_path']\"\"\"", "data def _get_parameters(self, dataloader): self.eval() parameters = [] for inputs, _ in dataloader:", "inputs.shape[0] assert not torch.isnan(loglikelihood).any() kl_div = -0.5 * torch.sum(1 + self.logvar - self.mu.pow(2)", "inputs.to(self._device) logtheta = self.forward(inputs) loglikelihood = -self.loglikelihood(reduction='sum')(logtheta, inputs) / inputs.shape[0] assert not torch.isnan(loglikelihood).any()", "dataloader: mini_batch_log_densities = self._evaluate_probability(inputs) all_log_densities.extend(mini_batch_log_densities) all_log_densities = np.array(all_log_densities) return all_log_densities def _evaluate_probability(self, inputs):", "= [] log_densities = [] for inputs, targets in dataloader: pred, mini_batch_log_densities =", "return '{} min., {} sec.'.format(minutes, seconds) # def _remove_spam(self, dataloader, data): # idx_to_remove", "encoder_network.append(nn.Linear(layer['in_features'], layer['out_features'])) elif layer['type'] == 'relu': encoder_network.append(nn.ReLU()) elif layer['type'] == 'tanh': encoder_network.append(nn.Tanh()) elif", "mean and the variance of z ~ q(z|x). The prior of x is", "predictions = [] ground_truth = [] log_densities = [] for inputs, targets in", "inputs, targets in dataloader: pred, mini_batch_log_densities = self.predict(inputs) predictions.extend(pred) ground_truth.extend(list(self._to_numpy(targets))) log_densities.extend(mini_batch_log_densities) log_densities =", "(tensor, tensor) -- mean and variance of the latent variable output from the", "epoch_time = self._get_time(start_time, time.time()) f1, acc, prec, recall, _, _ = self.evaluate(trainloader) storage['precision'].append(prec)", "storage['kldiv'][-1], epoch_time)) print('F1. {:.3f} | acc. {:.3f} | prec.: {:.3f} | rec. {:.3f}'.format(f1,", "def _get_parameters(self, dataloader): self.eval() parameters = [] for inputs, _ in dataloader: inputs", "logvar = self.read_logvar(hidden_state) return mean, logvar class Decoder(nn.Module): \"\"\" Decoder \"\"\" def __init__(self," ]
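The overlapping fragments in the list above were extracted from a PyTorch variational autoencoder: an encoder that emits mu/logvar for q(z|x), reparameterized sampling of z, a Poisson (or Bernoulli) decoder, and an ELBO-style loss of negative log-likelihood plus KL divergence to the N(0, I) prior. Below is a minimal sketch of that structure reconstructed from the fragments; the name TinyVAE, the helper elbo_loss, and all layer/latent sizes are illustrative assumptions, since the original assembles its networks from a config file.

```python
# Minimal sketch of the VAE structure visible in the fragments above.
# TinyVAE, elbo_loss, and all layer sizes are illustrative assumptions;
# the original model builds its encoder/decoder layers from a config file.
import torch
from torch import nn


class TinyVAE(nn.Module):
    def __init__(self, input_dim, latent_dim=8, hidden_dim=64):
        super().__init__()
        # Encoder: x -> hidden -> (mu, logvar) of q(z|x)
        self.encoder = nn.Sequential(nn.Linear(input_dim, hidden_dim), nn.ReLU())
        self.read_mu = nn.Linear(hidden_dim, latent_dim)
        self.read_logvar = nn.Linear(hidden_dim, latent_dim)
        # Decoder: z -> log-rate (logtheta) of a Poisson observation model
        self.decoder = nn.Sequential(nn.Linear(latent_dim, hidden_dim), nn.ReLU(),
                                     nn.Linear(hidden_dim, input_dim))

    def _sample_z(self, mu, logvar):
        # Reparameterization trick: z = mu + sigma * epsilon, epsilon ~ N(0, I)
        epsilon = torch.randn_like(mu)
        return mu + torch.exp(0.5 * logvar) * epsilon

    def forward(self, x):
        hidden = self.encoder(x)
        mu, logvar = self.read_mu(hidden), self.read_logvar(hidden)
        z = self._sample_z(mu, logvar)
        return self.decoder(z), mu, logvar


def elbo_loss(logtheta, x, mu, logvar):
    # Poisson negative log-likelihood (log-rate input) plus the analytic KL
    # divergence between q(z|x) and the N(0, I) prior, averaged per sample.
    nll = nn.PoissonNLLLoss(log_input=True, reduction='sum')(logtheta, x) / x.shape[0]
    kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) / x.shape[0]
    return nll + kl


if __name__ == '__main__':
    x = torch.poisson(3.0 * torch.ones(16, 20))  # toy count data
    model = TinyVAE(input_dim=20)
    logtheta, mu, logvar = model(x)
    print(elbo_loss(logtheta, x, mu, logvar))
```

The fragments also show a fit/evaluate/predict loop wrapped around this core (Adam optimizer, F1-based checkpointing, and a log-density threshold used to flag anomalous inputs); that training scaffolding is omitted from the sketch for brevity.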
[ "= 'http://' + zap_ip + ':' + str(port) zap = ZAPv2(proxies={'http': http_proxy, 'https':", "'localhost' port = 12345 spiderTimeoutInMin = 2 startupTimeoutInMin=1 target='http://localhost:8080' def main(argv): #Initialize Zap", "https_proxy}) #Check untill zap is running wait_for_zap_start(zap, startupTimeoutInMin*60) #Check that target is reachable", "def main(argv): #Initialize Zap API http_proxy = 'http://' + zap_ip + ':' +", "target) # Use both spider zap_spider(zap, target) zap_ajax_spider(zap, target, spiderTimeoutInMin) if __name__ ==", "+ ':' + str(port) https_proxy = 'http://' + zap_ip + ':' + str(port)", "Spider and start listening for passive requests import sys from zapv2 import ZAPv2", "and start listening for passive requests import sys from zapv2 import ZAPv2 from", "= 'localhost' port = 12345 spiderTimeoutInMin = 2 startupTimeoutInMin=1 target='http://localhost:8080' def main(argv): #Initialize", "from zapv2 import ZAPv2 from zap_common import * #Configuration zap_ip = 'localhost' port", "#Initialize Zap API http_proxy = 'http://' + zap_ip + ':' + str(port) https_proxy", "= 12345 spiderTimeoutInMin = 2 startupTimeoutInMin=1 target='http://localhost:8080' def main(argv): #Initialize Zap API http_proxy", "# Use both spider zap_spider(zap, target) zap_ajax_spider(zap, target, spiderTimeoutInMin) if __name__ == \"__main__\":", "Use both spider zap_spider(zap, target) zap_ajax_spider(zap, target, spiderTimeoutInMin) if __name__ == \"__main__\": main(sys.argv[1:])", "zap = ZAPv2(proxies={'http': http_proxy, 'https': https_proxy}) #Check untill zap is running wait_for_zap_start(zap, startupTimeoutInMin*60)", "str(port) https_proxy = 'http://' + zap_ip + ':' + str(port) zap = ZAPv2(proxies={'http':", "+ str(port) zap = ZAPv2(proxies={'http': http_proxy, 'https': https_proxy}) #Check untill zap is running", "+ zap_ip + ':' + str(port) https_proxy = 'http://' + zap_ip + ':'", "+ str(port) https_proxy = 'http://' + zap_ip + ':' + str(port) zap =", "+ zap_ip + ':' + str(port) zap = ZAPv2(proxies={'http': http_proxy, 'https': https_proxy}) #Check", "zap_access_target(zap, target) # Use both spider zap_spider(zap, target) zap_ajax_spider(zap, target, spiderTimeoutInMin) if __name__", "zap is running wait_for_zap_start(zap, startupTimeoutInMin*60) #Check that target is reachable zap_access_target(zap, target) #", "'http://' + zap_ip + ':' + str(port) zap = ZAPv2(proxies={'http': http_proxy, 'https': https_proxy})", "main(argv): #Initialize Zap API http_proxy = 'http://' + zap_ip + ':' + str(port)", "is running wait_for_zap_start(zap, startupTimeoutInMin*60) #Check that target is reachable zap_access_target(zap, target) # Use", "#Check that target is reachable zap_access_target(zap, target) # Use both spider zap_spider(zap, target)", "zap_common import * #Configuration zap_ip = 'localhost' port = 12345 spiderTimeoutInMin = 2", "import * #Configuration zap_ip = 'localhost' port = 12345 spiderTimeoutInMin = 2 startupTimeoutInMin=1", "* #Configuration zap_ip = 'localhost' port = 12345 spiderTimeoutInMin = 2 startupTimeoutInMin=1 target='http://localhost:8080'", "= 2 startupTimeoutInMin=1 target='http://localhost:8080' def main(argv): #Initialize Zap API http_proxy = 'http://' +", "from zap_common import * #Configuration zap_ip = 'localhost' port = 12345 spiderTimeoutInMin =", "reachable zap_access_target(zap, target) # Use both spider zap_spider(zap, target) zap_ajax_spider(zap, target, spiderTimeoutInMin) if", "is reachable zap_access_target(zap, target) # Use both spider zap_spider(zap, 
target) zap_ajax_spider(zap, target, spiderTimeoutInMin)", "12345 spiderTimeoutInMin = 2 startupTimeoutInMin=1 target='http://localhost:8080' def main(argv): #Initialize Zap API http_proxy =", "import sys from zapv2 import ZAPv2 from zap_common import * #Configuration zap_ip =", "ZAPv2(proxies={'http': http_proxy, 'https': https_proxy}) #Check untill zap is running wait_for_zap_start(zap, startupTimeoutInMin*60) #Check that", "startupTimeoutInMin=1 target='http://localhost:8080' def main(argv): #Initialize Zap API http_proxy = 'http://' + zap_ip +", "str(port) zap = ZAPv2(proxies={'http': http_proxy, 'https': https_proxy}) #Check untill zap is running wait_for_zap_start(zap,", "target='http://localhost:8080' def main(argv): #Initialize Zap API http_proxy = 'http://' + zap_ip + ':'", "port = 12345 spiderTimeoutInMin = 2 startupTimeoutInMin=1 target='http://localhost:8080' def main(argv): #Initialize Zap API", "sys from zapv2 import ZAPv2 from zap_common import * #Configuration zap_ip = 'localhost'", "= ZAPv2(proxies={'http': http_proxy, 'https': https_proxy}) #Check untill zap is running wait_for_zap_start(zap, startupTimeoutInMin*60) #Check", "http_proxy = 'http://' + zap_ip + ':' + str(port) https_proxy = 'http://' +", "passive requests import sys from zapv2 import ZAPv2 from zap_common import * #Configuration", "':' + str(port) zap = ZAPv2(proxies={'http': http_proxy, 'https': https_proxy}) #Check untill zap is", "API http_proxy = 'http://' + zap_ip + ':' + str(port) https_proxy = 'http://'", "running wait_for_zap_start(zap, startupTimeoutInMin*60) #Check that target is reachable zap_access_target(zap, target) # Use both", "ZAPv2 from zap_common import * #Configuration zap_ip = 'localhost' port = 12345 spiderTimeoutInMin", "zap_ip + ':' + str(port) https_proxy = 'http://' + zap_ip + ':' +", "spiderTimeoutInMin = 2 startupTimeoutInMin=1 target='http://localhost:8080' def main(argv): #Initialize Zap API http_proxy = 'http://'", "startupTimeoutInMin*60) #Check that target is reachable zap_access_target(zap, target) # Use both spider zap_spider(zap,", "= 'http://' + zap_ip + ':' + str(port) https_proxy = 'http://' + zap_ip", "# Spider and start listening for passive requests import sys from zapv2 import", "for passive requests import sys from zapv2 import ZAPv2 from zap_common import *", "that target is reachable zap_access_target(zap, target) # Use both spider zap_spider(zap, target) zap_ajax_spider(zap,", "+ ':' + str(port) zap = ZAPv2(proxies={'http': http_proxy, 'https': https_proxy}) #Check untill zap", "':' + str(port) https_proxy = 'http://' + zap_ip + ':' + str(port) zap", "'https': https_proxy}) #Check untill zap is running wait_for_zap_start(zap, startupTimeoutInMin*60) #Check that target is", "python # Spider and start listening for passive requests import sys from zapv2", "listening for passive requests import sys from zapv2 import ZAPv2 from zap_common import", "https_proxy = 'http://' + zap_ip + ':' + str(port) zap = ZAPv2(proxies={'http': http_proxy,", "wait_for_zap_start(zap, startupTimeoutInMin*60) #Check that target is reachable zap_access_target(zap, target) # Use both spider", "start listening for passive requests import sys from zapv2 import ZAPv2 from zap_common", "import ZAPv2 from zap_common import * #Configuration zap_ip = 'localhost' port = 12345", "Zap API http_proxy = 'http://' + zap_ip + ':' + str(port) https_proxy =", "zap_ip = 'localhost' port = 12345 spiderTimeoutInMin = 2 startupTimeoutInMin=1 target='http://localhost:8080' def main(argv):", "'http://' + 
zap_ip + ':' + str(port) https_proxy = 'http://' + zap_ip +", "target is reachable zap_access_target(zap, target) # Use both spider zap_spider(zap, target) zap_ajax_spider(zap, target,", "http_proxy, 'https': https_proxy}) #Check untill zap is running wait_for_zap_start(zap, startupTimeoutInMin*60) #Check that target", "requests import sys from zapv2 import ZAPv2 from zap_common import * #Configuration zap_ip", "#Check untill zap is running wait_for_zap_start(zap, startupTimeoutInMin*60) #Check that target is reachable zap_access_target(zap,", "2 startupTimeoutInMin=1 target='http://localhost:8080' def main(argv): #Initialize Zap API http_proxy = 'http://' + zap_ip", "#Configuration zap_ip = 'localhost' port = 12345 spiderTimeoutInMin = 2 startupTimeoutInMin=1 target='http://localhost:8080' def", "zapv2 import ZAPv2 from zap_common import * #Configuration zap_ip = 'localhost' port =", "zap_ip + ':' + str(port) zap = ZAPv2(proxies={'http': http_proxy, 'https': https_proxy}) #Check untill", "untill zap is running wait_for_zap_start(zap, startupTimeoutInMin*60) #Check that target is reachable zap_access_target(zap, target)", "#!/usr/bin/env python # Spider and start listening for passive requests import sys from" ]
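The fragments in the list above overlap enough to reassemble the underlying OWASP ZAP spider script almost verbatim; a possible reassembly follows. Note that wait_for_zap_start, zap_access_target, zap_spider, and zap_ajax_spider are helpers imported from the project's zap_common module (as the fragments show), not part of the zapv2 package itself.

```python
#!/usr/bin/env python
# Spider and start listening for passive requests
import sys

from zapv2 import ZAPv2
from zap_common import *

# Configuration
zap_ip = 'localhost'
port = 12345
spiderTimeoutInMin = 2
startupTimeoutInMin = 1
target = 'http://localhost:8080'


def main(argv):
    # Initialize the ZAP API client through the local proxy
    http_proxy = 'http://' + zap_ip + ':' + str(port)
    https_proxy = 'http://' + zap_ip + ':' + str(port)
    zap = ZAPv2(proxies={'http': http_proxy, 'https': https_proxy})

    # Wait until ZAP is running, then check that the target is reachable
    wait_for_zap_start(zap, startupTimeoutInMin * 60)
    zap_access_target(zap, target)

    # Use both spiders
    zap_spider(zap, target)
    zap_ajax_spider(zap, target, spiderTimeoutInMin)


if __name__ == "__main__":
    main(sys.argv[1:])
```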
[ "button_modal_close.click() support_link = driver.find_element_by_xpath('//*[text()[contains(., \"Support\")]]') driver.execute_script(\"arguments[0].click();\", support_link) assert 'Witamy w supporcie!' in driver.find_element_by_tag_name('h2').get_attribute('innerHTML')", "= driver.find_element_by_xpath('//*[text()[contains(., \"Support\")]]') driver.execute_script(\"arguments[0].click();\", support_link) assert 'Witamy w supporcie!' in driver.find_element_by_tag_name('h2').get_attribute('innerHTML') def test_login_to_dashboard(self,", "in button_modal.get_attribute('innerHTML') button_modal.click() button_modal_close = driver.find_element_by_id('modal-trigger2') assert 'Cancel' in button_modal_close.get_attribute('innerHTML') button_modal_close.click() support_link =", "button_login = driver.find_element_by_css_selector('button') assert 'Zaloguj się' in button_login.get_attribute('innerHTML') button_modal = driver.find_element_by_id('modal-trigger') assert 'Czy", "tym serwisie po raz pierwszy?' in button_modal.get_attribute('innerHTML') button_modal.click() button_modal_close = driver.find_element_by_id('modal-trigger2') assert 'Cancel'", "button_modal_close = driver.find_element_by_id('modal-trigger2') assert 'Cancel' in button_modal_close.get_attribute('innerHTML') button_modal_close.click() support_link = driver.find_element_by_xpath('//*[text()[contains(., \"Support\")]]') driver.execute_script(\"arguments[0].click();\",", "support_link = driver.find_element_by_xpath('//*[text()[contains(., \"Support\")]]') driver.execute_script(\"arguments[0].click();\", support_link) assert 'Witamy w supporcie!' in driver.find_element_by_tag_name('h2').get_attribute('innerHTML') def", "waiting for animation time.sleep(2) button_login = driver.find_element_by_css_selector('button') assert 'Zaloguj się' in button_login.get_attribute('innerHTML') button_modal", "driver.find_element_by_css_selector('button') assert 'Zaloguj się' in button_login.get_attribute('innerHTML') button_modal = driver.find_element_by_id('modal-trigger') assert 'Czy jesteś w", "helpers import * class TestLandingPageLoading: def test_get_base_url(self, driver): driver.get(BASE_URL) # waiting for animation", "time.sleep(2) button_login = driver.find_element_by_css_selector('button') assert 'Zaloguj się' in button_login.get_attribute('innerHTML') button_modal = driver.find_element_by_id('modal-trigger') assert", "\"Support\")]]') driver.execute_script(\"arguments[0].click();\", support_link) assert 'Witamy w supporcie!' in driver.find_element_by_tag_name('h2').get_attribute('innerHTML') def test_login_to_dashboard(self, driver): login(driver,", "driver): driver.get(BASE_URL) # waiting for animation time.sleep(2) button_login = driver.find_element_by_css_selector('button') assert 'Zaloguj się'", "= driver.find_element_by_id('modal-trigger2') assert 'Cancel' in button_modal_close.get_attribute('innerHTML') button_modal_close.click() support_link = driver.find_element_by_xpath('//*[text()[contains(., \"Support\")]]') driver.execute_script(\"arguments[0].click();\", support_link)", "button_login.get_attribute('innerHTML') button_modal = driver.find_element_by_id('modal-trigger') assert 'Czy jesteś w tym serwisie po raz pierwszy?'", "jesteś w tym serwisie po raz pierwszy?' 
in button_modal.get_attribute('innerHTML') button_modal.click() button_modal_close = driver.find_element_by_id('modal-trigger2')", "TestLandingPageLoading: def test_get_base_url(self, driver): driver.get(BASE_URL) # waiting for animation time.sleep(2) button_login = driver.find_element_by_css_selector('button')", "= driver.find_element_by_css_selector('button') assert 'Zaloguj się' in button_login.get_attribute('innerHTML') button_modal = driver.find_element_by_id('modal-trigger') assert 'Czy jesteś", "test_get_base_url(self, driver): driver.get(BASE_URL) # waiting for animation time.sleep(2) button_login = driver.find_element_by_css_selector('button') assert 'Zaloguj", "for animation time.sleep(2) button_login = driver.find_element_by_css_selector('button') assert 'Zaloguj się' in button_login.get_attribute('innerHTML') button_modal =", "'Cancel' in button_modal_close.get_attribute('innerHTML') button_modal_close.click() support_link = driver.find_element_by_xpath('//*[text()[contains(., \"Support\")]]') driver.execute_script(\"arguments[0].click();\", support_link) assert 'Witamy w", "w tym serwisie po raz pierwszy?' in button_modal.get_attribute('innerHTML') button_modal.click() button_modal_close = driver.find_element_by_id('modal-trigger2') assert", "button_modal_close.get_attribute('innerHTML') button_modal_close.click() support_link = driver.find_element_by_xpath('//*[text()[contains(., \"Support\")]]') driver.execute_script(\"arguments[0].click();\", support_link) assert 'Witamy w supporcie!' in", "assert 'Zaloguj się' in button_login.get_attribute('innerHTML') button_modal = driver.find_element_by_id('modal-trigger') assert 'Czy jesteś w tym", "<reponame>Valaraucoo/raven-functional-tests from helpers import * class TestLandingPageLoading: def test_get_base_url(self, driver): driver.get(BASE_URL) # waiting", "= driver.find_element_by_id('modal-trigger') assert 'Czy jesteś w tym serwisie po raz pierwszy?' in button_modal.get_attribute('innerHTML')", "button_modal.get_attribute('innerHTML') button_modal.click() button_modal_close = driver.find_element_by_id('modal-trigger2') assert 'Cancel' in button_modal_close.get_attribute('innerHTML') button_modal_close.click() support_link = driver.find_element_by_xpath('//*[text()[contains(.,", "się' in button_login.get_attribute('innerHTML') button_modal = driver.find_element_by_id('modal-trigger') assert 'Czy jesteś w tym serwisie po", "raz pierwszy?' in button_modal.get_attribute('innerHTML') button_modal.click() button_modal_close = driver.find_element_by_id('modal-trigger2') assert 'Cancel' in button_modal_close.get_attribute('innerHTML') button_modal_close.click()", "po raz pierwszy?' in button_modal.get_attribute('innerHTML') button_modal.click() button_modal_close = driver.find_element_by_id('modal-trigger2') assert 'Cancel' in button_modal_close.get_attribute('innerHTML')", "from helpers import * class TestLandingPageLoading: def test_get_base_url(self, driver): driver.get(BASE_URL) # waiting for", "class TestLandingPageLoading: def test_get_base_url(self, driver): driver.get(BASE_URL) # waiting for animation time.sleep(2) button_login =", "# waiting for animation time.sleep(2) button_login = driver.find_element_by_css_selector('button') assert 'Zaloguj się' in button_login.get_attribute('innerHTML')", "support_link) assert 'Witamy w supporcie!' in driver.find_element_by_tag_name('h2').get_attribute('innerHTML') def test_login_to_dashboard(self, driver): login(driver, '<EMAIL>', 'admin')", "serwisie po raz pierwszy?' 
in button_modal.get_attribute('innerHTML') button_modal.click() button_modal_close = driver.find_element_by_id('modal-trigger2') assert 'Cancel' in", "def test_get_base_url(self, driver): driver.get(BASE_URL) # waiting for animation time.sleep(2) button_login = driver.find_element_by_css_selector('button') assert", "assert 'Cancel' in button_modal_close.get_attribute('innerHTML') button_modal_close.click() support_link = driver.find_element_by_xpath('//*[text()[contains(., \"Support\")]]') driver.execute_script(\"arguments[0].click();\", support_link) assert 'Witamy", "driver.find_element_by_id('modal-trigger2') assert 'Cancel' in button_modal_close.get_attribute('innerHTML') button_modal_close.click() support_link = driver.find_element_by_xpath('//*[text()[contains(., \"Support\")]]') driver.execute_script(\"arguments[0].click();\", support_link) assert", "animation time.sleep(2) button_login = driver.find_element_by_css_selector('button') assert 'Zaloguj się' in button_login.get_attribute('innerHTML') button_modal = driver.find_element_by_id('modal-trigger')", "pierwszy?' in button_modal.get_attribute('innerHTML') button_modal.click() button_modal_close = driver.find_element_by_id('modal-trigger2') assert 'Cancel' in button_modal_close.get_attribute('innerHTML') button_modal_close.click() support_link", "driver.find_element_by_xpath('//*[text()[contains(., \"Support\")]]') driver.execute_script(\"arguments[0].click();\", support_link) assert 'Witamy w supporcie!' in driver.find_element_by_tag_name('h2').get_attribute('innerHTML') def test_login_to_dashboard(self, driver):", "'Czy jesteś w tym serwisie po raz pierwszy?' in button_modal.get_attribute('innerHTML') button_modal.click() button_modal_close =", "in button_modal_close.get_attribute('innerHTML') button_modal_close.click() support_link = driver.find_element_by_xpath('//*[text()[contains(., \"Support\")]]') driver.execute_script(\"arguments[0].click();\", support_link) assert 'Witamy w supporcie!'", "driver.execute_script(\"arguments[0].click();\", support_link) assert 'Witamy w supporcie!' in driver.find_element_by_tag_name('h2').get_attribute('innerHTML') def test_login_to_dashboard(self, driver): login(driver, '<EMAIL>',", "assert 'Czy jesteś w tym serwisie po raz pierwszy?' in button_modal.get_attribute('innerHTML') button_modal.click() button_modal_close", "import * class TestLandingPageLoading: def test_get_base_url(self, driver): driver.get(BASE_URL) # waiting for animation time.sleep(2)", "button_modal.click() button_modal_close = driver.find_element_by_id('modal-trigger2') assert 'Cancel' in button_modal_close.get_attribute('innerHTML') button_modal_close.click() support_link = driver.find_element_by_xpath('//*[text()[contains(., \"Support\")]]')", "driver.find_element_by_id('modal-trigger') assert 'Czy jesteś w tym serwisie po raz pierwszy?' 
in button_modal.get_attribute('innerHTML') button_modal.click()", "* class TestLandingPageLoading: def test_get_base_url(self, driver): driver.get(BASE_URL) # waiting for animation time.sleep(2) button_login", "'Zaloguj się' in button_login.get_attribute('innerHTML') button_modal = driver.find_element_by_id('modal-trigger') assert 'Czy jesteś w tym serwisie", "driver.get(BASE_URL) # waiting for animation time.sleep(2) button_login = driver.find_element_by_css_selector('button') assert 'Zaloguj się' in", "in button_login.get_attribute('innerHTML') button_modal = driver.find_element_by_id('modal-trigger') assert 'Czy jesteś w tym serwisie po raz", "button_modal = driver.find_element_by_id('modal-trigger') assert 'Czy jesteś w tym serwisie po raz pierwszy?' in" ]
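# The tests above pull BASE_URL, login() and the pytest `driver` argument from the project's
# helpers/conftest, which are not part of this fragment. The sketch below is only a guess at
# their shape; every route, selector and constant in it is hypothetical.
import pytest
from selenium import webdriver

BASE_URL = 'http://localhost:3000'  # hypothetical

@pytest.fixture
def driver():
    d = webdriver.Chrome()
    yield d
    d.quit()

def login(driver, email, password):
    driver.get(BASE_URL + '/login')                                   # hypothetical route
    driver.find_element_by_name('email').send_keys(email)             # hypothetical field names
    driver.find_element_by_name('password').send_keys(password)
    driver.find_element_by_css_selector('button[type="submit"]').click()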
[ "HTTPError(CordedError): def __init__(self, response: ClientResponse): self.response = response class BadRequest(HTTPError): pass class Unauthorized(HTTPError):", "response: ClientResponse): self.response = response class BadRequest(HTTPError): pass class Unauthorized(HTTPError): pass class Forbidden(HTTPError):", "ClientResponse class CordedError(Exception): pass # HTTP Errors class HTTPError(CordedError): def __init__(self, response: ClientResponse):", "from aiohttp import ClientResponse class CordedError(Exception): pass # HTTP Errors class HTTPError(CordedError): def", "CordedError(Exception): pass # HTTP Errors class HTTPError(CordedError): def __init__(self, response: ClientResponse): self.response =", "pass class NotFound(HTTPError): pass class PayloadTooLarge(HTTPError): pass class TooManyRequests(HTTPError): pass class DiscordServerError(HTTPError): pass", "= response class BadRequest(HTTPError): pass class Unauthorized(HTTPError): pass class Forbidden(HTTPError): pass class NotFound(HTTPError):", "class CordedError(Exception): pass # HTTP Errors class HTTPError(CordedError): def __init__(self, response: ClientResponse): self.response", "pass # HTTP Errors class HTTPError(CordedError): def __init__(self, response: ClientResponse): self.response = response", "import ClientResponse class CordedError(Exception): pass # HTTP Errors class HTTPError(CordedError): def __init__(self, response:", "HTTP Errors class HTTPError(CordedError): def __init__(self, response: ClientResponse): self.response = response class BadRequest(HTTPError):", "# HTTP Errors class HTTPError(CordedError): def __init__(self, response: ClientResponse): self.response = response class", "Forbidden(HTTPError): pass class NotFound(HTTPError): pass class PayloadTooLarge(HTTPError): pass class TooManyRequests(HTTPError): pass class DiscordServerError(HTTPError):", "Errors class HTTPError(CordedError): def __init__(self, response: ClientResponse): self.response = response class BadRequest(HTTPError): pass", "__init__(self, response: ClientResponse): self.response = response class BadRequest(HTTPError): pass class Unauthorized(HTTPError): pass class", "class Unauthorized(HTTPError): pass class Forbidden(HTTPError): pass class NotFound(HTTPError): pass class PayloadTooLarge(HTTPError): pass class", "ClientResponse): self.response = response class BadRequest(HTTPError): pass class Unauthorized(HTTPError): pass class Forbidden(HTTPError): pass", "response class BadRequest(HTTPError): pass class Unauthorized(HTTPError): pass class Forbidden(HTTPError): pass class NotFound(HTTPError): pass", "Unauthorized(HTTPError): pass class Forbidden(HTTPError): pass class NotFound(HTTPError): pass class PayloadTooLarge(HTTPError): pass class TooManyRequests(HTTPError):", "aiohttp import ClientResponse class CordedError(Exception): pass # HTTP Errors class HTTPError(CordedError): def __init__(self,", "class BadRequest(HTTPError): pass class Unauthorized(HTTPError): pass class Forbidden(HTTPError): pass class NotFound(HTTPError): pass class", "pass class Forbidden(HTTPError): pass class NotFound(HTTPError): pass class PayloadTooLarge(HTTPError): pass class TooManyRequests(HTTPError): pass", "<reponame>an-dyy/Corded from aiohttp import ClientResponse class CordedError(Exception): pass # HTTP Errors class HTTPError(CordedError):", "pass class Unauthorized(HTTPError): pass class Forbidden(HTTPError): pass class NotFound(HTTPError): pass class PayloadTooLarge(HTTPError): pass", "self.response = response class BadRequest(HTTPError): pass class 
Unauthorized(HTTPError): pass class Forbidden(HTTPError): pass class", "class Forbidden(HTTPError): pass class NotFound(HTTPError): pass class PayloadTooLarge(HTTPError): pass class TooManyRequests(HTTPError): pass class", "def __init__(self, response: ClientResponse): self.response = response class BadRequest(HTTPError): pass class Unauthorized(HTTPError): pass", "BadRequest(HTTPError): pass class Unauthorized(HTTPError): pass class Forbidden(HTTPError): pass class NotFound(HTTPError): pass class PayloadTooLarge(HTTPError):", "class HTTPError(CordedError): def __init__(self, response: ClientResponse): self.response = response class BadRequest(HTTPError): pass class" ]
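# One common way such a hierarchy is used (an illustration, not Corded's actual request code):
# map the HTTP status of a failed Discord REST call to the matching subclass and raise it,
# falling back to the generic HTTPError for anything unmapped.
_STATUS_TO_ERROR = {
    400: BadRequest,
    401: Unauthorized,
    403: Forbidden,
    404: NotFound,
    413: PayloadTooLarge,
    429: TooManyRequests,
}

def raise_for_response(response: ClientResponse) -> None:
    """Raise the exception class that corresponds to a failed aiohttp response."""
    if response.status < 400:
        return
    if response.status >= 500:
        raise DiscordServerError(response)
    raise _STATUS_TO_ERROR.get(response.status, HTTPError)(response)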
[ "Sentence_XLM: @staticmethod def get_default_model(): return XlmRoBertaSentenceEmbeddings.pretrained() \\ .setInputCols(\"sentence\", \"token\") \\ .setOutputCol(\"sentence_xlm_roberta\") @staticmethod def", "XlmRoBertaSentenceEmbeddings class Sentence_XLM: @staticmethod def get_default_model(): return XlmRoBertaSentenceEmbeddings.pretrained() \\ .setInputCols(\"sentence\", \"token\") \\ .setOutputCol(\"sentence_xlm_roberta\")", "@staticmethod def get_default_model(): return XlmRoBertaSentenceEmbeddings.pretrained() \\ .setInputCols(\"sentence\", \"token\") \\ .setOutputCol(\"sentence_xlm_roberta\") @staticmethod def get_pretrained_model(name,", "class Sentence_XLM: @staticmethod def get_default_model(): return XlmRoBertaSentenceEmbeddings.pretrained() \\ .setInputCols(\"sentence\", \"token\") \\ .setOutputCol(\"sentence_xlm_roberta\") @staticmethod", "\\ .setInputCols(\"sentence\", \"token\") \\ .setOutputCol(\"sentence_xlm_roberta\") @staticmethod def get_pretrained_model(name, language): return XlmRoBertaSentenceEmbeddings.pretrained(name, language) \\", "\"token\") \\ .setOutputCol(\"sentence_xlm_roberta\") @staticmethod def get_pretrained_model(name, language): return XlmRoBertaSentenceEmbeddings.pretrained(name, language) \\ .setInputCols(\"sentence\", \"token\")", "get_default_model(): return XlmRoBertaSentenceEmbeddings.pretrained() \\ .setInputCols(\"sentence\", \"token\") \\ .setOutputCol(\"sentence_xlm_roberta\") @staticmethod def get_pretrained_model(name, language): return", "sparknlp.annotator import XlmRoBertaSentenceEmbeddings class Sentence_XLM: @staticmethod def get_default_model(): return XlmRoBertaSentenceEmbeddings.pretrained() \\ .setInputCols(\"sentence\", \"token\")", "return XlmRoBertaSentenceEmbeddings.pretrained() \\ .setInputCols(\"sentence\", \"token\") \\ .setOutputCol(\"sentence_xlm_roberta\") @staticmethod def get_pretrained_model(name, language): return XlmRoBertaSentenceEmbeddings.pretrained(name,", "XlmRoBertaSentenceEmbeddings.pretrained() \\ .setInputCols(\"sentence\", \"token\") \\ .setOutputCol(\"sentence_xlm_roberta\") @staticmethod def get_pretrained_model(name, language): return XlmRoBertaSentenceEmbeddings.pretrained(name, language)", "def get_default_model(): return XlmRoBertaSentenceEmbeddings.pretrained() \\ .setInputCols(\"sentence\", \"token\") \\ .setOutputCol(\"sentence_xlm_roberta\") @staticmethod def get_pretrained_model(name, language):", "\\ .setOutputCol(\"sentence_xlm_roberta\") @staticmethod def get_pretrained_model(name, language): return XlmRoBertaSentenceEmbeddings.pretrained(name, language) \\ .setInputCols(\"sentence\", \"token\") \\", ".setOutputCol(\"sentence_xlm_roberta\") @staticmethod def get_pretrained_model(name, language): return XlmRoBertaSentenceEmbeddings.pretrained(name, language) \\ .setInputCols(\"sentence\", \"token\") \\ .setOutputCol(\"sentence_xlm_roberta\")", "from sparknlp.annotator import XlmRoBertaSentenceEmbeddings class Sentence_XLM: @staticmethod def get_default_model(): return XlmRoBertaSentenceEmbeddings.pretrained() \\ .setInputCols(\"sentence\",", ".setInputCols(\"sentence\", \"token\") \\ .setOutputCol(\"sentence_xlm_roberta\") @staticmethod def get_pretrained_model(name, language): return XlmRoBertaSentenceEmbeddings.pretrained(name, language) \\ .setInputCols(\"sentence\",", "import XlmRoBertaSentenceEmbeddings class Sentence_XLM: @staticmethod def get_default_model(): return XlmRoBertaSentenceEmbeddings.pretrained() \\ .setInputCols(\"sentence\", \"token\") \\" ]
[ "import json import pprint def pformat(value): \"\"\" Format given object: Try JSON fist", "\"\"\" Format given object: Try JSON fist and fallback to pformat() (JSON dumps", "and fallback to pformat() (JSON dumps are nicer than pprint.pformat() ;) \"\"\" try:", ";) \"\"\" try: value = json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False) except TypeError: # Fallback", "Try JSON fist and fallback to pformat() (JSON dumps are nicer than pprint.pformat()", "Fallback if values are not serializable with JSON: value = pprint.pformat(value, width=120) return", "dumps are nicer than pprint.pformat() ;) \"\"\" try: value = json.dumps(value, indent=4, sort_keys=True,", "are nicer than pprint.pformat() ;) \"\"\" try: value = json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False)", "pprint def pformat(value): \"\"\" Format given object: Try JSON fist and fallback to", "except TypeError: # Fallback if values are not serializable with JSON: value =", "def pformat(value): \"\"\" Format given object: Try JSON fist and fallback to pformat()", "# Fallback if values are not serializable with JSON: value = pprint.pformat(value, width=120)", "sort_keys=True, ensure_ascii=False) except TypeError: # Fallback if values are not serializable with JSON:", "given object: Try JSON fist and fallback to pformat() (JSON dumps are nicer", "\"\"\" try: value = json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False) except TypeError: # Fallback if", "pformat(value): \"\"\" Format given object: Try JSON fist and fallback to pformat() (JSON", "Format given object: Try JSON fist and fallback to pformat() (JSON dumps are", "(JSON dumps are nicer than pprint.pformat() ;) \"\"\" try: value = json.dumps(value, indent=4,", "pformat() (JSON dumps are nicer than pprint.pformat() ;) \"\"\" try: value = json.dumps(value,", "pprint.pformat() ;) \"\"\" try: value = json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False) except TypeError: #", "TypeError: # Fallback if values are not serializable with JSON: value = pprint.pformat(value,", "fallback to pformat() (JSON dumps are nicer than pprint.pformat() ;) \"\"\" try: value", "= json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False) except TypeError: # Fallback if values are not", "indent=4, sort_keys=True, ensure_ascii=False) except TypeError: # Fallback if values are not serializable with", "import pprint def pformat(value): \"\"\" Format given object: Try JSON fist and fallback", "nicer than pprint.pformat() ;) \"\"\" try: value = json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False) except", "fist and fallback to pformat() (JSON dumps are nicer than pprint.pformat() ;) \"\"\"", "value = json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False) except TypeError: # Fallback if values are", "json import pprint def pformat(value): \"\"\" Format given object: Try JSON fist and", "to pformat() (JSON dumps are nicer than pprint.pformat() ;) \"\"\" try: value =", "json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False) except TypeError: # Fallback if values are not serializable", "if values are not serializable with JSON: value = pprint.pformat(value, width=120) return value", "ensure_ascii=False) except TypeError: # Fallback if values are not serializable with JSON: value", "than pprint.pformat() ;) \"\"\" try: value = json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False) except TypeError:", "JSON fist and fallback to pformat() (JSON dumps are nicer than pprint.pformat() ;)", "try: value = json.dumps(value, 
indent=4, sort_keys=True, ensure_ascii=False) except TypeError: # Fallback if values", "object: Try JSON fist and fallback to pformat() (JSON dumps are nicer than" ]
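# Quick usage check for the helper above: JSON-serializable values come back as indented,
# key-sorted JSON, while something json.dumps() rejects (e.g. a set) drops to the pprint fallback.
if __name__ == "__main__":
    print(pformat({"b": 1, "a": [1, 2]}))    # pretty JSON with sorted keys
    print(pformat({"objects": {3, 1, 2}}))   # sets are not JSON-serializable -> pprint fallback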
[ "\"data/coco.names\", directory='./data/') print(files) net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0) meta = cdarknet.load_meta(\"coco.data\".encode()) r =", "# model_path = \"./\" # d = darknet_extractor() # result = d.inference('yolov3', model_filename,", "as np if cls.sanity_check(architecture): download_file(cls._base_model_url + \"cfg/coco.data\", directory='./') download_file(cls._base_model_url + \"data/coco.names\", directory='./data/') print(files)", "print(model_filename) # image_path = \"./mmdnn/conversion/examples/data/dog.jpg\" # model_path = \"./\" # d = darknet_extractor()", "architecture_map = { 'yolov3' : { 'config' : _base_model_url + \"cfg/yolov3.cfg\", 'weights' :", "print_function import os from mmdnn.conversion.examples.darknet import darknet as cdarknet from mmdnn.conversion.examples.imagenet_test import TestKit", "= \"https://raw.githubusercontent.com/pjreddie/darknet/master/\" architecture_map = { 'yolov3' : { 'config' : _base_model_url + \"cfg/yolov3.cfg\",", "} @classmethod def download(cls, architecture, path = './'): if cls.sanity_check(architecture): cfg_name = architecture", "path = './'): if cls.sanity_check(architecture): cfg_name = architecture + \".cfg\" architecture_file = download_file(cls.architecture_map[architecture]['config'],", "files, model_path, image_path): import numpy as np if cls.sanity_check(architecture): download_file(cls._base_model_url + \"cfg/coco.data\", directory='./')", "return None @classmethod def inference(cls, architecture, files, model_path, image_path): import numpy as np", "license information. #---------------------------------------------------------------------------------------------- from __future__ import absolute_import from __future__ import print_function import os", "model_path = \"./\" # d = darknet_extractor() # result = d.inference('yolov3', model_filename, model_path,", "cfg_name = architecture + \".cfg\" architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name) if not architecture_file:", "print(r) return r else: return None # d = darknet_extractor() # model_filename =", "print(\"Darknet Model {} saved as [{}] and [{}].\".format(architecture, architecture_file, weight_file)) return (architecture_file, weight_file)", "# Licensed under the MIT License. See License.txt in the project root for", "project root for license information. #---------------------------------------------------------------------------------------------- from __future__ import absolute_import from __future__ import", "class darknet_extractor(base_extractor): _base_model_url = \"https://raw.githubusercontent.com/pjreddie/darknet/master/\" architecture_map = { 'yolov3' : { 'config' :", "None print(\"Darknet Model {} saved as [{}] and [{}].\".format(architecture, architecture_file, weight_file)) return (architecture_file,", "'yolov2' :{ 'config' : _base_model_url + \"cfg/yolov2.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov2.weights\" } } @classmethod", "print(files) net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0) meta = cdarknet.load_meta(\"coco.data\".encode()) r = cdarknet.detect(net, meta,", "base_extractor from mmdnn.conversion.common.utils import download_file class darknet_extractor(base_extractor): _base_model_url = \"https://raw.githubusercontent.com/pjreddie/darknet/master/\" architecture_map = {", "the project root for license information. 
#---------------------------------------------------------------------------------------------- from __future__ import absolute_import from __future__", "# d = darknet_extractor() # model_filename = d.download('yolov3') # print(model_filename) # image_path =", "= cdarknet.load_meta(\"coco.data\".encode()) r = cdarknet.detect(net, meta, image_path.encode()) # print(r) return r else: return", "\"./mmdnn/conversion/examples/data/dog.jpg\" # model_path = \"./\" # d = darknet_extractor() # result = d.inference('yolov3',", "= download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name) if not architecture_file: return None weight_name = architecture +", "meta, image_path.encode()) # print(r) return r else: return None # d = darknet_extractor()", "mmdnn.conversion.examples.extractor import base_extractor from mmdnn.conversion.common.utils import download_file class darknet_extractor(base_extractor): _base_model_url = \"https://raw.githubusercontent.com/pjreddie/darknet/master/\" architecture_map", "return (architecture_file, weight_file) else: return None @classmethod def inference(cls, architecture, files, model_path, image_path):", "under the MIT License. See License.txt in the project root for license information.", "reserved. # Licensed under the MIT License. See License.txt in the project root", "weight_name = architecture + \".weights\" weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name) if not weight_file:", "= \"./\" # d = darknet_extractor() # result = d.inference('yolov3', model_filename, model_path, image_path", "@classmethod def inference(cls, architecture, files, model_path, image_path): import numpy as np if cls.sanity_check(architecture):", "from mmdnn.conversion.examples.imagenet_test import TestKit from mmdnn.conversion.examples.extractor import base_extractor from mmdnn.conversion.common.utils import download_file class", "weight_file)) return (architecture_file, weight_file) else: return None @classmethod def inference(cls, architecture, files, model_path,", "if not weight_file: return None print(\"Darknet Model {} saved as [{}] and [{}].\".format(architecture,", "download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name) if not weight_file: return None print(\"Darknet Model {} saved as", "np if cls.sanity_check(architecture): download_file(cls._base_model_url + \"cfg/coco.data\", directory='./') download_file(cls._base_model_url + \"data/coco.names\", directory='./data/') print(files) net", "darknet_extractor() # model_filename = d.download('yolov3') # print(model_filename) # image_path = \"./mmdnn/conversion/examples/data/dog.jpg\" # model_path", "'weights' : \"https://pjreddie.com/media/files/yolov2.weights\" } } @classmethod def download(cls, architecture, path = './'): if", "not architecture_file: return None weight_name = architecture + \".weights\" weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path,", "architecture + \".cfg\" architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name) if not architecture_file: return None", ": _base_model_url + \"cfg/yolov2.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov2.weights\" } } @classmethod def download(cls, architecture,", "download(cls, architecture, path = './'): if cls.sanity_check(architecture): cfg_name = architecture + \".cfg\" architecture_file", "local_fname=weight_name) 
if not weight_file: return None print(\"Darknet Model {} saved as [{}] and", "files[1].encode(), 0) meta = cdarknet.load_meta(\"coco.data\".encode()) r = cdarknet.detect(net, meta, image_path.encode()) # print(r) return", "'yolov3' : { 'config' : _base_model_url + \"cfg/yolov3.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov3.weights\" }, 'yolov2'", "weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name) if not weight_file: return None print(\"Darknet Model {}", "+ \"cfg/yolov3.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov3.weights\" }, 'yolov2' :{ 'config' : _base_model_url + \"cfg/yolov2.cfg\",", "(architecture_file, weight_file) else: return None @classmethod def inference(cls, architecture, files, model_path, image_path): import", "import os from mmdnn.conversion.examples.darknet import darknet as cdarknet from mmdnn.conversion.examples.imagenet_test import TestKit from", "d.download('yolov3') # print(model_filename) # image_path = \"./mmdnn/conversion/examples/data/dog.jpg\" # model_path = \"./\" # d", "\"https://pjreddie.com/media/files/yolov3.weights\" }, 'yolov2' :{ 'config' : _base_model_url + \"cfg/yolov2.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov2.weights\" }", "cls.sanity_check(architecture): download_file(cls._base_model_url + \"cfg/coco.data\", directory='./') download_file(cls._base_model_url + \"data/coco.names\", directory='./data/') print(files) net = cdarknet.load_net(files[0].encode(),", "return r else: return None # d = darknet_extractor() # model_filename = d.download('yolov3')", "return None weight_name = architecture + \".weights\" weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name) if", "r = cdarknet.detect(net, meta, image_path.encode()) # print(r) return r else: return None #", "download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name) if not architecture_file: return None weight_name = architecture + \".weights\"", "architecture_file, weight_file)) return (architecture_file, weight_file) else: return None @classmethod def inference(cls, architecture, files,", "\"cfg/yolov2.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov2.weights\" } } @classmethod def download(cls, architecture, path = './'):", "\"https://pjreddie.com/media/files/yolov2.weights\" } } @classmethod def download(cls, architecture, path = './'): if cls.sanity_check(architecture): cfg_name", "d = darknet_extractor() # result = d.inference('yolov3', model_filename, model_path, image_path = image_path) #", "directory=path, local_fname=weight_name) if not weight_file: return None print(\"Darknet Model {} saved as [{}]", "+ \"data/coco.names\", directory='./data/') print(files) net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0) meta = cdarknet.load_meta(\"coco.data\".encode()) r", "else: return None # d = darknet_extractor() # model_filename = d.download('yolov3') # print(model_filename)", "[{}].\".format(architecture, architecture_file, weight_file)) return (architecture_file, weight_file) else: return None @classmethod def inference(cls, architecture,", "cdarknet.detect(net, meta, image_path.encode()) # print(r) return r else: return None # d =", "the MIT License. See License.txt in the project root for license information. 
#----------------------------------------------------------------------------------------------", "0) meta = cdarknet.load_meta(\"coco.data\".encode()) r = cdarknet.detect(net, meta, image_path.encode()) # print(r) return r", ": \"https://pjreddie.com/media/files/yolov3.weights\" }, 'yolov2' :{ 'config' : _base_model_url + \"cfg/yolov2.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov2.weights\"", "as cdarknet from mmdnn.conversion.examples.imagenet_test import TestKit from mmdnn.conversion.examples.extractor import base_extractor from mmdnn.conversion.common.utils import", "+ \"cfg/yolov2.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov2.weights\" } } @classmethod def download(cls, architecture, path =", "= d.download('yolov3') # print(model_filename) # image_path = \"./mmdnn/conversion/examples/data/dog.jpg\" # model_path = \"./\" #", "cdarknet from mmdnn.conversion.examples.imagenet_test import TestKit from mmdnn.conversion.examples.extractor import base_extractor from mmdnn.conversion.common.utils import download_file", "\".cfg\" architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name) if not architecture_file: return None weight_name =", "= cdarknet.load_net(files[0].encode(), files[1].encode(), 0) meta = cdarknet.load_meta(\"coco.data\".encode()) r = cdarknet.detect(net, meta, image_path.encode()) #", "\".weights\" weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name) if not weight_file: return None print(\"Darknet Model", "directory=path, local_fname=cfg_name) if not architecture_file: return None weight_name = architecture + \".weights\" weight_file", "} } @classmethod def download(cls, architecture, path = './'): if cls.sanity_check(architecture): cfg_name =", "\"cfg/coco.data\", directory='./') download_file(cls._base_model_url + \"data/coco.names\", directory='./data/') print(files) net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0) meta", "download_file(cls._base_model_url + \"data/coco.names\", directory='./data/') print(files) net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0) meta = cdarknet.load_meta(\"coco.data\".encode())", "Licensed under the MIT License. 
See License.txt in the project root for license", "os from mmdnn.conversion.examples.darknet import darknet as cdarknet from mmdnn.conversion.examples.imagenet_test import TestKit from mmdnn.conversion.examples.extractor", "= architecture + \".weights\" weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name) if not weight_file: return", "{ 'config' : _base_model_url + \"cfg/yolov3.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov3.weights\" }, 'yolov2' :{ 'config'", "architecture, path = './'): if cls.sanity_check(architecture): cfg_name = architecture + \".cfg\" architecture_file =", "weight_file) else: return None @classmethod def inference(cls, architecture, files, model_path, image_path): import numpy", "inference(cls, architecture, files, model_path, image_path): import numpy as np if cls.sanity_check(architecture): download_file(cls._base_model_url +", "+ \"cfg/coco.data\", directory='./') download_file(cls._base_model_url + \"data/coco.names\", directory='./data/') print(files) net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0)", "= architecture + \".cfg\" architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name) if not architecture_file: return", "if cls.sanity_check(architecture): download_file(cls._base_model_url + \"cfg/coco.data\", directory='./') download_file(cls._base_model_url + \"data/coco.names\", directory='./data/') print(files) net =", "architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name) if not architecture_file: return None weight_name = architecture", "absolute_import from __future__ import print_function import os from mmdnn.conversion.examples.darknet import darknet as cdarknet", "from __future__ import print_function import os from mmdnn.conversion.examples.darknet import darknet as cdarknet from", "directory='./') download_file(cls._base_model_url + \"data/coco.names\", directory='./data/') print(files) net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0) meta =", "= cdarknet.detect(net, meta, image_path.encode()) # print(r) return r else: return None # d", "Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in", "TestKit from mmdnn.conversion.examples.extractor import base_extractor from mmdnn.conversion.common.utils import download_file class darknet_extractor(base_extractor): _base_model_url =", "root for license information. 
#---------------------------------------------------------------------------------------------- from __future__ import absolute_import from __future__ import print_function", ": { 'config' : _base_model_url + \"cfg/yolov3.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov3.weights\" }, 'yolov2' :{", "# print(r) return r else: return None # d = darknet_extractor() # model_filename", "'weights' : \"https://pjreddie.com/media/files/yolov3.weights\" }, 'yolov2' :{ 'config' : _base_model_url + \"cfg/yolov2.cfg\", 'weights' :", "# d = darknet_extractor() # result = d.inference('yolov3', model_filename, model_path, image_path = image_path)", "from mmdnn.conversion.examples.darknet import darknet as cdarknet from mmdnn.conversion.examples.imagenet_test import TestKit from mmdnn.conversion.examples.extractor import", "return None # d = darknet_extractor() # model_filename = d.download('yolov3') # print(model_filename) #", "None @classmethod def inference(cls, architecture, files, model_path, image_path): import numpy as np if", "import numpy as np if cls.sanity_check(architecture): download_file(cls._base_model_url + \"cfg/coco.data\", directory='./') download_file(cls._base_model_url + \"data/coco.names\",", "directory='./data/') print(files) net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0) meta = cdarknet.load_meta(\"coco.data\".encode()) r = cdarknet.detect(net,", "information. #---------------------------------------------------------------------------------------------- from __future__ import absolute_import from __future__ import print_function import os from", "not weight_file: return None print(\"Darknet Model {} saved as [{}] and [{}].\".format(architecture, architecture_file,", "= darknet_extractor() # model_filename = d.download('yolov3') # print(model_filename) # image_path = \"./mmdnn/conversion/examples/data/dog.jpg\" #", "rights reserved. # Licensed under the MIT License. See License.txt in the project", "None # d = darknet_extractor() # model_filename = d.download('yolov3') # print(model_filename) # image_path", "net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0) meta = cdarknet.load_meta(\"coco.data\".encode()) r = cdarknet.detect(net, meta, image_path.encode())", "as [{}] and [{}].\".format(architecture, architecture_file, weight_file)) return (architecture_file, weight_file) else: return None @classmethod", "numpy as np if cls.sanity_check(architecture): download_file(cls._base_model_url + \"cfg/coco.data\", directory='./') download_file(cls._base_model_url + \"data/coco.names\", directory='./data/')", "= './'): if cls.sanity_check(architecture): cfg_name = architecture + \".cfg\" architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path,", "cdarknet.load_net(files[0].encode(), files[1].encode(), 0) meta = cdarknet.load_meta(\"coco.data\".encode()) r = cdarknet.detect(net, meta, image_path.encode()) # print(r)", "#---------------------------------------------------------------------------------------------- from __future__ import absolute_import from __future__ import print_function import os from mmdnn.conversion.examples.darknet", "_base_model_url + \"cfg/yolov2.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov2.weights\" } } @classmethod def download(cls, architecture, path", "# model_filename = d.download('yolov3') # print(model_filename) # image_path = \"./mmdnn/conversion/examples/data/dog.jpg\" # model_path =", "See License.txt in the project root for license information. 
#---------------------------------------------------------------------------------------------- from __future__ import", "= download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name) if not weight_file: return None print(\"Darknet Model {} saved", "[{}] and [{}].\".format(architecture, architecture_file, weight_file)) return (architecture_file, weight_file) else: return None @classmethod def", "d = darknet_extractor() # model_filename = d.download('yolov3') # print(model_filename) # image_path = \"./mmdnn/conversion/examples/data/dog.jpg\"", "if not architecture_file: return None weight_name = architecture + \".weights\" weight_file = download_file(cls.architecture_map[architecture]['weights'],", "'config' : _base_model_url + \"cfg/yolov3.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov3.weights\" }, 'yolov2' :{ 'config' :", "download_file(cls._base_model_url + \"cfg/coco.data\", directory='./') download_file(cls._base_model_url + \"data/coco.names\", directory='./data/') print(files) net = cdarknet.load_net(files[0].encode(), files[1].encode(),", "image_path): import numpy as np if cls.sanity_check(architecture): download_file(cls._base_model_url + \"cfg/coco.data\", directory='./') download_file(cls._base_model_url +", "model_path, image_path): import numpy as np if cls.sanity_check(architecture): download_file(cls._base_model_url + \"cfg/coco.data\", directory='./') download_file(cls._base_model_url", "import base_extractor from mmdnn.conversion.common.utils import download_file class darknet_extractor(base_extractor): _base_model_url = \"https://raw.githubusercontent.com/pjreddie/darknet/master/\" architecture_map =", "import download_file class darknet_extractor(base_extractor): _base_model_url = \"https://raw.githubusercontent.com/pjreddie/darknet/master/\" architecture_map = { 'yolov3' : {", "cdarknet.load_meta(\"coco.data\".encode()) r = cdarknet.detect(net, meta, image_path.encode()) # print(r) return r else: return None", "All rights reserved. # Licensed under the MIT License. See License.txt in the", "+ \".cfg\" architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name) if not architecture_file: return None weight_name", "weight_file: return None print(\"Darknet Model {} saved as [{}] and [{}].\".format(architecture, architecture_file, weight_file))", "License.txt in the project root for license information. #---------------------------------------------------------------------------------------------- from __future__ import absolute_import", ": \"https://pjreddie.com/media/files/yolov2.weights\" } } @classmethod def download(cls, architecture, path = './'): if cls.sanity_check(architecture):", "(c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See", "# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT", "if cls.sanity_check(architecture): cfg_name = architecture + \".cfg\" architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name) if", "image_path = \"./mmdnn/conversion/examples/data/dog.jpg\" # model_path = \"./\" # d = darknet_extractor() # result", "Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt", "__future__ import print_function import os from mmdnn.conversion.examples.darknet import darknet as cdarknet from mmdnn.conversion.examples.imagenet_test", "in the project root for license information. 
#---------------------------------------------------------------------------------------------- from __future__ import absolute_import from", "__future__ import absolute_import from __future__ import print_function import os from mmdnn.conversion.examples.darknet import darknet", "# image_path = \"./mmdnn/conversion/examples/data/dog.jpg\" # model_path = \"./\" # d = darknet_extractor() #", ":{ 'config' : _base_model_url + \"cfg/yolov2.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov2.weights\" } } @classmethod def", "model_filename = d.download('yolov3') # print(model_filename) # image_path = \"./mmdnn/conversion/examples/data/dog.jpg\" # model_path = \"./\"", "#---------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the", "None weight_name = architecture + \".weights\" weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name) if not", "_base_model_url + \"cfg/yolov3.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov3.weights\" }, 'yolov2' :{ 'config' : _base_model_url +", "Model {} saved as [{}] and [{}].\".format(architecture, architecture_file, weight_file)) return (architecture_file, weight_file) else:", "= { 'yolov3' : { 'config' : _base_model_url + \"cfg/yolov3.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov3.weights\"", "License. See License.txt in the project root for license information. #---------------------------------------------------------------------------------------------- from __future__", "architecture_file: return None weight_name = architecture + \".weights\" weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name)", "\"https://raw.githubusercontent.com/pjreddie/darknet/master/\" architecture_map = { 'yolov3' : { 'config' : _base_model_url + \"cfg/yolov3.cfg\", 'weights'", "mmdnn.conversion.examples.imagenet_test import TestKit from mmdnn.conversion.examples.extractor import base_extractor from mmdnn.conversion.common.utils import download_file class darknet_extractor(base_extractor):", "cls.sanity_check(architecture): cfg_name = architecture + \".cfg\" architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name) if not", "meta = cdarknet.load_meta(\"coco.data\".encode()) r = cdarknet.detect(net, meta, image_path.encode()) # print(r) return r else:", "architecture, files, model_path, image_path): import numpy as np if cls.sanity_check(architecture): download_file(cls._base_model_url + \"cfg/coco.data\",", "image_path.encode()) # print(r) return r else: return None # d = darknet_extractor() #", "\"./\" # d = darknet_extractor() # result = d.inference('yolov3', model_filename, model_path, image_path =", "download_file class darknet_extractor(base_extractor): _base_model_url = \"https://raw.githubusercontent.com/pjreddie/darknet/master/\" architecture_map = { 'yolov3' : { 'config'", ": _base_model_url + \"cfg/yolov3.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov3.weights\" }, 'yolov2' :{ 'config' : _base_model_url", "# print(model_filename) # image_path = \"./mmdnn/conversion/examples/data/dog.jpg\" # model_path = \"./\" # d =", "from mmdnn.conversion.examples.extractor import base_extractor from mmdnn.conversion.common.utils import download_file class darknet_extractor(base_extractor): _base_model_url = 
\"https://raw.githubusercontent.com/pjreddie/darknet/master/\"", "'./'): if cls.sanity_check(architecture): cfg_name = architecture + \".cfg\" architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name)", "darknet_extractor(base_extractor): _base_model_url = \"https://raw.githubusercontent.com/pjreddie/darknet/master/\" architecture_map = { 'yolov3' : { 'config' : _base_model_url", "mmdnn.conversion.examples.darknet import darknet as cdarknet from mmdnn.conversion.examples.imagenet_test import TestKit from mmdnn.conversion.examples.extractor import base_extractor", "import darknet as cdarknet from mmdnn.conversion.examples.imagenet_test import TestKit from mmdnn.conversion.examples.extractor import base_extractor from", "+ \".weights\" weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name) if not weight_file: return None print(\"Darknet", "= darknet_extractor() # result = d.inference('yolov3', model_filename, model_path, image_path = image_path) # print(result)", "import TestKit from mmdnn.conversion.examples.extractor import base_extractor from mmdnn.conversion.common.utils import download_file class darknet_extractor(base_extractor): _base_model_url", "@classmethod def download(cls, architecture, path = './'): if cls.sanity_check(architecture): cfg_name = architecture +", "def inference(cls, architecture, files, model_path, image_path): import numpy as np if cls.sanity_check(architecture): download_file(cls._base_model_url", "return None print(\"Darknet Model {} saved as [{}] and [{}].\".format(architecture, architecture_file, weight_file)) return", "MIT License. See License.txt in the project root for license information. #---------------------------------------------------------------------------------------------- from", "r else: return None # d = darknet_extractor() # model_filename = d.download('yolov3') #", "'config' : _base_model_url + \"cfg/yolov2.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov2.weights\" } } @classmethod def download(cls,", "}, 'yolov2' :{ 'config' : _base_model_url + \"cfg/yolov2.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov2.weights\" } }", "local_fname=cfg_name) if not architecture_file: return None weight_name = architecture + \".weights\" weight_file =", "Copyright (c) Microsoft Corporation. All rights reserved. 
# Licensed under the MIT License.", "import absolute_import from __future__ import print_function import os from mmdnn.conversion.examples.darknet import darknet as", "from __future__ import absolute_import from __future__ import print_function import os from mmdnn.conversion.examples.darknet import", "\"cfg/yolov3.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov3.weights\" }, 'yolov2' :{ 'config' : _base_model_url + \"cfg/yolov2.cfg\", 'weights'", "saved as [{}] and [{}].\".format(architecture, architecture_file, weight_file)) return (architecture_file, weight_file) else: return None", "= \"./mmdnn/conversion/examples/data/dog.jpg\" # model_path = \"./\" # d = darknet_extractor() # result =", "_base_model_url = \"https://raw.githubusercontent.com/pjreddie/darknet/master/\" architecture_map = { 'yolov3' : { 'config' : _base_model_url +", "else: return None @classmethod def inference(cls, architecture, files, model_path, image_path): import numpy as", "darknet as cdarknet from mmdnn.conversion.examples.imagenet_test import TestKit from mmdnn.conversion.examples.extractor import base_extractor from mmdnn.conversion.common.utils", "mmdnn.conversion.common.utils import download_file class darknet_extractor(base_extractor): _base_model_url = \"https://raw.githubusercontent.com/pjreddie/darknet/master/\" architecture_map = { 'yolov3' :", "{} saved as [{}] and [{}].\".format(architecture, architecture_file, weight_file)) return (architecture_file, weight_file) else: return", "and [{}].\".format(architecture, architecture_file, weight_file)) return (architecture_file, weight_file) else: return None @classmethod def inference(cls,", "{ 'yolov3' : { 'config' : _base_model_url + \"cfg/yolov3.cfg\", 'weights' : \"https://pjreddie.com/media/files/yolov3.weights\" },", "def download(cls, architecture, path = './'): if cls.sanity_check(architecture): cfg_name = architecture + \".cfg\"", "architecture + \".weights\" weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name) if not weight_file: return None", "from mmdnn.conversion.common.utils import download_file class darknet_extractor(base_extractor): _base_model_url = \"https://raw.githubusercontent.com/pjreddie/darknet/master/\" architecture_map = { 'yolov3'", "for license information. #---------------------------------------------------------------------------------------------- from __future__ import absolute_import from __future__ import print_function import", "import print_function import os from mmdnn.conversion.examples.darknet import darknet as cdarknet from mmdnn.conversion.examples.imagenet_test import" ]
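# sanity_check() comes from the base_extractor parent class and is not shown in this fragment.
# In extractors of this shape it usually just validates the requested architecture key against
# architecture_map; the standalone sketch below is only an assumed illustration, not MMdnn's
# actual implementation.
def sanity_check(architecture_map, architecture):
    """Accept only architecture names that have a config/weights entry."""
    if architecture in architecture_map:
        return True
    print("Unknown architecture [{}]. Supported: {}".format(architecture, list(architecture_map.keys())))
    return False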
[ "63, 99],[93, 25, 16, 42, 55, 61, 69, 68, 95, 28, 40, 90,", "-66, -30],34,), ([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,", "90, 1, 86, 76, 40, 13, 47, 71, 4, 64, 54, 84, 45],16,),", "-32, -30, -24, -12, -12, -8, -2, 4, 8, 16, 20, 24, 24,", "% n != 0 : return - 1 x = s // n", "1 ] = 0 if a [ i ] == x : continue", ": a [ i ] += b [ i ] b [ i", "a [ i ] + b [ i + 1 ] == x", "file in the root directory of this source tree. # def f_gold (", "1 ] if y == x : a [ i ] = y", "i ] += b [ i + 1 ] b [ i +", "50, 54, 60, 64, 74, 80, 88, 90, 92, 92],22,), ([0, 1, 1,", "86, 88, 90],[-96, -94, -80, -74, -64, -56, -52, -32, -30, -24, -12,", "49, 49, 54, 55, 57, 65, 66, 67, 67, 68, 83, 85, 89,", "72, -22, -2, 8, -94, 92, -44, -66, -30],34,), ([0, 0, 0, 0,", "0, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0,", "-80, -68, -58, -26, 50, -78, -90, -48, -28, 48, 56, 50, 72,", "1 ] b [ i - 1 ] = 0 if a [", "-38, -30, -30, -26, -14, -12, -10, -6, -6, 6, 22, 22, 22,", "-26, 50, -78, -90, -48, -28, 48, 56, 50, 72, -22, -2, 8,", "80, 82, 86, 90, 92, 92, 95],[3, 15, 16, 16, 18, 26, 30,", "4, 8, 16, 20, 24, 24, 24, 48, 50, 54, 60, 64, 74,", "92, 95],[3, 15, 16, 16, 18, 26, 30, 32, 32, 35, 37, 41,", "-64, -64, -64, -64, -62, -54, -48, -44, -44, -38, -30, -30, -26,", ": if a [ i ] > x : return - 1 if", "x = s // n for i in range ( 0 , n", ": b [ i + 1 ] = 0 continue if a [", "= a [ i ] + b [ i ] if i +", "54, 84, 45],16,), ([-80, -64, -64, -64, -64, -62, -54, -48, -44, -44,", "and a [ i ] + b [ i + 1 ] ==", "6, 74, 64, -78, 86, -42, -56, 2, -34, -46, 70, -62, 50,", "61, 69, 68, 95, 28, 40, 90, 1, 86, 76, 40, 13, 47,", "6, 6, -62, 46, 34, 2],[-62, -84, 72, 60, 10, -18, -44, -22,", "0, 1, 1, 0, 0, 1],[1, 1, 1, 0, 1, 1, 0, 0,", "-58, -68, 42, 0, 98, -70, -14, -32, 6, 74, 64, -78, 86,", ") : if b [ i ] != 0 : return - 1", "1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0,", "0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0,", "95, 28, 40, 90, 1, 86, 76, 40, 13, 47, 71, 4, 64,", "-18, -44, -22, 14, 0, 76, 72, 96, -28, -24, 52, -74, -30,", "0, 0, 1, 1, 0, 0, 1],[1, 1, 1, 0, 1, 1, 0,", "0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1],20,),", "-34, -46, 70, -62, 50, -58, -58, 42, 86, 96, -8, 8, -22,", "== 1 : return a [ 0 ] + b [ 0 ]", "0 if a [ i ] == x : continue y = a", "42, 86, 96, -8, 8, -22, -14, -14, 98, 2, 98, -28],[-26, 36,", "1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1,", "-70, -80, -68, -58, -26, 50, -78, -90, -48, -28, 48, 56, 50,", "1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,", "0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,", "84, 45],16,), ([-80, -64, -64, -64, -64, -62, -54, -48, -44, -44, -38,", "1, 1, 1, 1, 1, 1, 1],13,), ([98, 18, 50, 36, 88, 75,", "0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,", "43, 44, 46, 53, 53, 56, 56, 58, 60, 62, 70, 80, 80,", "[ i ] > x : return - 1 if i > 0", "-62, 30, -4, 82, 16, 32, -6, 58, 82, -66, -40, 52, -78,", "88, 75, 2, 40, 74, 19, 63, 82, 77, 5, 59, 97, 70,", "range ( 0 , n ) : if b [ i ] !=", "[ ([4, 9, 16, 18, 20, 23, 24, 25, 25, 26, 29, 30,", "56, 56, 58, 60, 62, 70, 80, 80, 80, 82, 86, 90, 92,", "1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0,", "0, 0, 0, 0, 0, 1, 1],20,), ([59, 61, 64],[22, 59, 85],1,), ([98,", "licensed under the license found in the # LICENSE file in the root", "58, 60, 62, 70, 80, 80, 80, 82, 86, 90, 92, 92, 95],[3,", "87, 36, 73, 37, 80, 34, 57, 17, 88, 52],9,) ] n_success =", "98, -70, -14, -32, 6, 74, 64, -78, 86, -42, -56, 2, -34,", "0 , n ) : if b [ i ] != 0 :", "i ] += b [ i ] b [ i ] = 0", "47, 71, 4, 64, 54, 84, 45],16,), ([-80, -64, -64, -64, -64, -62,", "48, 56, 50, 72, -22, -2, 8, -94, 92, -44, -66, -30],34,), 
([0,", "-64, -62, -54, -48, -44, -44, -38, -30, -30, -26, -14, -12, -10,", "[ i ] b [ i ] = 0 continue if i +", "44, 46, 53, 53, 56, 56, 58, 60, 62, 70, 80, 80, 80,", "the # LICENSE file in the root directory of this source tree. #", "-48, -28, 48, 56, 50, 72, -22, -2, 8, -94, 92, -44, -66,", "a [ 0 ] + b [ 0 ] if s % n", "80, 34, 57, 17, 88, 52],9,) ] n_success = 0 for i, parameters_set", "] += b [ i - 1 ] b [ i - 1", "n for i in range ( 0 , n ) : if a", "b [ i + 1 ] if y == x : a [", "92, 28, 42, -74, -36, 40, -8, 32, -22, -70, -22, -56, 74,", ": a [ i ] += b [ i - 1 ] b", "continue return - 1 for i in range ( 0 , n )", "n ) : if a [ i ] > x : return -", "93, 96, 97, 99],29,), ([-24, 70, -74, -90, 72, 50, -94, 86, -58,", "24, 24, 48, 50, 54, 60, 64, 74, 80, 88, 90, 92, 92],22,),", ": s += a [ i ] + b [ i ] if", "1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,", "37, 80, 34, 57, 17, 88, 52],9,) ] n_success = 0 for i,", "94, -70, -80, -68, -58, -26, 50, -78, -90, -48, -28, 48, 56,", "42, 55, 61, 69, 68, 95, 28, 40, 90, 1, 86, 76, 40,", "0 if i + 1 < n : b [ i + 1", "0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1,", "[ i ] if i + 1 < n : y += b", "i ] b [ i ] = 0 continue if i + 1", "< n : y += b [ i + 1 ] if y", "36, 88, 75, 2, 40, 74, 19, 63, 82, 77, 5, 59, 97,", "if i + 1 < n and a [ i ] + b", "-94, 86, -58, -68, 42, 0, 98, -70, -14, -32, 6, 74, 64,", "64],[22, 59, 85],1,), ([98, 92, 28, 42, -74, -36, 40, -8, 32, -22,", "26, 29, 30, 35, 40, 41, 43, 44, 46, 53, 53, 56, 56,", "in the # LICENSE file in the root directory of this source tree.", ", b , n ) : s = 0 for i in range", "+= b [ i + 1 ] b [ i + 1 ]", "is licensed under the license found in the # LICENSE file in the", "] == x : a [ i ] += b [ i ]", "-2, 4, 8, 16, 20, 24, 24, 24, 48, 50, 54, 60, 64,", "1, 0, 0, 0, 0, 0, 1, 1],20,), ([59, 61, 64],[22, 59, 85],1,),", "8, -22, -14, -14, 98, 2, 98, -28],[-26, 36, 48, 48, -38, -86,", "-30, -26, -14, -12, -10, -6, -6, 6, 22, 22, 22, 26, 28,", "n ) : s += a [ i ] + b [ i", "0 continue return - 1 for i in range ( 0 , n", "-64, -64, -64, -62, -54, -48, -44, -44, -38, -30, -30, -26, -14,", "y += b [ i + 1 ] if y == x :", "-80, -74, -64, -56, -52, -32, -30, -24, -12, -12, -8, -2, 4,", "68, 83, 85, 89, 89, 90, 91, 93, 96, 97, 99],29,), ([-24, 70,", "62, 70, 80, 80, 80, 82, 86, 90, 92, 92, 95],[3, 15, 16,", "] == x : continue y = a [ i ] + b", "-14, -14, 98, 2, 98, -28],[-26, 36, 48, 48, -38, -86, 90, -62,", "54, 60, 64, 74, 80, 88, 90, 92, 92],22,), ([0, 1, 1, 0,", "82, -66, -40, 52, -78, 94, -70, -80, -68, -58, -26, 50, -78,", "-2, 8, -94, 92, -44, -66, -30],34,), ([0, 0, 0, 0, 0, 0,", "-22, 14, 0, 76, 72, 96, -28, -24, 52, -74, -30, 16, 66],18,),", "1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0,", "if a [ i ] + b [ i ] == x :", "continue if a [ i ] + b [ i ] == x", "1, 1, 0, 0, 0, 0, 0, 1, 1],20,), ([59, 61, 64],[22, 59,", "1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0,", "+ 1 ] b [ i + 1 ] = 0 continue return", "-78, -90, -48, -28, 48, 56, 50, 72, -22, -2, 8, -94, 92,", "if a [ i ] > x : return - 1 if i", "a [ i ] + b [ i ] == x : a", "y == x : a [ i ] = y b [ i", "56, 58, 60, 62, 70, 80, 80, 80, 82, 86, 90, 92, 92,", "50, 72, -22, -2, 8, -94, 92, -44, -66, -30],34,), ([0, 0, 0,", "85, 89, 89, 90, 91, 93, 96, 97, 99],29,), ([-24, 70, -74, -90,", "1, 1, 1, 1, 1, 1, 1, 1, 1],34,), ([72, 97, 79, 21,", "+= a [ i ] + b [ i ] if n ==", "0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0,", "a [ i ] > x : return - 1 if i >", "b [ i ] if n == 1 : return a [ 0", "1 < n and a [ i ] + b [ i +", "8, -94, 92, -44, -66, -30],34,), ([0, 0, 0, 0, 
0, 0, 0,", "0, 1, 1],20,), ([59, 61, 64],[22, 59, 85],1,), ([98, 92, 28, 42, -74,", "+ b [ i ] if n == 1 : return a [", "[ i + 1 ] = 0 continue return - 1 for i", "the license found in the # LICENSE file in the root directory of", "-68, 42, 0, 98, -70, -14, -32, 6, 74, 64, -78, 86, -42,", "34, 57, 17, 88, 52],9,) ] n_success = 0 for i, parameters_set in", "32, -22, -70, -22, -56, 74, 6, 6, -62, 46, 34, 2],[-62, -84,", "in range ( 0 , n ) : if a [ i ]", "n == 1 : return a [ 0 ] + b [ 0", "18, 20, 23, 24, 25, 25, 26, 29, 30, 35, 40, 41, 43,", "+= b [ i - 1 ] b [ i - 1 ]", "1 < n : b [ i + 1 ] = 0 continue", "1],[1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1,", "25, 16, 42, 55, 61, 69, 68, 95, 28, 40, 90, 1, 86,", "0 for i in range ( 0 , n ) : s +=", "1, 1, 1],13,), ([98, 18, 50, 36, 88, 75, 2, 40, 74, 19,", "-22, -14, -14, 98, 2, 98, -28],[-26, 36, 48, 48, -38, -86, 90,", "-12, -8, -2, 4, 8, 16, 20, 24, 24, 24, 48, 50, 54,", "0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1,", "range ( 0 , n ) : if a [ i ] >", "1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 1,", ": a [ i ] = y b [ i ] = 0", "[ i ] += b [ i + 1 ] b [ i", "16, 18, 20, 23, 24, 25, 25, 26, 29, 30, 35, 40, 41,", "n : y += b [ i + 1 ] if y ==", "0 , n ) : if a [ i ] > x :", "this source tree. # def f_gold ( a , b , n )", "i ] if i + 1 < n : y += b [", "40, -8, 32, -22, -70, -22, -56, 74, 6, 6, -62, 46, 34,", "directory of this source tree. # def f_gold ( a , b ,", "0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],13,), ([98, 18,", "-62, 46, 34, 2],[-62, -84, 72, 60, 10, -18, -44, -22, 14, 0,", "[ i + 1 ] = 0 continue if a [ i ]", "1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,", "1, 1, 1, 1, 1, 1, 1, 1],34,), ([72, 97, 79, 21, 83,", "([98, 92, 28, 42, -74, -36, 40, -8, 32, -22, -70, -22, -56,", "if y == x : a [ i ] = y b [", "if b [ i ] != 0 : return - 1 return x", ": y += b [ i + 1 ] if y == x", "1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],34,), ([72,", "52, -78, 94, -70, -80, -68, -58, -26, 50, -78, -90, -48, -28,", "79, 21, 83, 2, 31, 59, 6, 11, 79, 97],[27, 71, 87, 36,", "i + 1 ] if y == x : a [ i ]", "under the license found in the # LICENSE file in the root directory", "[ i ] + b [ i + 1 ] == x :", "[ i + 1 ] == x : a [ i ] +=", "-22, -70, -22, -56, 74, 6, 6, -62, 46, 34, 2],[-62, -84, 72,", "< n and a [ i ] + b [ i + 1", "-22, -2, 8, -94, 92, -44, -66, -30],34,), ([0, 0, 0, 0, 0,", "- 1 x = s // n for i in range ( 0", "1, 1, 0, 0, 1],[1, 1, 1, 0, 1, 1, 0, 0, 0,", "20, 23, 24, 25, 25, 26, 29, 30, 35, 40, 41, 43, 44,", "42, 0, 98, -70, -14, -32, 6, 74, 64, -78, 86, -42, -56,", "] if y == x : a [ i ] = y b", "- 1 for i in range ( 0 , n ) : if", "([-24, 70, -74, -90, 72, 50, -94, 86, -58, -68, 42, 0, 98,", "57, 17, 88, 52],9,) ] n_success = 0 for i, parameters_set in enumerate(param):", "26, 30, 32, 32, 35, 37, 41, 42, 43, 48, 49, 49, 54,", "99],29,), ([-24, 70, -74, -90, 72, 50, -94, 86, -58, -68, 42, 0,", "i, parameters_set in enumerate(param): if f_filled(*parameters_set) == f_gold(*parameters_set): n_success+=1 print(\"#Results: %i, %i\" %", "1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0,", "1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1,", "1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1,", "-14, 98, 2, 98, -28],[-26, 36, 48, 48, -38, -86, 90, -62, 30,", "1, 1, 1, 1, 1],34,), ([72, 97, 79, 21, 83, 2, 31, 59,", "52, 70, 86, 86, 88, 90],[-96, -94, -80, -74, -64, -56, -52, -32,", "18, 50, 36, 88, 75, 2, 40, 74, 19, 63, 82, 77, 5,", "b [ i + 1 ] == x : a [ i ]", "Facebook, Inc. # All rights reserved. 
# # This source code is licensed", "i ] = 0 if i + 1 < n : b [", "0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,", "16, 32, -6, 58, 82, -66, -40, 52, -78, 94, -70, -80, -68,", "1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0,", "48, 48, -38, -86, 90, -62, 30, -4, 82, 16, 32, -6, 58,", "i ] + b [ i ] if n == 1 : return", "[ i ] += b [ i ] b [ i ] =", "83, 85, 89, 89, 90, 91, 93, 96, 97, 99],29,), ([-24, 70, -74,", "+ 1 < n : b [ i + 1 ] = 0", "55, 61, 69, 68, 95, 28, 40, 90, 1, 86, 76, 40, 13,", "([-80, -64, -64, -64, -64, -62, -54, -48, -44, -44, -38, -30, -30,", "-42, -56, 2, -34, -46, 70, -62, 50, -58, -58, 42, 86, 96,", "90, -62, 30, -4, 82, 16, 32, -6, 58, 82, -66, -40, 52,", "This source code is licensed under the license found in the # LICENSE", "i ] += b [ i - 1 ] b [ i -", "1, 1, 1, 1, 1],13,), ([98, 18, 50, 36, 88, 75, 2, 40,", "70, -62, 50, -58, -58, 42, 86, 96, -8, 8, -22, -14, -14,", "59, 97, 70, 50, 71, 90, 90, 61, 63, 99],[93, 25, 16, 42,", "1, 1],20,), ([59, 61, 64],[22, 59, 85],1,), ([98, 92, 28, 42, -74, -36,", "1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],34,), ([72, 97,", "i ] + b [ i ] == x : a [ i", "83, 2, 31, 59, 6, 11, 79, 97],[27, 71, 87, 36, 73, 37,", "63, 82, 77, 5, 59, 97, 70, 50, 71, 90, 90, 61, 63,", "x : a [ i ] += b [ i + 1 ]", "y b [ i ] = 0 if i + 1 < n", "-58, -26, 50, -78, -90, -48, -28, 48, 56, 50, 72, -22, -2,", "+ b [ 0 ] if s % n != 0 : return", "# Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This", "+ 1 ] = 0 continue if a [ i ] + b", "a [ i ] + b [ i ] if n == 1", "4, 64, 54, 84, 45],16,), ([-80, -64, -64, -64, -64, -62, -54, -48,", "30, 32, 32, 35, 37, 41, 42, 43, 48, 49, 49, 54, 55,", "90, 61, 63, 99],[93, 25, 16, 42, 55, 61, 69, 68, 95, 28,", "# All rights reserved. # # This source code is licensed under the", "b [ i - 1 ] b [ i - 1 ] =", "if s % n != 0 : return - 1 x = s", "74, 80, 88, 90, 92, 92],22,), ([0, 1, 1, 0, 0, 0, 0,", "s // n for i in range ( 0 , n ) :", "i ] == x : continue y = a [ i ] +", "0 continue if a [ i ] + b [ i ] ==", "48, 50, 54, 60, 64, 74, 80, 88, 90, 92, 92],22,), ([0, 1,", "37, 41, 42, 43, 48, 49, 49, 54, 55, 57, 65, 66, 67,", "parameters_set in enumerate(param): if f_filled(*parameters_set) == f_gold(*parameters_set): n_success+=1 print(\"#Results: %i, %i\" % (n_success,", "1, 1],13,), ([98, 18, 50, 36, 88, 75, 2, 40, 74, 19, 63,", "continue y = a [ i ] + b [ i ] if", "0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,", "] n_success = 0 for i, parameters_set in enumerate(param): if f_filled(*parameters_set) == f_gold(*parameters_set):", "0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,", "1, 1, 1, 0, 0, 1, 1, 0, 0, 1],[1, 1, 1, 0,", "i ] == x : a [ i ] += b [ i", "74, 6, 6, -62, 46, 34, 2],[-62, -84, 72, 60, 10, -18, -44,", "40, 90, 1, 86, 76, 40, 13, 47, 71, 4, 64, 54, 84,", "= y b [ i ] = 0 if i + 1 <", "tree. 
# def f_gold ( a , b , n ) : s", "76, 72, 96, -28, -24, 52, -74, -30, 16, 66],18,), ([0, 0, 0,", "0 ] if s % n != 0 : return - 1 x", "16, 18, 26, 30, 32, 32, 35, 37, 41, 42, 43, 48, 49,", "__name__ == '__main__': param = [ ([4, 9, 16, 18, 20, 23, 24,", "1, 1, 0, 0, 1, 1, 0, 0, 1],[1, 1, 1, 0, 1,", "71, 90, 90, 61, 63, 99],[93, 25, 16, 42, 55, 61, 69, 68,", ") : s += a [ i ] + b [ i ]", "92],22,), ([0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1,", "97, 70, 50, 71, 90, 90, 61, 63, 99],[93, 25, 16, 42, 55,", "b [ i ] = 0 if i + 1 < n :", "] b [ i - 1 ] = 0 if a [ i", "50, -78, -90, -48, -28, 48, 56, 50, 72, -22, -2, 8, -94,", "-30, -30, -26, -14, -12, -10, -6, -6, 6, 22, 22, 22, 26,", "0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0,", "-74, -30, 16, 66],18,), ([0, 0, 0, 0, 0, 0, 0, 0, 0,", "rights reserved. # # This source code is licensed under the license found", "65, 66, 67, 67, 68, 83, 85, 89, 89, 90, 91, 93, 96,", "88, 90, 92, 92],22,), ([0, 1, 1, 0, 0, 0, 0, 1, 1,", "0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1],[1,", "+ b [ i ] == x : a [ i ] +=", "42, -74, -36, 40, -8, 32, -22, -70, -22, -56, 74, 6, 6,", "+ 1 < n : y += b [ i + 1 ]", ": a [ i ] += b [ i + 1 ] b", "73, 37, 80, 34, 57, 17, 88, 52],9,) ] n_success = 0 for", "n != 0 : return - 1 x = s // n for", "56, 50, 72, -22, -2, 8, -94, 92, -44, -66, -30],34,), ([0, 0,", "] = 0 if i + 1 < n : b [ i", "1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,", "] + b [ i + 1 ] == x : a [", "0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1,", "-6, -6, 6, 22, 22, 22, 26, 28, 50, 52, 70, 86, 86,", "48, 49, 49, 54, 55, 57, 65, 66, 67, 67, 68, 83, 85,", "0 : return - 1 return x #TOFILL if __name__ == '__main__': param", "s += a [ i ] + b [ i ] if n", "0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0,", "96, -28, -24, 52, -74, -30, 16, 66],18,), ([0, 0, 0, 0, 0,", "91, 93, 96, 97, 99],29,), ([-24, 70, -74, -90, 72, 50, -94, 86,", "16, 42, 55, 61, 69, 68, 95, 28, 40, 90, 1, 86, 76,", "36, 48, 48, -38, -86, 90, -62, 30, -4, 82, 16, 32, -6,", "-86, 90, -62, 30, -4, 82, 16, 32, -6, 58, 82, -66, -40,", "40, 13, 47, 71, 4, 64, 54, 84, 45],16,), ([-80, -64, -64, -64,", "0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0,", "43, 48, 49, 49, 54, 55, 57, 65, 66, 67, 67, 68, 83,", "50, 52, 70, 86, 86, 88, 90],[-96, -94, -80, -74, -64, -56, -52,", "82, 86, 90, 92, 92, 95],[3, 15, 16, 16, 18, 26, 30, 32,", "continue if i + 1 < n and a [ i ] +", "f_gold ( a , b , n ) : s = 0 for", "32, 35, 37, 41, 42, 43, 48, 49, 49, 54, 55, 57, 65,", "-8, 8, -22, -14, -14, 98, 2, 98, -28],[-26, 36, 48, 48, -38,", "0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0,", "-44, -22, 14, 0, 76, 72, 96, -28, -24, 52, -74, -30, 16,", "18, 26, 30, 32, 32, 35, 37, 41, 42, 43, 48, 49, 49,", "b [ i ] == x : a [ i ] += b", "] + b [ 0 ] if s % n != 0 :", "b [ 0 ] if s % n != 0 : return -", "90, 90, 61, 63, 99],[93, 25, 16, 42, 55, 61, 69, 68, 95,", "0 for i, parameters_set in enumerate(param): if f_filled(*parameters_set) == f_gold(*parameters_set): n_success+=1 print(\"#Results: %i,", "] if s % n != 0 : return - 1 x =", "[ i ] != 0 : return - 1 return x #TOFILL if", "-8, -2, 4, 8, 16, 20, 24, 24, 24, 48, 50, 54, 60,", "i + 1 ] b [ i + 1 ] = 0 continue", "x : return - 1 if i > 0 : a [ i", "66, 67, 67, 68, 83, 85, 89, 89, 90, 91, 93, 96, 97,", "[ i - 1 ] = 0 if a [ i ] ==", "i ] != 0 : return - 1 return x #TOFILL if __name__", "85],1,), ([98, 92, 28, 42, -74, -36, 40, -8, 32, -22, -70, -22,", ", n ) : if a [ i ] > x : return", "74, 64, -78, 86, -42, -56, 2, -34, -46, 70, -62, 50, -58,", "return - 1 x = s // n for i in range (", "b , n ) : s = 0 for i in range (", "#TOFILL 
if __name__ == '__main__': param = [ ([4, 9, 16, 18, 20,", "-56, 74, 6, 6, -62, 46, 34, 2],[-62, -84, 72, 60, 10, -18,", "( 0 , n ) : s += a [ i ] +", "2, 98, -28],[-26, 36, 48, 48, -38, -86, 90, -62, 30, -4, 82,", "return - 1 return x #TOFILL if __name__ == '__main__': param = [", "28, 40, 90, 1, 86, 76, 40, 13, 47, 71, 4, 64, 54,", "] = 0 continue if a [ i ] + b [ i", "0, 0, 1, 1, 1, 1, 1, 1, 1, 1],13,), ([98, 18, 50,", "-74, -36, 40, -8, 32, -22, -70, -22, -56, 74, 6, 6, -62,", "0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,", "0, 0, 0, 1, 1],20,), ([59, 61, 64],[22, 59, 85],1,), ([98, 92, 28,", "-58, -58, 42, 86, 96, -8, 8, -22, -14, -14, 98, 2, 98,", "52, -74, -30, 16, 66],18,), ([0, 0, 0, 0, 0, 0, 0, 0,", "= [ ([4, 9, 16, 18, 20, 23, 24, 25, 25, 26, 29,", "i + 1 < n : y += b [ i + 1", "1 ] == x : a [ i ] += b [ i", "1, 1, 1],34,), ([72, 97, 79, 21, 83, 2, 31, 59, 6, 11,", "69, 68, 95, 28, 40, 90, 1, 86, 76, 40, 13, 47, 71,", ": return - 1 x = s // n for i in range", "0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0,", "-12, -10, -6, -6, 6, 22, 22, 22, 26, 28, 50, 52, 70,", "for i in range ( 0 , n ) : if a [", "1, 1, 1, 1, 1, 1],34,), ([72, 97, 79, 21, 83, 2, 31,", "1 x = s // n for i in range ( 0 ,", "return - 1 for i in range ( 0 , n ) :", "-52, -32, -30, -24, -12, -12, -8, -2, 4, 8, 16, 20, 24,", "86, 90, 92, 92, 95],[3, 15, 16, 16, 18, 26, 30, 32, 32,", "[ i ] + b [ i ] if i + 1 <", "-58, 42, 86, 96, -8, 8, -22, -14, -14, 98, 2, 98, -28],[-26,", "([0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0,", "x : continue y = a [ i ] + b [ i", "58, 82, -66, -40, 52, -78, 94, -70, -80, -68, -58, -26, 50,", ": if b [ i ] != 0 : return - 1 return", "Inc. # All rights reserved. # # This source code is licensed under", "89, 90, 91, 93, 96, 97, 99],29,), ([-24, 70, -74, -90, 72, 50,", "86, -42, -56, 2, -34, -46, 70, -62, 50, -58, -58, 42, 86,", "1 ] = 0 continue if a [ i ] + b [", "54, 55, 57, 65, 66, 67, 67, 68, 83, 85, 89, 89, 90,", "1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0,", "-70, -14, -32, 6, 74, 64, -78, 86, -42, -56, 2, -34, -46,", "] + b [ i ] if i + 1 < n :", "1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1],20,), ([59,", "0, 0, 1],[1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0,", "1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0,", "[ i ] == x : a [ i ] += b [", "49, 54, 55, 57, 65, 66, 67, 67, 68, 83, 85, 89, 89,", "0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0,", "-28],[-26, 36, 48, 48, -38, -86, 90, -62, 30, -4, 82, 16, 32,", "99],[93, 25, 16, 42, 55, 61, 69, 68, 95, 28, 40, 90, 1,", "29, 30, 35, 40, 41, 43, 44, 46, 53, 53, 56, 56, 58,", "0, 0, 1, 1],20,), ([59, 61, 64],[22, 59, 85],1,), ([98, 92, 28, 42,", "0, 0, 0, 0, 1, 1],20,), ([59, 61, 64],[22, 59, 85],1,), ([98, 92,", "40, 41, 43, 44, 46, 53, 53, 56, 56, 58, 60, 62, 70,", "if i > 0 : a [ i ] += b [ i", "1, 0, 0, 1],[1, 1, 1, 0, 1, 1, 0, 0, 0, 1,", "-40, 52, -78, 94, -70, -80, -68, -58, -26, 50, -78, -90, -48,", "1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0,", "20, 24, 24, 24, 48, 50, 54, 60, 64, 74, 80, 88, 90,", "68, 95, 28, 40, 90, 1, 86, 76, 40, 13, 47, 71, 4,", "24, 48, 50, 54, 60, 64, 74, 80, 88, 90, 92, 92],22,), ([0,", "72, 50, -94, 86, -58, -68, 42, 0, 98, -70, -14, -32, 6,", "72, 96, -28, -24, 52, -74, -30, 16, 66],18,), ([0, 0, 0, 0,", "] = y b [ i ] = 0 if i + 1", "for i in range ( 0 , n ) : if b [", "of this source tree. # def f_gold ( a , b , n", "# LICENSE file in the root directory of this source tree. 
# def", "i ] + b [ i ] if i + 1 < n", "return a [ 0 ] + b [ 0 ] if s %", "25, 25, 26, 29, 30, 35, 40, 41, 43, 44, 46, 53, 53,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,", "+ b [ i ] if i + 1 < n : y", "= 0 for i, parameters_set in enumerate(param): if f_filled(*parameters_set) == f_gold(*parameters_set): n_success+=1 print(\"#Results:", "-64, -56, -52, -32, -30, -24, -12, -12, -8, -2, 4, 8, 16,", "0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1,", "] b [ i + 1 ] = 0 continue return - 1", "23, 24, 25, 25, 26, 29, 30, 35, 40, 41, 43, 44, 46,", "1, 1, 1, 1, 1, 1],13,), ([98, 18, 50, 36, 88, 75, 2,", "(c) 2019-present, Facebook, Inc. # All rights reserved. # # This source code", "1 return x #TOFILL if __name__ == '__main__': param = [ ([4, 9,", "71, 4, 64, 54, 84, 45],16,), ([-80, -64, -64, -64, -64, -62, -54,", "1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0,", "i - 1 ] b [ i - 1 ] = 0 if", "50, 71, 90, 90, 61, 63, 99],[93, 25, 16, 42, 55, 61, 69,", "1 : return a [ 0 ] + b [ 0 ] if", "n and a [ i ] + b [ i + 1 ]", "-74, -90, 72, 50, -94, 86, -58, -68, 42, 0, 98, -70, -14,", "31, 59, 6, 11, 79, 97],[27, 71, 87, 36, 73, 37, 80, 34,", "50, 36, 88, 75, 2, 40, 74, 19, 63, 82, 77, 5, 59,", "( 0 , n ) : if a [ i ] > x", "1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0,", "# def f_gold ( a , b , n ) : s =", "16, 20, 24, 24, 24, 48, 50, 54, 60, 64, 74, 80, 88,", "[ 0 ] if s % n != 0 : return - 1", "the root directory of this source tree. # def f_gold ( a ,", "] if n == 1 : return a [ 0 ] + b", "s = 0 for i in range ( 0 , n ) :", "# # This source code is licensed under the license found in the", "17, 88, 52],9,) ] n_success = 0 for i, parameters_set in enumerate(param): if", "-84, 72, 60, 10, -18, -44, -22, 14, 0, 76, 72, 96, -28,", "46, 53, 53, 56, 56, 58, 60, 62, 70, 80, 80, 80, 82,", "1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "1, 0, 0, 1, 1, 0, 0, 1],[1, 1, 1, 0, 1, 1,", "6, 22, 22, 22, 26, 28, 50, 52, 70, 86, 86, 88, 90],[-96,", "license found in the # LICENSE file in the root directory of this", "64, 54, 84, 45],16,), ([-80, -64, -64, -64, -64, -62, -54, -48, -44,", "= 0 continue if a [ i ] + b [ i ]", "41, 43, 44, 46, 53, 53, 56, 56, 58, 60, 62, 70, 80,", "0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1,", ": continue y = a [ i ] + b [ i ]", "95],[3, 15, 16, 16, 18, 26, 30, 32, 32, 35, 37, 41, 42,", "-44, -66, -30],34,), ([0, 0, 0, 0, 0, 0, 0, 0, 1, 1,", "0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,", "45],16,), ([-80, -64, -64, -64, -64, -62, -54, -48, -44, -44, -38, -30,", ") : if a [ i ] > x : return - 1", "40, 74, 19, 63, 82, 77, 5, 59, 97, 70, 50, 71, 90,", "1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0,", "// n for i in range ( 0 , n ) : if", "for i in range ( 0 , n ) : s += a", "found in the # LICENSE file in the root directory of this source", "+ 1 ] = 0 continue return - 1 for i in range", "-22, -56, 74, 6, 6, -62, 46, 34, 2],[-62, -84, 72, 60, 10,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,", "b [ i + 1 ] = 0 continue if a [ i", "return - 1 if i > 0 : a [ i ] +=", "All rights reserved. 
# # This source code is licensed under the license", "26, 28, 50, 52, 70, 86, 86, 88, 90],[-96, -94, -80, -74, -64,", "-94, 92, -44, -66, -30],34,), ([0, 0, 0, 0, 0, 0, 0, 0,", "([59, 61, 64],[22, 59, 85],1,), ([98, 92, 28, 42, -74, -36, 40, -8,", "n ) : s = 0 for i in range ( 0 ,", "a , b , n ) : s = 0 for i in", "> x : return - 1 if i > 0 : a [", "-10, -6, -6, 6, 22, 22, 22, 26, 28, 50, 52, 70, 86,", "+ 1 < n and a [ i ] + b [ i", "'__main__': param = [ ([4, 9, 16, 18, 20, 23, 24, 25, 25,", "-68, -58, -26, 50, -78, -90, -48, -28, 48, 56, 50, 72, -22,", "for i, parameters_set in enumerate(param): if f_filled(*parameters_set) == f_gold(*parameters_set): n_success+=1 print(\"#Results: %i, %i\"", "1, 1, 1, 1, 1, 1, 1],34,), ([72, 97, 79, 21, 83, 2,", "61, 64],[22, 59, 85],1,), ([98, 92, 28, 42, -74, -36, 40, -8, 32,", "-78, 94, -70, -80, -68, -58, -26, 50, -78, -90, -48, -28, 48,", "-94, -80, -74, -64, -56, -52, -32, -30, -24, -12, -12, -8, -2,", "1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0,", "0 : a [ i ] += b [ i - 1 ]", "[ i ] == x : continue y = a [ i ]", "-28, 48, 56, 50, 72, -22, -2, 8, -94, 92, -44, -66, -30],34,),", "] == x : a [ i ] += b [ i +", "-30],34,), ([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,", "1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1,", "-64, -64, -62, -54, -48, -44, -44, -38, -30, -30, -26, -14, -12,", "-44, -38, -30, -30, -26, -14, -12, -10, -6, -6, 6, 22, 22,", "0, 98, -70, -14, -32, 6, 74, 64, -78, 86, -42, -56, 2,", "i + 1 ] = 0 continue if a [ i ] +", "90, 92, 92, 95],[3, 15, 16, 16, 18, 26, 30, 32, 32, 35,", "41, 42, 43, 48, 49, 49, 54, 55, 57, 65, 66, 67, 67,", "x : a [ i ] += b [ i ] b [", "= 0 continue if i + 1 < n and a [ i", "98, 2, 98, -28],[-26, 36, 48, 48, -38, -86, 90, -62, 30, -4,", "0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0,", "b [ i ] if i + 1 < n : y +=", "10, -18, -44, -22, 14, 0, 76, 72, 96, -28, -24, 52, -74,", "i ] = y b [ i ] = 0 if i +", "1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "-70, -22, -56, 74, 6, 6, -62, 46, 34, 2],[-62, -84, 72, 60,", "] += b [ i + 1 ] b [ i + 1", "11, 79, 97],[27, 71, 87, 36, 73, 37, 80, 34, 57, 17, 88,", "] + b [ i ] if n == 1 : return a", "i > 0 : a [ i ] += b [ i -", "b [ i + 1 ] b [ i + 1 ] =", "36, 73, 37, 80, 34, 57, 17, 88, 52],9,) ] n_success = 0", "88, 90],[-96, -94, -80, -74, -64, -56, -52, -32, -30, -24, -12, -12,", "i in range ( 0 , n ) : s += a [", "24, 25, 25, 26, 29, 30, 35, 40, 41, 43, 44, 46, 53,", "1, 1],34,), ([72, 97, 79, 21, 83, 2, 31, 59, 6, 11, 79,", "74, 19, 63, 82, 77, 5, 59, 97, 70, 50, 71, 90, 90,", "in enumerate(param): if f_filled(*parameters_set) == f_gold(*parameters_set): n_success+=1 print(\"#Results: %i, %i\" % (n_success, len(param)))", "a [ i ] += b [ i + 1 ] b [", "67, 68, 83, 85, 89, 89, 90, 91, 93, 96, 97, 99],29,), ([-24,", "67, 67, 68, 83, 85, 89, 89, 90, 91, 93, 96, 97, 99],29,),", "1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0,", "-24, 52, -74, -30, 16, 66],18,), ([0, 0, 0, 0, 0, 0, 0,", "s % n != 0 : return - 1 x = s //", "b [ i ] = 0 continue if i + 1 < n", "19, 63, 82, 77, 5, 59, 97, 70, 50, 71, 90, 90, 61,", "-14, -12, -10, -6, -6, 6, 22, 22, 22, 26, 28, 50, 52,", "6, -62, 46, 34, 2],[-62, -84, 72, 60, 10, -18, -44, -22, 14,", "70, -74, -90, 72, 50, -94, 86, -58, -68, 42, 0, 98, -70,", "80, 88, 90, 92, 92],22,), ([0, 1, 1, 0, 0, 0, 0, 1,", "([98, 18, 50, 36, 88, 75, 2, 40, 74, 19, 63, 82, 77,", "42, 43, 48, 49, 49, 54, 55, 57, 65, 66, 67, 67, 68,", "-12, -12, -8, -2, 4, 8, 16, 20, 24, 24, 24, 48, 50,", "0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1],[1, 1,", "0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0,", 
"-78, 86, -42, -56, 2, -34, -46, 70, -62, 50, -58, -58, 42,", "97, 79, 21, 83, 2, 31, 59, 6, 11, 79, 97],[27, 71, 87,", "86, 86, 88, 90],[-96, -94, -80, -74, -64, -56, -52, -32, -30, -24,", "a [ i ] += b [ i - 1 ] b [", "i ] + b [ i + 1 ] == x : a", "a [ i ] == x : continue y = a [ i", "- 1 return x #TOFILL if __name__ == '__main__': param = [ ([4,", "22, 26, 28, 50, 52, 70, 86, 86, 88, 90],[-96, -94, -80, -74,", "] = 0 continue if i + 1 < n and a [", "96, 97, 99],29,), ([-24, 70, -74, -90, 72, 50, -94, 86, -58, -68,", "i in range ( 0 , n ) : if b [ i", "76, 40, 13, 47, 71, 4, 64, 54, 84, 45],16,), ([-80, -64, -64,", "reserved. # # This source code is licensed under the license found in", "1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0,", "if i + 1 < n : b [ i + 1 ]", "2019-present, Facebook, Inc. # All rights reserved. # # This source code is", "86, 76, 40, 13, 47, 71, 4, 64, 54, 84, 45],16,), ([-80, -64,", "a [ i ] += b [ i ] b [ i ]", "+= b [ i ] b [ i ] = 0 continue if", "!= 0 : return - 1 return x #TOFILL if __name__ == '__main__':", "if __name__ == '__main__': param = [ ([4, 9, 16, 18, 20, 23,", ") : s = 0 for i in range ( 0 , n", "( a , b , n ) : s = 0 for i", "def f_gold ( a , b , n ) : s = 0", "21, 83, 2, 31, 59, 6, 11, 79, 97],[27, 71, 87, 36, 73,", "root directory of this source tree. # def f_gold ( a , b", "+= b [ i + 1 ] if y == x : a", "[ i ] if n == 1 : return a [ 0 ]", "in the root directory of this source tree. # def f_gold ( a", "55, 57, 65, 66, 67, 67, 68, 83, 85, 89, 89, 90, 91,", "35, 37, 41, 42, 43, 48, 49, 49, 54, 55, 57, 65, 66,", "96, -8, 8, -22, -14, -14, 98, 2, 98, -28],[-26, 36, 48, 48,", "-90, -48, -28, 48, 56, 50, 72, -22, -2, 8, -94, 92, -44,", "-26, -14, -12, -10, -6, -6, 6, 22, 22, 22, 26, 28, 50,", "2, 40, 74, 19, 63, 82, 77, 5, 59, 97, 70, 50, 71,", ", n ) : if b [ i ] != 0 : return", "in range ( 0 , n ) : if b [ i ]", "-56, 2, -34, -46, 70, -62, 50, -58, -58, 42, 86, 96, -8,", "66],18,), ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "code is licensed under the license found in the # LICENSE file in", "97],[27, 71, 87, 36, 73, 37, 80, 34, 57, 17, 88, 52],9,) ]", "] + b [ i ] == x : a [ i ]", "] if i + 1 < n : y += b [ i", "90, 91, 93, 96, 97, 99],29,), ([-24, 70, -74, -90, 72, 50, -94,", "-8, 32, -22, -70, -22, -56, 74, 6, 6, -62, 46, 34, 2],[-62,", "98, -28],[-26, 36, 48, 48, -38, -86, 90, -62, 30, -4, 82, 16,", "b [ i ] != 0 : return - 1 return x #TOFILL", "0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],13,),", "if a [ i ] == x : continue y = a [", "] b [ i ] = 0 continue if i + 1 <", "1, 86, 76, 40, 13, 47, 71, 4, 64, 54, 84, 45],16,), ([-80,", "i ] if n == 1 : return a [ 0 ] +", "1 ] b [ i + 1 ] = 0 continue return -", "i ] > x : return - 1 if i > 0 :", "90],[-96, -94, -80, -74, -64, -56, -52, -32, -30, -24, -12, -12, -8,", "Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This source", "60, 62, 70, 80, 80, 80, 82, 86, 90, 92, 92, 95],[3, 15,", "source tree. # def f_gold ( a , b , n ) :", "i + 1 < n : b [ i + 1 ] =", "-38, -86, 90, -62, 30, -4, 82, 16, 32, -6, 58, 82, -66,", "[ i ] = y b [ i ] = 0 if i", "0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1,", "0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1],[1, 1, 1,", "b [ i + 1 ] = 0 continue return - 1 for", "LICENSE file in the root directory of this source tree. 
# def f_gold", "- 1 ] b [ i - 1 ] = 0 if a", "1],13,), ([98, 18, 50, 36, 88, 75, 2, 40, 74, 19, 63, 82,", "] > x : return - 1 if i > 0 : a", "22, 22, 26, 28, 50, 52, 70, 86, 86, 88, 90],[-96, -94, -80,", "[ i ] + b [ i ] == x : a [", "[ i ] += b [ i - 1 ] b [ i", "-74, -64, -56, -52, -32, -30, -24, -12, -12, -8, -2, 4, 8,", "57, 65, 66, 67, 67, 68, 83, 85, 89, 89, 90, 91, 93,", "-48, -44, -44, -38, -30, -30, -26, -14, -12, -10, -6, -6, 6,", "1, 1, 1, 1, 1, 1, 1, 1],13,), ([98, 18, 50, 36, 88,", "0 continue if i + 1 < n and a [ i ]", "i in range ( 0 , n ) : if a [ i", "2, 31, 59, 6, 11, 79, 97],[27, 71, 87, 36, 73, 37, 80,", "n : b [ i + 1 ] = 0 continue if a", "= 0 if a [ i ] == x : continue y =", "[ i ] = 0 continue if i + 1 < n and", "-36, 40, -8, 32, -22, -70, -22, -56, 74, 6, 6, -62, 46,", "0 : return - 1 x = s // n for i in", "1 if i > 0 : a [ i ] += b [", "0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1,", ", n ) : s += a [ i ] + b [", "== x : a [ i ] += b [ i + 1", "-6, 58, 82, -66, -40, 52, -78, 94, -70, -80, -68, -58, -26,", "86, -58, -68, 42, 0, 98, -70, -14, -32, 6, 74, 64, -78,", "70, 86, 86, 88, 90],[-96, -94, -80, -74, -64, -56, -52, -32, -30,", "92, 92, 95],[3, 15, 16, 16, 18, 26, 30, 32, 32, 35, 37,", ": return a [ 0 ] + b [ 0 ] if s", "= 0 if i + 1 < n : b [ i +", "] != 0 : return - 1 return x #TOFILL if __name__ ==", "y = a [ i ] + b [ i ] if i", "30, -4, 82, 16, 32, -6, 58, 82, -66, -40, 52, -78, 94,", "0, 76, 72, 96, -28, -24, 52, -74, -30, 16, 66],18,), ([0, 0,", "1],34,), ([72, 97, 79, 21, 83, 2, 31, 59, 6, 11, 79, 97],[27,", "0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],13,), ([98,", "0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0,", "1, 1, 1, 1],13,), ([98, 18, 50, 36, 88, 75, 2, 40, 74,", "x #TOFILL if __name__ == '__main__': param = [ ([4, 9, 16, 18,", "86, 96, -8, 8, -22, -14, -14, 98, 2, 98, -28],[-26, 36, 48,", "61, 63, 99],[93, 25, 16, 42, 55, 61, 69, 68, 95, 28, 40,", ": return - 1 return x #TOFILL if __name__ == '__main__': param =", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "0, 1],[1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0,", "- 1 ] = 0 if a [ i ] == x :", "+ 1 ] == x : a [ i ] += b [", "in range ( 0 , n ) : s += a [ i", "6, 11, 79, 97],[27, 71, 87, 36, 73, 37, 80, 34, 57, 17,", "-4, 82, 16, 32, -6, 58, 82, -66, -40, 52, -78, 94, -70,", ": s = 0 for i in range ( 0 , n )", "+ 1 ] if y == x : a [ i ] =", "0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1,", "a [ i ] + b [ i ] if i + 1", "-46, 70, -62, 50, -58, -58, 42, 86, 96, -8, 8, -22, -14,", "[ i ] + b [ i ] if n == 1 :", "30, 35, 40, 41, 43, 44, 46, 53, 53, 56, 56, 58, 60,", "i + 1 ] = 0 continue return - 1 for i in", "1],20,), ([59, 61, 64],[22, 59, 85],1,), ([98, 92, 28, 42, -74, -36, 40,", "28, 50, 52, 70, 86, 86, 88, 90],[-96, -94, -80, -74, -64, -56,", "-66, -40, 52, -78, 94, -70, -80, -68, -58, -26, 50, -78, -90,", "59, 85],1,), ([98, 92, 28, 42, -74, -36, 40, -8, 32, -22, -70,", "1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0,", "source code is licensed under the license found in the # LICENSE file", "70, 50, 71, 90, 90, 61, 63, 99],[93, 25, 16, 42, 55, 61,", "2],[-62, -84, 72, 60, 10, -18, -44, -22, 14, 0, 76, 72, 96,", "77, 5, 59, 97, 70, 50, 71, 90, 90, 61, 63, 99],[93, 25,", "0 ] + b [ 0 ] if s % n != 0", "50, -58, -58, 42, 86, 96, -8, 8, -22, -14, -14, 98, 2,", "1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0,", "0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,", ", n ) : s = 0 for i in range ( 0", "2, -34, -46, 70, -62, 50, -58, -58, 42, 86, 96, -8, 8,", "- 1 if i > 0 : a [ i ] += b", "x : a [ i ] = y b [ i ] =", "param = [ ([4, 9, 16, 18, 20, 23, 24, 25, 
25, 26,", "-62, -54, -48, -44, -44, -38, -30, -30, -26, -14, -12, -10, -6,", "([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0,", "9, 16, 18, 20, 23, 24, 25, 25, 26, 29, 30, 35, 40,", "79, 97],[27, 71, 87, 36, 73, 37, 80, 34, 57, 17, 88, 52],9,)", "32, -6, 58, 82, -66, -40, 52, -78, 94, -70, -80, -68, -58,", "60, 10, -18, -44, -22, 14, 0, 76, 72, 96, -28, -24, 52,", "1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", ": return - 1 if i > 0 : a [ i ]", "22, 22, 22, 26, 28, 50, 52, 70, 86, 86, 88, 90],[-96, -94,", "] = 0 if a [ i ] == x : continue y", "n_success = 0 for i, parameters_set in enumerate(param): if f_filled(*parameters_set) == f_gold(*parameters_set): n_success+=1", "[ i - 1 ] b [ i - 1 ] = 0", "90, 92, 92],22,), ([0, 1, 1, 0, 0, 0, 0, 1, 1, 1,", "1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0,", "50, -94, 86, -58, -68, 42, 0, 98, -70, -14, -32, 6, 74,", "= 0 continue return - 1 for i in range ( 0 ,", "-56, -52, -32, -30, -24, -12, -12, -8, -2, 4, 8, 16, 20,", "14, 0, 76, 72, 96, -28, -24, 52, -74, -30, 16, 66],18,), ([0,", "1, 1, 1, 0, 0, 0, 0, 0, 1, 1],20,), ([59, 61, 64],[22,", "82, 77, 5, 59, 97, 70, 50, 71, 90, 90, 61, 63, 99],[93,", "92, 92],22,), ([0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,", "25, 26, 29, 30, 35, 40, 41, 43, 44, 46, 53, 53, 56,", "1 ] = 0 continue return - 1 for i in range (", "80, 80, 82, 86, 90, 92, 92, 95],[3, 15, 16, 16, 18, 26,", "= s // n for i in range ( 0 , n )", "-30, 16, 66],18,), ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "53, 56, 56, 58, 60, 62, 70, 80, 80, 80, 82, 86, 90,", "1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1],20,), ([59, 61,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,", "46, 34, 2],[-62, -84, 72, 60, 10, -18, -44, -22, 14, 0, 76,", "13, 47, 71, 4, 64, 54, 84, 45],16,), ([-80, -64, -64, -64, -64,", "1 for i in range ( 0 , n ) : if b", "-30, -24, -12, -12, -8, -2, 4, 8, 16, 20, 24, 24, 24,", "0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,", "< n : b [ i + 1 ] = 0 continue if", "] += b [ i ] b [ i ] = 0 continue", "64, 74, 80, 88, 90, 92, 92],22,), ([0, 1, 1, 0, 0, 0,", "[ i ] = 0 if i + 1 < n : b", "0 , n ) : s += a [ i ] + b", "0, 1, 1, 1, 1, 1, 1, 1, 1],13,), ([98, 18, 50, 36,", "-24, -12, -12, -8, -2, 4, 8, 16, 20, 24, 24, 24, 48,", "-90, 72, 50, -94, 86, -58, -68, 42, 0, 98, -70, -14, -32,", "] = 0 continue return - 1 for i in range ( 0", "== x : a [ i ] = y b [ i ]", "-6, 6, 22, 22, 22, 26, 28, 50, 52, 70, 86, 86, 88,", "1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0,", "== '__main__': param = [ ([4, 9, 16, 18, 20, 23, 24, 25,", "== x : a [ i ] += b [ i ] b", "52],9,) ] n_success = 0 for i, parameters_set in enumerate(param): if f_filled(*parameters_set) ==", "([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,", "== x : continue y = a [ i ] + b [", "88, 52],9,) ] n_success = 0 for i, parameters_set in enumerate(param): if f_filled(*parameters_set)", "48, -38, -86, 90, -62, 30, -4, 82, 16, 32, -6, 58, 82,", "[ i + 1 ] b [ i + 1 ] = 0", "70, 80, 80, 80, 82, 86, 90, 92, 92, 95],[3, 15, 16, 16,", "n ) : if b [ i ] != 0 : return -", "i + 1 ] == x : a [ i ] += b", "92, -44, -66, -30],34,), ([0, 0, 0, 0, 0, 0, 0, 0, 1,", "5, 59, 97, 70, 50, 71, 90, 90, 61, 63, 99],[93, 25, 16,", "16, 66],18,), ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "71, 87, 36, 73, 37, 80, 34, 57, 17, 88, 52],9,) ] n_success", "60, 64, 74, 80, 88, 90, 92, 92],22,), ([0, 1, 1, 0, 0,", "1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0,", "1, 1, 1, 1, 1, 1, 1, 1, 1, 1],34,), ([72, 97, 79,", "59, 6, 11, 79, 97],[27, 71, 87, 36, 73, 37, 80, 34, 57,", "1, 1, 1, 1],34,), ([72, 97, 79, 21, 83, 2, 31, 59, 6,", "15, 16, 16, 18, 
26, 30, 32, 32, 35, 37, 41, 42, 43,", "a [ i ] = y b [ i ] = 0 if", "80, 80, 80, 82, 86, 90, 92, 92, 95],[3, 15, 16, 16, 18,", "i - 1 ] = 0 if a [ i ] == x", "0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,", "i ] = 0 continue if i + 1 < n and a", "72, 60, 10, -18, -44, -22, 14, 0, 76, 72, 96, -28, -24,", "([4, 9, 16, 18, 20, 23, 24, 25, 25, 26, 29, 30, 35,", "# This source code is licensed under the license found in the #", "[ i + 1 ] if y == x : a [ i", "range ( 0 , n ) : s += a [ i ]", "-14, -32, 6, 74, 64, -78, 86, -42, -56, 2, -34, -46, 70,", "1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],34,),", "1 < n : y += b [ i + 1 ] if", "!= 0 : return - 1 x = s // n for i", "0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,", "8, 16, 20, 24, 24, 24, 48, 50, 54, 60, 64, 74, 80,", "i + 1 < n and a [ i ] + b [", "-44, -44, -38, -30, -30, -26, -14, -12, -10, -6, -6, 6, 22,", "35, 40, 41, 43, 44, 46, 53, 53, 56, 56, 58, 60, 62,", "if i + 1 < n : y += b [ i +", "32, 32, 35, 37, 41, 42, 43, 48, 49, 49, 54, 55, 57,", "return x #TOFILL if __name__ == '__main__': param = [ ([4, 9, 16,", "-32, 6, 74, 64, -78, 86, -42, -56, 2, -34, -46, 70, -62,", "[ 0 ] + b [ 0 ] if s % n !=", "b [ i - 1 ] = 0 if a [ i ]", "= 0 for i in range ( 0 , n ) : s", "16, 16, 18, 26, 30, 32, 32, 35, 37, 41, 42, 43, 48,", "24, 24, 24, 48, 50, 54, 60, 64, 74, 80, 88, 90, 92,", "75, 2, 40, 74, 19, 63, 82, 77, 5, 59, 97, 70, 50,", "0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0,", "+ b [ i + 1 ] == x : a [ i", "89, 89, 90, 91, 93, 96, 97, 99],29,), ([-24, 70, -74, -90, 72,", "97, 99],29,), ([-24, 70, -74, -90, 72, 50, -94, 86, -58, -68, 42,", "b [ i ] b [ i ] = 0 continue if i", "-28, -24, 52, -74, -30, 16, 66],18,), ([0, 0, 0, 0, 0, 0,", "0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1,", "28, 42, -74, -36, 40, -8, 32, -22, -70, -22, -56, 74, 6,", "64, -78, 86, -42, -56, 2, -34, -46, 70, -62, 50, -58, -58,", "> 0 : a [ i ] += b [ i - 1", "([72, 97, 79, 21, 83, 2, 31, 59, 6, 11, 79, 97],[27, 71,", "34, 2],[-62, -84, 72, 60, 10, -18, -44, -22, 14, 0, 76, 72,", "82, 16, 32, -6, 58, 82, -66, -40, 52, -78, 94, -70, -80,", "-62, 50, -58, -58, 42, 86, 96, -8, 8, -22, -14, -14, 98,", "if n == 1 : return a [ 0 ] + b [", "-54, -48, -44, -44, -38, -30, -30, -26, -14, -12, -10, -6, -6,", "53, 53, 56, 56, 58, 60, 62, 70, 80, 80, 80, 82, 86,", "1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,", "( 0 , n ) : if b [ i ] != 0" ]
[]
[ "torch from torchvision import transforms as T from scipy import interpolate from PIL", "shuffle(list_all) num_train = int(split_ratio * len(list_all)) list_train = list_all[:num_train] list_val = list_all[num_train:] train_dataset", "matrix, image_path def __len__(self): \"\"\"Returns the total number of font files.\"\"\" return len(self.image_paths)", "self.GT_paths = GT_path #self.image_paths = list(map(lambda x: os.path.join(root, x), os.listdir(root))) self.image_paths = list_img_path", "of config class LabeledImageFolder(data.Dataset): def __init__(self, root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4): \"\"\"Initializes image paths and preprocessing", "xnew = list(range(xper[0],xper[-1]+1)) ynew = f(xnew) ynew = [int(i) for i in ynew]", "ynew] for n,xn in enumerate(xnew): matrix[xn, ynew[n]] = 1 vector[xn] = ynew[n] Transform", "* len(list_all)) list_train = list_all[:num_train] list_val = list_all[num_train:] train_dataset = LabeledImageFolder(root = image_path,", "Transform = T.Compose(Transform) image_t = Transform(image) Norm_ = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5,", "<filename>data_loader.py from torch.utils import data import os import torch from torchvision import transforms", "LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_train, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) train_loader = data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True,", "#print(yper.text) yper = yper.text.split(' ') yper = [int(float(i)*224) for i in yper] image", "data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) val_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_val, image_size =image_size,", "def get_loader(root_path, image_size, batch_size, split_ratio = 0.99, num_workers=2, mode='train',augmentation_prob=0.4): \"\"\"Builds and returns Dataloader.\"\"\"", "def __init__(self, root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4): \"\"\"Initializes image paths and preprocessing module.\"\"\" self.root = root", "# GT : Ground Truth self.GT_paths = GT_path #self.image_paths = list(map(lambda x: os.path.join(root,", "= Norm_(image_t) return image_t, vector, matrix, image_path def __len__(self): \"\"\"Returns the total number", "obj.find('yper') #print(yper.text) yper = yper.text.split(' ') yper = [int(float(i)*224) for i in yper]", "= list_all[:num_train] list_val = list_all[num_train:] train_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_train, image_size", "yper = yper.text.split(' ') yper = [int(float(i)*224) for i in yper] image =", "Transform(image) Norm_ = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) image_t = Norm_(image_t) return", "0.5, 0.5)) image_t = Norm_(image_t) return image_t, vector, matrix, image_path def __len__(self): \"\"\"Returns", "shuffle=True, num_workers=num_workers) val_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_val, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) val_loader", "image_size, batch_size, split_ratio = 0.99, num_workers=2, mode='train',augmentation_prob=0.4): \"\"\"Builds and returns Dataloader.\"\"\" image_path =", "len(self.image_paths) def get_loader(root_path, image_size, batch_size, split_ratio = 0.99, num_workers=2, mode='train',augmentation_prob=0.4): \"\"\"Builds and returns", "os.listdir(root))) self.image_paths = list_img_path 
self.image_size = image_size self.mode = mode self.RotationDegree = [0,90,180,270]", "= [int(float(i)*224) for i in yper] image = image.crop((x11,y11,x12,y12)).resize((img_size,img_size)) matrix = torch.zeros(img_size,img_size) vector", "int(float(bbox.find('xmax').text)) y12 = int(float(bbox.find('ymax').text)) if obj.find('name').text.lower().strip()=='xypercent': xper = obj.find('xper') #print(xper.text) xper = xper.text.split('", "+ 'ISIC_' + filename + '_segmentation.png' image = Image.open(image_path) #GT = Image.open(GT_path) annot_fn", "(0.5, 0.5, 0.5)) image_t = Norm_(image_t) return image_t, vector, matrix, image_path def __len__(self):", "and preprocesses it and returns.\"\"\" image_path = self.image_paths[index] filename = image_path.split('_')[-1][:-len(\".jpg\")] #GT_path =", "self.mode = mode self.RotationDegree = [0,90,180,270] self.augmentation_prob = augmentation_prob print(\"image count in {}", "= image_path, GT_path=GT_path, list_img_path=list_val, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) val_loader = data.DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)", "= image.width hei = image.height for ix, obj in enumerate(objs): if obj.find('name').text.lower().strip()=='graph': bbox", "224 \"\"\"Reads an image from a file and preprocesses it and returns.\"\"\" image_path", "interpolate.interp1d(xper, yper) xnew = list(range(xper[0],xper[-1]+1)) ynew = f(xnew) ynew = [int(i) for i", "= [int(i) for i in ynew] for n,xn in enumerate(xnew): matrix[xn, ynew[n]] =", "in {} path :{}\".format(self.mode,len(self.image_paths))) def __getitem__(self, index): #img_size = 224 \"\"\"Reads an image", "tree = ET.parse(annot_fn) objs = tree.findall('object') #img = Image.open(fn) wid = image.width hei", "print(\"image count in {} path :{}\".format(self.mode,len(self.image_paths))) def __getitem__(self, index): #img_size = 224 \"\"\"Reads", "ET ## Config img_size = 256 ## End of config class LabeledImageFolder(data.Dataset): def", "#self.image_paths = list(map(lambda x: os.path.join(root, x), os.listdir(root))) self.image_paths = list_img_path self.image_size = image_size", "ynew[n] Transform = [] Transform.append(T.ToTensor()) Transform = T.Compose(Transform) image_t = Transform(image) Norm_ =", "= xper.text.split(' ') xper = [int(float(i)*224) for i in xper] yper = obj.find('yper')", "image = Image.open(image_path) #GT = Image.open(GT_path) annot_fn = self.GT_paths + filename.split('/')[-1] + '.xml'", "= obj.find('bndbox') x11 = int(float(bbox.find('xmin').text)) y11 = int(float(bbox.find('ymin').text)) x12 = int(float(bbox.find('xmax').text)) y12 =", "image_path = root_path+'/JPEGImages/' GT_path = root_path+'/Annotations/' list_all = list(map(lambda x: os.path.join(image_path, x), os.listdir(image_path)))", "in enumerate(xnew): matrix[xn, ynew[n]] = 1 vector[xn] = ynew[n] Transform = [] Transform.append(T.ToTensor())", "= GT_path #self.image_paths = list(map(lambda x: os.path.join(root, x), os.listdir(root))) self.image_paths = list_img_path self.image_size", "= list_img_path self.image_size = image_size self.mode = mode self.RotationDegree = [0,90,180,270] self.augmentation_prob =", "= Image.open(GT_path) annot_fn = self.GT_paths + filename.split('/')[-1] + '.xml' tree = ET.parse(annot_fn) objs", "module.\"\"\" self.root = root # GT : Ground Truth self.GT_paths = GT_path #self.image_paths", "self.root = root # GT : Ground Truth self.GT_paths = GT_path #self.image_paths =", "\"\"\"Builds and returns Dataloader.\"\"\" 
image_path = root_path+'/JPEGImages/' GT_path = root_path+'/Annotations/' list_all = list(map(lambda", "batch_size, split_ratio = 0.99, num_workers=2, mode='train',augmentation_prob=0.4): \"\"\"Builds and returns Dataloader.\"\"\" image_path = root_path+'/JPEGImages/'", "from PIL import Image from random import shuffle import xml.etree.ElementTree as ET ##", "= list_all[num_train:] train_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_train, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) train_loader", "torch.zeros(img_size,img_size) vector = torch.ones(img_size) * (-1) f = interpolate.interp1d(xper, yper) xnew = list(range(xper[0],xper[-1]+1))", "batch_size=batch_size, shuffle=True, num_workers=num_workers) val_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_val, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob)", "y11 = int(float(bbox.find('ymin').text)) x12 = int(float(bbox.find('xmax').text)) y12 = int(float(bbox.find('ymax').text)) if obj.find('name').text.lower().strip()=='xypercent': xper =", "image from a file and preprocesses it and returns.\"\"\" image_path = self.image_paths[index] filename", "objs = tree.findall('object') #img = Image.open(fn) wid = image.width hei = image.height for", "root_path+'/JPEGImages/' GT_path = root_path+'/Annotations/' list_all = list(map(lambda x: os.path.join(image_path, x), os.listdir(image_path))) shuffle(list_all) num_train", "data import os import torch from torchvision import transforms as T from scipy", "#print(xper.text) xper = xper.text.split(' ') xper = [int(float(i)*224) for i in xper] yper", "256 ## End of config class LabeledImageFolder(data.Dataset): def __init__(self, root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4): \"\"\"Initializes image", "image_t = Transform(image) Norm_ = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) image_t =", "0.99, num_workers=2, mode='train',augmentation_prob=0.4): \"\"\"Builds and returns Dataloader.\"\"\" image_path = root_path+'/JPEGImages/' GT_path = root_path+'/Annotations/'", "image_t = Norm_(image_t) return image_t, vector, matrix, image_path def __len__(self): \"\"\"Returns the total", "image.crop((x11,y11,x12,y12)).resize((img_size,img_size)) matrix = torch.zeros(img_size,img_size) vector = torch.ones(img_size) * (-1) f = interpolate.interp1d(xper, yper)", "= Image.open(fn) wid = image.width hei = image.height for ix, obj in enumerate(objs):", "x: os.path.join(image_path, x), os.listdir(image_path))) shuffle(list_all) num_train = int(split_ratio * len(list_all)) list_train = list_all[:num_train]", "= 224 \"\"\"Reads an image from a file and preprocesses it and returns.\"\"\"", "list(range(xper[0],xper[-1]+1)) ynew = f(xnew) ynew = [int(i) for i in ynew] for n,xn", "from torch.utils import data import os import torch from torchvision import transforms as", "## Config img_size = 256 ## End of config class LabeledImageFolder(data.Dataset): def __init__(self,", "= [0,90,180,270] self.augmentation_prob = augmentation_prob print(\"image count in {} path :{}\".format(self.mode,len(self.image_paths))) def __getitem__(self,", "= int(split_ratio * len(list_all)) list_train = list_all[:num_train] list_val = list_all[num_train:] train_dataset = LabeledImageFolder(root", "= [int(float(i)*224) for i in xper] yper = obj.find('yper') #print(yper.text) yper = yper.text.split('", "root_path+'/Annotations/' list_all = list(map(lambda x: os.path.join(image_path, x), 
os.listdir(image_path))) shuffle(list_all) num_train = int(split_ratio *", "as T from scipy import interpolate from PIL import Image from random import", "index): #img_size = 224 \"\"\"Reads an image from a file and preprocesses it", "x12 = int(float(bbox.find('xmax').text)) y12 = int(float(bbox.find('ymax').text)) if obj.find('name').text.lower().strip()=='xypercent': xper = obj.find('xper') #print(xper.text) xper", "list_img_path=list_val, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) val_loader = data.DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers) return train_loader, val_loader", "Config img_size = 256 ## End of config class LabeledImageFolder(data.Dataset): def __init__(self, root,", "= 256 ## End of config class LabeledImageFolder(data.Dataset): def __init__(self, root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4): \"\"\"Initializes", "i in xper] yper = obj.find('yper') #print(yper.text) yper = yper.text.split(' ') yper =", "scipy import interpolate from PIL import Image from random import shuffle import xml.etree.ElementTree", "path :{}\".format(self.mode,len(self.image_paths))) def __getitem__(self, index): #img_size = 224 \"\"\"Reads an image from a", "Image.open(GT_path) annot_fn = self.GT_paths + filename.split('/')[-1] + '.xml' tree = ET.parse(annot_fn) objs =", "image.height for ix, obj in enumerate(objs): if obj.find('name').text.lower().strip()=='graph': bbox = obj.find('bndbox') x11 =", "#GT = Image.open(GT_path) annot_fn = self.GT_paths + filename.split('/')[-1] + '.xml' tree = ET.parse(annot_fn)", "Transform.append(T.ToTensor()) Transform = T.Compose(Transform) image_t = Transform(image) Norm_ = T.Normalize((0.5, 0.5, 0.5), (0.5,", "__init__(self, root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4): \"\"\"Initializes image paths and preprocessing module.\"\"\" self.root = root #", "preprocessing module.\"\"\" self.root = root # GT : Ground Truth self.GT_paths = GT_path", "self.image_paths[index] filename = image_path.split('_')[-1][:-len(\".jpg\")] #GT_path = self.GT_paths + 'ISIC_' + filename + '_segmentation.png'", "list(map(lambda x: os.path.join(root, x), os.listdir(root))) self.image_paths = list_img_path self.image_size = image_size self.mode =", "i in yper] image = image.crop((x11,y11,x12,y12)).resize((img_size,img_size)) matrix = torch.zeros(img_size,img_size) vector = torch.ones(img_size) *", "'ISIC_' + filename + '_segmentation.png' image = Image.open(image_path) #GT = Image.open(GT_path) annot_fn =", "self.GT_paths + filename.split('/')[-1] + '.xml' tree = ET.parse(annot_fn) objs = tree.findall('object') #img =", "'_segmentation.png' image = Image.open(image_path) #GT = Image.open(GT_path) annot_fn = self.GT_paths + filename.split('/')[-1] +", "shuffle import xml.etree.ElementTree as ET ## Config img_size = 256 ## End of", "list_img_path self.image_size = image_size self.mode = mode self.RotationDegree = [0,90,180,270] self.augmentation_prob = augmentation_prob", "= torch.zeros(img_size,img_size) vector = torch.ones(img_size) * (-1) f = interpolate.interp1d(xper, yper) xnew =", "self.augmentation_prob = augmentation_prob print(\"image count in {} path :{}\".format(self.mode,len(self.image_paths))) def __getitem__(self, index): #img_size", "#GT_path = self.GT_paths + 'ISIC_' + filename + '_segmentation.png' image = Image.open(image_path) #GT", "= data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, 
num_workers=num_workers) val_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_val, image_size", "End of config class LabeledImageFolder(data.Dataset): def __init__(self, root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4): \"\"\"Initializes image paths and", "= self.GT_paths + filename.split('/')[-1] + '.xml' tree = ET.parse(annot_fn) objs = tree.findall('object') #img", "= augmentation_prob print(\"image count in {} path :{}\".format(self.mode,len(self.image_paths))) def __getitem__(self, index): #img_size =", "obj.find('bndbox') x11 = int(float(bbox.find('xmin').text)) y11 = int(float(bbox.find('ymin').text)) x12 = int(float(bbox.find('xmax').text)) y12 = int(float(bbox.find('ymax').text))", "#img_size = 224 \"\"\"Reads an image from a file and preprocesses it and", "[int(i) for i in ynew] for n,xn in enumerate(xnew): matrix[xn, ynew[n]] = 1", "self.image_paths = list_img_path self.image_size = image_size self.mode = mode self.RotationDegree = [0,90,180,270] self.augmentation_prob", "= int(float(bbox.find('xmax').text)) y12 = int(float(bbox.find('ymax').text)) if obj.find('name').text.lower().strip()=='xypercent': xper = obj.find('xper') #print(xper.text) xper =", "= obj.find('xper') #print(xper.text) xper = xper.text.split(' ') xper = [int(float(i)*224) for i in", "[0,90,180,270] self.augmentation_prob = augmentation_prob print(\"image count in {} path :{}\".format(self.mode,len(self.image_paths))) def __getitem__(self, index):", "returns.\"\"\" image_path = self.image_paths[index] filename = image_path.split('_')[-1][:-len(\".jpg\")] #GT_path = self.GT_paths + 'ISIC_' +", "= root # GT : Ground Truth self.GT_paths = GT_path #self.image_paths = list(map(lambda", "vector, matrix, image_path def __len__(self): \"\"\"Returns the total number of font files.\"\"\" return", "= int(float(bbox.find('ymax').text)) if obj.find('name').text.lower().strip()=='xypercent': xper = obj.find('xper') #print(xper.text) xper = xper.text.split(' ') xper", "import shuffle import xml.etree.ElementTree as ET ## Config img_size = 256 ## End", "#img = Image.open(fn) wid = image.width hei = image.height for ix, obj in", "os.path.join(root, x), os.listdir(root))) self.image_paths = list_img_path self.image_size = image_size self.mode = mode self.RotationDegree", "') yper = [int(float(i)*224) for i in yper] image = image.crop((x11,y11,x12,y12)).resize((img_size,img_size)) matrix =", "= obj.find('yper') #print(yper.text) yper = yper.text.split(' ') yper = [int(float(i)*224) for i in", "xper] yper = obj.find('yper') #print(yper.text) yper = yper.text.split(' ') yper = [int(float(i)*224) for", "filename.split('/')[-1] + '.xml' tree = ET.parse(annot_fn) objs = tree.findall('object') #img = Image.open(fn) wid", "[int(float(i)*224) for i in yper] image = image.crop((x11,y11,x12,y12)).resize((img_size,img_size)) matrix = torch.zeros(img_size,img_size) vector =", "= T.Compose(Transform) image_t = Transform(image) Norm_ = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))", "Ground Truth self.GT_paths = GT_path #self.image_paths = list(map(lambda x: os.path.join(root, x), os.listdir(root))) self.image_paths", "(-1) f = interpolate.interp1d(xper, yper) xnew = list(range(xper[0],xper[-1]+1)) ynew = f(xnew) ynew =", "= LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_val, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) val_loader = data.DataLoader(dataset=val_dataset, batch_size=batch_size,", "vector[xn] = ynew[n] Transform 
= [] Transform.append(T.ToTensor()) Transform = T.Compose(Transform) image_t = Transform(image)", "= root_path+'/Annotations/' list_all = list(map(lambda x: os.path.join(image_path, x), os.listdir(image_path))) shuffle(list_all) num_train = int(split_ratio", "{} path :{}\".format(self.mode,len(self.image_paths))) def __getitem__(self, index): #img_size = 224 \"\"\"Reads an image from", "ET.parse(annot_fn) objs = tree.findall('object') #img = Image.open(fn) wid = image.width hei = image.height", "image.width hei = image.height for ix, obj in enumerate(objs): if obj.find('name').text.lower().strip()=='graph': bbox =", "root # GT : Ground Truth self.GT_paths = GT_path #self.image_paths = list(map(lambda x:", "get_loader(root_path, image_size, batch_size, split_ratio = 0.99, num_workers=2, mode='train',augmentation_prob=0.4): \"\"\"Builds and returns Dataloader.\"\"\" image_path", "= self.GT_paths + 'ISIC_' + filename + '_segmentation.png' image = Image.open(image_path) #GT =", "= Image.open(image_path) #GT = Image.open(GT_path) annot_fn = self.GT_paths + filename.split('/')[-1] + '.xml' tree", "T.Compose(Transform) image_t = Transform(image) Norm_ = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) image_t", "as ET ## Config img_size = 256 ## End of config class LabeledImageFolder(data.Dataset):", "augmentation_prob print(\"image count in {} path :{}\".format(self.mode,len(self.image_paths))) def __getitem__(self, index): #img_size = 224", "list_all[num_train:] train_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_train, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) train_loader =", "= ET.parse(annot_fn) objs = tree.findall('object') #img = Image.open(fn) wid = image.width hei =", "= self.image_paths[index] filename = image_path.split('_')[-1][:-len(\".jpg\")] #GT_path = self.GT_paths + 'ISIC_' + filename +", "from torchvision import transforms as T from scipy import interpolate from PIL import", "= image.crop((x11,y11,x12,y12)).resize((img_size,img_size)) matrix = torch.zeros(img_size,img_size) vector = torch.ones(img_size) * (-1) f = interpolate.interp1d(xper,", "files.\"\"\" return len(self.image_paths) def get_loader(root_path, image_size, batch_size, split_ratio = 0.99, num_workers=2, mode='train',augmentation_prob=0.4): \"\"\"Builds", "total number of font files.\"\"\" return len(self.image_paths) def get_loader(root_path, image_size, batch_size, split_ratio =", "and returns Dataloader.\"\"\" image_path = root_path+'/JPEGImages/' GT_path = root_path+'/Annotations/' list_all = list(map(lambda x:", "os.listdir(image_path))) shuffle(list_all) num_train = int(split_ratio * len(list_all)) list_train = list_all[:num_train] list_val = list_all[num_train:]", "mode self.RotationDegree = [0,90,180,270] self.augmentation_prob = augmentation_prob print(\"image count in {} path :{}\".format(self.mode,len(self.image_paths)))", "Truth self.GT_paths = GT_path #self.image_paths = list(map(lambda x: os.path.join(root, x), os.listdir(root))) self.image_paths =", "obj.find('xper') #print(xper.text) xper = xper.text.split(' ') xper = [int(float(i)*224) for i in xper]", "train_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_train, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) train_loader = data.DataLoader(dataset=train_dataset,", "number of font files.\"\"\" return len(self.image_paths) def get_loader(root_path, image_size, batch_size, split_ratio = 0.99,", "PIL import Image from random 
import shuffle import xml.etree.ElementTree as ET ## Config", "ynew = [int(i) for i in ynew] for n,xn in enumerate(xnew): matrix[xn, ynew[n]]", "self.RotationDegree = [0,90,180,270] self.augmentation_prob = augmentation_prob print(\"image count in {} path :{}\".format(self.mode,len(self.image_paths))) def", "GT_path=GT_path, list_img_path=list_train, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) train_loader = data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) val_dataset =", "enumerate(objs): if obj.find('name').text.lower().strip()=='graph': bbox = obj.find('bndbox') x11 = int(float(bbox.find('xmin').text)) y11 = int(float(bbox.find('ymin').text)) x12", ": Ground Truth self.GT_paths = GT_path #self.image_paths = list(map(lambda x: os.path.join(root, x), os.listdir(root)))", "') xper = [int(float(i)*224) for i in xper] yper = obj.find('yper') #print(yper.text) yper", "for i in ynew] for n,xn in enumerate(xnew): matrix[xn, ynew[n]] = 1 vector[xn]", "torch.ones(img_size) * (-1) f = interpolate.interp1d(xper, yper) xnew = list(range(xper[0],xper[-1]+1)) ynew = f(xnew)", "paths and preprocessing module.\"\"\" self.root = root # GT : Ground Truth self.GT_paths", "int(float(bbox.find('ymin').text)) x12 = int(float(bbox.find('xmax').text)) y12 = int(float(bbox.find('ymax').text)) if obj.find('name').text.lower().strip()=='xypercent': xper = obj.find('xper') #print(xper.text)", "import torch from torchvision import transforms as T from scipy import interpolate from", "img_size = 256 ## End of config class LabeledImageFolder(data.Dataset): def __init__(self, root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4):", "self.GT_paths + 'ISIC_' + filename + '_segmentation.png' image = Image.open(image_path) #GT = Image.open(GT_path)", "tree.findall('object') #img = Image.open(fn) wid = image.width hei = image.height for ix, obj", "num_workers=2, mode='train',augmentation_prob=0.4): \"\"\"Builds and returns Dataloader.\"\"\" image_path = root_path+'/JPEGImages/' GT_path = root_path+'/Annotations/' list_all", "image_path def __len__(self): \"\"\"Returns the total number of font files.\"\"\" return len(self.image_paths) def", "= 0.99, num_workers=2, mode='train',augmentation_prob=0.4): \"\"\"Builds and returns Dataloader.\"\"\" image_path = root_path+'/JPEGImages/' GT_path =", "GT : Ground Truth self.GT_paths = GT_path #self.image_paths = list(map(lambda x: os.path.join(root, x),", "0.5, 0.5), (0.5, 0.5, 0.5)) image_t = Norm_(image_t) return image_t, vector, matrix, image_path", "0.5), (0.5, 0.5, 0.5)) image_t = Norm_(image_t) return image_t, vector, matrix, image_path def", "import interpolate from PIL import Image from random import shuffle import xml.etree.ElementTree as", "LabeledImageFolder(data.Dataset): def __init__(self, root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4): \"\"\"Initializes image paths and preprocessing module.\"\"\" self.root =", "= int(float(bbox.find('xmin').text)) y11 = int(float(bbox.find('ymin').text)) x12 = int(float(bbox.find('xmax').text)) y12 = int(float(bbox.find('ymax').text)) if obj.find('name').text.lower().strip()=='xypercent':", "num_train = int(split_ratio * len(list_all)) list_train = list_all[:num_train] list_val = list_all[num_train:] train_dataset =", "= root_path+'/JPEGImages/' GT_path = root_path+'/Annotations/' list_all = list(map(lambda x: os.path.join(image_path, x), os.listdir(image_path))) shuffle(list_all)", "import os import 
torch from torchvision import transforms as T from scipy import", "= image.height for ix, obj in enumerate(objs): if obj.find('name').text.lower().strip()=='graph': bbox = obj.find('bndbox') x11", "int(split_ratio * len(list_all)) list_train = list_all[:num_train] list_val = list_all[num_train:] train_dataset = LabeledImageFolder(root =", "= mode self.RotationDegree = [0,90,180,270] self.augmentation_prob = augmentation_prob print(\"image count in {} path", "\"\"\"Initializes image paths and preprocessing module.\"\"\" self.root = root # GT : Ground", ":{}\".format(self.mode,len(self.image_paths))) def __getitem__(self, index): #img_size = 224 \"\"\"Reads an image from a file", "GT_path=GT_path, list_img_path=list_val, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) val_loader = data.DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers) return train_loader,", "'.xml' tree = ET.parse(annot_fn) objs = tree.findall('object') #img = Image.open(fn) wid = image.width", "y12 = int(float(bbox.find('ymax').text)) if obj.find('name').text.lower().strip()=='xypercent': xper = obj.find('xper') #print(xper.text) xper = xper.text.split(' ')", "obj in enumerate(objs): if obj.find('name').text.lower().strip()=='graph': bbox = obj.find('bndbox') x11 = int(float(bbox.find('xmin').text)) y11 =", "GT_path = root_path+'/Annotations/' list_all = list(map(lambda x: os.path.join(image_path, x), os.listdir(image_path))) shuffle(list_all) num_train =", "return len(self.image_paths) def get_loader(root_path, image_size, batch_size, split_ratio = 0.99, num_workers=2, mode='train',augmentation_prob=0.4): \"\"\"Builds and", "= list(map(lambda x: os.path.join(root, x), os.listdir(root))) self.image_paths = list_img_path self.image_size = image_size self.mode", "for i in yper] image = image.crop((x11,y11,x12,y12)).resize((img_size,img_size)) matrix = torch.zeros(img_size,img_size) vector = torch.ones(img_size)", "i in ynew] for n,xn in enumerate(xnew): matrix[xn, ynew[n]] = 1 vector[xn] =", "## End of config class LabeledImageFolder(data.Dataset): def __init__(self, root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4): \"\"\"Initializes image paths", "from random import shuffle import xml.etree.ElementTree as ET ## Config img_size = 256", "int(float(bbox.find('ymax').text)) if obj.find('name').text.lower().strip()=='xypercent': xper = obj.find('xper') #print(xper.text) xper = xper.text.split(' ') xper =", "yper = [int(float(i)*224) for i in yper] image = image.crop((x11,y11,x12,y12)).resize((img_size,img_size)) matrix = torch.zeros(img_size,img_size)", "import xml.etree.ElementTree as ET ## Config img_size = 256 ## End of config", "Transform = [] Transform.append(T.ToTensor()) Transform = T.Compose(Transform) image_t = Transform(image) Norm_ = T.Normalize((0.5,", "list_all[:num_train] list_val = list_all[num_train:] train_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_train, image_size =image_size,", "=image_size, mode=mode,augmentation_prob=augmentation_prob) train_loader = data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) val_dataset = LabeledImageFolder(root = image_path,", "[] Transform.append(T.ToTensor()) Transform = T.Compose(Transform) image_t = Transform(image) Norm_ = T.Normalize((0.5, 0.5, 0.5),", "Norm_ = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) image_t = Norm_(image_t) return image_t,", "if 
obj.find('name').text.lower().strip()=='xypercent': xper = obj.find('xper') #print(xper.text) xper = xper.text.split(' ') xper = [int(float(i)*224)", "GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4): \"\"\"Initializes image paths and preprocessing module.\"\"\" self.root = root # GT :", "and preprocessing module.\"\"\" self.root = root # GT : Ground Truth self.GT_paths =", "xper = [int(float(i)*224) for i in xper] yper = obj.find('yper') #print(yper.text) yper =", "image_path.split('_')[-1][:-len(\".jpg\")] #GT_path = self.GT_paths + 'ISIC_' + filename + '_segmentation.png' image = Image.open(image_path)", "x), os.listdir(root))) self.image_paths = list_img_path self.image_size = image_size self.mode = mode self.RotationDegree =", "num_workers=num_workers) val_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_val, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) val_loader =", "of font files.\"\"\" return len(self.image_paths) def get_loader(root_path, image_size, batch_size, split_ratio = 0.99, num_workers=2,", "from scipy import interpolate from PIL import Image from random import shuffle import", "__getitem__(self, index): #img_size = 224 \"\"\"Reads an image from a file and preprocesses", "for i in xper] yper = obj.find('yper') #print(yper.text) yper = yper.text.split(' ') yper", "xper.text.split(' ') xper = [int(float(i)*224) for i in xper] yper = obj.find('yper') #print(yper.text)", "= yper.text.split(' ') yper = [int(float(i)*224) for i in yper] image = image.crop((x11,y11,x12,y12)).resize((img_size,img_size))", "list(map(lambda x: os.path.join(image_path, x), os.listdir(image_path))) shuffle(list_all) num_train = int(split_ratio * len(list_all)) list_train =", "Image from random import shuffle import xml.etree.ElementTree as ET ## Config img_size =", "preprocesses it and returns.\"\"\" image_path = self.image_paths[index] filename = image_path.split('_')[-1][:-len(\".jpg\")] #GT_path = self.GT_paths", "yper] image = image.crop((x11,y11,x12,y12)).resize((img_size,img_size)) matrix = torch.zeros(img_size,img_size) vector = torch.ones(img_size) * (-1) f", "a file and preprocesses it and returns.\"\"\" image_path = self.image_paths[index] filename = image_path.split('_')[-1][:-len(\".jpg\")]", "obj.find('name').text.lower().strip()=='graph': bbox = obj.find('bndbox') x11 = int(float(bbox.find('xmin').text)) y11 = int(float(bbox.find('ymin').text)) x12 = int(float(bbox.find('xmax').text))", "torch.utils import data import os import torch from torchvision import transforms as T", "= list(range(xper[0],xper[-1]+1)) ynew = f(xnew) ynew = [int(i) for i in ynew] for", "if obj.find('name').text.lower().strip()=='graph': bbox = obj.find('bndbox') x11 = int(float(bbox.find('xmin').text)) y11 = int(float(bbox.find('ymin').text)) x12 =", "image_path = self.image_paths[index] filename = image_path.split('_')[-1][:-len(\".jpg\")] #GT_path = self.GT_paths + 'ISIC_' + filename", "train_loader = data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) val_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_val,", "list_all = list(map(lambda x: os.path.join(image_path, x), os.listdir(image_path))) shuffle(list_all) num_train = int(split_ratio * len(list_all))", "yper = obj.find('yper') #print(yper.text) yper = yper.text.split(' ') yper = [int(float(i)*224) for i", "T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) image_t = Norm_(image_t) return 
image_t, vector, matrix,", "import Image from random import shuffle import xml.etree.ElementTree as ET ## Config img_size", "0.5)) image_t = Norm_(image_t) return image_t, vector, matrix, image_path def __len__(self): \"\"\"Returns the", "in ynew] for n,xn in enumerate(xnew): matrix[xn, ynew[n]] = 1 vector[xn] = ynew[n]", "x), os.listdir(image_path))) shuffle(list_all) num_train = int(split_ratio * len(list_all)) list_train = list_all[:num_train] list_val =", "return image_t, vector, matrix, image_path def __len__(self): \"\"\"Returns the total number of font", "val_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_val, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) val_loader = data.DataLoader(dataset=val_dataset,", "torchvision import transforms as T from scipy import interpolate from PIL import Image", "in yper] image = image.crop((x11,y11,x12,y12)).resize((img_size,img_size)) matrix = torch.zeros(img_size,img_size) vector = torch.ones(img_size) * (-1)", "+ '_segmentation.png' image = Image.open(image_path) #GT = Image.open(GT_path) annot_fn = self.GT_paths + filename.split('/')[-1]", "[int(float(i)*224) for i in xper] yper = obj.find('yper') #print(yper.text) yper = yper.text.split(' ')", "split_ratio = 0.99, num_workers=2, mode='train',augmentation_prob=0.4): \"\"\"Builds and returns Dataloader.\"\"\" image_path = root_path+'/JPEGImages/' GT_path", "x11 = int(float(bbox.find('xmin').text)) y11 = int(float(bbox.find('ymin').text)) x12 = int(float(bbox.find('xmax').text)) y12 = int(float(bbox.find('ymax').text)) if", "GT_path #self.image_paths = list(map(lambda x: os.path.join(root, x), os.listdir(root))) self.image_paths = list_img_path self.image_size =", "= tree.findall('object') #img = Image.open(fn) wid = image.width hei = image.height for ix,", "Image.open(image_path) #GT = Image.open(GT_path) annot_fn = self.GT_paths + filename.split('/')[-1] + '.xml' tree =", "root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4): \"\"\"Initializes image paths and preprocessing module.\"\"\" self.root = root # GT", "list_train = list_all[:num_train] list_val = list_all[num_train:] train_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_train,", "\"\"\"Reads an image from a file and preprocesses it and returns.\"\"\" image_path =", "matrix = torch.zeros(img_size,img_size) vector = torch.ones(img_size) * (-1) f = interpolate.interp1d(xper, yper) xnew", "T from scipy import interpolate from PIL import Image from random import shuffle", "count in {} path :{}\".format(self.mode,len(self.image_paths))) def __getitem__(self, index): #img_size = 224 \"\"\"Reads an", "= Transform(image) Norm_ = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) image_t = Norm_(image_t)", "= image_size self.mode = mode self.RotationDegree = [0,90,180,270] self.augmentation_prob = augmentation_prob print(\"image count", "Norm_(image_t) return image_t, vector, matrix, image_path def __len__(self): \"\"\"Returns the total number of", "returns Dataloader.\"\"\" image_path = root_path+'/JPEGImages/' GT_path = root_path+'/Annotations/' list_all = list(map(lambda x: os.path.join(image_path,", "= f(xnew) ynew = [int(i) for i in ynew] for n,xn in enumerate(xnew):", "hei = image.height for ix, obj in enumerate(objs): if obj.find('name').text.lower().strip()=='graph': bbox = obj.find('bndbox')", "image_t, vector, matrix, image_path def __len__(self): \"\"\"Returns the total number of font files.\"\"\"", 
"mode=mode,augmentation_prob=augmentation_prob) train_loader = data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) val_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path,", "for n,xn in enumerate(xnew): matrix[xn, ynew[n]] = 1 vector[xn] = ynew[n] Transform =", "= list(map(lambda x: os.path.join(image_path, x), os.listdir(image_path))) shuffle(list_all) num_train = int(split_ratio * len(list_all)) list_train", "image_size self.mode = mode self.RotationDegree = [0,90,180,270] self.augmentation_prob = augmentation_prob print(\"image count in", "ix, obj in enumerate(objs): if obj.find('name').text.lower().strip()=='graph': bbox = obj.find('bndbox') x11 = int(float(bbox.find('xmin').text)) y11", "import transforms as T from scipy import interpolate from PIL import Image from", "= T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) image_t = Norm_(image_t) return image_t, vector,", "transforms as T from scipy import interpolate from PIL import Image from random", "= image_path, GT_path=GT_path, list_img_path=list_train, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) train_loader = data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)", "os.path.join(image_path, x), os.listdir(image_path))) shuffle(list_all) num_train = int(split_ratio * len(list_all)) list_train = list_all[:num_train] list_val", "filename = image_path.split('_')[-1][:-len(\".jpg\")] #GT_path = self.GT_paths + 'ISIC_' + filename + '_segmentation.png' image", "image paths and preprocessing module.\"\"\" self.root = root # GT : Ground Truth", "+ filename + '_segmentation.png' image = Image.open(image_path) #GT = Image.open(GT_path) annot_fn = self.GT_paths", "os import torch from torchvision import transforms as T from scipy import interpolate", "__len__(self): \"\"\"Returns the total number of font files.\"\"\" return len(self.image_paths) def get_loader(root_path, image_size,", "= torch.ones(img_size) * (-1) f = interpolate.interp1d(xper, yper) xnew = list(range(xper[0],xper[-1]+1)) ynew =", "it and returns.\"\"\" image_path = self.image_paths[index] filename = image_path.split('_')[-1][:-len(\".jpg\")] #GT_path = self.GT_paths +", "bbox = obj.find('bndbox') x11 = int(float(bbox.find('xmin').text)) y11 = int(float(bbox.find('ymin').text)) x12 = int(float(bbox.find('xmax').text)) y12", "ynew = f(xnew) ynew = [int(i) for i in ynew] for n,xn in", "xml.etree.ElementTree as ET ## Config img_size = 256 ## End of config class", "def __getitem__(self, index): #img_size = 224 \"\"\"Reads an image from a file and", "= interpolate.interp1d(xper, yper) xnew = list(range(xper[0],xper[-1]+1)) ynew = f(xnew) ynew = [int(i) for", "image = image.crop((x11,y11,x12,y12)).resize((img_size,img_size)) matrix = torch.zeros(img_size,img_size) vector = torch.ones(img_size) * (-1) f =", "+ filename.split('/')[-1] + '.xml' tree = ET.parse(annot_fn) objs = tree.findall('object') #img = Image.open(fn)", "file and preprocesses it and returns.\"\"\" image_path = self.image_paths[index] filename = image_path.split('_')[-1][:-len(\".jpg\")] #GT_path", "obj.find('name').text.lower().strip()=='xypercent': xper = obj.find('xper') #print(xper.text) xper = xper.text.split(' ') xper = [int(float(i)*224) for", "f(xnew) ynew = [int(i) for i in ynew] for n,xn in enumerate(xnew): matrix[xn,", "vector = torch.ones(img_size) * (-1) f = interpolate.interp1d(xper, yper) xnew = list(range(xper[0],xper[-1]+1)) ynew", "matrix[xn, ynew[n]] = 1 vector[xn] 
= ynew[n] Transform = [] Transform.append(T.ToTensor()) Transform =", "from a file and preprocesses it and returns.\"\"\" image_path = self.image_paths[index] filename =", "list_img_path=list_train, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) train_loader = data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) val_dataset = LabeledImageFolder(root", "def __len__(self): \"\"\"Returns the total number of font files.\"\"\" return len(self.image_paths) def get_loader(root_path,", "+ '.xml' tree = ET.parse(annot_fn) objs = tree.findall('object') #img = Image.open(fn) wid =", "f = interpolate.interp1d(xper, yper) xnew = list(range(xper[0],xper[-1]+1)) ynew = f(xnew) ynew = [int(i)", "an image from a file and preprocesses it and returns.\"\"\" image_path = self.image_paths[index]", "annot_fn = self.GT_paths + filename.split('/')[-1] + '.xml' tree = ET.parse(annot_fn) objs = tree.findall('object')", "yper) xnew = list(range(xper[0],xper[-1]+1)) ynew = f(xnew) ynew = [int(i) for i in", "= ynew[n] Transform = [] Transform.append(T.ToTensor()) Transform = T.Compose(Transform) image_t = Transform(image) Norm_", "image_path, GT_path=GT_path, list_img_path=list_val, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) val_loader = data.DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers) return", "filename + '_segmentation.png' image = Image.open(image_path) #GT = Image.open(GT_path) annot_fn = self.GT_paths +", "the total number of font files.\"\"\" return len(self.image_paths) def get_loader(root_path, image_size, batch_size, split_ratio", "= int(float(bbox.find('ymin').text)) x12 = int(float(bbox.find('xmax').text)) y12 = int(float(bbox.find('ymax').text)) if obj.find('name').text.lower().strip()=='xypercent': xper = obj.find('xper')", "= 1 vector[xn] = ynew[n] Transform = [] Transform.append(T.ToTensor()) Transform = T.Compose(Transform) image_t", "in xper] yper = obj.find('yper') #print(yper.text) yper = yper.text.split(' ') yper = [int(float(i)*224)", "random import shuffle import xml.etree.ElementTree as ET ## Config img_size = 256 ##", "int(float(bbox.find('xmin').text)) y11 = int(float(bbox.find('ymin').text)) x12 = int(float(bbox.find('xmax').text)) y12 = int(float(bbox.find('ymax').text)) if obj.find('name').text.lower().strip()=='xypercent': xper", "ynew[n]] = 1 vector[xn] = ynew[n] Transform = [] Transform.append(T.ToTensor()) Transform = T.Compose(Transform)", "len(list_all)) list_train = list_all[:num_train] list_val = list_all[num_train:] train_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path,", "\"\"\"Returns the total number of font files.\"\"\" return len(self.image_paths) def get_loader(root_path, image_size, batch_size,", "import data import os import torch from torchvision import transforms as T from", "and returns.\"\"\" image_path = self.image_paths[index] filename = image_path.split('_')[-1][:-len(\".jpg\")] #GT_path = self.GT_paths + 'ISIC_'", "class LabeledImageFolder(data.Dataset): def __init__(self, root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4): \"\"\"Initializes image paths and preprocessing module.\"\"\" self.root", "list_val = list_all[num_train:] train_dataset = LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_train, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob)", "= image_path.split('_')[-1][:-len(\".jpg\")] #GT_path = self.GT_paths + 'ISIC_' 
+ filename + '_segmentation.png' image =", "image_path, GT_path=GT_path, list_img_path=list_train, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) train_loader = data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) val_dataset", "* (-1) f = interpolate.interp1d(xper, yper) xnew = list(range(xper[0],xper[-1]+1)) ynew = f(xnew) ynew", "in enumerate(objs): if obj.find('name').text.lower().strip()=='graph': bbox = obj.find('bndbox') x11 = int(float(bbox.find('xmin').text)) y11 = int(float(bbox.find('ymin').text))", "font files.\"\"\" return len(self.image_paths) def get_loader(root_path, image_size, batch_size, split_ratio = 0.99, num_workers=2, mode='train',augmentation_prob=0.4):", "interpolate from PIL import Image from random import shuffle import xml.etree.ElementTree as ET", "config class LabeledImageFolder(data.Dataset): def __init__(self, root, GT_path,list_img_path,image_size=224,mode='train',augmentation_prob=0.4): \"\"\"Initializes image paths and preprocessing module.\"\"\"", "x: os.path.join(root, x), os.listdir(root))) self.image_paths = list_img_path self.image_size = image_size self.mode = mode", "yper.text.split(' ') yper = [int(float(i)*224) for i in yper] image = image.crop((x11,y11,x12,y12)).resize((img_size,img_size)) matrix", "Image.open(fn) wid = image.width hei = image.height for ix, obj in enumerate(objs): if", "1 vector[xn] = ynew[n] Transform = [] Transform.append(T.ToTensor()) Transform = T.Compose(Transform) image_t =", "LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_val, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) val_loader = data.DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False,", "xper = obj.find('xper') #print(xper.text) xper = xper.text.split(' ') xper = [int(float(i)*224) for i", "n,xn in enumerate(xnew): matrix[xn, ynew[n]] = 1 vector[xn] = ynew[n] Transform = []", "mode='train',augmentation_prob=0.4): \"\"\"Builds and returns Dataloader.\"\"\" image_path = root_path+'/JPEGImages/' GT_path = root_path+'/Annotations/' list_all =", "= LabeledImageFolder(root = image_path, GT_path=GT_path, list_img_path=list_train, image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) train_loader = data.DataLoader(dataset=train_dataset, batch_size=batch_size,", "for ix, obj in enumerate(objs): if obj.find('name').text.lower().strip()=='graph': bbox = obj.find('bndbox') x11 = int(float(bbox.find('xmin').text))", "wid = image.width hei = image.height for ix, obj in enumerate(objs): if obj.find('name').text.lower().strip()=='graph':", "= [] Transform.append(T.ToTensor()) Transform = T.Compose(Transform) image_t = Transform(image) Norm_ = T.Normalize((0.5, 0.5,", "self.image_size = image_size self.mode = mode self.RotationDegree = [0,90,180,270] self.augmentation_prob = augmentation_prob print(\"image", "xper = xper.text.split(' ') xper = [int(float(i)*224) for i in xper] yper =", "enumerate(xnew): matrix[xn, ynew[n]] = 1 vector[xn] = ynew[n] Transform = [] Transform.append(T.ToTensor()) Transform", "image_size =image_size, mode=mode,augmentation_prob=augmentation_prob) train_loader = data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) val_dataset = LabeledImageFolder(root =", "Dataloader.\"\"\" image_path = root_path+'/JPEGImages/' GT_path = root_path+'/Annotations/' list_all = list(map(lambda x: os.path.join(image_path, x)," ]
[ "+ m.x2 - m.x5 == 0) m.c5 = Constraint(expr=m.x1*m.x2 - m.x3**2 - m.x4*m.x5", "ConcreteModel() m.x1 = Var(within=Reals,bounds=(None,None),initialize=10) m.x2 = Var(within=Reals,bounds=(None,None),initialize=-10) m.x3 = Var(within=Reals,bounds=(None,None),initialize=10) m.x4 = Var(within=Reals,bounds=(None,None),initialize=10)", "Constraint(expr=m.x1*m.x3 - m.x2*m.x4*m.x1 - sin((-m.x1) - m.x3 + m.x6) - m.x5 == 0)", "= Constraint(expr= 2*m.x1 + 5*m.x2 + m.x3 + m.x4 <= 1) m.c7 =", "counts # x b i s1s s2s sc si # Total cont binary", "m.x3 + m.x4 <= 1) m.c7 = Constraint(expr= 3*m.x1 - 2*m.x2 + m.x3", "- m.x4)**2 + 2*(m.x1 + m.x3 - m.x4)**2 + (m.x2 - m.x1 +", "m.c8 = Constraint(expr= m.x1 + m.x2 + m.x3 + m.x4 + m.x5 +", "(m.x2 - m.x1 + m.x3 - m.x4)**2 + 10*sin(m.x1 + m.x5 - m.x6)**2,", "m.x3 - 4*m.x4 <= 0) m.c8 = Constraint(expr= m.x1 + m.x2 + m.x3", "0 0 # FX 0 0 0 0 0 0 0 0 #", "- m.x3**2 - m.x4*m.x5 - m.x6**2 == 0) m.c6 = Constraint(expr= 2*m.x1 +", "m.x2 - m.x5 == 0) m.c5 = Constraint(expr=m.x1*m.x2 - m.x3**2 - m.x4*m.x5 -", "sin(m.x3*m.x4) + m.x2 - m.x5 == 0) m.c5 = Constraint(expr=m.x1*m.x2 - m.x3**2 -", "0) m.c8 = Constraint(expr= m.x1 + m.x2 + m.x3 + m.x4 + m.x5", "+ (m.x6 - m.x4)**2 + 2*(m.x1 + m.x3 - m.x4)**2 + (m.x2 -", "equation from pyomo.environ import * model = m = ConcreteModel() m.x1 = Var(within=Reals,bounds=(None,None),initialize=10)", "0) m.c6 = Constraint(expr= 2*m.x1 + 5*m.x2 + m.x3 + m.x4 <= 1)", "0 0 0 0 0 0 # FX 0 0 0 0 0", "# # Variable counts # x b i s1s s2s sc si #", "1 variable and 1 equation from pyomo.environ import * model = m =", "Constraint(expr=m.x2*m.x6*cos(m.x5) - sin(m.x3*m.x4) + m.x2 - m.x5 == 0) m.c5 = Constraint(expr=m.x1*m.x2 -", "<= 1) m.c7 = Constraint(expr= 3*m.x1 - 2*m.x2 + m.x3 - 4*m.x4 <=", "sos2 scont sint # 7 7 0 0 0 0 0 0 #", "FX 0 0 0 0 0 0 0 0 # # Nonzero counts", "Total const NL DLL # 43 19 24 0 # # Reformulation has", "+ m.x6 == 0) m.c3 = Constraint(expr=m.x1*m.x3 - m.x2*m.x4*m.x1 - sin((-m.x1) - m.x3", "1) m.c7 = Constraint(expr= 3*m.x1 - 2*m.x2 + m.x3 - 4*m.x4 <= 0)", "m.c3 = Constraint(expr=m.x1*m.x3 - m.x2*m.x4*m.x1 - sin((-m.x1) - m.x3 + m.x6) - m.x5", "= m = ConcreteModel() m.x1 = Var(within=Reals,bounds=(None,None),initialize=10) m.x2 = Var(within=Reals,bounds=(None,None),initialize=-10) m.x3 = Var(within=Reals,bounds=(None,None),initialize=10)", "+ 2*(m.x1 + m.x3 - m.x4)**2 + (m.x2 - m.x1 + m.x3 -", "has removed 1 variable and 1 equation from pyomo.environ import * model =", "B # 8 5 0 3 0 0 0 0 # # Variable", "= Constraint(expr=m.x1**2 - sin(m.x2) - m.x4 + m.x5 + m.x6 == 0) m.c3", "s1s s2s sc si # Total cont binary integer sos1 sos2 scont sint", "0 0 # # Nonzero counts # Total const NL DLL # 43", "= Objective(expr=(m.x1 + m.x2)**2 + (m.x3 - m.x5)**2 + (m.x6 - m.x4)**2 +", "integer sos1 sos2 scont sint # 7 7 0 0 0 0 0", "C B # 8 5 0 3 0 0 0 0 # #", "= Var(within=Reals,bounds=(None,None),initialize=10) m.x2 = Var(within=Reals,bounds=(None,None),initialize=-10) m.x3 = Var(within=Reals,bounds=(None,None),initialize=10) m.x4 = Var(within=Reals,bounds=(None,None),initialize=10) m.x5 =", "sc si # Total cont binary integer sos1 sos2 scont sint # 7", "variable and 1 equation from pyomo.environ import * model = m = ConcreteModel()", "0 0 # # Variable counts # x b i s1s s2s sc", "m.x5 == 0) m.c4 = Constraint(expr=m.x2*m.x6*cos(m.x5) - sin(m.x3*m.x4) + m.x2 - m.x5 ==", "- 4*m.x4 <= 0) m.c8 = Constraint(expr= m.x1 + m.x2 + m.x3 +", "m.x5)**2 + (m.x6 - m.x4)**2 + 2*(m.x1 + m.x3 - m.x4)**2 + (m.x2", "- m.x5 == 0) m.c5 = Constraint(expr=m.x1*m.x2 
- m.x3**2 - m.x4*m.x5 - m.x6**2", "+ 10*sin(m.x1 + m.x5 - m.x6)**2, sense=minimize) m.c2 = Constraint(expr=m.x1**2 - sin(m.x2) -", "2*m.x2 + m.x3 - 4*m.x4 <= 0) m.c8 = Constraint(expr= m.x1 + m.x2", "Constraint(expr=m.x1*m.x2 - m.x3**2 - m.x4*m.x5 - m.x6**2 == 0) m.c6 = Constraint(expr= 2*m.x1", "- m.x5)**2 + (m.x6 - m.x4)**2 + 2*(m.x1 + m.x3 - m.x4)**2 +", "m.x4 + m.x5 + m.x6 == 0) m.c3 = Constraint(expr=m.x1*m.x3 - m.x2*m.x4*m.x1 -", "Var(within=Reals,bounds=(None,None),initialize=10) m.x4 = Var(within=Reals,bounds=(None,None),initialize=10) m.x5 = Var(within=Reals,bounds=(None,None),initialize=10) m.x6 = Var(within=Reals,bounds=(None,None),initialize=-10) m.obj = Objective(expr=(m.x1", "# Nonzero counts # Total const NL DLL # 43 19 24 0", "m.x5 - m.x6)**2, sense=minimize) m.c2 = Constraint(expr=m.x1**2 - sin(m.x2) - m.x4 + m.x5", "m.x3**2 - m.x4*m.x5 - m.x6**2 == 0) m.c6 = Constraint(expr= 2*m.x1 + 5*m.x2", "4*m.x4 <= 0) m.c8 = Constraint(expr= m.x1 + m.x2 + m.x3 + m.x4", "== 0) m.c3 = Constraint(expr=m.x1*m.x3 - m.x2*m.x4*m.x1 - sin((-m.x1) - m.x3 + m.x6)", "+ m.x3 + m.x4 <= 1) m.c7 = Constraint(expr= 3*m.x1 - 2*m.x2 +", "m.x4 <= 1) m.c7 = Constraint(expr= 3*m.x1 - 2*m.x2 + m.x3 - 4*m.x4", "m.x4)**2 + 10*sin(m.x1 + m.x5 - m.x6)**2, sense=minimize) m.c2 = Constraint(expr=m.x1**2 - sin(m.x2)", "removed 1 variable and 1 equation from pyomo.environ import * model = m", "- sin((-m.x1) - m.x3 + m.x6) - m.x5 == 0) m.c4 = Constraint(expr=m.x2*m.x6*cos(m.x5)", "= Var(within=Reals,bounds=(None,None),initialize=10) m.x6 = Var(within=Reals,bounds=(None,None),initialize=-10) m.obj = Objective(expr=(m.x1 + m.x2)**2 + (m.x3 -", "0 # # Variable counts # x b i s1s s2s sc si", "x b i s1s s2s sc si # Total cont binary integer sos1", "0 0 0 0 0 0 # # Nonzero counts # Total const", "= Var(within=Reals,bounds=(None,None),initialize=-10) m.x3 = Var(within=Reals,bounds=(None,None),initialize=10) m.x4 = Var(within=Reals,bounds=(None,None),initialize=10) m.x5 = Var(within=Reals,bounds=(None,None),initialize=10) m.x6 =", "Constraint(expr= 2*m.x1 + 5*m.x2 + m.x3 + m.x4 <= 1) m.c7 = Constraint(expr=", "m.c7 = Constraint(expr= 3*m.x1 - 2*m.x2 + m.x3 - 4*m.x4 <= 0) m.c8", "0 0 0 0 0 0 0 # # Nonzero counts # Total", "Equation counts # Total E G L N X C B # 8", "m.x4*m.x5 - m.x6**2 == 0) m.c6 = Constraint(expr= 2*m.x1 + 5*m.x2 + m.x3", "m = ConcreteModel() m.x1 = Var(within=Reals,bounds=(None,None),initialize=10) m.x2 = Var(within=Reals,bounds=(None,None),initialize=-10) m.x3 = Var(within=Reals,bounds=(None,None),initialize=10) m.x4", "from pyomo.environ import * model = m = ConcreteModel() m.x1 = Var(within=Reals,bounds=(None,None),initialize=10) m.x2", "+ m.x3 - 4*m.x4 <= 0) m.c8 = Constraint(expr= m.x1 + m.x2 +", "+ m.x2)**2 + (m.x3 - m.x5)**2 + (m.x6 - m.x4)**2 + 2*(m.x1 +", "# # Equation counts # Total E G L N X C B", "2*(m.x1 + m.x3 - m.x4)**2 + (m.x2 - m.x1 + m.x3 - m.x4)**2", "- m.x1 + m.x3 - m.x4)**2 + 10*sin(m.x1 + m.x5 - m.x6)**2, sense=minimize)", "0 0 0 0 # # Variable counts # x b i s1s", "+ m.x5 + m.x6 == 0) m.c3 = Constraint(expr=m.x1*m.x3 - m.x2*m.x4*m.x1 - sin((-m.x1)", "- m.x6**2 == 0) m.c6 = Constraint(expr= 2*m.x1 + 5*m.x2 + m.x3 +", "+ m.x5 - m.x6)**2, sense=minimize) m.c2 = Constraint(expr=m.x1**2 - sin(m.x2) - m.x4 +", "04/21/18 13:52:29 # # Equation counts # Total E G L N X", "X C B # 8 5 0 3 0 0 0 0 #", "m.x1 = Var(within=Reals,bounds=(None,None),initialize=10) m.x2 = Var(within=Reals,bounds=(None,None),initialize=-10) m.x3 = Var(within=Reals,bounds=(None,None),initialize=10) m.x4 = 
Var(within=Reals,bounds=(None,None),initialize=10) m.x5", "m.x3 + m.x6) - m.x5 == 0) m.c4 = Constraint(expr=m.x2*m.x6*cos(m.x5) - sin(m.x3*m.x4) +", "m.x6) - m.x5 == 0) m.c4 = Constraint(expr=m.x2*m.x6*cos(m.x5) - sin(m.x3*m.x4) + m.x2 -", "3 0 0 0 0 # # Variable counts # x b i", "= Var(within=Reals,bounds=(None,None),initialize=-10) m.obj = Objective(expr=(m.x1 + m.x2)**2 + (m.x3 - m.x5)**2 + (m.x6", "model = m = ConcreteModel() m.x1 = Var(within=Reals,bounds=(None,None),initialize=10) m.x2 = Var(within=Reals,bounds=(None,None),initialize=-10) m.x3 =", "== 0) m.c5 = Constraint(expr=m.x1*m.x2 - m.x3**2 - m.x4*m.x5 - m.x6**2 == 0)", "Total E G L N X C B # 8 5 0 3", "const NL DLL # 43 19 24 0 # # Reformulation has removed", "0) m.c5 = Constraint(expr=m.x1*m.x2 - m.x3**2 - m.x4*m.x5 - m.x6**2 == 0) m.c6", "= Constraint(expr= m.x1 + m.x2 + m.x3 + m.x4 + m.x5 + m.x6", "at 04/21/18 13:52:29 # # Equation counts # Total E G L N", "+ 5*m.x2 + m.x3 + m.x4 <= 1) m.c7 = Constraint(expr= 3*m.x1 -", "# # Reformulation has removed 1 variable and 1 equation from pyomo.environ import", "NLP written by GAMS Convert at 04/21/18 13:52:29 # # Equation counts #", "m.x4)**2 + (m.x2 - m.x1 + m.x3 - m.x4)**2 + 10*sin(m.x1 + m.x5", "Constraint(expr=m.x1**2 - sin(m.x2) - m.x4 + m.x5 + m.x6 == 0) m.c3 =", "1 equation from pyomo.environ import * model = m = ConcreteModel() m.x1 =", "- 2*m.x2 + m.x3 - 4*m.x4 <= 0) m.c8 = Constraint(expr= m.x1 +", "i s1s s2s sc si # Total cont binary integer sos1 sos2 scont", "- m.x5 == 0) m.c4 = Constraint(expr=m.x2*m.x6*cos(m.x5) - sin(m.x3*m.x4) + m.x2 - m.x5", "m.x6**2 == 0) m.c6 = Constraint(expr= 2*m.x1 + 5*m.x2 + m.x3 + m.x4", "binary integer sos1 sos2 scont sint # 7 7 0 0 0 0", "m.x3 - m.x4)**2 + (m.x2 - m.x1 + m.x3 - m.x4)**2 + 10*sin(m.x1", "G L N X C B # 8 5 0 3 0 0", "10*sin(m.x1 + m.x5 - m.x6)**2, sense=minimize) m.c2 = Constraint(expr=m.x1**2 - sin(m.x2) - m.x4", "m.x5 = Var(within=Reals,bounds=(None,None),initialize=10) m.x6 = Var(within=Reals,bounds=(None,None),initialize=-10) m.obj = Objective(expr=(m.x1 + m.x2)**2 + (m.x3", "m.c6 = Constraint(expr= 2*m.x1 + 5*m.x2 + m.x3 + m.x4 <= 1) m.c7", "0 0 0 # # Variable counts # x b i s1s s2s", "sense=minimize) m.c2 = Constraint(expr=m.x1**2 - sin(m.x2) - m.x4 + m.x5 + m.x6 ==", "7 0 0 0 0 0 0 # FX 0 0 0 0", "NL DLL # 43 19 24 0 # # Reformulation has removed 1", "= Var(within=Reals,bounds=(None,None),initialize=10) m.x5 = Var(within=Reals,bounds=(None,None),initialize=10) m.x6 = Var(within=Reals,bounds=(None,None),initialize=-10) m.obj = Objective(expr=(m.x1 + m.x2)**2", "# x b i s1s s2s sc si # Total cont binary integer", "# Variable counts # x b i s1s s2s sc si # Total", "24 0 # # Reformulation has removed 1 variable and 1 equation from", "- m.x4*m.x5 - m.x6**2 == 0) m.c6 = Constraint(expr= 2*m.x1 + 5*m.x2 +", "# 7 7 0 0 0 0 0 0 # FX 0 0", "# FX 0 0 0 0 0 0 0 0 # # Nonzero", "Var(within=Reals,bounds=(None,None),initialize=10) m.x6 = Var(within=Reals,bounds=(None,None),initialize=-10) m.obj = Objective(expr=(m.x1 + m.x2)**2 + (m.x3 - m.x5)**2", "- m.x4 + m.x5 + m.x6 == 0) m.c3 = Constraint(expr=m.x1*m.x3 - m.x2*m.x4*m.x1", "19 24 0 # # Reformulation has removed 1 variable and 1 equation", "== 0) m.c4 = Constraint(expr=m.x2*m.x6*cos(m.x5) - sin(m.x3*m.x4) + m.x2 - m.x5 == 0)", "E G L N X C B # 8 5 0 3 0", "== 0) m.c6 = Constraint(expr= 2*m.x1 + 5*m.x2 + m.x3 + m.x4 <=", "5 0 3 0 0 0 0 # # Variable counts # x", "# Total cont binary integer sos1 sos2 scont sint # 7 7 0", "2*m.x1 + 5*m.x2 + m.x3 + m.x4 <= 1) m.c7 = Constraint(expr= 
3*m.x1", "m.x4)**2 + 2*(m.x1 + m.x3 - m.x4)**2 + (m.x2 - m.x1 + m.x3", "m.obj = Objective(expr=(m.x1 + m.x2)**2 + (m.x3 - m.x5)**2 + (m.x6 - m.x4)**2", "0) m.c3 = Constraint(expr=m.x1*m.x3 - m.x2*m.x4*m.x1 - sin((-m.x1) - m.x3 + m.x6) -", "m.c4 = Constraint(expr=m.x2*m.x6*cos(m.x5) - sin(m.x3*m.x4) + m.x2 - m.x5 == 0) m.c5 =", "Convert at 04/21/18 13:52:29 # # Equation counts # Total E G L", "Constraint(expr= 3*m.x1 - 2*m.x2 + m.x3 - 4*m.x4 <= 0) m.c8 = Constraint(expr=", "and 1 equation from pyomo.environ import * model = m = ConcreteModel() m.x1", "counts # Total const NL DLL # 43 19 24 0 # #", "L N X C B # 8 5 0 3 0 0 0", "= Constraint(expr=m.x1*m.x3 - m.x2*m.x4*m.x1 - sin((-m.x1) - m.x3 + m.x6) - m.x5 ==", "0 # # Nonzero counts # Total const NL DLL # 43 19", "# # Nonzero counts # Total const NL DLL # 43 19 24", "0 0 0 0 0 # # Nonzero counts # Total const NL", "m.x6)**2, sense=minimize) m.c2 = Constraint(expr=m.x1**2 - sin(m.x2) - m.x4 + m.x5 + m.x6", "Nonzero counts # Total const NL DLL # 43 19 24 0 #", "b i s1s s2s sc si # Total cont binary integer sos1 sos2", "m.x1 + m.x2 + m.x3 + m.x4 + m.x5 + m.x6 <= 2)", "- m.x2*m.x4*m.x1 - sin((-m.x1) - m.x3 + m.x6) - m.x5 == 0) m.c4", "# Total E G L N X C B # 8 5 0", "0 # # Reformulation has removed 1 variable and 1 equation from pyomo.environ", "sin((-m.x1) - m.x3 + m.x6) - m.x5 == 0) m.c4 = Constraint(expr=m.x2*m.x6*cos(m.x5) -", "DLL # 43 19 24 0 # # Reformulation has removed 1 variable", "<= 0) m.c8 = Constraint(expr= m.x1 + m.x2 + m.x3 + m.x4 +", "m.x2*m.x4*m.x1 - sin((-m.x1) - m.x3 + m.x6) - m.x5 == 0) m.c4 =", "0 0 0 # # Nonzero counts # Total const NL DLL #", "43 19 24 0 # # Reformulation has removed 1 variable and 1", "= Var(within=Reals,bounds=(None,None),initialize=10) m.x4 = Var(within=Reals,bounds=(None,None),initialize=10) m.x5 = Var(within=Reals,bounds=(None,None),initialize=10) m.x6 = Var(within=Reals,bounds=(None,None),initialize=-10) m.obj =", "0 0 0 # FX 0 0 0 0 0 0 0 0", "m.x6 == 0) m.c3 = Constraint(expr=m.x1*m.x3 - m.x2*m.x4*m.x1 - sin((-m.x1) - m.x3 +", "# 8 5 0 3 0 0 0 0 # # Variable counts", "- m.x6)**2, sense=minimize) m.c2 = Constraint(expr=m.x1**2 - sin(m.x2) - m.x4 + m.x5 +", "m.x1 + m.x3 - m.x4)**2 + 10*sin(m.x1 + m.x5 - m.x6)**2, sense=minimize) m.c2", "m.x6 = Var(within=Reals,bounds=(None,None),initialize=-10) m.obj = Objective(expr=(m.x1 + m.x2)**2 + (m.x3 - m.x5)**2 +", "m.x2 = Var(within=Reals,bounds=(None,None),initialize=-10) m.x3 = Var(within=Reals,bounds=(None,None),initialize=10) m.x4 = Var(within=Reals,bounds=(None,None),initialize=10) m.x5 = Var(within=Reals,bounds=(None,None),initialize=10) m.x6", "13:52:29 # # Equation counts # Total E G L N X C", "m.c5 = Constraint(expr=m.x1*m.x2 - m.x3**2 - m.x4*m.x5 - m.x6**2 == 0) m.c6 =", "+ m.x3 - m.x4)**2 + (m.x2 - m.x1 + m.x3 - m.x4)**2 +", "GAMS Convert at 04/21/18 13:52:29 # # Equation counts # Total E G", "si # Total cont binary integer sos1 sos2 scont sint # 7 7", "# NLP written by GAMS Convert at 04/21/18 13:52:29 # # Equation counts", "* model = m = ConcreteModel() m.x1 = Var(within=Reals,bounds=(None,None),initialize=10) m.x2 = Var(within=Reals,bounds=(None,None),initialize=-10) m.x3", "# 43 19 24 0 # # Reformulation has removed 1 variable and", "0 # FX 0 0 0 0 0 0 0 0 # #", "(m.x3 - m.x5)**2 + (m.x6 - m.x4)**2 + 2*(m.x1 + m.x3 - m.x4)**2", "Objective(expr=(m.x1 + m.x2)**2 + (m.x3 - m.x5)**2 + (m.x6 - m.x4)**2 + 2*(m.x1", "0) m.c4 = Constraint(expr=m.x2*m.x6*cos(m.x5) - sin(m.x3*m.x4) + m.x2 - m.x5 == 0) m.c5", "3*m.x1 - 2*m.x2 + m.x3 - 4*m.x4 <= 0) m.c8 
= Constraint(expr= m.x1", "Var(within=Reals,bounds=(None,None),initialize=-10) m.x3 = Var(within=Reals,bounds=(None,None),initialize=10) m.x4 = Var(within=Reals,bounds=(None,None),initialize=10) m.x5 = Var(within=Reals,bounds=(None,None),initialize=10) m.x6 = Var(within=Reals,bounds=(None,None),initialize=-10)", "counts # Total E G L N X C B # 8 5", "0 0 0 0 0 # FX 0 0 0 0 0 0", "sin(m.x2) - m.x4 + m.x5 + m.x6 == 0) m.c3 = Constraint(expr=m.x1*m.x3 -", "0 3 0 0 0 0 # # Variable counts # x b", "written by GAMS Convert at 04/21/18 13:52:29 # # Equation counts # Total", "0 0 0 0 # FX 0 0 0 0 0 0 0", "0 0 0 0 # # Nonzero counts # Total const NL DLL", "# Total const NL DLL # 43 19 24 0 # # Reformulation", "# Equation counts # Total E G L N X C B #", "+ (m.x2 - m.x1 + m.x3 - m.x4)**2 + 10*sin(m.x1 + m.x5 -", "Var(within=Reals,bounds=(None,None),initialize=10) m.x2 = Var(within=Reals,bounds=(None,None),initialize=-10) m.x3 = Var(within=Reals,bounds=(None,None),initialize=10) m.x4 = Var(within=Reals,bounds=(None,None),initialize=10) m.x5 = Var(within=Reals,bounds=(None,None),initialize=10)", "m.x3 = Var(within=Reals,bounds=(None,None),initialize=10) m.x4 = Var(within=Reals,bounds=(None,None),initialize=10) m.x5 = Var(within=Reals,bounds=(None,None),initialize=10) m.x6 = Var(within=Reals,bounds=(None,None),initialize=-10) m.obj", "- sin(m.x2) - m.x4 + m.x5 + m.x6 == 0) m.c3 = Constraint(expr=m.x1*m.x3", "- m.x3 + m.x6) - m.x5 == 0) m.c4 = Constraint(expr=m.x2*m.x6*cos(m.x5) - sin(m.x3*m.x4)", "(m.x6 - m.x4)**2 + 2*(m.x1 + m.x3 - m.x4)**2 + (m.x2 - m.x1", "m.x5 + m.x6 == 0) m.c3 = Constraint(expr=m.x1*m.x3 - m.x2*m.x4*m.x1 - sin((-m.x1) -", "Total cont binary integer sos1 sos2 scont sint # 7 7 0 0", "m.c2 = Constraint(expr=m.x1**2 - sin(m.x2) - m.x4 + m.x5 + m.x6 == 0)", "= Constraint(expr=m.x2*m.x6*cos(m.x5) - sin(m.x3*m.x4) + m.x2 - m.x5 == 0) m.c5 = Constraint(expr=m.x1*m.x2", "Variable counts # x b i s1s s2s sc si # Total cont", "0 0 0 0 0 0 0 0 # # Nonzero counts #", "Reformulation has removed 1 variable and 1 equation from pyomo.environ import * model", "8 5 0 3 0 0 0 0 # # Variable counts #", "m.x2)**2 + (m.x3 - m.x5)**2 + (m.x6 - m.x4)**2 + 2*(m.x1 + m.x3", "- m.x4)**2 + 10*sin(m.x1 + m.x5 - m.x6)**2, sense=minimize) m.c2 = Constraint(expr=m.x1**2 -", "- sin(m.x3*m.x4) + m.x2 - m.x5 == 0) m.c5 = Constraint(expr=m.x1*m.x2 - m.x3**2", "cont binary integer sos1 sos2 scont sint # 7 7 0 0 0", "m.x3 - m.x4)**2 + 10*sin(m.x1 + m.x5 - m.x6)**2, sense=minimize) m.c2 = Constraint(expr=m.x1**2", "import * model = m = ConcreteModel() m.x1 = Var(within=Reals,bounds=(None,None),initialize=10) m.x2 = Var(within=Reals,bounds=(None,None),initialize=-10)", "+ (m.x3 - m.x5)**2 + (m.x6 - m.x4)**2 + 2*(m.x1 + m.x3 -", "= Constraint(expr=m.x1*m.x2 - m.x3**2 - m.x4*m.x5 - m.x6**2 == 0) m.c6 = Constraint(expr=", "scont sint # 7 7 0 0 0 0 0 0 # FX", "Var(within=Reals,bounds=(None,None),initialize=-10) m.obj = Objective(expr=(m.x1 + m.x2)**2 + (m.x3 - m.x5)**2 + (m.x6 -", "+ m.x3 - m.x4)**2 + 10*sin(m.x1 + m.x5 - m.x6)**2, sense=minimize) m.c2 =", "sos1 sos2 scont sint # 7 7 0 0 0 0 0 0", "5*m.x2 + m.x3 + m.x4 <= 1) m.c7 = Constraint(expr= 3*m.x1 - 2*m.x2", "= ConcreteModel() m.x1 = Var(within=Reals,bounds=(None,None),initialize=10) m.x2 = Var(within=Reals,bounds=(None,None),initialize=-10) m.x3 = Var(within=Reals,bounds=(None,None),initialize=10) m.x4 =", "# Reformulation has removed 1 variable and 1 equation from pyomo.environ import *", "7 7 0 0 0 0 0 0 # FX 0 0 0", "+ m.x6) - m.x5 == 0) m.c4 = 
Constraint(expr=m.x2*m.x6*cos(m.x5) - sin(m.x3*m.x4) + m.x2", "sint # 7 7 0 0 0 0 0 0 # FX 0", "m.x5 == 0) m.c5 = Constraint(expr=m.x1*m.x2 - m.x3**2 - m.x4*m.x5 - m.x6**2 ==", "= Constraint(expr= 3*m.x1 - 2*m.x2 + m.x3 - 4*m.x4 <= 0) m.c8 =", "- m.x4)**2 + (m.x2 - m.x1 + m.x3 - m.x4)**2 + 10*sin(m.x1 +", "+ m.x4 <= 1) m.c7 = Constraint(expr= 3*m.x1 - 2*m.x2 + m.x3 -", "s2s sc si # Total cont binary integer sos1 sos2 scont sint #", "Constraint(expr= m.x1 + m.x2 + m.x3 + m.x4 + m.x5 + m.x6 <=", "Var(within=Reals,bounds=(None,None),initialize=10) m.x5 = Var(within=Reals,bounds=(None,None),initialize=10) m.x6 = Var(within=Reals,bounds=(None,None),initialize=-10) m.obj = Objective(expr=(m.x1 + m.x2)**2 +", "by GAMS Convert at 04/21/18 13:52:29 # # Equation counts # Total E", "pyomo.environ import * model = m = ConcreteModel() m.x1 = Var(within=Reals,bounds=(None,None),initialize=10) m.x2 =", "m.x4 = Var(within=Reals,bounds=(None,None),initialize=10) m.x5 = Var(within=Reals,bounds=(None,None),initialize=10) m.x6 = Var(within=Reals,bounds=(None,None),initialize=-10) m.obj = Objective(expr=(m.x1 +", "N X C B # 8 5 0 3 0 0 0 0" ]
[]
[ "lucky_numbers(val): lower, upper = find_less_greater(val) print lower, upper return (lower, upper) in1 =", "start mid_val = vals[mid_pos] if val < mid_val: return binary_search(start, mid_pos-1, val) elif", "len(vals) < cur: break end_index = cur*(len(vals)/cur)-1 for i in xrange(end_index, -1, -cur):", "Sample solution for question 1 on the 2014 British Informatics Olympiad Round One", "and greater elements. \"\"\" # Generate luck numbers list vals = range(1,11000,2) complete", "1 on the 2014 British Informatics Olympiad Round One exam Lucky Numbers \"\"\"", "\"\"\" # Generate luck numbers list vals = range(1,11000,2) complete = False cur_index", "search to find the corresponding lower and greater elements. \"\"\" # Generate luck", "This is simply an implemenation based problem. We first generate the list of", "numbers def find_less_greater(val): pos = binary_search(0, len(vals)-1, val) if vals[pos] == val: return", "implemenation based problem. We first generate the list of lucky numbers via the", "if len(vals) < cur: break end_index = cur*(len(vals)/cur)-1 for i in xrange(end_index, -1,", "= \"25 January 2016\" \"\"\" This is simply an implemenation based problem. We", "elements. \"\"\" # Generate luck numbers list vals = range(1,11000,2) complete = False", "= \"<NAME>\" __date__ = \"25 January 2016\" \"\"\" This is simply an implemenation", "numbers via the algorithm provided and then use a binary search to find", "use a binary search to find the corresponding lower and greater elements. \"\"\"", "+= 1 # Find the lucky numbers def find_less_greater(val): pos = binary_search(0, len(vals)-1,", "find the corresponding lower and greater elements. \"\"\" # Generate luck numbers list", "elif val > mid_val: return binary_search(mid_pos+1, end, val) return mid_pos def lucky_numbers(val): lower,", "if val < mid_val: return binary_search(start, mid_pos-1, val) elif val > mid_val: return", "an implemenation based problem. We first generate the list of lucky numbers via", "exam Lucky Numbers \"\"\" __author__ = \"<NAME>\" __date__ = \"25 January 2016\" \"\"\"", "val: return vals[pos-1], vals[pos+1] if vals[pos] < val: return vals[pos], vals[pos+1] return vals[pos-1],", "\"25 January 2016\" \"\"\" This is simply an implemenation based problem. We first", "cur: break end_index = cur*(len(vals)/cur)-1 for i in xrange(end_index, -1, -cur): vals.pop(i) cur_index", "return vals[pos-1], vals[pos] def binary_search(start, end, val): if start >= end: return start", "< mid_val: return binary_search(start, mid_pos-1, val) elif val > mid_val: return binary_search(mid_pos+1, end,", "generate the list of lucky numbers via the algorithm provided and then use", "< cur: break end_index = cur*(len(vals)/cur)-1 for i in xrange(end_index, -1, -cur): vals.pop(i)", "\"\"\" This is simply an implemenation based problem. 
We first generate the list", "vals[pos+1] if vals[pos] < val: return vals[pos], vals[pos+1] return vals[pos-1], vals[pos] def binary_search(start,", "mid_val: return binary_search(start, mid_pos-1, val) elif val > mid_val: return binary_search(mid_pos+1, end, val)", "Generate luck numbers list vals = range(1,11000,2) complete = False cur_index = 1", "end, val): if start >= end: return start mid_pos = (end-start)/2 + start", "__author__ = \"<NAME>\" __date__ = \"25 January 2016\" \"\"\" This is simply an", "in xrange(end_index, -1, -cur): vals.pop(i) cur_index += 1 # Find the lucky numbers", "if vals[pos] == val: return vals[pos-1], vals[pos+1] if vals[pos] < val: return vals[pos],", "-1, -cur): vals.pop(i) cur_index += 1 # Find the lucky numbers def find_less_greater(val):", "lucky numbers def find_less_greater(val): pos = binary_search(0, len(vals)-1, val) if vals[pos] == val:", "Olympiad Round One exam Lucky Numbers \"\"\" __author__ = \"<NAME>\" __date__ = \"25", "return start mid_pos = (end-start)/2 + start mid_val = vals[mid_pos] if val <", "1 # Find the lucky numbers def find_less_greater(val): pos = binary_search(0, len(vals)-1, val)", "the list of lucky numbers via the algorithm provided and then use a", "__date__ = \"25 January 2016\" \"\"\" This is simply an implemenation based problem.", "cur = None while not complete: cur = vals[cur_index] if len(vals) < cur:", "Find the lucky numbers def find_less_greater(val): pos = binary_search(0, len(vals)-1, val) if vals[pos]", "start mid_pos = (end-start)/2 + start mid_val = vals[mid_pos] if val < mid_val:", "vals = range(1,11000,2) complete = False cur_index = 1 cur = None while", "if start >= end: return start mid_pos = (end-start)/2 + start mid_val =", "of lucky numbers via the algorithm provided and then use a binary search", "binary_search(start, end, val): if start >= end: return start mid_pos = (end-start)/2 +", "lucky numbers via the algorithm provided and then use a binary search to", "return vals[pos], vals[pos+1] return vals[pos-1], vals[pos] def binary_search(start, end, val): if start >=", "vals[cur_index] if len(vals) < cur: break end_index = cur*(len(vals)/cur)-1 for i in xrange(end_index,", "break end_index = cur*(len(vals)/cur)-1 for i in xrange(end_index, -1, -cur): vals.pop(i) cur_index +=", "\"\"\" __author__ = \"<NAME>\" __date__ = \"25 January 2016\" \"\"\" This is simply", "end_index = cur*(len(vals)/cur)-1 for i in xrange(end_index, -1, -cur): vals.pop(i) cur_index += 1", "= False cur_index = 1 cur = None while not complete: cur =", "= vals[cur_index] if len(vals) < cur: break end_index = cur*(len(vals)/cur)-1 for i in", "vals[pos] < val: return vals[pos], vals[pos+1] return vals[pos-1], vals[pos] def binary_search(start, end, val):", "2014 British Informatics Olympiad Round One exam Lucky Numbers \"\"\" __author__ = \"<NAME>\"", "the corresponding lower and greater elements. 
\"\"\" # Generate luck numbers list vals", "return binary_search(start, mid_pos-1, val) elif val > mid_val: return binary_search(mid_pos+1, end, val) return", "binary_search(mid_pos+1, end, val) return mid_pos def lucky_numbers(val): lower, upper = find_less_greater(val) print lower,", "vals[pos-1], vals[pos] def binary_search(start, end, val): if start >= end: return start mid_pos", "= 1 cur = None while not complete: cur = vals[cur_index] if len(vals)", "= range(1,11000,2) complete = False cur_index = 1 cur = None while not", "complete: cur = vals[cur_index] if len(vals) < cur: break end_index = cur*(len(vals)/cur)-1 for", "len(vals)-1, val) if vals[pos] == val: return vals[pos-1], vals[pos+1] if vals[pos] < val:", "vals[pos], vals[pos+1] return vals[pos-1], vals[pos] def binary_search(start, end, val): if start >= end:", "# Find the lucky numbers def find_less_greater(val): pos = binary_search(0, len(vals)-1, val) if", "vals[pos] == val: return vals[pos-1], vals[pos+1] if vals[pos] < val: return vals[pos], vals[pos+1]", "find_less_greater(val): pos = binary_search(0, len(vals)-1, val) if vals[pos] == val: return vals[pos-1], vals[pos+1]", "numbers list vals = range(1,11000,2) complete = False cur_index = 1 cur =", "(end-start)/2 + start mid_val = vals[mid_pos] if val < mid_val: return binary_search(start, mid_pos-1,", "on the 2014 British Informatics Olympiad Round One exam Lucky Numbers \"\"\" __author__", "val) elif val > mid_val: return binary_search(mid_pos+1, end, val) return mid_pos def lucky_numbers(val):", "binary_search(start, mid_pos-1, val) elif val > mid_val: return binary_search(mid_pos+1, end, val) return mid_pos", "vals[pos-1], vals[pos+1] if vals[pos] < val: return vals[pos], vals[pos+1] return vals[pos-1], vals[pos] def", "val < mid_val: return binary_search(start, mid_pos-1, val) elif val > mid_val: return binary_search(mid_pos+1,", "simply an implemenation based problem. We first generate the list of lucky numbers", "One exam Lucky Numbers \"\"\" __author__ = \"<NAME>\" __date__ = \"25 January 2016\"", "lower, upper = find_less_greater(val) print lower, upper return (lower, upper) in1 = raw_input()", "provided and then use a binary search to find the corresponding lower and", "< val: return vals[pos], vals[pos+1] return vals[pos-1], vals[pos] def binary_search(start, end, val): if", "algorithm provided and then use a binary search to find the corresponding lower", "return binary_search(mid_pos+1, end, val) return mid_pos def lucky_numbers(val): lower, upper = find_less_greater(val) print", "cur = vals[cur_index] if len(vals) < cur: break end_index = cur*(len(vals)/cur)-1 for i", "-cur): vals.pop(i) cur_index += 1 # Find the lucky numbers def find_less_greater(val): pos", "is simply an implemenation based problem. 
We first generate the list of lucky", "val: return vals[pos], vals[pos+1] return vals[pos-1], vals[pos] def binary_search(start, end, val): if start", "upper = find_less_greater(val) print lower, upper return (lower, upper) in1 = raw_input() lucky_numbers(int(in1))", "#!/usr/bin/env python \"\"\" bio-2014-1-1.py: Sample solution for question 1 on the 2014 British", "val) return mid_pos def lucky_numbers(val): lower, upper = find_less_greater(val) print lower, upper return", "vals[mid_pos] if val < mid_val: return binary_search(start, mid_pos-1, val) elif val > mid_val:", "1 cur = None while not complete: cur = vals[cur_index] if len(vals) <", "mid_pos def lucky_numbers(val): lower, upper = find_less_greater(val) print lower, upper return (lower, upper)", "British Informatics Olympiad Round One exam Lucky Numbers \"\"\" __author__ = \"<NAME>\" __date__", "for question 1 on the 2014 British Informatics Olympiad Round One exam Lucky", "corresponding lower and greater elements. \"\"\" # Generate luck numbers list vals =", "not complete: cur = vals[cur_index] if len(vals) < cur: break end_index = cur*(len(vals)/cur)-1", "2016\" \"\"\" This is simply an implemenation based problem. We first generate the", "\"<NAME>\" __date__ = \"25 January 2016\" \"\"\" This is simply an implemenation based", "def find_less_greater(val): pos = binary_search(0, len(vals)-1, val) if vals[pos] == val: return vals[pos-1],", "vals.pop(i) cur_index += 1 # Find the lucky numbers def find_less_greater(val): pos =", "binary_search(0, len(vals)-1, val) if vals[pos] == val: return vals[pos-1], vals[pos+1] if vals[pos] <", "def binary_search(start, end, val): if start >= end: return start mid_pos = (end-start)/2", "cur*(len(vals)/cur)-1 for i in xrange(end_index, -1, -cur): vals.pop(i) cur_index += 1 # Find", "= vals[mid_pos] if val < mid_val: return binary_search(start, mid_pos-1, val) elif val >", "binary search to find the corresponding lower and greater elements. \"\"\" # Generate", "for i in xrange(end_index, -1, -cur): vals.pop(i) cur_index += 1 # Find the", "xrange(end_index, -1, -cur): vals.pop(i) cur_index += 1 # Find the lucky numbers def", "complete = False cur_index = 1 cur = None while not complete: cur", "vals[pos] def binary_search(start, end, val): if start >= end: return start mid_pos =", "end: return start mid_pos = (end-start)/2 + start mid_val = vals[mid_pos] if val", "# Generate luck numbers list vals = range(1,11000,2) complete = False cur_index =", "cur_index += 1 # Find the lucky numbers def find_less_greater(val): pos = binary_search(0,", "python \"\"\" bio-2014-1-1.py: Sample solution for question 1 on the 2014 British Informatics", "= cur*(len(vals)/cur)-1 for i in xrange(end_index, -1, -cur): vals.pop(i) cur_index += 1 #", "= binary_search(0, len(vals)-1, val) if vals[pos] == val: return vals[pos-1], vals[pos+1] if vals[pos]", "val) if vals[pos] == val: return vals[pos-1], vals[pos+1] if vals[pos] < val: return", "mid_pos-1, val) elif val > mid_val: return binary_search(mid_pos+1, end, val) return mid_pos def", "val): if start >= end: return start mid_pos = (end-start)/2 + start mid_val", "Lucky Numbers \"\"\" __author__ = \"<NAME>\" __date__ = \"25 January 2016\" \"\"\" This", "vals[pos+1] return vals[pos-1], vals[pos] def binary_search(start, end, val): if start >= end: return", "while not complete: cur = vals[cur_index] if len(vals) < cur: break end_index =", "problem. 
We first generate the list of lucky numbers via the algorithm provided", "return mid_pos def lucky_numbers(val): lower, upper = find_less_greater(val) print lower, upper return (lower,", "= None while not complete: cur = vals[cur_index] if len(vals) < cur: break", "pos = binary_search(0, len(vals)-1, val) if vals[pos] == val: return vals[pos-1], vals[pos+1] if", "greater elements. \"\"\" # Generate luck numbers list vals = range(1,11000,2) complete =", "list vals = range(1,11000,2) complete = False cur_index = 1 cur = None", "mid_val = vals[mid_pos] if val < mid_val: return binary_search(start, mid_pos-1, val) elif val", "> mid_val: return binary_search(mid_pos+1, end, val) return mid_pos def lucky_numbers(val): lower, upper =", "end, val) return mid_pos def lucky_numbers(val): lower, upper = find_less_greater(val) print lower, upper", "val > mid_val: return binary_search(mid_pos+1, end, val) return mid_pos def lucky_numbers(val): lower, upper", "Informatics Olympiad Round One exam Lucky Numbers \"\"\" __author__ = \"<NAME>\" __date__ =", "then use a binary search to find the corresponding lower and greater elements.", "We first generate the list of lucky numbers via the algorithm provided and", "mid_pos = (end-start)/2 + start mid_val = vals[mid_pos] if val < mid_val: return", "luck numbers list vals = range(1,11000,2) complete = False cur_index = 1 cur", "range(1,11000,2) complete = False cur_index = 1 cur = None while not complete:", "question 1 on the 2014 British Informatics Olympiad Round One exam Lucky Numbers", "Numbers \"\"\" __author__ = \"<NAME>\" __date__ = \"25 January 2016\" \"\"\" This is", "= (end-start)/2 + start mid_val = vals[mid_pos] if val < mid_val: return binary_search(start,", "False cur_index = 1 cur = None while not complete: cur = vals[cur_index]", "bio-2014-1-1.py: Sample solution for question 1 on the 2014 British Informatics Olympiad Round", "== val: return vals[pos-1], vals[pos+1] if vals[pos] < val: return vals[pos], vals[pos+1] return", "first generate the list of lucky numbers via the algorithm provided and then", "def lucky_numbers(val): lower, upper = find_less_greater(val) print lower, upper return (lower, upper) in1", "None while not complete: cur = vals[cur_index] if len(vals) < cur: break end_index", "cur_index = 1 cur = None while not complete: cur = vals[cur_index] if", "Round One exam Lucky Numbers \"\"\" __author__ = \"<NAME>\" __date__ = \"25 January", "solution for question 1 on the 2014 British Informatics Olympiad Round One exam", "based problem. We first generate the list of lucky numbers via the algorithm", "list of lucky numbers via the algorithm provided and then use a binary", "i in xrange(end_index, -1, -cur): vals.pop(i) cur_index += 1 # Find the lucky", "January 2016\" \"\"\" This is simply an implemenation based problem. We first generate", "a binary search to find the corresponding lower and greater elements. 
\"\"\" #", "the 2014 British Informatics Olympiad Round One exam Lucky Numbers \"\"\" __author__ =", "mid_val: return binary_search(mid_pos+1, end, val) return mid_pos def lucky_numbers(val): lower, upper = find_less_greater(val)", ">= end: return start mid_pos = (end-start)/2 + start mid_val = vals[mid_pos] if", "via the algorithm provided and then use a binary search to find the", "start >= end: return start mid_pos = (end-start)/2 + start mid_val = vals[mid_pos]", "return vals[pos-1], vals[pos+1] if vals[pos] < val: return vals[pos], vals[pos+1] return vals[pos-1], vals[pos]", "+ start mid_val = vals[mid_pos] if val < mid_val: return binary_search(start, mid_pos-1, val)", "to find the corresponding lower and greater elements. \"\"\" # Generate luck numbers", "if vals[pos] < val: return vals[pos], vals[pos+1] return vals[pos-1], vals[pos] def binary_search(start, end,", "\"\"\" bio-2014-1-1.py: Sample solution for question 1 on the 2014 British Informatics Olympiad", "the lucky numbers def find_less_greater(val): pos = binary_search(0, len(vals)-1, val) if vals[pos] ==", "lower and greater elements. \"\"\" # Generate luck numbers list vals = range(1,11000,2)", "and then use a binary search to find the corresponding lower and greater", "the algorithm provided and then use a binary search to find the corresponding" ]
import matplotlib
import matplotlib.pyplot

from collections import defaultdict, OrderedDict
from natsort import natsorted

from plots.DotSetPlot import DotSetPlot

# human-readable titles for the curated processes
processToTitle = {
    "targetMirsECA": "EC activation and\n inflammation",
    "targetMirsMonocyte": "Monocyte diff. &\nMacrophage act.",
    "targetMirsFCF": "Foam cell formation",
    "targetMirsAngio": "Angiogenesis",
    "targetMirsVasRemod": "Vascular remodeling",
    "targetMirsTCell": "T cell differentiation &\n activation",
    "targetMirsCholEfflux": "Cholesterol efflux",
    "targetMirsSMCProlif": "SMC proliferation &\n SMC migration",
}

# human-readable titles for the CBN networks
network2nicename = {
    "CV-IPN-Plaque_destabilization_1": "(VI) Plaque destabilization",
    "CV-IPN-Platelet_activation_1": "(V) Platelet activation",
    "CV-IPN-Smooth_muscle_cell_activation_1": "(IV) SMC activation",
    "CV-IPN-Foam_cell_formation_1": "(III) Foam cell formation",
    "CV-IPN-Endothelial_cell-monocyte_interaction_1": "(II) EC/MC interaction",
    "CV-IPN-Endothelial_cell_activation_1": "(I) EC activation",
}

celltype2nicename = {
    "SMC": "Smooth muscle cell",
    "EC": "Endothelial cell",
    "MC": "Macrophage/Monocyte",
    "FC": "Foam cell",
}


def source2index(sname):
    # CBN sources sort before all other sources
    if sname is not None and sname.startswith("CV-IPN"):
        return 0
    return 1


# miRNA -> evidence (PubMed ID) -> annotated cell types / CBNs / processes
mirna2evidenceCellT = defaultdict(lambda: defaultdict(set))
mirna2evidenceCBN = defaultdict(lambda: defaultdict(set))
mirna2evidenceProcess = defaultdict(lambda: defaultdict(set))

pubmed2tuples = defaultdict(set)
mirna2evflows = defaultdict(set)
dataLabels = defaultdict(set)

# manually selected miRNAs; earlier selections are kept here for reference
# manuMirnas = ['miR-126', 'miR-21', 'miR-155', 'miR-146a', 'miR-125b', 'miR-34a', 'miR-499', 'miR-221', 'miR-370', 'miR-504']
# manuMirnas = ['miR-181c', 'miR-222', 'miR-126', 'miR-155', 'miR-125b', 'miR-34a', 'miR-370', 'miR-146a', 'miR-21', 'miR-93']
# manuMirnas = list({'miR-155', 'miR-93', 'miR-181c', 'miR-370', 'miR-222', 'miR-125b', 'miR-34a', 'miR-146a', 'miR-126', 'miR-21'})
manuMirnas = ["miR-98", "miR-125a", "miR-21", "miR-34a", "miR-93", "miR-125b", "miR-126", "miR-146a", "miR-155", "miR-370"]

miRNA2InteractionPartner = defaultdict(set)
miRNA2Evidences = defaultdict(set)

# CBN evidence, one tab-separated record per line, e.g.
# CV-IPN-Endothelial_cell-monocyte_interaction_1	VEGFA	miR-140	EC	27035554
with open("/mnt/d/yanc_network/disease_pw_important_cbn.txt", "r") as fin:
    for line in fin:
        line = line.strip().split("\t")

        cbn = network2nicename.get(line[0], line[0])
        gene = line[1]
        miRNA = line[2]
        cellT = celltype2nicename.get(line[3], line[3])
        evidence = line[4]

        if "US" in miRNA:
            continue

        miRNA2InteractionPartner[miRNA].add(gene)
        miRNA2Evidences[miRNA].add(evidence)

        dataLabels["Cell-Type"].add(cellT)
        dataLabels["CBN"].add(cbn)

        mirna2evidenceCellT[miRNA][evidence].add(cellT)
        mirna2evidenceCBN[miRNA][evidence].add(cbn)

# important_process evidence, same column layout as above
with open("/mnt/d/yanc_network/pathway_important_process.txt", "r") as fin:
    for line in fin:
        line = line.strip().split("\t")

        process = processToTitle.get(line[0], line[0])
        gene = line[1]
        miRNA = line[2]
        cellT = celltype2nicename.get(line[3], line[3])
        evidence = line[4]

        if "US" in miRNA:
            continue

        miRNA2InteractionPartner[miRNA].add(gene)
        miRNA2Evidences[miRNA].add(evidence)

        dataLabels["Cell-Type"].add(cellT)
        dataLabels["Process"].add(process)

        mirna2evidenceCellT[miRNA][evidence].add(cellT)
        mirna2evidenceProcess[miRNA][evidence].add(process)

for x in manuMirnas:
    print(x, miRNA2InteractionPartner[x], miRNA2Evidences[x])

allMiRNA = set()
for x in mirna2evidenceCellT:
    allMiRNA.add(x)
for x in mirna2evidenceProcess:
    allMiRNA.add(x)
for x in mirna2evidenceCBN:
    allMiRNA.add(x)

# per miRNA: union of cell types, CBNs and processes over all of its evidences
dataUpPlot = {}
for miRNA in allMiRNA:
    miRNAEvs = set()
    for x in mirna2evidenceCBN.get(miRNA, []):
        miRNAEvs.add(x)
    for x in mirna2evidenceProcess.get(miRNA, []):
        miRNAEvs.add(x)
    for x in mirna2evidenceCellT.get(miRNA, []):
        miRNAEvs.add(x)

    miRNAData = {"CBN": set(), "Process": set(), "Cell-Type": set()}

    for ev in miRNAEvs:
        cellT = mirna2evidenceCellT[miRNA].get(ev, None)
        cbns = mirna2evidenceCBN[miRNA].get(ev, None)
        process = mirna2evidenceProcess[miRNA].get(ev, None)

        if cellT is not None:
            miRNAData["Cell-Type"] = miRNAData["Cell-Type"].union(cellT)
        if cbns is not None:
            miRNAData["CBN"] = miRNAData["CBN"].union(cbns)
        if process is not None:
            miRNAData["Process"] = miRNAData["Process"].union(process)

    dataUpPlot[miRNA] = miRNAData

orderDict = OrderedDict()
for type in ["CBN", "Process", "Cell-Type"]:
    orderDict[type] = sorted(dataLabels[type])


def makeMIRNAName(miRNA):
    return miRNA
    # alternative label with interactor/evidence counts, currently disabled:
    # return miRNA + " (" + str(len(miRNA2InteractionPartner[miRNA])) + "," + str(len(miRNA2Evidences[miRNA])) + ")"


filteredData = OrderedDict()
for miRNA in manuMirnas:
    if miRNA in dataUpPlot:
        filteredData[makeMIRNAName(miRNA)] = dataUpPlot[miRNA]
    else:
        print("Missing manu", miRNA)

stages2 = 0
stages0 = 0

# additionally take every miRNA that has evidence and at least one CBN annotation
for miRNA in natsorted(dataUpPlot, key=lambda x: x.split("-")[1]):
    stages = dataUpPlot[miRNA]["CBN"]

    if len(miRNA2Evidences[miRNA]) <= 0:
        continue
    if len(dataUpPlot[miRNA]["Process"]) == 0:
        pass  # continue
    if len(dataUpPlot[miRNA]["CBN"]) == 0:
        continue

    filteredData[makeMIRNAName(miRNA)] = dataUpPlot[miRNA]

print(len(dataUpPlot))
print(len(filteredData))
print(stages2)
print(stages0)

# dump (miRNA, CBN, process, cell type) tuples for the parallel-sets view
fout = open("/mnt/c/Users/mjopp/Desktop/d3-parsets-d3v5/titanic.csv", "w")
print("miRNA", "CBN", "PROCESS", "CELLTYPE", sep=",", file=fout)

mirna2printTuple = defaultdict(list)
for miRNA in allMiRNA:
    miRNAEvs = set()
    for x in mirna2evidenceCBN.get(miRNA, []):
        miRNAEvs.add(x)
    for x in mirna2evidenceProcess.get(miRNA, []):
        miRNAEvs.add(x)
    for x in mirna2evidenceCellT.get(miRNA, []):
        miRNAEvs.add(x)

    for ev in miRNAEvs:
        cellT = mirna2evidenceCellT[miRNA].get(ev, ["None"])
        cbns = mirna2evidenceCBN[miRNA].get(ev, ["None"])
        processes = mirna2evidenceProcess[miRNA].get(ev, ["None"])

        if miRNA == "miR-98":
            # debug output for miR-98
            print(ev, cbns, cellT, processes)

        if "None" in cbns:  # or "None" in processes:
            continue

        for celltype in cellT:
            for cbn in cbns:
                for process in processes:
                    mirna2printTuple[miRNA].append((cbn, process, celltype))

selMirnas = sorted([x for x in mirna2printTuple], reverse=True, key=lambda x: len(mirna2printTuple[x]))
print(selMirnas[0:10])

for miRNA in manuMirnas:
    for (cbn, process, celltype) in mirna2printTuple[miRNA]:
        print(miRNA, cbn.replace("\n", " ").replace("  ", " "), process.replace("\n", " ").replace("  ", " "), celltype, sep=",", file=fout)

fout.close()

interactorCounts = [len(miRNA2InteractionPartner[miRNA]) for miRNA in filteredData]
pubmedCounts = [len(miRNA2Evidences[miRNA]) for miRNA in filteredData]

DotSetPlot().plot(dataLabels, filteredData, numbers={"Interactor Count": interactorCounts, "PubMed Evidence Count": pubmedCounts}, sortData=False, order=orderDict)  # , max=30
matplotlib.pyplot.savefig("/mnt/d/owncloud/markus/uni/publications/miReview/dotset_important.pdf")
matplotlib.pyplot.show()
celltype2nicename.get(line[3], line[3]) evidence = line[4] if \"US\"", "sname.startswith(\"CV-IPN\"): return 0 return 1 mirna2evidenceCellT = defaultdict(lambda: defaultdict(set)) mirna2evidenceCBN = defaultdict(lambda: defaultdict(set))", "proliferation &\\n SMC migration\" } network2nicename = { \"CV-IPN-Plaque_destabilization_1\": \"(VI) Plaque destabilization\", \"CV-IPN-Platelet_activation_1\":", "def makeMIRNAName(miRNA): return miRNA return miRNA + \" (\" + str(len(miRNA2InteractionPartner[miRNA])) + \",\"+", "filteredData] DotSetPlot().plot(dataLabels, filteredData, numbers={\"Interactor Count\":interactorCounts , \"PubMed Evidence Count\": pubmedCounts },sortData=False,order=orderDict)#, max=30) matplotlib.pyplot.savefig(\"/mnt/d/owncloud/markus/uni/publications/miReview/dotset_important.pdf\")", "mirna2evidenceCBN: allMiRNA.add(x) dataUpPlot = {} for miRNA in allMiRNA: miRNAEvs = set() for", "\"CV-IPN-Plaque_destabilization_1\": \"(VI) Plaque destabilization\", \"CV-IPN-Platelet_activation_1\": \"(V) Platelet activation\", \"CV-IPN-Smooth_muscle_cell_activation_1\": \"(IV) SMC activation\", \"CV-IPN-Foam_cell_formation_1\":", "= { 'SMC': \"Smooth muscle cell\", 'EC': \"Endothelial cell\", \"MC\": \"Macrophage/Monocyte\", \"FC\": \"Foam", "miRNA2Evidences[miRNA].add(evidence) dataLabels[\"Cell-Type\"].add(cellT) dataLabels[\"Process\"].add(process) mirna2evidenceCellT[miRNA][evidence].add(cellT) mirna2evidenceProcess[miRNA][evidence].add(process) for x in manuMirnas: print(x, miRNA2InteractionPartner[x], miRNA2Evidences[x]) allMiRNA", "def source2index( sname ): if sname != None and sname.startswith(\"CV-IPN\"): return 0 return", "} for ev in miRNAEvs: cellT = mirna2evidenceCellT[miRNA].get(ev, None) cbns = mirna2evidenceCBN[miRNA].get(ev, None)", "\"Cell-Type\"]: orderDict[type] = sorted(dataLabels[type]) def makeMIRNAName(miRNA): return miRNA return miRNA + \" (\"", "return miRNA + \" (\" + str(len(miRNA2InteractionPartner[miRNA])) + \",\"+ str(len(miRNA2Evidences[miRNA]))+\")\" filteredData = OrderedDict()", "cellT = celltype2nicename.get(line[3], line[3]) evidence = line[4] if \"US\" in miRNA: continue miRNA2InteractionPartner[miRNA].add(gene)", "[\"None\"]) processes = mirna2evidenceProcess[miRNA].get(ev, [\"None\"]) if miRNA == \"miR-98\": print(ev, cbns, celltype, process)", "'miR-146a', 'miR-126', 'miR-21'}) manuMirnas = [\"miR-98\", \"miR-125a\",\"miR-21\", \"miR-34a\", \"miR-93\", \"miR-125b\", \"miR-126\", \"miR-146a\", \"miR-155\",", "from collections import defaultdict, OrderedDict from plots.DotSetPlot import DotSetPlot processToTitle = { \"targetMirsECA\":", "\"miR-125a\",\"miR-21\", \"miR-34a\", \"miR-93\", \"miR-125b\", \"miR-126\", \"miR-146a\", \"miR-155\", \"miR-370\"] miRNA2InteractionPartner = defaultdict(set) miRNA2Evidences =", "'r') as fin: for line in fin: line = line.strip().split(\"\\t\") #CV-IPN-Endothelial_cell-monocyte_interaction_1 VEGFA miR-140", "\"Cell-Type\": set() } for ev in miRNAEvs: cellT = mirna2evidenceCellT[miRNA].get(ev, None) cbns =", "type in [\"CBN\", \"Process\", \"Cell-Type\"]: orderDict[type] = sorted(dataLabels[type]) def makeMIRNAName(miRNA): return miRNA return", "[\"None\"]) cbns = mirna2evidenceCBN[miRNA].get(ev, [\"None\"]) processes = mirna2evidenceProcess[miRNA].get(ev, [\"None\"]) if miRNA == \"miR-98\":", "mirna2evidenceCBN.get(miRNA, []): miRNAEvs.add(x) for x in mirna2evidenceProcess.get(miRNA, []): miRNAEvs.add(x) for x in mirna2evidenceCellT.get(miRNA,", "mirna2evidenceProcess = defaultdict(lambda: defaultdict(set)) 
pubmed2tuples = defaultdict(set) mirna2evflows = defaultdict(set) dataLabels = defaultdict(set)", "= mirna2evidenceCellT[miRNA].get(ev, None) cbns = mirna2evidenceCBN[miRNA].get(ev, None) process = mirna2evidenceProcess[miRNA].get(ev, None) if cellT", "with open(\"/mnt/d/yanc_network/pathway_important_process.txt\", 'r') as fin: for line in fin: line = line.strip().split(\"\\t\") #CV-IPN-Endothelial_cell-monocyte_interaction_1", "\"), celltype, sep=\",\", file=fout) interactorCounts = [len(miRNA2InteractionPartner[miRNA]) for miRNA in filteredData] pubmedCounts =", "miRNA in manuMirnas: if miRNA in dataUpPlot: filteredData[makeMIRNAName(miRNA)] = dataUpPlot[miRNA] else: print(\"Missing manu\",", "for miRNA in filteredData] DotSetPlot().plot(dataLabels, filteredData, numbers={\"Interactor Count\":interactorCounts , \"PubMed Evidence Count\": pubmedCounts", "remodeling\", \"targetMirsTCell\": \"T cell differentiation &\\n activation\", \"targetMirsCholEfflux\": \"Cholesterol efflux\", \"targetMirsSMCProlif\": \"SMC proliferation", "'miR-21', 'miR-93'] manuMirnas = list({'miR-155', 'miR-93', 'miR-181c', 'miR-370', 'miR-222', 'miR-125b', 'miR-34a', 'miR-146a', 'miR-126',", "None) cbns = mirna2evidenceCBN[miRNA].get(ev, None) process = mirna2evidenceProcess[miRNA].get(ev, None) if cellT != None:", "source2index( sname ): if sname != None and sname.startswith(\"CV-IPN\"): return 0 return 1", "dataUpPlot[miRNA] else: print(\"Missing manu\", miRNA) stages2 = 0 stages0 = 0 from natsort", "cbns = mirna2evidenceCBN[miRNA].get(ev, [\"None\"]) processes = mirna2evidenceProcess[miRNA].get(ev, [\"None\"]) if miRNA == \"miR-98\": print(ev,", "\"miR-155\", \"miR-370\"] miRNA2InteractionPartner = defaultdict(set) miRNA2Evidences = defaultdict(set) with open(\"/mnt/d/yanc_network/disease_pw_important_cbn.txt\", 'r') as fin:", "EC activation\", } celltype2nicename = { 'SMC': \"Smooth muscle cell\", 'EC': \"Endothelial cell\",", "for x in manuMirnas: print(x, miRNA2InteractionPartner[x], miRNA2Evidences[x]) allMiRNA = set() for x in", "'miR-155', 'miR-125b', 'miR-34a', 'miR-370', 'miR-146a', 'miR-21', 'miR-93'] manuMirnas = list({'miR-155', 'miR-93', 'miR-181c', 'miR-370',", "cbn in cbns: for process in processes: mirna2printTuple[miRNA].append( (cbn, process, celltype) ) selMirnas", "= processToTitle.get(line[0], line[0]) gene = line[1] miRNA = line[2] cellT = celltype2nicename.get(line[3], line[3])", "1 mirna2evidenceCellT = defaultdict(lambda: defaultdict(set)) mirna2evidenceCBN = defaultdict(lambda: defaultdict(set)) mirna2evidenceProcess = defaultdict(lambda: defaultdict(set))", "len(dataUpPlot[miRNA]['Process']) == 0: pass#continue if len(dataUpPlot[miRNA]['CBN']) == 0: continue filteredData[makeMIRNAName(miRNA)] = dataUpPlot[miRNA] print(len(dataUpPlot))", "for miRNA in manuMirnas: for (cbn, process, celltype) in mirna2printTuple[miRNA]: print(miRNA, cbn.replace(\"\\n\", \"", "filteredData] pubmedCounts = [len(miRNA2Evidences[miRNA]) for miRNA in filteredData] DotSetPlot().plot(dataLabels, filteredData, numbers={\"Interactor Count\":interactorCounts ,", "\" \"), celltype, sep=\",\", file=fout) interactorCounts = [len(miRNA2InteractionPartner[miRNA]) for miRNA in filteredData] pubmedCounts", "sorted([x for x in mirna2printTuple], reverse=True, key=lambda x: len(mirna2printTuple[x])) print(selMirnas[0:10]) for miRNA in", "for cbn in cbns: for process in processes: mirna2printTuple[miRNA].append( (cbn, process, celltype) )", "in manuMirnas: print(x, miRNA2InteractionPartner[x], miRNA2Evidences[x]) allMiRNA 
= set() for x in mirna2evidenceCellT: allMiRNA.add(x)", "process in processes: mirna2printTuple[miRNA].append( (cbn, process, celltype) ) selMirnas = sorted([x for x", "manuMirnas = list({'miR-155', 'miR-93', 'miR-181c', 'miR-370', 'miR-222', 'miR-125b', 'miR-34a', 'miR-146a', 'miR-126', 'miR-21'}) manuMirnas", "= celltype2nicename.get(line[3], line[3]) evidence = line[4] if \"US\" in miRNA: continue miRNA2InteractionPartner[miRNA].add(gene) miRNA2Evidences[miRNA].add(evidence)", "\"Foam cell formation\", \"targetMirsAngio\": \"Angiogenesis\", \"targetMirsVasRemod\": \"Vascular remodeling\", \"targetMirsTCell\": \"T cell differentiation &\\n", "miRNA2InteractionPartner[x], miRNA2Evidences[x]) allMiRNA = set() for x in mirna2evidenceCellT: allMiRNA.add(x) for x in", "x.split(\"-\")[1]): stages = dataUpPlot[miRNA]['CBN'] if len(miRNA2Evidences[miRNA]) <= 0: continue if len(dataUpPlot[miRNA]['Process']) == 0:", "in mirna2evidenceCBN: allMiRNA.add(x) dataUpPlot = {} for miRNA in allMiRNA: miRNAEvs = set()", "\"CBN\", \"PROCESS\", \"CELLTYPE\", sep=\",\", file=fout) mirna2printTuple = defaultdict(list) for miRNA in allMiRNA: miRNAEvs", "cellT != None: miRNAData['Cell-Type'] = miRNAData['Cell-Type'].union(cellT) if cbns != None: miRNAData['CBN'] = miRNAData['CBN'].union(cbns)", "set() for x in mirna2evidenceCellT: allMiRNA.add(x) for x in mirna2evidenceProcess: allMiRNA.add(x) for x", "\"miR-126\", \"miR-146a\", \"miR-155\", \"miR-370\"] miRNA2InteractionPartner = defaultdict(set) miRNA2Evidences = defaultdict(set) with open(\"/mnt/d/yanc_network/disease_pw_important_cbn.txt\", 'r')", "[]): miRNAEvs.add(x) for x in mirna2evidenceCellT.get(miRNA, []): miRNAEvs.add(x) miRNAData = { \"CBN\": set(),", "differentiation &\\n activation\", \"targetMirsCholEfflux\": \"Cholesterol efflux\", \"targetMirsSMCProlif\": \"SMC proliferation &\\n SMC migration\" }", "= ['miR-126', 'miR-21', 'miR-155', 'miR-146a', 'miR-125b', 'miR-34a', 'miR-499', 'miR-221', 'miR-370', 'miR-504'] #manuMirnas =", "dataLabels[\"Cell-Type\"].add(cellT) dataLabels[\"Process\"].add(process) mirna2evidenceCellT[miRNA][evidence].add(cellT) mirna2evidenceProcess[miRNA][evidence].add(process) for x in manuMirnas: print(x, miRNA2InteractionPartner[x], miRNA2Evidences[x]) allMiRNA =", "mirna2evidenceCellT[miRNA].get(ev, [\"None\"]) cbns = mirna2evidenceCBN[miRNA].get(ev, [\"None\"]) processes = mirna2evidenceProcess[miRNA].get(ev, [\"None\"]) if miRNA ==", "[]): miRNAEvs.add(x) miRNAData = { \"CBN\": set(), \"Process\": set(), \"Cell-Type\": set() } for", "\",\"+ str(len(miRNA2Evidences[miRNA]))+\")\" filteredData = OrderedDict() for miRNA in manuMirnas: if miRNA in dataUpPlot:", "miRNA in filteredData] DotSetPlot().plot(dataLabels, filteredData, numbers={\"Interactor Count\":interactorCounts , \"PubMed Evidence Count\": pubmedCounts },sortData=False,order=orderDict)#,", "= set() for x in mirna2evidenceCBN.get(miRNA, []): miRNAEvs.add(x) for x in mirna2evidenceProcess.get(miRNA, []):", "from plots.DotSetPlot import DotSetPlot processToTitle = { \"targetMirsECA\": \"EC activation and\\n inflammation\", \"targetMirsMonocyte\":", "mirna2evidenceProcess.get(miRNA, []): miRNAEvs.add(x) for x in mirna2evidenceCellT.get(miRNA, []): miRNAEvs.add(x) miRNAData = { \"CBN\":", "manuMirnas: print(x, miRNA2InteractionPartner[x], miRNA2Evidences[x]) allMiRNA = set() for x in mirna2evidenceCellT: allMiRNA.add(x) for", "miRNA2Evidences = defaultdict(set) with open(\"/mnt/d/yanc_network/disease_pw_important_cbn.txt\", 'r') as fin: for line in fin: line", 
"if len(miRNA2Evidences[miRNA]) <= 0: continue if len(dataUpPlot[miRNA]['Process']) == 0: pass#continue if len(dataUpPlot[miRNA]['CBN']) ==", "= [len(miRNA2InteractionPartner[miRNA]) for miRNA in filteredData] pubmedCounts = [len(miRNA2Evidences[miRNA]) for miRNA in filteredData]", "interaction\", \"CV-IPN-Endothelial_cell_activation_1\": \"(I) EC activation\", } celltype2nicename = { 'SMC': \"Smooth muscle cell\",", "cell differentiation &\\n activation\", \"targetMirsCholEfflux\": \"Cholesterol efflux\", \"targetMirsSMCProlif\": \"SMC proliferation &\\n SMC migration\"", "defaultdict(set) dataLabels = defaultdict(set) #\"miR-98\", \"miR-125a\" manuMirnas = [\"miR-98\", \"miR-125a\",\"miR-21\", \"miR-34a\", \"miR-93\", \"miR-125b\",", "defaultdict(set) miRNA2Evidences = defaultdict(set) with open(\"/mnt/d/yanc_network/disease_pw_important_cbn.txt\", 'r') as fin: for line in fin:", "= miRNAData orderDict = OrderedDict() for type in [\"CBN\", \"Process\", \"Cell-Type\"]: orderDict[type] =", "None: miRNAData['Process'] = miRNAData['Process'].union(process) dataUpPlot[miRNA] = miRNAData orderDict = OrderedDict() for type in", "continue for celltype in cellT: for cbn in cbns: for process in processes:", "in mirna2evidenceCellT.get(miRNA, []): miRNAEvs.add(x) miRNAData = { \"CBN\": set(), \"Process\": set(), \"Cell-Type\": set()", "\"targetMirsAngio\": \"Angiogenesis\", \"targetMirsVasRemod\": \"Vascular remodeling\", \"targetMirsTCell\": \"T cell differentiation &\\n activation\", \"targetMirsCholEfflux\": \"Cholesterol", "import natsorted for miRNA in natsorted(dataUpPlot, key=lambda x: x.split(\"-\")[1]): stages = dataUpPlot[miRNA]['CBN'] if", "len(mirna2printTuple[x])) print(selMirnas[0:10]) for miRNA in manuMirnas: for (cbn, process, celltype) in mirna2printTuple[miRNA]: print(miRNA," ]
[ "clock.get_fps(), '', 'Vehicle: % 20s' % utils.get_actor_display_name(world.player, truncate=20), 'Map: % 20s' % world.map.name,", "= True self._info_text = [] self._server_clock = pygame.time.Clock() def on_world_tick(self, timestamp): \"\"\"Gets informations", "\"\"\"Class for HUD text\"\"\" def __init__(self, width, height, doc): \"\"\"Constructor method\"\"\" self.dim =", "> 0.5 else '' heading += 'W' if -0.5 > transform.rotation.yaw > -179.5", "HUD class\"\"\" if self._show_info: info_surface = pygame.Surface((250, self.dim[1])) info_surface.set_alpha(100) display.blit(info_surface, (0, 0)) v_offset", "At this point has to be a str. surface = self._font_mono.render(item, True, (255,", "x in range(0, 200)] max_col = max(1.0, max(collision)) collision = [x / max_col", "utils class HUD(object): \"\"\"Class for HUD text\"\"\" def __init__(self, width, height, doc): \"\"\"Constructor", "range(0, 200)] max_col = max(1.0, max(collision)) collision = [x / max_col for x", "every tick\"\"\" self._notifications.tick(world, clock) self.map_name = world.map.name if not self._show_info: return transform =", "font_name = 'courier' if os.name == 'nt' else 'mono' fonts = [x for", "be a str. surface = self._font_mono.render(item, True, (255, 255, 255)) display.blit(surface, (8, v_offset))", "(dist, vehicle_type)) def toggle_info(self): \"\"\"Toggle info on or off\"\"\" self._show_info = not self._show_info", "Original work Copyright (c) 2018 Intel Labs. # authors: <NAME> (<EMAIL>) # #", "text, (255, 0, 0)) def render(self, display): \"\"\"Render for HUD class\"\"\" if self._show_info:", "+ 8), (bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect_border, 1) fig = (item[1]", "14) self._notifications = FadingText(font, (width, 40), (0, height - 40)) self.help = HelpText(doc,", "else '' heading += 'W' if -0.5 > transform.rotation.yaw > -179.5 else ''", "timestamp.elapsed_seconds def tick(self, world, clock): \"\"\"HUD method for every tick\"\"\" self._notifications.tick(world, clock) self.map_name", "+ (1 - y) * 30) for x, y in enumerate(item)] pygame.draw.lines(display, (255,", "vehicle_type = utils.get_actor_display_name(vehicle, truncate=22) self._info_text.append('% 4dm %s' % (dist, vehicle_type)) def toggle_info(self): \"\"\"Toggle", "pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1) else: rect_border =", "self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height -", "has to be a str. 
surface = self._font_mono.render(item, True, (255, 255, 255)) display.blit(surface,", "info_surface = pygame.Surface((250, self.dim[1])) info_surface.set_alpha(100) display.blit(info_surface, (0, 0)) v_offset = 4 bar_h_offset =", "heading += 'S' if abs(transform.rotation.yaw) > 90.5 else '' heading += 'E' if", "clock) self.map_name = world.map.name if not self._show_info: return transform = world.player.get_transform() vel =", "self._server_clock.tick() self.server_fps = self._server_clock.get_fps() self.frame = timestamp.frame_count self.simulation_time = timestamp.elapsed_seconds def tick(self, world,", "v_offset += 18 elif isinstance(item, tuple): if isinstance(item[1], bool): rect = pygame.Rect((bar_h_offset, v_offset", "'nt' else 14) self._notifications = FadingText(font, (width, 40), (0, height - 40)) self.help", "id: % 20s' % waypoint.road_id, 'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)), '', 'Speed:", "self._server_clock.get_fps() self.frame = timestamp.frame_count self.simulation_time = timestamp.elapsed_seconds def tick(self, world, clock): \"\"\"HUD method", "height) self.server_fps = 0 self.frame = 0 self.simulation_time = 0 self.map_name = None", "<filename>src/hud.py<gh_stars>1-10 # Modified work Copyright (c) 2021 <NAME>, <NAME>. # Original work Copyright", "if traffic_light.get_state() == carla.TrafficLightState.Red: world.hud.notification(\"Traffic light changed! Good to go!\") traffic_light.set_state(carla.TrafficLightState.Green) self._info_text =", "* (bar_width - 6), v_offset + 8), (6, 6)) else: rect = pygame.Rect((bar_h_offset,", "str. surface = self._font_mono.render(item, True, (255, 255, 255)) display.blit(surface, (8, v_offset)) v_offset +=", "'', 'Speed: % 15.0f km/h' % (3.6 * math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)),", "['Nearby vehicles:'] def dist(l): return math.sqrt((l.x - transform.location.x)**2 + (l.y - transform.location.y) **", "(255, 0, 0)) def render(self, display): \"\"\"Render for HUD class\"\"\" if self._show_info: info_surface", "world.gnss_sensor.lon)), 'Height: % 18.0f m' % transform.location.z, ''] if isinstance(control, carla.VehicleControl): self._info_text +=", "clock): \"\"\"Fading text method for every tick\"\"\" delta_seconds = 1e-3 * clock.get_time() self.seconds_left", "informations from the world at every tick\"\"\" self._server_clock.tick() self.server_fps = self._server_clock.get_fps() self.frame =", "5.556), ('Jump:', control.jump)] self._info_text += [ '', 'Collision:', collision, '', 'Number of vehicles:", "40)) self.help = HelpText(doc, pygame.font.Font(mono, 24), width, height) self.server_fps = 0 self.frame =", "def __init__(self, width, height, doc): \"\"\"Constructor method\"\"\" self.dim = (width, height) font =", "max(0.0, self.seconds_left - delta_seconds) self.surface.set_alpha(500.0 * self.seconds_left) def render(self, display): \"\"\"Render fading text", "import pygame import carla import utils class HUD(object): \"\"\"Class for HUD text\"\"\" def", "of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. 
# # Original", "v_offset + 8), (bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect_border, 1) fig =", "vehicle_type)) def toggle_info(self): \"\"\"Toggle info on or off\"\"\" self._show_info = not self._show_info def", "fading text\"\"\" text_texture = self.font.render(text, True, color) self.surface = pygame.Surface(self.dim) self.seconds_left = seconds", "delta_seconds) self.surface.set_alpha(500.0 * self.seconds_left) def render(self, display): \"\"\"Render fading text method\"\"\" display.blit(self.surface, self.pos)", "time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)), '', 'Speed: % 15.0f km/h' % (3.6 *", "+= [ ('Speed:', control.speed, 0.0, 5.556), ('Jump:', control.jump)] self._info_text += [ '', 'Collision:',", "pygame.font.Font(pygame.font.get_default_font(), 20) font_name = 'courier' if os.name == 'nt' else 'mono' fonts =", "0)) def render(self, display): \"\"\"Render for HUD class\"\"\" if self._show_info: info_surface = pygame.Surface((250,", "False, points, 2) item = None v_offset += 18 elif isinstance(item, tuple): if", "len(vehicles) > 1: self._info_text += ['Nearby vehicles:'] def dist(l): return math.sqrt((l.x - transform.location.x)**2", "16.0f\\N{DEGREE SIGN} % 2s' % (transform.rotation.yaw, heading), 'Location:% 20s' % ('(% 5.1f, %", "text, seconds=2.0): \"\"\"Notification text\"\"\" self._notifications.set_text(text, seconds=seconds) def error(self, text): \"\"\"Error text\"\"\" self._notifications.set_text('Error: %s'", "200)] max_col = max(1.0, max(collision)) collision = [x / max_col for x in", "self.seconds_left = seconds self.surface.fill((0, 0, 0, 0)) self.surface.blit(text_texture, (10, 11)) def tick(self, _,", "transform.location.y) ** 2 + (l.z - transform.location.z)**2) vehicles = [(dist(x.get_location()), x) for x", "18 elif isinstance(item, tuple): if isinstance(item[1], bool): rect = pygame.Rect((bar_h_offset, v_offset + 8),", "(0, 0)) v_offset = 4 bar_h_offset = 100 bar_width = 106 for item", "under the terms of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>.", "return math.sqrt((l.x - transform.location.x)**2 + (l.y - transform.location.y) ** 2 + (l.z -", "200.0: break vehicle_type = utils.get_actor_display_name(vehicle, truncate=22) self._info_text.append('% 4dm %s' % (dist, vehicle_type)) def", "\"\"\"Render fading text method\"\"\" display.blit(self.surface, self.pos) class HelpText(object): \"\"\" Helper class for text", "4dm %s' % (dist, vehicle_type)) def toggle_info(self): \"\"\"Toggle info on or off\"\"\" self._show_info", "isinstance(item, tuple): if isinstance(item[1], bool): rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))", "= (680, len(lines) * 22 + 12) self.pos = (0.5 * width -", "[x for x in pygame.font.get_fonts() if font_name in x] default_font = 'ubuntumono' mono", "< 89.5 else '' heading += 'S' if abs(transform.rotation.yaw) > 90.5 else ''", "+= 'E' if 179.5 > transform.rotation.yaw > 0.5 else '' heading += 'W'", "go!\") traffic_light.set_state(carla.TrafficLightState.Green) self._info_text = [ 'Server: % 16.0f FPS' % self.server_fps, 'Client: %", "work is licensed under the terms of the MIT license. 
# For a", "max_col = max(1.0, max(collision)) collision = [x / max_col for x in collision]", "% waypoint.road_id, 'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)), '', 'Speed: % 15.0f km/h'", "v_offset += 18 self._notifications.render(display) self.help.render(display) class FadingText(object): \"\"\" Class for fading text \"\"\"", "elif isinstance(control, carla.WalkerControl): self._info_text += [ ('Speed:', control.speed, 0.0, 5.556), ('Jump:', control.jump)] self._info_text", "Helper class for text render\"\"\" def __init__(self, doc, font, width, height): \"\"\"Constructor method\"\"\"", "for i, line in enumerate(lines): text_texture = self.font.render(line, True, (255, 255, 255)) self.surface.blit(text_texture,", "% 12s' % datetime.timedelta(seconds=int(self.simulation_time)), '', 'Speed: % 15.0f km/h' % (3.6 * math.sqrt(vel.x**2", "font = pygame.font.Font(pygame.font.get_default_font(), 20) font_name = 'courier' if os.name == 'nt' else 'mono'", "control.gear)] elif isinstance(control, carla.WalkerControl): self._info_text += [ ('Speed:', control.speed, 0.0, 5.556), ('Jump:', control.jump)]", "\"\"\"Error text\"\"\" self._notifications.set_text('Error: %s' % text, (255, 0, 0)) def render(self, display): \"\"\"Render", "'Client: % 16.0f FPS' % clock.get_fps(), '', 'Vehicle: % 20s' % utils.get_actor_display_name(world.player, truncate=20),", "'' heading += 'S' if abs(transform.rotation.yaw) > 90.5 else '' heading += 'E'", "% 2s' % (transform.rotation.yaw, heading), 'Location:% 20s' % ('(% 5.1f, % 5.1f)' %", "'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (transform.location.x, transform.location.y)), 'GNSS:% 24s' %", "traffic_light.get_state() == carla.TrafficLightState.Red: world.hud.notification(\"Traffic light changed! Good to go!\") traffic_light.set_state(carla.TrafficLightState.Green) self._info_text = [", "-179.5 else '' colhist = world.collision_sensor.get_collision_history() collision = [colhist[x + self.frame - 200]", "os.name == 'nt' else 'mono' fonts = [x for x in pygame.font.get_fonts() if", "'courier' if os.name == 'nt' else 'mono' fonts = [x for x in", "notification(self, text, seconds=2.0): \"\"\"Notification text\"\"\" self._notifications.set_text(text, seconds=seconds) def error(self, text): \"\"\"Error text\"\"\" self._notifications.set_text('Error:", "fonts[0] mono = pygame.font.match_font(mono) self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else", "= (width, height) font = pygame.font.Font(pygame.font.get_default_font(), 20) font_name = 'courier' if os.name ==", "for text render\"\"\" def __init__(self, doc, font, width, height): \"\"\"Constructor method\"\"\" lines =", "<NAME>. # Original work Copyright (c) 2018 Intel Labs. # authors: <NAME> (<EMAIL>)", "if len(item) > 1: points = [(x + 8, v_offset + 8 +", "- item[2]) / (item[3] - item[2]) if item[2] < 0.0: rect = pygame.Rect(", "the world at every tick\"\"\" self._server_clock.tick() self.server_fps = self._server_clock.get_fps() self.frame = timestamp.frame_count self.simulation_time", "changed! 
Good to go!\") traffic_light.set_state(carla.TrafficLightState.Green) self._info_text = [ 'Server: % 16.0f FPS' %", "def tick(self, _, clock): \"\"\"Fading text method for every tick\"\"\" delta_seconds = 1e-3", "self.simulation_time = timestamp.elapsed_seconds def tick(self, world, clock): \"\"\"HUD method for every tick\"\"\" self._notifications.tick(world,", "'', 'Vehicle: % 20s' % utils.get_actor_display_name(world.player, truncate=20), 'Map: % 20s' % world.map.name, 'Road", "20s' % ('(% 5.1f, % 5.1f)' % (transform.location.x, transform.location.y)), 'GNSS:% 24s' % ('(%", "self.help = HelpText(doc, pygame.font.Font(mono, 24), width, height) self.server_fps = 0 self.frame = 0", "self._notifications.set_text(text, seconds=seconds) def error(self, text): \"\"\"Error text\"\"\" self._notifications.set_text('Error: %s' % text, (255, 0,", "transform.rotation.yaw > -179.5 else '' colhist = world.collision_sensor.get_collision_history() collision = [colhist[x + self.frame", "('Manual:', control.manual_gear_shift), 'Gear: %s' % {-1: 'R', 0: 'N'}.get(control.gear, control.gear)] elif isinstance(control, carla.WalkerControl):", "self.dim = (680, len(lines) * 22 + 12) self.pos = (0.5 * width", "== 'nt' else 14) self._notifications = FadingText(font, (width, 40), (0, height - 40))", "0: 'N'}.get(control.gear, control.gear)] elif isinstance(control, carla.WalkerControl): self._info_text += [ ('Speed:', control.speed, 0.0, 5.556),", "else '' heading += 'S' if abs(transform.rotation.yaw) > 90.5 else '' heading +=", "('(% 5.1f, % 5.1f)' % (transform.location.x, transform.location.y)), 'GNSS:% 24s' % ('(% 2.6f, %", "pos): \"\"\"Constructor method\"\"\" self.font = font self.dim = dim self.pos = pos self.seconds_left", "% 16.0f FPS' % clock.get_fps(), '', 'Vehicle: % 20s' % utils.get_actor_display_name(world.player, truncate=20), 'Map:", "heading = 'N' if abs(transform.rotation.yaw) < 89.5 else '' heading += 'S' if", "= [ 'Server: % 16.0f FPS' % self.server_fps, 'Client: % 16.0f FPS' %", "transform.location.z, ''] if isinstance(control, carla.VehicleControl): self._info_text += [ ('Throttle:', control.throttle, 0.0, 1.0), ('Steer:',", "'', 'Number of vehicles: % 8d' % len(vehicles)] if len(vehicles) > 1: self._info_text", "== 'nt' else 'mono' fonts = [x for x in pygame.font.get_fonts() if font_name", "world, clock): \"\"\"HUD method for every tick\"\"\" self._notifications.tick(world, clock) self.map_name = world.map.name if", "= 1e-3 * clock.get_time() self.seconds_left = max(0.0, self.seconds_left - delta_seconds) self.surface.set_alpha(500.0 * self.seconds_left)", "of vehicles: % 8d' % len(vehicles)] if len(vehicles) > 1: self._info_text += ['Nearby", "timestamp): \"\"\"Gets informations from the world at every tick\"\"\" self._server_clock.tick() self.server_fps = self._server_clock.get_fps()", "[colhist[x + self.frame - 200] for x in range(0, 200)] max_col = max(1.0,", "('Jump:', control.jump)] self._info_text += [ '', 'Collision:', collision, '', 'Number of vehicles: %", "= max(0.0, self.seconds_left - delta_seconds) self.surface.set_alpha(500.0 * self.seconds_left) def render(self, display): \"\"\"Render fading", "datetime.timedelta(seconds=int(self.simulation_time)), '', 'Speed: % 15.0f km/h' % (3.6 * math.sqrt(vel.x**2 + vel.y**2 +", "% 15.0f km/h' % (3.6 * math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)), u'Heading:% 16.0f\\N{DEGREE", "- transform.location.y) ** 2 + (l.z - transform.location.z)**2) vehicles = [(dist(x.get_location()), x) for", "if x.id != world.player.id] for dist, vehicle in 
sorted(vehicles): if dist > 200.0:", "%s' % text, (255, 0, 0)) def render(self, display): \"\"\"Render for HUD class\"\"\"", "(item[3] - item[2]) if item[2] < 0.0: rect = pygame.Rect( (bar_h_offset + fig", "control.brake, 0.0, 1.0), ('Reverse:', control.reverse), ('Hand brake:', control.hand_brake), ('Manual:', control.manual_gear_shift), 'Gear: %s' %", "waypoint.road_id, 'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)), '', 'Speed: % 15.0f km/h' %", "!= world.player.id] for dist, vehicle in sorted(vehicles): if dist > 200.0: break vehicle_type", "tick(self, _, clock): \"\"\"Fading text method for every tick\"\"\" delta_seconds = 1e-3 *", "- 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1]) self.seconds_left =", "self.seconds_left = max(0.0, self.seconds_left - delta_seconds) self.surface.set_alpha(500.0 * self.seconds_left) def render(self, display): \"\"\"Render", "FadingText(object): \"\"\" Class for fading text \"\"\" def __init__(self, font, dim, pos): \"\"\"Constructor", "'Speed: % 15.0f km/h' % (3.6 * math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)), u'Heading:%", "(c) 2021 <NAME>, <NAME>. # Original work Copyright (c) 2018 Intel Labs. #", "import math import os import pygame import carla import utils class HUD(object): \"\"\"Class", "('Speed:', control.speed, 0.0, 5.556), ('Jump:', control.jump)] self._info_text += [ '', 'Collision:', collision, '',", "= FadingText(font, (width, 40), (0, height - 40)) self.help = HelpText(doc, pygame.font.Font(mono, 24),", "def __init__(self, font, dim, pos): \"\"\"Constructor method\"\"\" self.font = font self.dim = dim", "_, clock): \"\"\"Fading text method for every tick\"\"\" delta_seconds = 1e-3 * clock.get_time()", "(0, height - 40)) self.help = HelpText(doc, pygame.font.Font(mono, 24), width, height) self.server_fps =", "= [colhist[x + self.frame - 200] for x in range(0, 200)] max_col =", "'' heading += 'W' if -0.5 > transform.rotation.yaw > -179.5 else '' colhist", "vehicles = world.world.get_actors().filter('vehicle.*') ego_location = world.player.get_location() waypoint = world.map.get_waypoint(ego_location, project_to_road=True) # always make", "(width, 40), (0, height - 40)) self.help = HelpText(doc, pygame.font.Font(mono, 24), width, height)", "self.map_name = None self._show_info = True self._info_text = [] self._server_clock = pygame.time.Clock() def", "= [] self._server_clock = pygame.time.Clock() def on_world_tick(self, timestamp): \"\"\"Gets informations from the world", "2018 Intel Labs. 
# authors: <NAME> (<EMAIL>) # # This work is licensed", "255, 255), rect) item = item[0] if item: # At this point has", "else '' colhist = world.collision_sensor.get_collision_history() collision = [colhist[x + self.frame - 200] for", "in range(0, 200)] max_col = max(1.0, max(collision)) collision = [x / max_col for", "def notification(self, text, seconds=2.0): \"\"\"Notification text\"\"\" self._notifications.set_text(text, seconds=seconds) def error(self, text): \"\"\"Error text\"\"\"", "(6, 6)) pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1) else:", "'Road id: % 20s' % waypoint.road_id, 'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)), '',", "* self.seconds_left) def render(self, display): \"\"\"Render fading text method\"\"\" display.blit(self.surface, self.pos) class HelpText(object):", "self.dim = (width, height) font = pygame.font.Font(pygame.font.get_default_font(), 20) font_name = 'courier' if os.name", "text\"\"\" self._notifications.set_text('Error: %s' % text, (255, 0, 0)) def render(self, display): \"\"\"Render for", "[ ('Throttle:', control.throttle, 0.0, 1.0), ('Steer:', control.steer, -1.0, 1.0), ('Brake:', control.brake, 0.0, 1.0),", "= 4 bar_h_offset = 100 bar_width = 106 for item in self._info_text: if", "in vehicles if x.id != world.player.id] for dist, vehicle in sorted(vehicles): if dist", "text\"\"\" text_texture = self.font.render(text, True, color) self.surface = pygame.Surface(self.dim) self.seconds_left = seconds self.surface.fill((0,", "heading += 'E' if 179.5 > transform.rotation.yaw > 0.5 else '' heading +=", "world.player.get_velocity() control = world.player.get_control() heading = 'N' if abs(transform.rotation.yaw) < 89.5 else ''", "control.reverse), ('Hand brake:', control.hand_brake), ('Manual:', control.manual_gear_shift), 'Gear: %s' % {-1: 'R', 0: 'N'}.get(control.gear,", "(transform.location.x, transform.location.y)), 'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)), 'Height:", "= pygame.Rect((bar_h_offset, v_offset + 8), (6, 6)) pygame.draw.rect(display, (255, 255, 255), rect, 0", "control.throttle, 0.0, 1.0), ('Steer:', control.steer, -1.0, 1.0), ('Brake:', control.brake, 0.0, 1.0), ('Reverse:', control.reverse),", "set_text(self, text, color=(255, 255, 255), seconds=2.0): \"\"\"Set fading text\"\"\" text_texture = self.font.render(text, True,", "% 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)), 'Height: % 18.0f m' % transform.location.z, ''] if", "(world.gnss_sensor.lat, world.gnss_sensor.lon)), 'Height: % 18.0f m' % transform.location.z, ''] if isinstance(control, carla.VehicleControl): self._info_text", "seconds=2.0): \"\"\"Set fading text\"\"\" text_texture = self.font.render(text, True, color) self.surface = pygame.Surface(self.dim) self.seconds_left", "= world.player.get_traffic_light() if traffic_light.get_state() == carla.TrafficLightState.Red: world.hud.notification(\"Traffic light changed! 
Good to go!\") traffic_light.set_state(carla.TrafficLightState.Green)", "(<EMAIL>) # # This work is licensed under the terms of the MIT", "# Original source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py import datetime import math import os import pygame import", "class\"\"\" if self._show_info: info_surface = pygame.Surface((250, self.dim[1])) info_surface.set_alpha(100) display.blit(info_surface, (0, 0)) v_offset =", "self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14) self._notifications = FadingText(font,", "m' % transform.location.z, ''] if isinstance(control, carla.VehicleControl): self._info_text += [ ('Throttle:', control.throttle, 0.0,", "# Original work Copyright (c) 2018 Intel Labs. # authors: <NAME> (<EMAIL>) #", "'E' if 179.5 > transform.rotation.yaw > 0.5 else '' heading += 'W' if", "else 1) else: rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6)) pygame.draw.rect(display, (255,", "12 if os.name == 'nt' else 14) self._notifications = FadingText(font, (width, 40), (0,", "on or off\"\"\" self._show_info = not self._show_info def notification(self, text, seconds=2.0): \"\"\"Notification text\"\"\"", "self.map_name = world.map.name if not self._show_info: return transform = world.player.get_transform() vel = world.player.get_velocity()", "v_offset + 8), (6, 6)) else: rect = pygame.Rect((bar_h_offset, v_offset + 8), (fig", "* 30) for x, y in enumerate(item)] pygame.draw.lines(display, (255, 136, 0), False, points,", "'R', 0: 'N'}.get(control.gear, control.gear)] elif isinstance(control, carla.WalkerControl): self._info_text += [ ('Speed:', control.speed, 0.0,", "from the world at every tick\"\"\" self._server_clock.tick() self.server_fps = self._server_clock.get_fps() self.frame = timestamp.frame_count", "(item[1] - item[2]) / (item[3] - item[2]) if item[2] < 0.0: rect =", "item[2] < 0.0: rect = pygame.Rect( (bar_h_offset + fig * (bar_width - 6),", "200] for x in range(0, 200)] max_col = max(1.0, max(collision)) collision = [x", "# # Original source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py import datetime import math import os import pygame", "self._show_info = not self._show_info def notification(self, text, seconds=2.0): \"\"\"Notification text\"\"\" self._notifications.set_text(text, seconds=seconds) def", "% transform.location.z, ''] if isinstance(control, carla.VehicleControl): self._info_text += [ ('Throttle:', control.throttle, 0.0, 1.0),", "4 bar_h_offset = 100 bar_width = 106 for item in self._info_text: if v_offset", "18 > self.dim[1]: break if isinstance(item, list): if len(item) > 1: points =", "v_offset)) v_offset += 18 self._notifications.render(display) self.help.render(display) class FadingText(object): \"\"\" Class for fading text", "text \"\"\" def __init__(self, font, dim, pos): \"\"\"Constructor method\"\"\" self.font = font self.dim", "# authors: <NAME> (<EMAIL>) # # This work is licensed under the terms", "+ 8 + (1 - y) * 30) for x, y in enumerate(item)]", "+ self.frame - 200] for x in range(0, 200)] max_col = max(1.0, max(collision))", "dim, pos): \"\"\"Constructor method\"\"\" self.font = font self.dim = dim self.pos = pos", "< 0.0: rect = pygame.Rect( (bar_h_offset + fig * (bar_width - 6), v_offset", "Class for fading text \"\"\" def __init__(self, font, dim, pos): \"\"\"Constructor method\"\"\" self.font", "truncate=20), 'Map: % 20s' % world.map.name, 'Road id: % 20s' % waypoint.road_id, 'Simulation", 
"HelpText(object): \"\"\" Helper class for text render\"\"\" def __init__(self, doc, font, width, height):", "for x in pygame.font.get_fonts() if font_name in x] default_font = 'ubuntumono' mono =", "control.speed, 0.0, 5.556), ('Jump:', control.jump)] self._info_text += [ '', 'Collision:', collision, '', 'Number", "for HUD class\"\"\" if self._show_info: info_surface = pygame.Surface((250, self.dim[1])) info_surface.set_alpha(100) display.blit(info_surface, (0, 0))", "work Copyright (c) 2018 Intel Labs. # authors: <NAME> (<EMAIL>) # # This", "- item[2]) if item[2] < 0.0: rect = pygame.Rect( (bar_h_offset + fig *", "carla.WalkerControl): self._info_text += [ ('Speed:', control.speed, 0.0, 5.556), ('Jump:', control.jump)] self._info_text += [", "vel = world.player.get_velocity() control = world.player.get_control() heading = 'N' if abs(transform.rotation.yaw) < 89.5", "> 200.0: break vehicle_type = utils.get_actor_display_name(vehicle, truncate=22) self._info_text.append('% 4dm %s' % (dist, vehicle_type))", "def __init__(self, doc, font, width, height): \"\"\"Constructor method\"\"\" lines = doc.split('\\n') self.font =", "world.player.get_control() heading = 'N' if abs(transform.rotation.yaw) < 89.5 else '' heading += 'S'", "source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py import datetime import math import os import pygame import carla import", "elif isinstance(item, tuple): if isinstance(item[1], bool): rect = pygame.Rect((bar_h_offset, v_offset + 8), (6,", "self.font = font self.dim = (680, len(lines) * 22 + 12) self.pos =", "text_texture = self.font.render(line, True, (255, 255, 255)) self.surface.blit(text_texture, (22, i * 22)) self._render", "6)) pygame.draw.rect(display, (255, 255, 255), rect_border, 1) fig = (item[1] - item[2]) /", "self.frame = 0 self.simulation_time = 0 self.map_name = None self._show_info = True self._info_text", "= seconds self.surface.fill((0, 0, 0, 0)) self.surface.blit(text_texture, (10, 11)) def tick(self, _, clock):", "control.hand_brake), ('Manual:', control.manual_gear_shift), 'Gear: %s' % {-1: 'R', 0: 'N'}.get(control.gear, control.gear)] elif isinstance(control,", "\"\"\"Toggle on or off the render help\"\"\" self._render = not self._render def render(self,", "(255, 255, 255)) display.blit(surface, (8, v_offset)) v_offset += 18 self._notifications.render(display) self.help.render(display) class FadingText(object):", "+= [ '', 'Collision:', collision, '', 'Number of vehicles: % 8d' % len(vehicles)]", "= pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect_border, 1)", "0.0: rect = pygame.Rect( (bar_h_offset + fig * (bar_width - 6), v_offset +", "[] self._server_clock = pygame.time.Clock() def on_world_tick(self, timestamp): \"\"\"Gets informations from the world at", "v_offset = 4 bar_h_offset = 100 bar_width = 106 for item in self._info_text:", "control = world.player.get_control() heading = 'N' if abs(transform.rotation.yaw) < 89.5 else '' heading", "if abs(transform.rotation.yaw) < 89.5 else '' heading += 'S' if abs(transform.rotation.yaw) > 90.5", "2s' % (transform.rotation.yaw, heading), 'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (transform.location.x,", "+= ['Nearby vehicles:'] def dist(l): return math.sqrt((l.x - transform.location.x)**2 + (l.y - transform.location.y)", "utils.get_actor_display_name(world.player, truncate=20), 'Map: % 20s' % world.map.name, 'Road id: % 20s' % waypoint.road_id,", "points, 2) item = None v_offset += 18 elif 
isinstance(item, tuple): if isinstance(item[1],", "6)) pygame.draw.rect(display, (255, 255, 255), rect) item = item[0] if item: # At", "to go!\") traffic_light.set_state(carla.TrafficLightState.Green) self._info_text = [ 'Server: % 16.0f FPS' % self.server_fps, 'Client:", "item[1] else 1) else: rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6)) pygame.draw.rect(display,", "is licensed under the terms of the MIT license. # For a copy,", "abs(transform.rotation.yaw) > 90.5 else '' heading += 'E' if 179.5 > transform.rotation.yaw >", "('Brake:', control.brake, 0.0, 1.0), ('Reverse:', control.reverse), ('Hand brake:', control.hand_brake), ('Manual:', control.manual_gear_shift), 'Gear: %s'", "world.world.get_actors().filter('vehicle.*') ego_location = world.player.get_location() waypoint = world.map.get_waypoint(ego_location, project_to_road=True) # always make traffic lights", "def tick(self, world, clock): \"\"\"HUD method for every tick\"\"\" self._notifications.tick(world, clock) self.map_name =", "self.font.render(text, True, color) self.surface = pygame.Surface(self.dim) self.seconds_left = seconds self.surface.fill((0, 0, 0, 0))", "clock.get_time() self.seconds_left = max(0.0, self.seconds_left - delta_seconds) self.surface.set_alpha(500.0 * self.seconds_left) def render(self, display):", "* height - 0.5 * self.dim[1]) self.seconds_left = 0 self.surface = pygame.Surface(self.dim) self.surface.fill((0,", "= 100 bar_width = 106 for item in self._info_text: if v_offset + 18", "0 self.surface = pygame.Surface(self.dim) self.surface.fill((0, 0, 0, 0)) for i, line in enumerate(lines):", "every tick\"\"\" self._server_clock.tick() self.server_fps = self._server_clock.get_fps() self.frame = timestamp.frame_count self.simulation_time = timestamp.elapsed_seconds def", "'Gear: %s' % {-1: 'R', 0: 'N'}.get(control.gear, control.gear)] elif isinstance(control, carla.WalkerControl): self._info_text +=", "> 1: self._info_text += ['Nearby vehicles:'] def dist(l): return math.sqrt((l.x - transform.location.x)**2 +", "self._info_text.append('% 4dm %s' % (dist, vehicle_type)) def toggle_info(self): \"\"\"Toggle info on or off\"\"\"", "os.name == 'nt' else 14) self._notifications = FadingText(font, (width, 40), (0, height -", "if not self._show_info: return transform = world.player.get_transform() vel = world.player.get_velocity() control = world.player.get_control()", "% (transform.rotation.yaw, heading), 'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (transform.location.x, transform.location.y)),", "def error(self, text): \"\"\"Error text\"\"\" self._notifications.set_text('Error: %s' % text, (255, 0, 0)) def", "bar_width = 106 for item in self._info_text: if v_offset + 18 > self.dim[1]:", "points = [(x + 8, v_offset + 8 + (1 - y) *", "world.player.is_at_traffic_light(): traffic_light = world.player.get_traffic_light() if traffic_light.get_state() == carla.TrafficLightState.Red: world.hud.notification(\"Traffic light changed! Good to", "* width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])", "if item: # At this point has to be a str. 
surface =", "[x / max_col for x in collision] vehicles = world.world.get_actors().filter('vehicle.*') ego_location = world.player.get_location()", "else '' heading += 'E' if 179.5 > transform.rotation.yaw > 0.5 else ''", "90.5 else '' heading += 'E' if 179.5 > transform.rotation.yaw > 0.5 else", "+= 'W' if -0.5 > transform.rotation.yaw > -179.5 else '' colhist = world.collision_sensor.get_collision_history()", "= self._server_clock.get_fps() self.frame = timestamp.frame_count self.simulation_time = timestamp.elapsed_seconds def tick(self, world, clock): \"\"\"HUD", "for x in collision] vehicles = world.world.get_actors().filter('vehicle.*') ego_location = world.player.get_location() waypoint = world.map.get_waypoint(ego_location,", "False self.surface.set_alpha(220) def toggle(self): \"\"\"Toggle on or off the render help\"\"\" self._render =", "2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)), 'Height: % 18.0f m' % transform.location.z, '']", "15.0f km/h' % (3.6 * math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)), u'Heading:% 16.0f\\N{DEGREE SIGN}", "math import os import pygame import carla import utils class HUD(object): \"\"\"Class for", "display.blit(self.surface, self.pos) class HelpText(object): \"\"\" Helper class for text render\"\"\" def __init__(self, doc,", "method\"\"\" self.dim = (width, height) font = pygame.font.Font(pygame.font.get_default_font(), 20) font_name = 'courier' if", "% world.map.name, 'Road id: % 20s' % waypoint.road_id, 'Simulation time: % 12s' %", "on or off the render help\"\"\" self._render = not self._render def render(self, display):", "+ vel.z**2)), u'Heading:% 16.0f\\N{DEGREE SIGN} % 2s' % (transform.rotation.yaw, heading), 'Location:% 20s' %", "+ fig * (bar_width - 6), v_offset + 8), (6, 6)) else: rect", "255, 255)) self.surface.blit(text_texture, (22, i * 22)) self._render = False self.surface.set_alpha(220) def toggle(self):", "v_offset + 8), (6, 6)) pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1]", "transform.location.z)**2) vehicles = [(dist(x.get_location()), x) for x in vehicles if x.id != world.player.id]", "tick\"\"\" self._server_clock.tick() self.server_fps = self._server_clock.get_fps() self.frame = timestamp.frame_count self.simulation_time = timestamp.elapsed_seconds def tick(self,", "= 'courier' if os.name == 'nt' else 'mono' fonts = [x for x", "vehicle in sorted(vehicles): if dist > 200.0: break vehicle_type = utils.get_actor_display_name(vehicle, truncate=22) self._info_text.append('%", "% 16.0f FPS' % self.server_fps, 'Client: % 16.0f FPS' % clock.get_fps(), '', 'Vehicle:", "''] if isinstance(control, carla.VehicleControl): self._info_text += [ ('Throttle:', control.throttle, 0.0, 1.0), ('Steer:', control.steer,", "- transform.location.z)**2) vehicles = [(dist(x.get_location()), x) for x in vehicles if x.id !=", "world.collision_sensor.get_collision_history() collision = [colhist[x + self.frame - 200] for x in range(0, 200)]", "0 if item[1] else 1) else: rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width,", "% len(vehicles)] if len(vehicles) > 1: self._info_text += ['Nearby vehicles:'] def dist(l): return", "font, dim, pos): \"\"\"Constructor method\"\"\" self.font = font self.dim = dim self.pos =", "a copy, see <https://opensource.org/licenses/MIT>. # # Original source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py import datetime import math", "item: # At this point has to be a str. 
surface = self._font_mono.render(item,", "if self._show_info: info_surface = pygame.Surface((250, self.dim[1])) info_surface.set_alpha(100) display.blit(info_surface, (0, 0)) v_offset = 4", "% 18.0f m' % transform.location.z, ''] if isinstance(control, carla.VehicleControl): self._info_text += [ ('Throttle:',", "'W' if -0.5 > transform.rotation.yaw > -179.5 else '' colhist = world.collision_sensor.get_collision_history() collision", "'S' if abs(transform.rotation.yaw) > 90.5 else '' heading += 'E' if 179.5 >", "8, v_offset + 8 + (1 - y) * 30) for x, y", "width, height): \"\"\"Constructor method\"\"\" lines = doc.split('\\n') self.font = font self.dim = (680,", "self.surface.blit(text_texture, (10, 11)) def tick(self, _, clock): \"\"\"Fading text method for every tick\"\"\"", "Labs. # authors: <NAME> (<EMAIL>) # # This work is licensed under the", "% clock.get_fps(), '', 'Vehicle: % 20s' % utils.get_actor_display_name(world.player, truncate=20), 'Map: % 20s' %", "pygame.font.Font(mono, 12 if os.name == 'nt' else 14) self._notifications = FadingText(font, (width, 40),", "For a copy, see <https://opensource.org/licenses/MIT>. # # Original source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py import datetime import", "('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)), 'Height: % 18.0f m' % transform.location.z,", "True, color) self.surface = pygame.Surface(self.dim) self.seconds_left = seconds self.surface.fill((0, 0, 0, 0)) self.surface.blit(text_texture,", "copy, see <https://opensource.org/licenses/MIT>. # # Original source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py import datetime import math import", "x) for x in vehicles if x.id != world.player.id] for dist, vehicle in", "= timestamp.elapsed_seconds def tick(self, world, clock): \"\"\"HUD method for every tick\"\"\" self._notifications.tick(world, clock)", "self._render = False self.surface.set_alpha(220) def toggle(self): \"\"\"Toggle on or off the render help\"\"\"", "pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect_border, 1) fig", "5.1f)' % (transform.location.x, transform.location.y)), 'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat,", "+ 8), (fig * bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect) item =", "__init__(self, doc, font, width, height): \"\"\"Constructor method\"\"\" lines = doc.split('\\n') self.font = font", "* 22)) self._render = False self.surface.set_alpha(220) def toggle(self): \"\"\"Toggle on or off the", "0, 0, 0)) for i, line in enumerate(lines): text_texture = self.font.render(line, True, (255,", "if world.player.is_at_traffic_light(): traffic_light = world.player.get_traffic_light() if traffic_light.get_state() == carla.TrafficLightState.Red: world.hud.notification(\"Traffic light changed! 
Good", "* math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)), u'Heading:% 16.0f\\N{DEGREE SIGN} % 2s' % (transform.rotation.yaw,", "1e-3 * clock.get_time() self.seconds_left = max(0.0, self.seconds_left - delta_seconds) self.surface.set_alpha(500.0 * self.seconds_left) def", "if -0.5 > transform.rotation.yaw > -179.5 else '' colhist = world.collision_sensor.get_collision_history() collision =", "self.dim[1])) info_surface.set_alpha(100) display.blit(info_surface, (0, 0)) v_offset = 4 bar_h_offset = 100 bar_width =", "% (world.gnss_sensor.lat, world.gnss_sensor.lon)), 'Height: % 18.0f m' % transform.location.z, ''] if isinstance(control, carla.VehicleControl):", "\"\"\"Render for HUD class\"\"\" if self._show_info: info_surface = pygame.Surface((250, self.dim[1])) info_surface.set_alpha(100) display.blit(info_surface, (0,", "255), seconds=2.0): \"\"\"Set fading text\"\"\" text_texture = self.font.render(text, True, color) self.surface = pygame.Surface(self.dim)", "for x, y in enumerate(item)] pygame.draw.lines(display, (255, 136, 0), False, points, 2) item", "class for text render\"\"\" def __init__(self, doc, font, width, height): \"\"\"Constructor method\"\"\" lines", "pygame.Rect((bar_h_offset, v_offset + 8), (6, 6)) pygame.draw.rect(display, (255, 255, 255), rect, 0 if", "text\"\"\" self._notifications.set_text(text, seconds=seconds) def error(self, text): \"\"\"Error text\"\"\" self._notifications.set_text('Error: %s' % text, (255,", "% 20s' % utils.get_actor_display_name(world.player, truncate=20), 'Map: % 20s' % world.map.name, 'Road id: %", "HUD(object): \"\"\"Class for HUD text\"\"\" def __init__(self, width, height, doc): \"\"\"Constructor method\"\"\" self.dim", "0)) self.surface.blit(text_texture, (10, 11)) def tick(self, _, clock): \"\"\"Fading text method for every", "class HelpText(object): \"\"\" Helper class for text render\"\"\" def __init__(self, doc, font, width,", "def on_world_tick(self, timestamp): \"\"\"Gets informations from the world at every tick\"\"\" self._server_clock.tick() self.server_fps", "\"\"\"Constructor method\"\"\" self.font = font self.dim = dim self.pos = pos self.seconds_left =", "[ ('Speed:', control.speed, 0.0, 5.556), ('Jump:', control.jump)] self._info_text += [ '', 'Collision:', collision,", "on_world_tick(self, timestamp): \"\"\"Gets informations from the world at every tick\"\"\" self._server_clock.tick() self.server_fps =", "'Height: % 18.0f m' % transform.location.z, ''] if isinstance(control, carla.VehicleControl): self._info_text += [", "** 2 + (l.z - transform.location.z)**2) vehicles = [(dist(x.get_location()), x) for x in", "Copyright (c) 2021 <NAME>, <NAME>. # Original work Copyright (c) 2018 Intel Labs.", "v_offset + 8 + (1 - y) * 30) for x, y in", "v_offset + 18 > self.dim[1]: break if isinstance(item, list): if len(item) > 1:", "for fading text \"\"\" def __init__(self, font, dim, pos): \"\"\"Constructor method\"\"\" self.font =", "self.server_fps = self._server_clock.get_fps() self.frame = timestamp.frame_count self.simulation_time = timestamp.elapsed_seconds def tick(self, world, clock):", "'nt' else 'mono' fonts = [x for x in pygame.font.get_fonts() if font_name in", "vel.y**2 + vel.z**2)), u'Heading:% 16.0f\\N{DEGREE SIGN} % 2s' % (transform.rotation.yaw, heading), 'Location:% 20s'", "isinstance(item, list): if len(item) > 1: points = [(x + 8, v_offset +", "Copyright (c) 2018 Intel Labs. 
# authors: <NAME> (<EMAIL>) # # This work", "6)) pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1) else: rect_border", "255, 255)) display.blit(surface, (8, v_offset)) v_offset += 18 self._notifications.render(display) self.help.render(display) class FadingText(object): \"\"\"", "\"\"\"Gets informations from the world at every tick\"\"\" self._server_clock.tick() self.server_fps = self._server_clock.get_fps() self.frame", "= pygame.font.Font(mono, 12 if os.name == 'nt' else 14) self._notifications = FadingText(font, (width,", "= world.map.name if not self._show_info: return transform = world.player.get_transform() vel = world.player.get_velocity() control", "SIGN} % 2s' % (transform.rotation.yaw, heading), 'Location:% 20s' % ('(% 5.1f, % 5.1f)'", "self._info_text += [ '', 'Collision:', collision, '', 'Number of vehicles: % 8d' %", "dist, vehicle in sorted(vehicles): if dist > 200.0: break vehicle_type = utils.get_actor_display_name(vehicle, truncate=22)", "self.seconds_left) def render(self, display): \"\"\"Render fading text method\"\"\" display.blit(self.surface, self.pos) class HelpText(object): \"\"\"", "font self.dim = (680, len(lines) * 22 + 12) self.pos = (0.5 *", "* 22 + 12) self.pos = (0.5 * width - 0.5 * self.dim[0],", "(l.z - transform.location.z)**2) vehicles = [(dist(x.get_location()), x) for x in vehicles if x.id", "i, line in enumerate(lines): text_texture = self.font.render(line, True, (255, 255, 255)) self.surface.blit(text_texture, (22,", "tuple): if isinstance(item[1], bool): rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6)) pygame.draw.rect(display,", "self.seconds_left = 0 self.surface = pygame.Surface(self.dim) def set_text(self, text, color=(255, 255, 255), seconds=2.0):", "len(item) > 1: points = [(x + 8, v_offset + 8 + (1", "0.5 else '' heading += 'W' if -0.5 > transform.rotation.yaw > -179.5 else", "rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6)) pygame.draw.rect(display, (255, 255, 255), rect,", "\"\"\"Notification text\"\"\" self._notifications.set_text(text, seconds=seconds) def error(self, text): \"\"\"Error text\"\"\" self._notifications.set_text('Error: %s' % text,", "# # This work is licensed under the terms of the MIT license.", "('Reverse:', control.reverse), ('Hand brake:', control.hand_brake), ('Manual:', control.manual_gear_shift), 'Gear: %s' % {-1: 'R', 0:", "method\"\"\" self.font = font self.dim = dim self.pos = pos self.seconds_left = 0", "self._render = not self._render def render(self, display): \"\"\"Render help text method\"\"\" if self._render:", "info_surface.set_alpha(100) display.blit(info_surface, (0, 0)) v_offset = 4 bar_h_offset = 100 bar_width = 106", "# For a copy, see <https://opensource.org/licenses/MIT>. 
# # Original source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py import datetime", "'Number of vehicles: % 8d' % len(vehicles)] if len(vehicles) > 1: self._info_text +=", "tick(self, world, clock): \"\"\"HUD method for every tick\"\"\" self._notifications.tick(world, clock) self.map_name = world.map.name", "(fig * bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect) item = item[0] if", "self._info_text += [ ('Speed:', control.speed, 0.0, 5.556), ('Jump:', control.jump)] self._info_text += [ '',", "(1 - y) * 30) for x, y in enumerate(item)] pygame.draw.lines(display, (255, 136,", "width, height, doc): \"\"\"Constructor method\"\"\" self.dim = (width, height) font = pygame.font.Font(pygame.font.get_default_font(), 20)", "= timestamp.frame_count self.simulation_time = timestamp.elapsed_seconds def tick(self, world, clock): \"\"\"HUD method for every", "24), width, height) self.server_fps = 0 self.frame = 0 self.simulation_time = 0 self.map_name", "def set_text(self, text, color=(255, 255, 255), seconds=2.0): \"\"\"Set fading text\"\"\" text_texture = self.font.render(text,", "self.seconds_left - delta_seconds) self.surface.set_alpha(500.0 * self.seconds_left) def render(self, display): \"\"\"Render fading text method\"\"\"", "info on or off\"\"\" self._show_info = not self._show_info def notification(self, text, seconds=2.0): \"\"\"Notification", "else: rect = pygame.Rect((bar_h_offset, v_offset + 8), (fig * bar_width, 6)) pygame.draw.rect(display, (255,", "'', 'Collision:', collision, '', 'Number of vehicles: % 8d' % len(vehicles)] if len(vehicles)", "default_font in fonts else fonts[0] mono = pygame.font.match_font(mono) self._font_mono = pygame.font.Font(mono, 12 if", "* bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect) item = item[0] if item:", "return transform = world.player.get_transform() vel = world.player.get_velocity() control = world.player.get_control() heading = 'N'", "\"\"\"HUD method for every tick\"\"\" self._notifications.tick(world, clock) self.map_name = world.map.name if not self._show_info:", "[ 'Server: % 16.0f FPS' % self.server_fps, 'Client: % 16.0f FPS' % clock.get_fps(),", "+ 8), (6, 6)) else: rect = pygame.Rect((bar_h_offset, v_offset + 8), (fig *", "HelpText(doc, pygame.font.Font(mono, 24), width, height) self.server_fps = 0 self.frame = 0 self.simulation_time =", "HUD text\"\"\" def __init__(self, width, height, doc): \"\"\"Constructor method\"\"\" self.dim = (width, height)", "'N'}.get(control.gear, control.gear)] elif isinstance(control, carla.WalkerControl): self._info_text += [ ('Speed:', control.speed, 0.0, 5.556), ('Jump:',", "self.surface.set_alpha(220) def toggle(self): \"\"\"Toggle on or off the render help\"\"\" self._render = not", "enumerate(lines): text_texture = self.font.render(line, True, (255, 255, 255)) self.surface.blit(text_texture, (22, i * 22))", "(255, 255, 255), rect_border, 1) fig = (item[1] - item[2]) / (item[3] -", "datetime import math import os import pygame import carla import utils class HUD(object):", "seconds=seconds) def error(self, text): \"\"\"Error text\"\"\" self._notifications.set_text('Error: %s' % text, (255, 0, 0))", "% text, (255, 0, 0)) def render(self, display): \"\"\"Render for HUD class\"\"\" if", "if v_offset + 18 > self.dim[1]: break if isinstance(item, list): if len(item) >", "self._info_text = [] self._server_clock = pygame.time.Clock() def on_world_tick(self, timestamp): \"\"\"Gets informations from the", "= world.player.get_velocity() 
control = world.player.get_control() heading = 'N' if abs(transform.rotation.yaw) < 89.5 else", "255), rect) item = item[0] if item: # At this point has to", "= HelpText(doc, pygame.font.Font(mono, 24), width, height) self.server_fps = 0 self.frame = 0 self.simulation_time", "make traffic lights if world.player.is_at_traffic_light(): traffic_light = world.player.get_traffic_light() if traffic_light.get_state() == carla.TrafficLightState.Red: world.hud.notification(\"Traffic", "'' heading += 'E' if 179.5 > transform.rotation.yaw > 0.5 else '' heading", "text\"\"\" def __init__(self, width, height, doc): \"\"\"Constructor method\"\"\" self.dim = (width, height) font", "lights if world.player.is_at_traffic_light(): traffic_light = world.player.get_traffic_light() if traffic_light.get_state() == carla.TrafficLightState.Red: world.hud.notification(\"Traffic light changed!", "if os.name == 'nt' else 'mono' fonts = [x for x in pygame.font.get_fonts()", "control.steer, -1.0, 1.0), ('Brake:', control.brake, 0.0, 1.0), ('Reverse:', control.reverse), ('Hand brake:', control.hand_brake), ('Manual:',", "width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1]) self.seconds_left", "+= [ ('Throttle:', control.throttle, 0.0, 1.0), ('Steer:', control.steer, -1.0, 1.0), ('Brake:', control.brake, 0.0,", "tick\"\"\" self._notifications.tick(world, clock) self.map_name = world.map.name if not self._show_info: return transform = world.player.get_transform()", "self._show_info = True self._info_text = [] self._server_clock = pygame.time.Clock() def on_world_tick(self, timestamp): \"\"\"Gets", "'mono' fonts = [x for x in pygame.font.get_fonts() if font_name in x] default_font", "not self._show_info def notification(self, text, seconds=2.0): \"\"\"Notification text\"\"\" self._notifications.set_text(text, seconds=seconds) def error(self, text):", "* self.dim[1]) self.seconds_left = 0 self.surface = pygame.Surface(self.dim) self.surface.fill((0, 0, 0, 0)) for", "tick\"\"\" delta_seconds = 1e-3 * clock.get_time() self.seconds_left = max(0.0, self.seconds_left - delta_seconds) self.surface.set_alpha(500.0", "def dist(l): return math.sqrt((l.x - transform.location.x)**2 + (l.y - transform.location.y) ** 2 +", "fig = (item[1] - item[2]) / (item[3] - item[2]) if item[2] < 0.0:", "heading), 'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (transform.location.x, transform.location.y)), 'GNSS:% 24s'", "import datetime import math import os import pygame import carla import utils class", "self._info_text: if v_offset + 18 > self.dim[1]: break if isinstance(item, list): if len(item)", "fig * (bar_width - 6), v_offset + 8), (6, 6)) else: rect =", "def render(self, display): \"\"\"Render for HUD class\"\"\" if self._show_info: info_surface = pygame.Surface((250, self.dim[1]))", "pygame.Surface(self.dim) self.surface.fill((0, 0, 0, 0)) for i, line in enumerate(lines): text_texture = self.font.render(line,", "11)) def tick(self, _, clock): \"\"\"Fading text method for every tick\"\"\" delta_seconds =", "(6, 6)) else: rect = pygame.Rect((bar_h_offset, v_offset + 8), (fig * bar_width, 6))", "= pygame.time.Clock() def on_world_tick(self, timestamp): \"\"\"Gets informations from the world at every tick\"\"\"", "self._show_info: return transform = world.player.get_transform() vel = world.player.get_velocity() control = world.player.get_control() heading =", "= max(1.0, max(collision)) collision = [x / max_col for x in collision] vehicles", "1.0), ('Steer:', control.steer, -1.0, 1.0), ('Brake:', control.brake, 0.0, 1.0), ('Reverse:', 
control.reverse), ('Hand brake:',", "'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)), '', 'Speed: % 15.0f km/h' % (3.6", "= world.player.get_location() waypoint = world.map.get_waypoint(ego_location, project_to_road=True) # always make traffic lights if world.player.is_at_traffic_light():", "font self.dim = dim self.pos = pos self.seconds_left = 0 self.surface = pygame.Surface(self.dim)", "'N' if abs(transform.rotation.yaw) < 89.5 else '' heading += 'S' if abs(transform.rotation.yaw) >", "for x in vehicles if x.id != world.player.id] for dist, vehicle in sorted(vehicles):", "Original source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py import datetime import math import os import pygame import carla", "= 0 self.surface = pygame.Surface(self.dim) def set_text(self, text, color=(255, 255, 255), seconds=2.0): \"\"\"Set", "self.simulation_time = 0 self.map_name = None self._show_info = True self._info_text = [] self._server_clock", "transform.location.y)), 'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)), 'Height: %", "dist(l): return math.sqrt((l.x - transform.location.x)**2 + (l.y - transform.location.y) ** 2 + (l.z", "class HUD(object): \"\"\"Class for HUD text\"\"\" def __init__(self, width, height, doc): \"\"\"Constructor method\"\"\"", "default_font if default_font in fonts else fonts[0] mono = pygame.font.match_font(mono) self._font_mono = pygame.font.Font(mono,", "pygame.font.get_fonts() if font_name in x] default_font = 'ubuntumono' mono = default_font if default_font", "= pygame.Surface(self.dim) self.surface.fill((0, 0, 0, 0)) for i, line in enumerate(lines): text_texture =", "> -179.5 else '' colhist = world.collision_sensor.get_collision_history() collision = [colhist[x + self.frame -", "* clock.get_time() self.seconds_left = max(0.0, self.seconds_left - delta_seconds) self.surface.set_alpha(500.0 * self.seconds_left) def render(self,", "+ 18 > self.dim[1]: break if isinstance(item, list): if len(item) > 1: points", "method for every tick\"\"\" delta_seconds = 1e-3 * clock.get_time() self.seconds_left = max(0.0, self.seconds_left", "(transform.rotation.yaw, heading), 'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (transform.location.x, transform.location.y)), 'GNSS:%", "bar_h_offset = 100 bar_width = 106 for item in self._info_text: if v_offset +", "> 1: points = [(x + 8, v_offset + 8 + (1 -", "def toggle(self): \"\"\"Toggle on or off the render help\"\"\" self._render = not self._render", "'Collision:', collision, '', 'Number of vehicles: % 8d' % len(vehicles)] if len(vehicles) >", "else 14) self._notifications = FadingText(font, (width, 40), (0, height - 40)) self.help =", "self.surface.blit(text_texture, (22, i * 22)) self._render = False self.surface.set_alpha(220) def toggle(self): \"\"\"Toggle on", "24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)), 'Height: % 18.0f m'", "Good to go!\") traffic_light.set_state(carla.TrafficLightState.Green) self._info_text = [ 'Server: % 16.0f FPS' % self.server_fps,", "FPS' % self.server_fps, 'Client: % 16.0f FPS' % clock.get_fps(), '', 'Vehicle: % 20s'", "world.player.get_transform() vel = world.player.get_velocity() control = world.player.get_control() heading = 'N' if abs(transform.rotation.yaw) <", "render(self, display): \"\"\"Render for HUD class\"\"\" if self._show_info: info_surface = pygame.Surface((250, self.dim[1])) info_surface.set_alpha(100)", "self.surface.set_alpha(500.0 * 
self.seconds_left) def render(self, display): \"\"\"Render fading text method\"\"\" display.blit(self.surface, self.pos) class", "0, 0)) for i, line in enumerate(lines): text_texture = self.font.render(line, True, (255, 255,", "enumerate(item)] pygame.draw.lines(display, (255, 136, 0), False, points, 2) item = None v_offset +=", "\"\"\"Constructor method\"\"\" self.dim = (width, height) font = pygame.font.Font(pygame.font.get_default_font(), 20) font_name = 'courier'", "(255, 255, 255), rect, 0 if item[1] else 1) else: rect_border = pygame.Rect((bar_h_offset,", "self.frame = timestamp.frame_count self.simulation_time = timestamp.elapsed_seconds def tick(self, world, clock): \"\"\"HUD method for", "0.0, 1.0), ('Steer:', control.steer, -1.0, 1.0), ('Brake:', control.brake, 0.0, 1.0), ('Reverse:', control.reverse), ('Hand", "y in enumerate(item)] pygame.draw.lines(display, (255, 136, 0), False, points, 2) item = None", "render(self, display): \"\"\"Render fading text method\"\"\" display.blit(self.surface, self.pos) class HelpText(object): \"\"\" Helper class", "+ 12) self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 *", "self.help.render(display) class FadingText(object): \"\"\" Class for fading text \"\"\" def __init__(self, font, dim,", "\"\"\" def __init__(self, font, dim, pos): \"\"\"Constructor method\"\"\" self.font = font self.dim =", "255)) self.surface.blit(text_texture, (22, i * 22)) self._render = False self.surface.set_alpha(220) def toggle(self): \"\"\"Toggle", "self._font_mono.render(item, True, (255, 255, 255)) display.blit(surface, (8, v_offset)) v_offset += 18 self._notifications.render(display) self.help.render(display)", "for item in self._info_text: if v_offset + 18 > self.dim[1]: break if isinstance(item,", "u'Heading:% 16.0f\\N{DEGREE SIGN} % 2s' % (transform.rotation.yaw, heading), 'Location:% 20s' % ('(% 5.1f,", "% 8d' % len(vehicles)] if len(vehicles) > 1: self._info_text += ['Nearby vehicles:'] def", "fading text method\"\"\" display.blit(self.surface, self.pos) class HelpText(object): \"\"\" Helper class for text render\"\"\"", "\"\"\"Toggle info on or off\"\"\" self._show_info = not self._show_info def notification(self, text, seconds=2.0):", "traffic lights if world.player.is_at_traffic_light(): traffic_light = world.player.get_traffic_light() if traffic_light.get_state() == carla.TrafficLightState.Red: world.hud.notification(\"Traffic light", "(680, len(lines) * 22 + 12) self.pos = (0.5 * width - 0.5", "= font self.dim = dim self.pos = pos self.seconds_left = 0 self.surface =", "- delta_seconds) self.surface.set_alpha(500.0 * self.seconds_left) def render(self, display): \"\"\"Render fading text method\"\"\" display.blit(self.surface,", "a str. 
surface = self._font_mono.render(item, True, (255, 255, 255)) display.blit(surface, (8, v_offset)) v_offset", "self._notifications.tick(world, clock) self.map_name = world.map.name if not self._show_info: return transform = world.player.get_transform() vel", "FPS' % clock.get_fps(), '', 'Vehicle: % 20s' % utils.get_actor_display_name(world.player, truncate=20), 'Map: % 20s'", "if isinstance(item[1], bool): rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6)) pygame.draw.rect(display, (255,", "off the render help\"\"\" self._render = not self._render def render(self, display): \"\"\"Render help", "('Throttle:', control.throttle, 0.0, 1.0), ('Steer:', control.steer, -1.0, 1.0), ('Brake:', control.brake, 0.0, 1.0), ('Reverse:',", "not self._show_info: return transform = world.player.get_transform() vel = world.player.get_velocity() control = world.player.get_control() heading", "def toggle_info(self): \"\"\"Toggle info on or off\"\"\" self._show_info = not self._show_info def notification(self,", "- 40)) self.help = HelpText(doc, pygame.font.Font(mono, 24), width, height) self.server_fps = 0 self.frame", "colhist = world.collision_sensor.get_collision_history() collision = [colhist[x + self.frame - 200] for x in", "world.map.get_waypoint(ego_location, project_to_road=True) # always make traffic lights if world.player.is_at_traffic_light(): traffic_light = world.player.get_traffic_light() if", "traffic_light = world.player.get_traffic_light() if traffic_light.get_state() == carla.TrafficLightState.Red: world.hud.notification(\"Traffic light changed! Good to go!\")", "(3.6 * math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)), u'Heading:% 16.0f\\N{DEGREE SIGN} % 2s' %", "\"\"\"Constructor method\"\"\" lines = doc.split('\\n') self.font = font self.dim = (680, len(lines) *", "20s' % world.map.name, 'Road id: % 20s' % waypoint.road_id, 'Simulation time: % 12s'", "# Modified work Copyright (c) 2021 <NAME>, <NAME>. # Original work Copyright (c)", "= world.collision_sensor.get_collision_history() collision = [colhist[x + self.frame - 200] for x in range(0,", "max(1.0, max(collision)) collision = [x / max_col for x in collision] vehicles =", "0 self.map_name = None self._show_info = True self._info_text = [] self._server_clock = pygame.time.Clock()", "1) else: rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6)) pygame.draw.rect(display, (255, 255,", "len(lines) * 22 + 12) self.pos = (0.5 * width - 0.5 *", "= 0 self.surface = pygame.Surface(self.dim) self.surface.fill((0, 0, 0, 0)) for i, line in", "\"\"\"Set fading text\"\"\" text_texture = self.font.render(text, True, color) self.surface = pygame.Surface(self.dim) self.seconds_left =", "/ max_col for x in collision] vehicles = world.world.get_actors().filter('vehicle.*') ego_location = world.player.get_location() waypoint", "pygame.draw.rect(display, (255, 255, 255), rect) item = item[0] if item: # At this", "if item[2] < 0.0: rect = pygame.Rect( (bar_h_offset + fig * (bar_width -", "(l.y - transform.location.y) ** 2 + (l.z - transform.location.z)**2) vehicles = [(dist(x.get_location()), x)", "'' colhist = world.collision_sensor.get_collision_history() collision = [colhist[x + self.frame - 200] for x", "= pygame.font.Font(pygame.font.get_default_font(), 20) font_name = 'courier' if os.name == 'nt' else 'mono' fonts", "the terms of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. 
#", "= doc.split('\\n') self.font = font self.dim = (680, len(lines) * 22 + 12)", "for HUD text\"\"\" def __init__(self, width, height, doc): \"\"\"Constructor method\"\"\" self.dim = (width,", "self.dim = dim self.pos = pos self.seconds_left = 0 self.surface = pygame.Surface(self.dim) def", "self.surface.fill((0, 0, 0, 0)) for i, line in enumerate(lines): text_texture = self.font.render(line, True,", "= default_font if default_font in fonts else fonts[0] mono = pygame.font.match_font(mono) self._font_mono =", "0)) v_offset = 4 bar_h_offset = 100 bar_width = 106 for item in", "seconds=2.0): \"\"\"Notification text\"\"\" self._notifications.set_text(text, seconds=seconds) def error(self, text): \"\"\"Error text\"\"\" self._notifications.set_text('Error: %s' %", "self.surface = pygame.Surface(self.dim) def set_text(self, text, color=(255, 255, 255), seconds=2.0): \"\"\"Set fading text\"\"\"", "__init__(self, width, height, doc): \"\"\"Constructor method\"\"\" self.dim = (width, height) font = pygame.font.Font(pygame.font.get_default_font(),", "item = item[0] if item: # At this point has to be a", "2 + (l.z - transform.location.z)**2) vehicles = [(dist(x.get_location()), x) for x in vehicles", "world.hud.notification(\"Traffic light changed! Good to go!\") traffic_light.set_state(carla.TrafficLightState.Green) self._info_text = [ 'Server: % 16.0f", "= self.font.render(text, True, color) self.surface = pygame.Surface(self.dim) self.seconds_left = seconds self.surface.fill((0, 0, 0,", "carla import utils class HUD(object): \"\"\"Class for HUD text\"\"\" def __init__(self, width, height,", "https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py import datetime import math import os import pygame import carla import utils", "22)) self._render = False self.surface.set_alpha(220) def toggle(self): \"\"\"Toggle on or off the render", "in pygame.font.get_fonts() if font_name in x] default_font = 'ubuntumono' mono = default_font if", "(width, height) font = pygame.font.Font(pygame.font.get_default_font(), 20) font_name = 'courier' if os.name == 'nt'", "= pygame.Surface((250, self.dim[1])) info_surface.set_alpha(100) display.blit(info_surface, (0, 0)) v_offset = 4 bar_h_offset = 100", "waypoint = world.map.get_waypoint(ego_location, project_to_road=True) # always make traffic lights if world.player.is_at_traffic_light(): traffic_light =", "% 20s' % world.map.name, 'Road id: % 20s' % waypoint.road_id, 'Simulation time: %", "255), rect_border, 1) fig = (item[1] - item[2]) / (item[3] - item[2]) if", "dim self.pos = pos self.seconds_left = 0 self.surface = pygame.Surface(self.dim) def set_text(self, text,", "in enumerate(lines): text_texture = self.font.render(line, True, (255, 255, 255)) self.surface.blit(text_texture, (22, i *", "height - 0.5 * self.dim[1]) self.seconds_left = 0 self.surface = pygame.Surface(self.dim) self.surface.fill((0, 0,", "import carla import utils class HUD(object): \"\"\"Class for HUD text\"\"\" def __init__(self, width,", "rect = pygame.Rect( (bar_h_offset + fig * (bar_width - 6), v_offset + 8),", "= pygame.Rect( (bar_h_offset + fig * (bar_width - 6), v_offset + 8), (6,", "pygame.Surface((250, self.dim[1])) info_surface.set_alpha(100) display.blit(info_surface, (0, 0)) v_offset = 4 bar_h_offset = 100 bar_width", "toggle_info(self): \"\"\"Toggle info on or off\"\"\" self._show_info = not self._show_info def notification(self, text,", "106 for item in self._info_text: if v_offset + 18 > self.dim[1]: break if", "= 0 self.map_name = None 
self._show_info = True self._info_text = [] self._server_clock =", "default_font = 'ubuntumono' mono = default_font if default_font in fonts else fonts[0] mono", "+ vel.y**2 + vel.z**2)), u'Heading:% 16.0f\\N{DEGREE SIGN} % 2s' % (transform.rotation.yaw, heading), 'Location:%", "+= 18 self._notifications.render(display) self.help.render(display) class FadingText(object): \"\"\" Class for fading text \"\"\" def", "<NAME>, <NAME>. # Original work Copyright (c) 2018 Intel Labs. # authors: <NAME>", "0 self.simulation_time = 0 self.map_name = None self._show_info = True self._info_text = []", "self._info_text = [ 'Server: % 16.0f FPS' % self.server_fps, 'Client: % 16.0f FPS'", "vehicles if x.id != world.player.id] for dist, vehicle in sorted(vehicles): if dist >", "rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect_border,", "control.jump)] self._info_text += [ '', 'Collision:', collision, '', 'Number of vehicles: % 8d'", "method for every tick\"\"\" self._notifications.tick(world, clock) self.map_name = world.map.name if not self._show_info: return", "0.0, 1.0), ('Reverse:', control.reverse), ('Hand brake:', control.hand_brake), ('Manual:', control.manual_gear_shift), 'Gear: %s' % {-1:", "rect, 0 if item[1] else 1) else: rect_border = pygame.Rect((bar_h_offset, v_offset + 8),", "rect) item = item[0] if item: # At this point has to be", "FadingText(font, (width, 40), (0, height - 40)) self.help = HelpText(doc, pygame.font.Font(mono, 24), width,", "render\"\"\" def __init__(self, doc, font, width, height): \"\"\"Constructor method\"\"\" lines = doc.split('\\n') self.font", "Modified work Copyright (c) 2021 <NAME>, <NAME>. # Original work Copyright (c) 2018", "= world.world.get_actors().filter('vehicle.*') ego_location = world.player.get_location() waypoint = world.map.get_waypoint(ego_location, project_to_road=True) # always make traffic", "<https://opensource.org/licenses/MIT>. # # Original source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py import datetime import math import os import", "0 self.frame = 0 self.simulation_time = 0 self.map_name = None self._show_info = True", "class FadingText(object): \"\"\" Class for fading text \"\"\" def __init__(self, font, dim, pos):", "2021 <NAME>, <NAME>. # Original work Copyright (c) 2018 Intel Labs. 
# authors:", "1: self._info_text += ['Nearby vehicles:'] def dist(l): return math.sqrt((l.x - transform.location.x)**2 + (l.y", "isinstance(control, carla.VehicleControl): self._info_text += [ ('Throttle:', control.throttle, 0.0, 1.0), ('Steer:', control.steer, -1.0, 1.0),", "= 0 self.simulation_time = 0 self.map_name = None self._show_info = True self._info_text =", "'Map: % 20s' % world.map.name, 'Road id: % 20s' % waypoint.road_id, 'Simulation time:", "- transform.location.x)**2 + (l.y - transform.location.y) ** 2 + (l.z - transform.location.z)**2) vehicles", "None self._show_info = True self._info_text = [] self._server_clock = pygame.time.Clock() def on_world_tick(self, timestamp):", "True, (255, 255, 255)) self.surface.blit(text_texture, (22, i * 22)) self._render = False self.surface.set_alpha(220)", "toggle(self): \"\"\"Toggle on or off the render help\"\"\" self._render = not self._render def", "\"\"\"Fading text method for every tick\"\"\" delta_seconds = 1e-3 * clock.get_time() self.seconds_left =", "carla.VehicleControl): self._info_text += [ ('Throttle:', control.throttle, 0.0, 1.0), ('Steer:', control.steer, -1.0, 1.0), ('Brake:',", "2) item = None v_offset += 18 elif isinstance(item, tuple): if isinstance(item[1], bool):", "# At this point has to be a str. surface = self._font_mono.render(item, True,", "self.surface = pygame.Surface(self.dim) self.surface.fill((0, 0, 0, 0)) for i, line in enumerate(lines): text_texture", "collision = [x / max_col for x in collision] vehicles = world.world.get_actors().filter('vehicle.*') ego_location", "vehicles:'] def dist(l): return math.sqrt((l.x - transform.location.x)**2 + (l.y - transform.location.y) ** 2", "pygame.draw.rect(display, (255, 255, 255), rect_border, 1) fig = (item[1] - item[2]) / (item[3]", "seconds self.surface.fill((0, 0, 0, 0)) self.surface.blit(text_texture, (10, 11)) def tick(self, _, clock): \"\"\"Fading", "if os.name == 'nt' else 14) self._notifications = FadingText(font, (width, 40), (0, height", "('Hand brake:', control.hand_brake), ('Manual:', control.manual_gear_shift), 'Gear: %s' % {-1: 'R', 0: 'N'}.get(control.gear, control.gear)]", "item in self._info_text: if v_offset + 18 > self.dim[1]: break if isinstance(item, list):", "rect_border, 1) fig = (item[1] - item[2]) / (item[3] - item[2]) if item[2]", "+ (l.y - transform.location.y) ** 2 + (l.z - transform.location.z)**2) vehicles = [(dist(x.get_location()),", "every tick\"\"\" delta_seconds = 1e-3 * clock.get_time() self.seconds_left = max(0.0, self.seconds_left - delta_seconds)", "x in collision] vehicles = world.world.get_actors().filter('vehicle.*') ego_location = world.player.get_location() waypoint = world.map.get_waypoint(ego_location, project_to_road=True)", "= None v_offset += 18 elif isinstance(item, tuple): if isinstance(item[1], bool): rect =", "40), (0, height - 40)) self.help = HelpText(doc, pygame.font.Font(mono, 24), width, height) self.server_fps", "pygame.Rect( (bar_h_offset + fig * (bar_width - 6), v_offset + 8), (6, 6))", "font, width, height): \"\"\"Constructor method\"\"\" lines = doc.split('\\n') self.font = font self.dim =", "dist > 200.0: break vehicle_type = utils.get_actor_display_name(vehicle, truncate=22) self._info_text.append('% 4dm %s' % (dist,", "lines = doc.split('\\n') self.font = font self.dim = (680, len(lines) * 22 +", "89.5 else '' heading += 'S' if abs(transform.rotation.yaw) > 90.5 else '' heading", "= [x / max_col for x in collision] vehicles = world.world.get_actors().filter('vehicle.*') ego_location =", 
"utils.get_actor_display_name(vehicle, truncate=22) self._info_text.append('% 4dm %s' % (dist, vehicle_type)) def toggle_info(self): \"\"\"Toggle info on", "self.server_fps, 'Client: % 16.0f FPS' % clock.get_fps(), '', 'Vehicle: % 20s' % utils.get_actor_display_name(world.player,", "if dist > 200.0: break vehicle_type = utils.get_actor_display_name(vehicle, truncate=22) self._info_text.append('% 4dm %s' %", "= pygame.Rect((bar_h_offset, v_offset + 8), (fig * bar_width, 6)) pygame.draw.rect(display, (255, 255, 255),", "mono = pygame.font.match_font(mono) self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)", "self._show_info: info_surface = pygame.Surface((250, self.dim[1])) info_surface.set_alpha(100) display.blit(info_surface, (0, 0)) v_offset = 4 bar_h_offset", "display.blit(surface, (8, v_offset)) v_offset += 18 self._notifications.render(display) self.help.render(display) class FadingText(object): \"\"\" Class for", "world.player.id] for dist, vehicle in sorted(vehicles): if dist > 200.0: break vehicle_type =", "(255, 255, 255)) self.surface.blit(text_texture, (22, i * 22)) self._render = False self.surface.set_alpha(220) def", "color=(255, 255, 255), seconds=2.0): \"\"\"Set fading text\"\"\" text_texture = self.font.render(text, True, color) self.surface", "3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)), 'Height: % 18.0f m' % transform.location.z, ''] if isinstance(control,", "12) self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height", "work Copyright (c) 2021 <NAME>, <NAME>. # Original work Copyright (c) 2018 Intel", "8), (6, 6)) else: rect = pygame.Rect((bar_h_offset, v_offset + 8), (fig * bar_width,", "* self.dim[0], 0.5 * height - 0.5 * self.dim[1]) self.seconds_left = 0 self.surface", "list): if len(item) > 1: points = [(x + 8, v_offset + 8", "30) for x, y in enumerate(item)] pygame.draw.lines(display, (255, 136, 0), False, points, 2)", "if 179.5 > transform.rotation.yaw > 0.5 else '' heading += 'W' if -0.5", "True self._info_text = [] self._server_clock = pygame.time.Clock() def on_world_tick(self, timestamp): \"\"\"Gets informations from", "method\"\"\" display.blit(self.surface, self.pos) class HelpText(object): \"\"\" Helper class for text render\"\"\" def __init__(self,", "= [(x + 8, v_offset + 8 + (1 - y) * 30)", "bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect) item = item[0] if item: #", "(255, 136, 0), False, points, 2) item = None v_offset += 18 elif", "text method for every tick\"\"\" delta_seconds = 1e-3 * clock.get_time() self.seconds_left = max(0.0,", "x in vehicles if x.id != world.player.id] for dist, vehicle in sorted(vehicles): if", "light changed! 
Good to go!\") traffic_light.set_state(carla.TrafficLightState.Green) self._info_text = [ 'Server: % 16.0f FPS'", "0.5 * height - 0.5 * self.dim[1]) self.seconds_left = 0 self.surface = pygame.Surface(self.dim)", "self.surface = pygame.Surface(self.dim) self.seconds_left = seconds self.surface.fill((0, 0, 0, 0)) self.surface.blit(text_texture, (10, 11))", "self._info_text += ['Nearby vehicles:'] def dist(l): return math.sqrt((l.x - transform.location.x)**2 + (l.y -", "if default_font in fonts else fonts[0] mono = pygame.font.match_font(mono) self._font_mono = pygame.font.Font(mono, 12", "sorted(vehicles): if dist > 200.0: break vehicle_type = utils.get_actor_display_name(vehicle, truncate=22) self._info_text.append('% 4dm %s'", "the render help\"\"\" self._render = not self._render def render(self, display): \"\"\"Render help text", "height): \"\"\"Constructor method\"\"\" lines = doc.split('\\n') self.font = font self.dim = (680, len(lines)", "1.0), ('Reverse:', control.reverse), ('Hand brake:', control.hand_brake), ('Manual:', control.manual_gear_shift), 'Gear: %s' % {-1: 'R',", "transform = world.player.get_transform() vel = world.player.get_velocity() control = world.player.get_control() heading = 'N' if", "for dist, vehicle in sorted(vehicles): if dist > 200.0: break vehicle_type = utils.get_actor_display_name(vehicle,", "font_name in x] default_font = 'ubuntumono' mono = default_font if default_font in fonts", "self.seconds_left = 0 self.surface = pygame.Surface(self.dim) self.surface.fill((0, 0, 0, 0)) for i, line", "255, 255), rect_border, 1) fig = (item[1] - item[2]) / (item[3] - item[2])", "[ '', 'Collision:', collision, '', 'Number of vehicles: % 8d' % len(vehicles)] if", "height, doc): \"\"\"Constructor method\"\"\" self.dim = (width, height) font = pygame.font.Font(pygame.font.get_default_font(), 20) font_name", "km/h' % (3.6 * math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)), u'Heading:% 16.0f\\N{DEGREE SIGN} %", "- 6), v_offset + 8), (6, 6)) else: rect = pygame.Rect((bar_h_offset, v_offset +", "MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. # # Original source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py", "[(x + 8, v_offset + 8 + (1 - y) * 30) for", "bool): rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6)) pygame.draw.rect(display, (255, 255, 255),", "len(vehicles)] if len(vehicles) > 1: self._info_text += ['Nearby vehicles:'] def dist(l): return math.sqrt((l.x", "text): \"\"\"Error text\"\"\" self._notifications.set_text('Error: %s' % text, (255, 0, 0)) def render(self, display):", "1.0), ('Brake:', control.brake, 0.0, 1.0), ('Reverse:', control.reverse), ('Hand brake:', control.hand_brake), ('Manual:', control.manual_gear_shift), 'Gear:", "traffic_light.set_state(carla.TrafficLightState.Green) self._info_text = [ 'Server: % 16.0f FPS' % self.server_fps, 'Client: % 16.0f", "item[0] if item: # At this point has to be a str. surface", "+= 'S' if abs(transform.rotation.yaw) > 90.5 else '' heading += 'E' if 179.5", "carla.TrafficLightState.Red: world.hud.notification(\"Traffic light changed! 
Good to go!\") traffic_light.set_state(carla.TrafficLightState.Green) self._info_text = [ 'Server: %", "(8, v_offset)) v_offset += 18 self._notifications.render(display) self.help.render(display) class FadingText(object): \"\"\" Class for fading", "0, 0, 0)) self.surface.blit(text_texture, (10, 11)) def tick(self, _, clock): \"\"\"Fading text method", "= utils.get_actor_display_name(vehicle, truncate=22) self._info_text.append('% 4dm %s' % (dist, vehicle_type)) def toggle_info(self): \"\"\"Toggle info", "collision] vehicles = world.world.get_actors().filter('vehicle.*') ego_location = world.player.get_location() waypoint = world.map.get_waypoint(ego_location, project_to_road=True) # always", "'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)), 'Height: % 18.0f", "% self.server_fps, 'Client: % 16.0f FPS' % clock.get_fps(), '', 'Vehicle: % 20s' %", "'ubuntumono' mono = default_font if default_font in fonts else fonts[0] mono = pygame.font.match_font(mono)", "in collision] vehicles = world.world.get_actors().filter('vehicle.*') ego_location = world.player.get_location() waypoint = world.map.get_waypoint(ego_location, project_to_road=True) #", "% {-1: 'R', 0: 'N'}.get(control.gear, control.gear)] elif isinstance(control, carla.WalkerControl): self._info_text += [ ('Speed:',", "% 5.1f)' % (transform.location.x, transform.location.y)), 'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' %", "def render(self, display): \"\"\"Render fading text method\"\"\" display.blit(self.surface, self.pos) class HelpText(object): \"\"\" Helper", "vel.z**2)), u'Heading:% 16.0f\\N{DEGREE SIGN} % 2s' % (transform.rotation.yaw, heading), 'Location:% 20s' % ('(%", "self.frame - 200] for x in range(0, 200)] max_col = max(1.0, max(collision)) collision", "+ 8, v_offset + 8 + (1 - y) * 30) for x,", "18 self._notifications.render(display) self.help.render(display) class FadingText(object): \"\"\" Class for fading text \"\"\" def __init__(self,", "== carla.TrafficLightState.Red: world.hud.notification(\"Traffic light changed! Good to go!\") traffic_light.set_state(carla.TrafficLightState.Green) self._info_text = [ 'Server:", "v_offset + 8), (fig * bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect) item", "if font_name in x] default_font = 'ubuntumono' mono = default_font if default_font in", "255)) display.blit(surface, (8, v_offset)) v_offset += 18 self._notifications.render(display) self.help.render(display) class FadingText(object): \"\"\" Class", "if isinstance(control, carla.VehicleControl): self._info_text += [ ('Throttle:', control.throttle, 0.0, 1.0), ('Steer:', control.steer, -1.0,", "= self.font.render(line, True, (255, 255, 255)) self.surface.blit(text_texture, (22, i * 22)) self._render =", "0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1]) self.seconds_left = 0", "= False self.surface.set_alpha(220) def toggle(self): \"\"\"Toggle on or off the render help\"\"\" self._render", "1: points = [(x + 8, v_offset + 8 + (1 - y)", "# This work is licensed under the terms of the MIT license. 
#", "self.surface.fill((0, 0, 0, 0)) self.surface.blit(text_texture, (10, 11)) def tick(self, _, clock): \"\"\"Fading text", "in fonts else fonts[0] mono = pygame.font.match_font(mono) self._font_mono = pygame.font.Font(mono, 12 if os.name", "self.dim[1]: break if isinstance(item, list): if len(item) > 1: points = [(x +", "= 'ubuntumono' mono = default_font if default_font in fonts else fonts[0] mono =", "break if isinstance(item, list): if len(item) > 1: points = [(x + 8,", "heading += 'W' if -0.5 > transform.rotation.yaw > -179.5 else '' colhist =", "% (dist, vehicle_type)) def toggle_info(self): \"\"\"Toggle info on or off\"\"\" self._show_info = not", "for x in range(0, 200)] max_col = max(1.0, max(collision)) collision = [x /", "% (3.6 * math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)), u'Heading:% 16.0f\\N{DEGREE SIGN} % 2s'", "= dim self.pos = pos self.seconds_left = 0 self.surface = pygame.Surface(self.dim) def set_text(self,", "(22, i * 22)) self._render = False self.surface.set_alpha(220) def toggle(self): \"\"\"Toggle on or", "pygame.Rect((bar_h_offset, v_offset + 8), (fig * bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect)", "-0.5 > transform.rotation.yaw > -179.5 else '' colhist = world.collision_sensor.get_collision_history() collision = [colhist[x", "isinstance(item[1], bool): rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6)) pygame.draw.rect(display, (255, 255,", "world.map.name, 'Road id: % 20s' % waypoint.road_id, 'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)),", "ego_location = world.player.get_location() waypoint = world.map.get_waypoint(ego_location, project_to_road=True) # always make traffic lights if", "doc, font, width, height): \"\"\"Constructor method\"\"\" lines = doc.split('\\n') self.font = font self.dim", "{-1: 'R', 0: 'N'}.get(control.gear, control.gear)] elif isinstance(control, carla.WalkerControl): self._info_text += [ ('Speed:', control.speed,", "+= 18 elif isinstance(item, tuple): if isinstance(item[1], bool): rect = pygame.Rect((bar_h_offset, v_offset +", "line in enumerate(lines): text_texture = self.font.render(line, True, (255, 255, 255)) self.surface.blit(text_texture, (22, i", "else 'mono' fonts = [x for x in pygame.font.get_fonts() if font_name in x]", "in x] default_font = 'ubuntumono' mono = default_font if default_font in fonts else", "pygame.draw.lines(display, (255, 136, 0), False, points, 2) item = None v_offset += 18", "project_to_road=True) # always make traffic lights if world.player.is_at_traffic_light(): traffic_light = world.player.get_traffic_light() if traffic_light.get_state()", "255, 255), rect, 0 if item[1] else 1) else: rect_border = pygame.Rect((bar_h_offset, v_offset", "else: rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6)) pygame.draw.rect(display, (255, 255, 255),", "licensed under the terms of the MIT license. 
# For a copy, see", "if len(vehicles) > 1: self._info_text += ['Nearby vehicles:'] def dist(l): return math.sqrt((l.x -", "100 bar_width = 106 for item in self._info_text: if v_offset + 18 >", "8), (bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect_border, 1) fig = (item[1] -", "rect = pygame.Rect((bar_h_offset, v_offset + 8), (fig * bar_width, 6)) pygame.draw.rect(display, (255, 255,", "= [x for x in pygame.font.get_fonts() if font_name in x] default_font = 'ubuntumono'", "% ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)), 'Height: % 18.0f m' %", "self._notifications.set_text('Error: %s' % text, (255, 0, 0)) def render(self, display): \"\"\"Render for HUD", "True, (255, 255, 255)) display.blit(surface, (8, v_offset)) v_offset += 18 self._notifications.render(display) self.help.render(display) class", "> self.dim[1]: break if isinstance(item, list): if len(item) > 1: points = [(x", "= font self.dim = (680, len(lines) * 22 + 12) self.pos = (0.5", "\"\"\" Class for fading text \"\"\" def __init__(self, font, dim, pos): \"\"\"Constructor method\"\"\"", "pygame.font.Font(mono, 24), width, height) self.server_fps = 0 self.frame = 0 self.simulation_time = 0", "collision = [colhist[x + self.frame - 200] for x in range(0, 200)] max_col", "delta_seconds = 1e-3 * clock.get_time() self.seconds_left = max(0.0, self.seconds_left - delta_seconds) self.surface.set_alpha(500.0 *", "= 0 self.frame = 0 self.simulation_time = 0 self.map_name = None self._show_info =", "timestamp.frame_count self.simulation_time = timestamp.elapsed_seconds def tick(self, world, clock): \"\"\"HUD method for every tick\"\"\"", "in self._info_text: if v_offset + 18 > self.dim[1]: break if isinstance(item, list): if", "/ (item[3] - item[2]) if item[2] < 0.0: rect = pygame.Rect( (bar_h_offset +", "= item[0] if item: # At this point has to be a str.", "0 self.surface = pygame.Surface(self.dim) def set_text(self, text, color=(255, 255, 255), seconds=2.0): \"\"\"Set fading", "self._notifications.render(display) self.help.render(display) class FadingText(object): \"\"\" Class for fading text \"\"\" def __init__(self, font,", "pygame.font.match_font(mono) self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14) self._notifications =", "self.pos = pos self.seconds_left = 0 self.surface = pygame.Surface(self.dim) def set_text(self, text, color=(255,", "'Server: % 16.0f FPS' % self.server_fps, 'Client: % 16.0f FPS' % clock.get_fps(), '',", "= (item[1] - item[2]) / (item[3] - item[2]) if item[2] < 0.0: rect", "0, 0)) def render(self, display): \"\"\"Render for HUD class\"\"\" if self._show_info: info_surface =", "self._server_clock = pygame.time.Clock() def on_world_tick(self, timestamp): \"\"\"Gets informations from the world at every", "(255, 255, 255), rect) item = item[0] if item: # At this point", "= not self._show_info def notification(self, text, seconds=2.0): \"\"\"Notification text\"\"\" self._notifications.set_text(text, seconds=seconds) def error(self,", "+ (l.z - transform.location.z)**2) vehicles = [(dist(x.get_location()), x) for x in vehicles if", "x, y in enumerate(item)] pygame.draw.lines(display, (255, 136, 0), False, points, 2) item =", "= 'N' if abs(transform.rotation.yaw) < 89.5 else '' heading += 'S' if abs(transform.rotation.yaw)", "or off\"\"\" self._show_info = not self._show_info def notification(self, text, seconds=2.0): \"\"\"Notification text\"\"\" self._notifications.set_text(text,", "x in pygame.font.get_fonts() if font_name in x] default_font = 'ubuntumono' 
mono = default_font", "255), rect, 0 if item[1] else 1) else: rect_border = pygame.Rect((bar_h_offset, v_offset +", "color) self.surface = pygame.Surface(self.dim) self.seconds_left = seconds self.surface.fill((0, 0, 0, 0)) self.surface.blit(text_texture, (10,", "= pygame.Surface(self.dim) def set_text(self, text, color=(255, 255, 255), seconds=2.0): \"\"\"Set fading text\"\"\" text_texture", "- y) * 30) for x, y in enumerate(item)] pygame.draw.lines(display, (255, 136, 0),", "point has to be a str. surface = self._font_mono.render(item, True, (255, 255, 255))", "('Steer:', control.steer, -1.0, 1.0), ('Brake:', control.brake, 0.0, 1.0), ('Reverse:', control.reverse), ('Hand brake:', control.hand_brake),", "text_texture = self.font.render(text, True, color) self.surface = pygame.Surface(self.dim) self.seconds_left = seconds self.surface.fill((0, 0,", "self._info_text += [ ('Throttle:', control.throttle, 0.0, 1.0), ('Steer:', control.steer, -1.0, 1.0), ('Brake:', control.brake,", "display.blit(info_surface, (0, 0)) v_offset = 4 bar_h_offset = 100 bar_width = 106 for", "pygame.Surface(self.dim) self.seconds_left = seconds self.surface.fill((0, 0, 0, 0)) self.surface.blit(text_texture, (10, 11)) def tick(self,", "= [(dist(x.get_location()), x) for x in vehicles if x.id != world.player.id] for dist,", "doc): \"\"\"Constructor method\"\"\" self.dim = (width, height) font = pygame.font.Font(pygame.font.get_default_font(), 20) font_name =", "y) * 30) for x, y in enumerate(item)] pygame.draw.lines(display, (255, 136, 0), False,", "height) font = pygame.font.Font(pygame.font.get_default_font(), 20) font_name = 'courier' if os.name == 'nt' else", "8), (fig * bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect) item = item[0]", "self.font = font self.dim = dim self.pos = pos self.seconds_left = 0 self.surface", "transform.rotation.yaw > 0.5 else '' heading += 'W' if -0.5 > transform.rotation.yaw >", "= pos self.seconds_left = 0 self.surface = pygame.Surface(self.dim) def set_text(self, text, color=(255, 255,", "[(dist(x.get_location()), x) for x in vehicles if x.id != world.player.id] for dist, vehicle", "text method\"\"\" display.blit(self.surface, self.pos) class HelpText(object): \"\"\" Helper class for text render\"\"\" def", "(bar_h_offset + fig * (bar_width - 6), v_offset + 8), (6, 6)) else:", "255, 255), seconds=2.0): \"\"\"Set fading text\"\"\" text_texture = self.font.render(text, True, color) self.surface =", "display): \"\"\"Render for HUD class\"\"\" if self._show_info: info_surface = pygame.Surface((250, self.dim[1])) info_surface.set_alpha(100) display.blit(info_surface,", "if item[1] else 1) else: rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))", "- 0.5 * self.dim[1]) self.seconds_left = 0 self.surface = pygame.Surface(self.dim) self.surface.fill((0, 0, 0,", "always make traffic lights if world.player.is_at_traffic_light(): traffic_light = world.player.get_traffic_light() if traffic_light.get_state() == carla.TrafficLightState.Red:", "vehicles = [(dist(x.get_location()), x) for x in vehicles if x.id != world.player.id] for", "isinstance(control, carla.WalkerControl): self._info_text += [ ('Speed:', control.speed, 0.0, 5.556), ('Jump:', control.jump)] self._info_text +=", "x.id != world.player.id] for dist, vehicle in sorted(vehicles): if dist > 200.0: break", "(0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 *", "= pygame.font.match_font(mono) self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14) self._notifications", 
"control.manual_gear_shift), 'Gear: %s' % {-1: 'R', 0: 'N'}.get(control.gear, control.gear)] elif isinstance(control, carla.WalkerControl): self._info_text", "self.dim[1]) self.seconds_left = 0 self.surface = pygame.Surface(self.dim) self.surface.fill((0, 0, 0, 0)) for i,", "6)) else: rect = pygame.Rect((bar_h_offset, v_offset + 8), (fig * bar_width, 6)) pygame.draw.rect(display,", "8), (6, 6)) pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)", "abs(transform.rotation.yaw) < 89.5 else '' heading += 'S' if abs(transform.rotation.yaw) > 90.5 else", "max_col for x in collision] vehicles = world.world.get_actors().filter('vehicle.*') ego_location = world.player.get_location() waypoint =", "help\"\"\" self._render = not self._render def render(self, display): \"\"\"Render help text method\"\"\" if", "world.player.get_location() waypoint = world.map.get_waypoint(ego_location, project_to_road=True) # always make traffic lights if world.player.is_at_traffic_light(): traffic_light", "% ('(% 5.1f, % 5.1f)' % (transform.location.x, transform.location.y)), 'GNSS:% 24s' % ('(% 2.6f,", "i * 22)) self._render = False self.surface.set_alpha(220) def toggle(self): \"\"\"Toggle on or off", "(bar_width, 6)) pygame.draw.rect(display, (255, 255, 255), rect_border, 1) fig = (item[1] - item[2])", "12s' % datetime.timedelta(seconds=int(self.simulation_time)), '', 'Speed: % 15.0f km/h' % (3.6 * math.sqrt(vel.x**2 +", "fading text \"\"\" def __init__(self, font, dim, pos): \"\"\"Constructor method\"\"\" self.font = font", "for every tick\"\"\" self._notifications.tick(world, clock) self.map_name = world.map.name if not self._show_info: return transform", "-1.0, 1.0), ('Brake:', control.brake, 0.0, 1.0), ('Reverse:', control.reverse), ('Hand brake:', control.hand_brake), ('Manual:', control.manual_gear_shift),", "error(self, text): \"\"\"Error text\"\"\" self._notifications.set_text('Error: %s' % text, (255, 0, 0)) def render(self,", "self.dim[0], 0.5 * height - 0.5 * self.dim[1]) self.seconds_left = 0 self.surface =", "= world.map.get_waypoint(ego_location, project_to_road=True) # always make traffic lights if world.player.is_at_traffic_light(): traffic_light = world.player.get_traffic_light()", "if abs(transform.rotation.yaw) > 90.5 else '' heading += 'E' if 179.5 > transform.rotation.yaw", "0, 0)) self.surface.blit(text_texture, (10, 11)) def tick(self, _, clock): \"\"\"Fading text method for", "display): \"\"\"Render fading text method\"\"\" display.blit(self.surface, self.pos) class HelpText(object): \"\"\" Helper class for", "0)) for i, line in enumerate(lines): text_texture = self.font.render(line, True, (255, 255, 255))", "This work is licensed under the terms of the MIT license. 
# For", "<NAME> (<EMAIL>) # # This work is licensed under the terms of the", "1) fig = (item[1] - item[2]) / (item[3] - item[2]) if item[2] <", "None v_offset += 18 elif isinstance(item, tuple): if isinstance(item[1], bool): rect = pygame.Rect((bar_h_offset,", "pygame import carla import utils class HUD(object): \"\"\"Class for HUD text\"\"\" def __init__(self,", "world at every tick\"\"\" self._server_clock.tick() self.server_fps = self._server_clock.get_fps() self.frame = timestamp.frame_count self.simulation_time =", "18.0f m' % transform.location.z, ''] if isinstance(control, carla.VehicleControl): self._info_text += [ ('Throttle:', control.throttle,", "8d' % len(vehicles)] if len(vehicles) > 1: self._info_text += ['Nearby vehicles:'] def dist(l):", "fonts else fonts[0] mono = pygame.font.match_font(mono) self._font_mono = pygame.font.Font(mono, 12 if os.name ==", "20s' % waypoint.road_id, 'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)), '', 'Speed: % 15.0f", "22 + 12) self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5", "authors: <NAME> (<EMAIL>) # # This work is licensed under the terms of", "5.1f, % 5.1f)' % (transform.location.x, transform.location.y)), 'GNSS:% 24s' % ('(% 2.6f, % 3.6f)'", "math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)), u'Heading:% 16.0f\\N{DEGREE SIGN} % 2s' % (transform.rotation.yaw, heading),", "width, height) self.server_fps = 0 self.frame = 0 self.simulation_time = 0 self.map_name =", "item[2]) / (item[3] - item[2]) if item[2] < 0.0: rect = pygame.Rect( (bar_h_offset", "os import pygame import carla import utils class HUD(object): \"\"\"Class for HUD text\"\"\"", "mono = default_font if default_font in fonts else fonts[0] mono = pygame.font.match_font(mono) self._font_mono", "136, 0), False, points, 2) item = None v_offset += 18 elif isinstance(item,", "this point has to be a str. surface = self._font_mono.render(item, True, (255, 255,", "% utils.get_actor_display_name(world.player, truncate=20), 'Map: % 20s' % world.map.name, 'Road id: % 20s' %", "% 20s' % waypoint.road_id, 'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)), '', 'Speed: %", "pygame.time.Clock() def on_world_tick(self, timestamp): \"\"\"Gets informations from the world at every tick\"\"\" self._server_clock.tick()", "= not self._render def render(self, display): \"\"\"Render help text method\"\"\" if self._render: display.blit(self.surface,", "(10, 11)) def tick(self, _, clock): \"\"\"Fading text method for every tick\"\"\" delta_seconds", "terms of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. # #", "world.player.get_traffic_light() if traffic_light.get_state() == carla.TrafficLightState.Red: world.hud.notification(\"Traffic light changed! 
Good to go!\") traffic_light.set_state(carla.TrafficLightState.Green) self._info_text", "16.0f FPS' % self.server_fps, 'Client: % 16.0f FPS' % clock.get_fps(), '', 'Vehicle: %", "import os import pygame import carla import utils class HUD(object): \"\"\"Class for HUD", "(bar_width - 6), v_offset + 8), (6, 6)) else: rect = pygame.Rect((bar_h_offset, v_offset", "%s' % {-1: 'R', 0: 'N'}.get(control.gear, control.gear)] elif isinstance(control, carla.WalkerControl): self._info_text += [", "0.5 * self.dim[1]) self.seconds_left = 0 self.surface = pygame.Surface(self.dim) self.surface.fill((0, 0, 0, 0))", "truncate=22) self._info_text.append('% 4dm %s' % (dist, vehicle_type)) def toggle_info(self): \"\"\"Toggle info on or", "import utils class HUD(object): \"\"\"Class for HUD text\"\"\" def __init__(self, width, height, doc):", "= None self._show_info = True self._info_text = [] self._server_clock = pygame.time.Clock() def on_world_tick(self,", "pygame.Surface(self.dim) def set_text(self, text, color=(255, 255, 255), seconds=2.0): \"\"\"Set fading text\"\"\" text_texture =", "collision, '', 'Number of vehicles: % 8d' % len(vehicles)] if len(vehicles) > 1:", "+ 8), (6, 6)) pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else", "6), v_offset + 8), (6, 6)) else: rect = pygame.Rect((bar_h_offset, v_offset + 8),", "text render\"\"\" def __init__(self, doc, font, width, height): \"\"\"Constructor method\"\"\" lines = doc.split('\\n')", "= 106 for item in self._info_text: if v_offset + 18 > self.dim[1]: break", "max(collision)) collision = [x / max_col for x in collision] vehicles = world.world.get_actors().filter('vehicle.*')", "brake:', control.hand_brake), ('Manual:', control.manual_gear_shift), 'Gear: %s' % {-1: 'R', 0: 'N'}.get(control.gear, control.gear)] elif", "= world.player.get_transform() vel = world.player.get_velocity() control = world.player.get_control() heading = 'N' if abs(transform.rotation.yaw)", "render help\"\"\" self._render = not self._render def render(self, display): \"\"\"Render help text method\"\"\"", "# always make traffic lights if world.player.is_at_traffic_light(): traffic_light = world.player.get_traffic_light() if traffic_light.get_state() ==", "__init__(self, font, dim, pos): \"\"\"Constructor method\"\"\" self.font = font self.dim = dim self.pos", "fonts = [x for x in pygame.font.get_fonts() if font_name in x] default_font =", "self._notifications = FadingText(font, (width, 40), (0, height - 40)) self.help = HelpText(doc, pygame.font.Font(mono,", "method\"\"\" lines = doc.split('\\n') self.font = font self.dim = (680, len(lines) * 22", "8 + (1 - y) * 30) for x, y in enumerate(item)] pygame.draw.lines(display,", "world.map.name if not self._show_info: return transform = world.player.get_transform() vel = world.player.get_velocity() control =", "self.pos) class HelpText(object): \"\"\" Helper class for text render\"\"\" def __init__(self, doc, font,", "> 90.5 else '' heading += 'E' if 179.5 > transform.rotation.yaw > 0.5", "transform.location.x)**2 + (l.y - transform.location.y) ** 2 + (l.z - transform.location.z)**2) vehicles =", "or off the render help\"\"\" self._render = not self._render def render(self, display): \"\"\"Render", "off\"\"\" self._show_info = not self._show_info def notification(self, text, seconds=2.0): \"\"\"Notification text\"\"\" self._notifications.set_text(text, seconds=seconds)", "- 200] for x in range(0, 200)] max_col = max(1.0, max(collision)) collision =", "item[2]) if item[2] < 0.0: rect = pygame.Rect( (bar_h_offset + fig * 
(bar_width", "> transform.rotation.yaw > 0.5 else '' heading += 'W' if -0.5 > transform.rotation.yaw", "self.server_fps = 0 self.frame = 0 self.simulation_time = 0 self.map_name = None self._show_info", "% (transform.location.x, transform.location.y)), 'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)),", "vehicles: % 8d' % len(vehicles)] if len(vehicles) > 1: self._info_text += ['Nearby vehicles:']", "> transform.rotation.yaw > -179.5 else '' colhist = world.collision_sensor.get_collision_history() collision = [colhist[x +", "not self._render def render(self, display): \"\"\"Render help text method\"\"\" if self._render: display.blit(self.surface, self.pos)", "16.0f FPS' % clock.get_fps(), '', 'Vehicle: % 20s' % utils.get_actor_display_name(world.player, truncate=20), 'Map: %", "'Vehicle: % 20s' % utils.get_actor_display_name(world.player, truncate=20), 'Map: % 20s' % world.map.name, 'Road id:", "0.0, 5.556), ('Jump:', control.jump)] self._info_text += [ '', 'Collision:', collision, '', 'Number of", "self._show_info def notification(self, text, seconds=2.0): \"\"\"Notification text\"\"\" self._notifications.set_text(text, seconds=seconds) def error(self, text): \"\"\"Error", "in enumerate(item)] pygame.draw.lines(display, (255, 136, 0), False, points, 2) item = None v_offset", "pos self.seconds_left = 0 self.surface = pygame.Surface(self.dim) def set_text(self, text, color=(255, 255, 255),", "doc.split('\\n') self.font = font self.dim = (680, len(lines) * 22 + 12) self.pos", "20) font_name = 'courier' if os.name == 'nt' else 'mono' fonts = [x", "to be a str. surface = self._font_mono.render(item, True, (255, 255, 255)) display.blit(surface, (8,", "= world.player.get_control() heading = 'N' if abs(transform.rotation.yaw) < 89.5 else '' heading +=", "%s' % (dist, vehicle_type)) def toggle_info(self): \"\"\"Toggle info on or off\"\"\" self._show_info =", "Intel Labs. # authors: <NAME> (<EMAIL>) # # This work is licensed under", "0), False, points, 2) item = None v_offset += 18 elif isinstance(item, tuple):", "surface = self._font_mono.render(item, True, (255, 255, 255)) display.blit(surface, (8, v_offset)) v_offset += 18", "clock): \"\"\"HUD method for every tick\"\"\" self._notifications.tick(world, clock) self.map_name = world.map.name if not", "break vehicle_type = utils.get_actor_display_name(vehicle, truncate=22) self._info_text.append('% 4dm %s' % (dist, vehicle_type)) def toggle_info(self):", "= self._font_mono.render(item, True, (255, 255, 255)) display.blit(surface, (8, v_offset)) v_offset += 18 self._notifications.render(display)", "(c) 2018 Intel Labs. # authors: <NAME> (<EMAIL>) # # This work is", "\"\"\" Helper class for text render\"\"\" def __init__(self, doc, font, width, height): \"\"\"Constructor", "at every tick\"\"\" self._server_clock.tick() self.server_fps = self._server_clock.get_fps() self.frame = timestamp.frame_count self.simulation_time = timestamp.elapsed_seconds", "the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. # # Original source:", "see <https://opensource.org/licenses/MIT>. 
# # Original source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py import datetime import math import os", "math.sqrt((l.x - transform.location.x)**2 + (l.y - transform.location.y) ** 2 + (l.z - transform.location.z)**2)", "height - 40)) self.help = HelpText(doc, pygame.font.Font(mono, 24), width, height) self.server_fps = 0", "text, color=(255, 255, 255), seconds=2.0): \"\"\"Set fading text\"\"\" text_texture = self.font.render(text, True, color)", "= (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5", "in sorted(vehicles): if dist > 200.0: break vehicle_type = utils.get_actor_display_name(vehicle, truncate=22) self._info_text.append('% 4dm", "if isinstance(item, list): if len(item) > 1: points = [(x + 8, v_offset", "else fonts[0] mono = pygame.font.match_font(mono) self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt'", "item = None v_offset += 18 elif isinstance(item, tuple): if isinstance(item[1], bool): rect", "20s' % utils.get_actor_display_name(world.player, truncate=20), 'Map: % 20s' % world.map.name, 'Road id: % 20s'", "% datetime.timedelta(seconds=int(self.simulation_time)), '', 'Speed: % 15.0f km/h' % (3.6 * math.sqrt(vel.x**2 + vel.y**2", "= pygame.Surface(self.dim) self.seconds_left = seconds self.surface.fill((0, 0, 0, 0)) self.surface.blit(text_texture, (10, 11)) def", "self.font.render(line, True, (255, 255, 255)) self.surface.blit(text_texture, (22, i * 22)) self._render = False", "x] default_font = 'ubuntumono' mono = default_font if default_font in fonts else fonts[0]", "license. # For a copy, see <https://opensource.org/licenses/MIT>. # # Original source: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/automatic_control.py import", "for every tick\"\"\" delta_seconds = 1e-3 * clock.get_time() self.seconds_left = max(0.0, self.seconds_left -", "179.5 > transform.rotation.yaw > 0.5 else '' heading += 'W' if -0.5 >" ]
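The row above is a list of overlapping n-gram windows over a CARLA HUD module (the "Original source" n-gram points at carla's automatic_control.py example). Reassembling the whole HUD class from these windows would run long, but the small HelpText helper is almost entirely covered by the row; a readable sketch of it follows. Indentation, blank lines, the enumerate loop header, and the toggle() method name are assumptions filled in from context rather than text present in the row.

import pygame


class HelpText(object):
    """ Helper class for text render"""

    def __init__(self, doc, font, width, height):
        """Constructor method"""
        lines = doc.split('\n')
        self.font = font
        self.dim = (680, len(lines) * 22 + 12)
        self.pos = (0.5 * width - 0.5 * self.dim[0],
                    0.5 * height - 0.5 * self.dim[1])
        self.seconds_left = 0
        self.surface = pygame.Surface(self.dim)
        self.surface.fill((0, 0, 0, 0))
        # The loop header below is assumed; the n-grams only show its body.
        for i, line in enumerate(lines):
            text_texture = self.font.render(line, True, (255, 255, 255))
            self.surface.blit(text_texture, (22, i * 22))
        self._render = False

    def toggle(self):  # method name assumed; docstring and body come from the n-grams
        """Toggle on or off the render help"""
        self._render = not self._render

    def render(self, display):
        """Render help text method"""
        if self._render:
            display.blit(self.surface, self.pos)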
[]
[ "CirculoPerfeito: def __init__(self): self.cor = 'Azul' self.circuferencia = 4 self.material = 'Papel' def", "4 self.material = 'Papel' def mostra_cor(self): return id(self) if __name__ == '__main__': circulo_primeiro", "'Papel' def mostra_cor(self): return id(self) if __name__ == '__main__': circulo_primeiro = CirculoPerfeito() circulo_segundo", "= CirculoPerfeito() circulo_segundo = CirculoPerfeito() print(type(circulo_primeiro)) print(circulo_primeiro is circulo_segundo) print(id(circulo_primeiro), circulo_primeiro.mostra_cor()) circulo_segundo.cor =", "return id(self) if __name__ == '__main__': circulo_primeiro = CirculoPerfeito() circulo_segundo = CirculoPerfeito() print(type(circulo_primeiro))", "if __name__ == '__main__': circulo_primeiro = CirculoPerfeito() circulo_segundo = CirculoPerfeito() print(type(circulo_primeiro)) print(circulo_primeiro is", "CirculoPerfeito() circulo_segundo = CirculoPerfeito() print(type(circulo_primeiro)) print(circulo_primeiro is circulo_segundo) print(id(circulo_primeiro), circulo_primeiro.mostra_cor()) circulo_segundo.cor = 'Amarelo'", "= CirculoPerfeito() print(type(circulo_primeiro)) print(circulo_primeiro is circulo_segundo) print(id(circulo_primeiro), circulo_primeiro.mostra_cor()) circulo_segundo.cor = 'Amarelo' print(circulo_primeiro.cor, circulo_segundo.cor)", "idade self.nome = nome def cumprimentar(self): return f'Ola {id(self)}' class CirculoPerfeito: def __init__(self):", "{id(self)}' class CirculoPerfeito: def __init__(self): self.cor = 'Azul' self.circuferencia = 4 self.material =", "nome=None, idade=35): self.idade = idade self.nome = nome def cumprimentar(self): return f'Ola {id(self)}'", "circulo_segundo = CirculoPerfeito() print(type(circulo_primeiro)) print(circulo_primeiro is circulo_segundo) print(id(circulo_primeiro), circulo_primeiro.mostra_cor()) circulo_segundo.cor = 'Amarelo' print(circulo_primeiro.cor,", "def mostra_cor(self): return id(self) if __name__ == '__main__': circulo_primeiro = CirculoPerfeito() circulo_segundo =", "__name__ == '__main__': circulo_primeiro = CirculoPerfeito() circulo_segundo = CirculoPerfeito() print(type(circulo_primeiro)) print(circulo_primeiro is circulo_segundo)", "self.nome = nome def cumprimentar(self): return f'Ola {id(self)}' class CirculoPerfeito: def __init__(self): self.cor", "class CirculoPerfeito: def __init__(self): self.cor = 'Azul' self.circuferencia = 4 self.material = 'Papel'", "'__main__': circulo_primeiro = CirculoPerfeito() circulo_segundo = CirculoPerfeito() print(type(circulo_primeiro)) print(circulo_primeiro is circulo_segundo) print(id(circulo_primeiro), circulo_primeiro.mostra_cor())", "cumprimentar(self): return f'Ola {id(self)}' class CirculoPerfeito: def __init__(self): self.cor = 'Azul' self.circuferencia =", "__init__(self, nome=None, idade=35): self.idade = idade self.nome = nome def cumprimentar(self): return f'Ola", "Pessoa: def __init__(self, nome=None, idade=35): self.idade = idade self.nome = nome def cumprimentar(self):", "def cumprimentar(self): return f'Ola {id(self)}' class CirculoPerfeito: def __init__(self): self.cor = 'Azul' self.circuferencia", "= 4 self.material = 'Papel' def mostra_cor(self): return id(self) if __name__ == '__main__':", "= 'Papel' def mostra_cor(self): return id(self) if __name__ == '__main__': circulo_primeiro = CirculoPerfeito()", "circulo_primeiro = CirculoPerfeito() circulo_segundo = CirculoPerfeito() print(type(circulo_primeiro)) print(circulo_primeiro is circulo_segundo) print(id(circulo_primeiro), circulo_primeiro.mostra_cor()) 
circulo_segundo.cor", "idade=35): self.idade = idade self.nome = nome def cumprimentar(self): return f'Ola {id(self)}' class", "nome def cumprimentar(self): return f'Ola {id(self)}' class CirculoPerfeito: def __init__(self): self.cor = 'Azul'", "= idade self.nome = nome def cumprimentar(self): return f'Ola {id(self)}' class CirculoPerfeito: def", "= nome def cumprimentar(self): return f'Ola {id(self)}' class CirculoPerfeito: def __init__(self): self.cor =", "def __init__(self): self.cor = 'Azul' self.circuferencia = 4 self.material = 'Papel' def mostra_cor(self):", "self.circuferencia = 4 self.material = 'Papel' def mostra_cor(self): return id(self) if __name__ ==", "__init__(self): self.cor = 'Azul' self.circuferencia = 4 self.material = 'Papel' def mostra_cor(self): return", "mostra_cor(self): return id(self) if __name__ == '__main__': circulo_primeiro = CirculoPerfeito() circulo_segundo = CirculoPerfeito()", "class Pessoa: def __init__(self, nome=None, idade=35): self.idade = idade self.nome = nome def", "self.cor = 'Azul' self.circuferencia = 4 self.material = 'Papel' def mostra_cor(self): return id(self)", "self.idade = idade self.nome = nome def cumprimentar(self): return f'Ola {id(self)}' class CirculoPerfeito:", "id(self) if __name__ == '__main__': circulo_primeiro = CirculoPerfeito() circulo_segundo = CirculoPerfeito() print(type(circulo_primeiro)) print(circulo_primeiro", "<reponame>limaon/pythonbirds<filename>oo/pessoa.py class Pessoa: def __init__(self, nome=None, idade=35): self.idade = idade self.nome = nome", "'Azul' self.circuferencia = 4 self.material = 'Papel' def mostra_cor(self): return id(self) if __name__", "== '__main__': circulo_primeiro = CirculoPerfeito() circulo_segundo = CirculoPerfeito() print(type(circulo_primeiro)) print(circulo_primeiro is circulo_segundo) print(id(circulo_primeiro),", "def __init__(self, nome=None, idade=35): self.idade = idade self.nome = nome def cumprimentar(self): return", "= 'Azul' self.circuferencia = 4 self.material = 'Papel' def mostra_cor(self): return id(self) if", "self.material = 'Papel' def mostra_cor(self): return id(self) if __name__ == '__main__': circulo_primeiro =", "f'Ola {id(self)}' class CirculoPerfeito: def __init__(self): self.cor = 'Azul' self.circuferencia = 4 self.material", "return f'Ola {id(self)}' class CirculoPerfeito: def __init__(self): self.cor = 'Azul' self.circuferencia = 4" ]
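The row above windows over a small object-identity demo (the filename token names oo/pessoa.py from limaon/pythonbirds). Stitching the overlapping n-grams back together gives roughly the script below; only the layout (indentation and blank lines) is assumed, while every identifier and literal appears verbatim in the row, including the original spelling of circuferencia.

class Pessoa:
    def __init__(self, nome=None, idade=35):
        self.idade = idade
        self.nome = nome

    def cumprimentar(self):
        return f'Ola {id(self)}'


class CirculoPerfeito:
    def __init__(self):
        self.cor = 'Azul'
        self.circuferencia = 4  # spelling kept as it appears in the source n-grams
        self.material = 'Papel'

    def mostra_cor(self):
        return id(self)


if __name__ == '__main__':
    # Two separate instances: `is` compares identity, so this prints False,
    # and changing one instance's attribute does not affect the other.
    circulo_primeiro = CirculoPerfeito()
    circulo_segundo = CirculoPerfeito()
    print(type(circulo_primeiro))
    print(circulo_primeiro is circulo_segundo)
    print(id(circulo_primeiro), circulo_primeiro.mostra_cor())
    circulo_segundo.cor = 'Amarelo'
    print(circulo_primeiro.cor, circulo_segundo.cor)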
[ "re import sys,os def dofeaindex(file,filetype): feamap = {} linenum = 0 for line", "0 while j < len(felist): if 0 == filetype: feamap[felist[j]] = feamap.get(felist[j],0) +", "id = 0 for fea,num in feamap.items(): if num > cutoff: myfile.write(str(id) +", "while j < len(felist): if 0 == filetype: feamap[felist[j]] = feamap.get(felist[j],0) + int(felist[j", "def dofeaindex(file,filetype): feamap = {} linenum = 0 for line in open(file): line", "0 for fea,num in feamap.items(): if num > cutoff: myfile.write(str(id) + ' '", "== \"__main__\": inputfile = sys.argv[1] outputfile = sys.argv[2] filetype = int(sys.argv[3]) cutoff =", "j = 0 while j < len(felist): if 0 == filetype: feamap[felist[j]] =", "__name__ == \"__main__\": inputfile = sys.argv[1] outputfile = sys.argv[2] filetype = int(sys.argv[3]) cutoff", "+= 1 linenum += 1 return feamap,linenum if __name__ == \"__main__\": inputfile =", "@author: cyzhang ''' import re import sys,os def dofeaindex(file,filetype): feamap = {} linenum", "1 return feamap,linenum if __name__ == \"__main__\": inputfile = sys.argv[1] outputfile = sys.argv[2]", "= 0 for line in open(file): line = line.strip() content = line.split('\\t') if", "num > cutoff: myfile.write(str(id) + ' ' + fea + '\\n') id +=", "felist=content[1].split(' ') if len(felist) == 0: continue j = 0 while j <", "= int(sys.argv[4]) myfile = open(outputfile , 'w') feamap,linenum = dofeaindex(inputfile,filetype) id = 0", "+ 1 j += 1 linenum += 1 return feamap,linenum if __name__ ==", "= line.split('\\t') if len(content) != 2: continue felist=content[1].split(' ') if len(felist) == 0:", "+ int(felist[j + 1]) j += 2 else: feamap[felist[j]] = feamap.get(felist[j],0) + 1", "= sys.argv[1] outputfile = sys.argv[2] filetype = int(sys.argv[3]) cutoff = int(sys.argv[4]) myfile =", "= 0 while j < len(felist): if 0 == filetype: feamap[felist[j]] = feamap.get(felist[j],0)", "feamap[felist[j]] = feamap.get(felist[j],0) + int(felist[j + 1]) j += 2 else: feamap[felist[j]] =", "feamap = {} linenum = 0 for line in open(file): line = line.strip()", "content = line.split('\\t') if len(content) != 2: continue felist=content[1].split(' ') if len(felist) ==", "filetype = int(sys.argv[3]) cutoff = int(sys.argv[4]) myfile = open(outputfile , 'w') feamap,linenum =", "feamap,linenum if __name__ == \"__main__\": inputfile = sys.argv[1] outputfile = sys.argv[2] filetype =", "sys,os def dofeaindex(file,filetype): feamap = {} linenum = 0 for line in open(file):", "2 else: feamap[felist[j]] = feamap.get(felist[j],0) + 1 j += 1 linenum += 1", "0 == filetype: feamap[felist[j]] = feamap.get(felist[j],0) + int(felist[j + 1]) j += 2", "filetype: feamap[felist[j]] = feamap.get(felist[j],0) + int(felist[j + 1]) j += 2 else: feamap[felist[j]]", "cutoff = int(sys.argv[4]) myfile = open(outputfile , 'w') feamap,linenum = dofeaindex(inputfile,filetype) id =", "in feamap.items(): if num > cutoff: myfile.write(str(id) + ' ' + fea +", "1 j += 1 linenum += 1 return feamap,linenum if __name__ == \"__main__\":", "linenum = 0 for line in open(file): line = line.strip() content = line.split('\\t')", "if len(felist) == 0: continue j = 0 while j < len(felist): if", "') if len(felist) == 0: continue j = 0 while j < len(felist):", "line.split('\\t') if len(content) != 2: continue felist=content[1].split(' ') if len(felist) == 0: continue", "= 0 for fea,num in feamap.items(): if num > cutoff: myfile.write(str(id) + '", "else: feamap[felist[j]] = feamap.get(felist[j],0) + 1 j += 1 linenum += 1 return", "= dofeaindex(inputfile,filetype) id = 0 for 
fea,num in feamap.items(): if num > cutoff:", "''' import re import sys,os def dofeaindex(file,filetype): feamap = {} linenum = 0", "if num > cutoff: myfile.write(str(id) + ' ' + fea + '\\n') id", "dofeaindex(inputfile,filetype) id = 0 for fea,num in feamap.items(): if num > cutoff: myfile.write(str(id)", "1 linenum += 1 return feamap,linenum if __name__ == \"__main__\": inputfile = sys.argv[1]", "= feamap.get(felist[j],0) + 1 j += 1 linenum += 1 return feamap,linenum if", "if 0 == filetype: feamap[felist[j]] = feamap.get(felist[j],0) + int(felist[j + 1]) j +=", "int(sys.argv[3]) cutoff = int(sys.argv[4]) myfile = open(outputfile , 'w') feamap,linenum = dofeaindex(inputfile,filetype) id", "len(felist) == 0: continue j = 0 while j < len(felist): if 0", "for fea,num in feamap.items(): if num > cutoff: myfile.write(str(id) + ' ' +", "line = line.strip() content = line.split('\\t') if len(content) != 2: continue felist=content[1].split(' ')", "len(felist): if 0 == filetype: feamap[felist[j]] = feamap.get(felist[j],0) + int(felist[j + 1]) j", "if len(content) != 2: continue felist=content[1].split(' ') if len(felist) == 0: continue j", "Created on 2011-5-30 @author: cyzhang ''' import re import sys,os def dofeaindex(file,filetype): feamap", "in open(file): line = line.strip() content = line.split('\\t') if len(content) != 2: continue", "continue j = 0 while j < len(felist): if 0 == filetype: feamap[felist[j]]", "== 0: continue j = 0 while j < len(felist): if 0 ==", "2: continue felist=content[1].split(' ') if len(felist) == 0: continue j = 0 while", "!= 2: continue felist=content[1].split(' ') if len(felist) == 0: continue j = 0", "feamap,linenum = dofeaindex(inputfile,filetype) id = 0 for fea,num in feamap.items(): if num >", "2011-5-30 @author: cyzhang ''' import re import sys,os def dofeaindex(file,filetype): feamap = {}", "return feamap,linenum if __name__ == \"__main__\": inputfile = sys.argv[1] outputfile = sys.argv[2] filetype", "cyzhang ''' import re import sys,os def dofeaindex(file,filetype): feamap = {} linenum =", "== filetype: feamap[felist[j]] = feamap.get(felist[j],0) + int(felist[j + 1]) j += 2 else:", "feamap.get(felist[j],0) + int(felist[j + 1]) j += 2 else: feamap[felist[j]] = feamap.get(felist[j],0) +", "sys.argv[2] filetype = int(sys.argv[3]) cutoff = int(sys.argv[4]) myfile = open(outputfile , 'w') feamap,linenum", "= sys.argv[2] filetype = int(sys.argv[3]) cutoff = int(sys.argv[4]) myfile = open(outputfile , 'w')", "linenum += 1 return feamap,linenum if __name__ == \"__main__\": inputfile = sys.argv[1] outputfile", "= {} linenum = 0 for line in open(file): line = line.strip() content", "int(sys.argv[4]) myfile = open(outputfile , 'w') feamap,linenum = dofeaindex(inputfile,filetype) id = 0 for", "on 2011-5-30 @author: cyzhang ''' import re import sys,os def dofeaindex(file,filetype): feamap =", "+= 1 return feamap,linenum if __name__ == \"__main__\": inputfile = sys.argv[1] outputfile =", "open(outputfile , 'w') feamap,linenum = dofeaindex(inputfile,filetype) id = 0 for fea,num in feamap.items():", "'w') feamap,linenum = dofeaindex(inputfile,filetype) id = 0 for fea,num in feamap.items(): if num", "''' Created on 2011-5-30 @author: cyzhang ''' import re import sys,os def dofeaindex(file,filetype):", "1]) j += 2 else: feamap[felist[j]] = feamap.get(felist[j],0) + 1 j += 1", "feamap.get(felist[j],0) + 1 j += 1 linenum += 1 return feamap,linenum if __name__", "line.strip() content = line.split('\\t') if len(content) != 2: continue felist=content[1].split(' ') if len(felist)", 
"inputfile = sys.argv[1] outputfile = sys.argv[2] filetype = int(sys.argv[3]) cutoff = int(sys.argv[4]) myfile", ", 'w') feamap,linenum = dofeaindex(inputfile,filetype) id = 0 for fea,num in feamap.items(): if", "line in open(file): line = line.strip() content = line.split('\\t') if len(content) != 2:", "feamap.items(): if num > cutoff: myfile.write(str(id) + ' ' + fea + '\\n')", "int(felist[j + 1]) j += 2 else: feamap[felist[j]] = feamap.get(felist[j],0) + 1 j", "continue felist=content[1].split(' ') if len(felist) == 0: continue j = 0 while j", "0: continue j = 0 while j < len(felist): if 0 == filetype:", "import sys,os def dofeaindex(file,filetype): feamap = {} linenum = 0 for line in", "sys.argv[1] outputfile = sys.argv[2] filetype = int(sys.argv[3]) cutoff = int(sys.argv[4]) myfile = open(outputfile", "= open(outputfile , 'w') feamap,linenum = dofeaindex(inputfile,filetype) id = 0 for fea,num in", "fea,num in feamap.items(): if num > cutoff: myfile.write(str(id) + ' ' + fea", "+ 1]) j += 2 else: feamap[felist[j]] = feamap.get(felist[j],0) + 1 j +=", "j += 2 else: feamap[felist[j]] = feamap.get(felist[j],0) + 1 j += 1 linenum", "import re import sys,os def dofeaindex(file,filetype): feamap = {} linenum = 0 for", "= feamap.get(felist[j],0) + int(felist[j + 1]) j += 2 else: feamap[felist[j]] = feamap.get(felist[j],0)", "open(file): line = line.strip() content = line.split('\\t') if len(content) != 2: continue felist=content[1].split('", "0 for line in open(file): line = line.strip() content = line.split('\\t') if len(content)", "< len(felist): if 0 == filetype: feamap[felist[j]] = feamap.get(felist[j],0) + int(felist[j + 1])", "= int(sys.argv[3]) cutoff = int(sys.argv[4]) myfile = open(outputfile , 'w') feamap,linenum = dofeaindex(inputfile,filetype)", "len(content) != 2: continue felist=content[1].split(' ') if len(felist) == 0: continue j =", "if __name__ == \"__main__\": inputfile = sys.argv[1] outputfile = sys.argv[2] filetype = int(sys.argv[3])", "feamap[felist[j]] = feamap.get(felist[j],0) + 1 j += 1 linenum += 1 return feamap,linenum", "> cutoff: myfile.write(str(id) + ' ' + fea + '\\n') id += 1", "j < len(felist): if 0 == filetype: feamap[felist[j]] = feamap.get(felist[j],0) + int(felist[j +", "dofeaindex(file,filetype): feamap = {} linenum = 0 for line in open(file): line =", "myfile = open(outputfile , 'w') feamap,linenum = dofeaindex(inputfile,filetype) id = 0 for fea,num", "+= 2 else: feamap[felist[j]] = feamap.get(felist[j],0) + 1 j += 1 linenum +=", "\"__main__\": inputfile = sys.argv[1] outputfile = sys.argv[2] filetype = int(sys.argv[3]) cutoff = int(sys.argv[4])", "for line in open(file): line = line.strip() content = line.split('\\t') if len(content) !=", "j += 1 linenum += 1 return feamap,linenum if __name__ == \"__main__\": inputfile", "= line.strip() content = line.split('\\t') if len(content) != 2: continue felist=content[1].split(' ') if", "cutoff: myfile.write(str(id) + ' ' + fea + '\\n') id += 1 myfile.close()", "outputfile = sys.argv[2] filetype = int(sys.argv[3]) cutoff = int(sys.argv[4]) myfile = open(outputfile ,", "{} linenum = 0 for line in open(file): line = line.strip() content =" ]
[ "conn_napalm(): def __init__(self, device, connection_key): self.device = device self.connection_key = connection_key #check for", "get_network_driver class conn_napalm(): def __init__(self, device, connection_key): self.device = device self.connection_key = connection_key", "device[connection_key]: device[connection_key]['password'] = device.get('password', '') self.driver = device[connection_key].pop('driver', device.get('platform','')) def connect(self): driver =", "device.get('username', '') if 'password' not in device[connection_key]: device[connection_key]['password'] = device.get('password', '') self.driver =", "'') self.driver = device[connection_key].pop('driver', device.get('platform','')) def connect(self): driver = get_network_driver(self.driver) self.connection = driver(**self.device[self.connection_key])", "key if not in connection_key #could have some default values? if 'hostname' not", "<filename>connections/conn_napalm.py from napalm import get_network_driver class conn_napalm(): def __init__(self, device, connection_key): self.device =", "def connect(self): driver = get_network_driver(self.driver) self.connection = driver(**self.device[self.connection_key]) self.connection.open() return self.connection def close(self):", "class conn_napalm(): def __init__(self, device, connection_key): self.device = device self.connection_key = connection_key #check", "some default values? if 'hostname' not in device[connection_key]: device[connection_key]['hostname'] = device.get('host', device['name']) if", "device.get('password', '') self.driver = device[connection_key].pop('driver', device.get('platform','')) def connect(self): driver = get_network_driver(self.driver) self.connection =", "'username' not in device[connection_key]: device[connection_key]['username'] = device.get('username', '') if 'password' not in device[connection_key]:", "not in device[connection_key]: device[connection_key]['password'] = device.get('password', '') self.driver = device[connection_key].pop('driver', device.get('platform','')) def connect(self):", "= device.get('username', '') if 'password' not in device[connection_key]: device[connection_key]['password'] = device.get('password', '') self.driver", "in device[connection_key]: device[connection_key]['hostname'] = device.get('host', device['name']) if 'username' not in device[connection_key]: device[connection_key]['username'] =", "= device.get('password', '') self.driver = device[connection_key].pop('driver', device.get('platform','')) def connect(self): driver = get_network_driver(self.driver) self.connection", "'password' not in device[connection_key]: device[connection_key]['password'] = device.get('password', '') self.driver = device[connection_key].pop('driver', device.get('platform','')) def", "device.get('platform','')) def connect(self): driver = get_network_driver(self.driver) self.connection = driver(**self.device[self.connection_key]) self.connection.open() return self.connection def", "device['name']) if 'username' not in device[connection_key]: device[connection_key]['username'] = device.get('username', '') if 'password' not", "device[connection_key]['password'] = device.get('password', '') self.driver = device[connection_key].pop('driver', device.get('platform','')) def connect(self): driver = get_network_driver(self.driver)", "= device self.connection_key = connection_key #check for required kwargs, grab root level key", "#check for required kwargs, grab root level key if not in connection_key #could", "not in device[connection_key]: 
device[connection_key]['username'] = device.get('username', '') if 'password' not in device[connection_key]: device[connection_key]['password']", "def __init__(self, device, connection_key): self.device = device self.connection_key = connection_key #check for required", "self.connection_key = connection_key #check for required kwargs, grab root level key if not", "connection_key #check for required kwargs, grab root level key if not in connection_key", "self.device = device self.connection_key = connection_key #check for required kwargs, grab root level", "if not in connection_key #could have some default values? if 'hostname' not in", "napalm import get_network_driver class conn_napalm(): def __init__(self, device, connection_key): self.device = device self.connection_key", "not in connection_key #could have some default values? if 'hostname' not in device[connection_key]:", "in device[connection_key]: device[connection_key]['password'] = device.get('password', '') self.driver = device[connection_key].pop('driver', device.get('platform','')) def connect(self): driver", "kwargs, grab root level key if not in connection_key #could have some default", "device[connection_key]: device[connection_key]['hostname'] = device.get('host', device['name']) if 'username' not in device[connection_key]: device[connection_key]['username'] = device.get('username',", "device[connection_key]: device[connection_key]['username'] = device.get('username', '') if 'password' not in device[connection_key]: device[connection_key]['password'] = device.get('password',", "in connection_key #could have some default values? if 'hostname' not in device[connection_key]: device[connection_key]['hostname']", "default values? if 'hostname' not in device[connection_key]: device[connection_key]['hostname'] = device.get('host', device['name']) if 'username'", "= device[connection_key].pop('driver', device.get('platform','')) def connect(self): driver = get_network_driver(self.driver) self.connection = driver(**self.device[self.connection_key]) self.connection.open() return", "= connection_key #check for required kwargs, grab root level key if not in", "device[connection_key]['hostname'] = device.get('host', device['name']) if 'username' not in device[connection_key]: device[connection_key]['username'] = device.get('username', '')", "if 'hostname' not in device[connection_key]: device[connection_key]['hostname'] = device.get('host', device['name']) if 'username' not in", "'hostname' not in device[connection_key]: device[connection_key]['hostname'] = device.get('host', device['name']) if 'username' not in device[connection_key]:", "root level key if not in connection_key #could have some default values? if", "if 'username' not in device[connection_key]: device[connection_key]['username'] = device.get('username', '') if 'password' not in", "device self.connection_key = connection_key #check for required kwargs, grab root level key if", "have some default values? 
if 'hostname' not in device[connection_key]: device[connection_key]['hostname'] = device.get('host', device['name'])", "device.get('host', device['name']) if 'username' not in device[connection_key]: device[connection_key]['username'] = device.get('username', '') if 'password'", "not in device[connection_key]: device[connection_key]['hostname'] = device.get('host', device['name']) if 'username' not in device[connection_key]: device[connection_key]['username']", "for required kwargs, grab root level key if not in connection_key #could have", "level key if not in connection_key #could have some default values? if 'hostname'", "#could have some default values? if 'hostname' not in device[connection_key]: device[connection_key]['hostname'] = device.get('host',", "in device[connection_key]: device[connection_key]['username'] = device.get('username', '') if 'password' not in device[connection_key]: device[connection_key]['password'] =", "device[connection_key]['username'] = device.get('username', '') if 'password' not in device[connection_key]: device[connection_key]['password'] = device.get('password', '')", "device[connection_key].pop('driver', device.get('platform','')) def connect(self): driver = get_network_driver(self.driver) self.connection = driver(**self.device[self.connection_key]) self.connection.open() return self.connection", "'') if 'password' not in device[connection_key]: device[connection_key]['password'] = device.get('password', '') self.driver = device[connection_key].pop('driver',", "grab root level key if not in connection_key #could have some default values?", "self.driver = device[connection_key].pop('driver', device.get('platform','')) def connect(self): driver = get_network_driver(self.driver) self.connection = driver(**self.device[self.connection_key]) self.connection.open()", "connection_key #could have some default values? if 'hostname' not in device[connection_key]: device[connection_key]['hostname'] =", "__init__(self, device, connection_key): self.device = device self.connection_key = connection_key #check for required kwargs,", "import get_network_driver class conn_napalm(): def __init__(self, device, connection_key): self.device = device self.connection_key =", "= device.get('host', device['name']) if 'username' not in device[connection_key]: device[connection_key]['username'] = device.get('username', '') if", "values? if 'hostname' not in device[connection_key]: device[connection_key]['hostname'] = device.get('host', device['name']) if 'username' not", "from napalm import get_network_driver class conn_napalm(): def __init__(self, device, connection_key): self.device = device", "connect(self): driver = get_network_driver(self.driver) self.connection = driver(**self.device[self.connection_key]) self.connection.open() return self.connection def close(self): self.connection.close()", "if 'password' not in device[connection_key]: device[connection_key]['password'] = device.get('password', '') self.driver = device[connection_key].pop('driver', device.get('platform',''))", "device, connection_key): self.device = device self.connection_key = connection_key #check for required kwargs, grab", "required kwargs, grab root level key if not in connection_key #could have some", "connection_key): self.device = device self.connection_key = connection_key #check for required kwargs, grab root" ]
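The row above covers a thin wrapper around NAPALM connections (connections/conn_napalm.py per the filename token). It fills in hostname, username, and password defaults from the device's root-level keys before opening a driver connection. The stitched sketch below keeps the original inline comments; only indentation and blank lines are assumed.

from napalm import get_network_driver


class conn_napalm():
    def __init__(self, device, connection_key):
        self.device = device
        self.connection_key = connection_key
        # check for required kwargs, grab root level key if not in connection_key
        # could have some default values?
        if 'hostname' not in device[connection_key]:
            device[connection_key]['hostname'] = device.get('host', device['name'])
        if 'username' not in device[connection_key]:
            device[connection_key]['username'] = device.get('username', '')
        if 'password' not in device[connection_key]:
            device[connection_key]['password'] = device.get('password', '')
        self.driver = device[connection_key].pop('driver', device.get('platform', ''))

    def connect(self):
        driver = get_network_driver(self.driver)
        self.connection = driver(**self.device[self.connection_key])
        self.connection.open()
        return self.connection

    def close(self):
        self.connection.close()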
[ "with avarage price per region (ZipCode) st.title('Avarage Price per Region') avg_region = maps.price_per_region(renamed_houses)", "table. They represent the recommended selling price of the houses, whether it has", "is not a valid ID.') #finally: return None def page_maps(renamed_houses, recommended_houses): # SideBar", "st.title(f'There are {renamed_houses.shape[0]} properties available for purchase today.') st.dataframe(renamed_houses) st.header(\"Main considerations of the", "greater than 8 Houses with condition equal to or greater than 3 Houses", "with condition equal to or greater than 3 Houses priced below the median", "after-sale profit will be 20% higher. \"\"\") st.title(f'After analysis, {recommended_houses.shape[0]} properties are recommended", "return None def page_maps(renamed_houses, recommended_houses): # SideBar - - - st.sidebar.title('Filter Map') filter_data", "folium_static(avg_region, width=1200, height=700) if __name__ == '__main__': path = f\"{project_path}/data/interim/renamed_data.csv\" renamed_houses = load_data(path)", "If the purchase price of the house is less than the \"Total Avarage", "of homes.') st.markdown('* The average price of renovated homes is 22% higher than", "configuration st.set_page_config(layout='wide') @st.cache(allow_output_mutation=True) def load_data(path): data = pd.read_csv(path) return data # Pages definition", "house.') st.markdown('* The best season for re-selling homes is Spring.') st.header( \"\"\"After these", "of the houses, whether it has been renovated or not, in addition to", "- st.sidebar.title('Filter Map') filter_data = st.sidebar.radio(label='Filter Houses', options=[ 'All houses', 'Recommended homes to", "of variable \"Grid\" (Quality of the building mateirais of the house) equal or", "renamed_houses = load_data(path) path = f\"{project_path}/reports/data/final_houses_sale.csv\" recommended_houses = load_data(path) page_select = sidebar() if", "recommended_houses): # Filter Recommended Houses to Buy DataFrame st.sidebar.title('Search for recommended home for", "with Price and what can be added in a makeover is the bathroom", "table representing the recommended re-sale price and the profit from re-selling the house", "homes is 22% higher than unrenovated homes.') st.markdown('* The biggest correlation with Price", "st.header( \"\"\"After these analyses, the recommended houses for House Rocket to buy follow", "Spring.') st.header( \"\"\"After these analyses, the recommended houses for House Rocket to buy", "Avarage Price\", which means the average value of the region's house prices (ZipCode)", "{recommended_houses.shape[0]} properties are recommended for purchase and re-sale.') st.subheader('New columns have also been", "the recommended price.') st.text(\"\") try: if not id_input: st.dataframe(recommended_houses) else: if int(id_input) in", "__name__ == '__main__': path = f\"{project_path}/data/interim/renamed_data.csv\" renamed_houses = load_data(path) path = f\"{project_path}/reports/data/final_houses_sale.csv\" recommended_houses", "numpy as np import seaborn as sns import streamlit as st import sys", "from streamlit_folium import folium_static import pandas as pd import numpy as np import", "profit from re-selling the house if it is renewed. If the house is", "from re-selling the house if it is renewed. 
If the house is renovated,", "'Recommended homes to buy': st.title('Map of all recommended homes for purchase') st.header('') data", "Buy DataFrame st.sidebar.title('Search for recommended home for purchase') id_input = str(st.sidebar.text_input(label='Enter the ID')).strip()", "homes is Spring.') st.header( \"\"\"After these analyses, the recommended houses for House Rocket", "House Rocket to buy follow the conditions: Places with grade of variable \"Grid\"", "Avarage Price\", then the suggested selling price will be the purchase price +", "the possible profit if sold at the recommended price.') st.text(\"\") try: if not", "Map of density houses_map = maps.houses_map(data) folium_static(houses_map, width=1200, height=700) # Map with avarage", "of all available houses') st.header('') data = renamed_houses.copy() # Map of density houses_map", "per region (ZipCode) st.title('Avarage Price per Region') avg_region = maps.price_per_region(renamed_houses) folium_static(avg_region, width=1200, height=700)", "or not, in addition to the possible profit if sold at the recommended", "data = pd.read_csv(path) return data # Pages definition def sidebar(): st.sidebar.title('Select Page') page_select", "The biggest correlation with Price and what can be added in a makeover", "Houses rated 8 or higher in the \"Grid\" (Quality of the building mateirais", "the re-sale price and the after-sale profit will be 20% higher. \"\"\") st.title(f'After", "variables with the highest positive correlation with Price are \"Grade\" and \"Sqft living\".')", "all available houses') st.header('') data = renamed_houses.copy() # Map of density houses_map =", "height=700) # Map with avarage price per region (ZipCode) st.title('Avarage Price per Region')", "(Quality of the building mateirais of the house) equal or greater than 8", "Houses', options=[ 'All houses', 'Recommended homes to buy']) # Filters - - if", "# Libraries from pandas.io.formats.format import DataFrameFormatter from streamlit_folium import folium_static import pandas as", "been added in the table representing the recommended re-sale price and the profit", "pandas.io.formats.format import DataFrameFormatter from streamlit_folium import folium_static import pandas as pd import numpy", "\"Total Avarage Price\", then the suggested selling price will be the purchase price", "will be the purchase price + 10%. If the purchase price of the", "They represent the recommended selling price of the houses, whether it has been", "DataFrameFormatter from streamlit_folium import folium_static import pandas as pd import numpy as np", "sns import streamlit as st import sys #! 
Add folder \"src\" as a", "condition equal to or greater than 3 Houses priced below the median price", "folium_static(houses_map, width=1200, height=700) # Map with avarage price per region (ZipCode) st.title('Avarage Price", "def page_maps(renamed_houses, recommended_houses): # SideBar - - - st.sidebar.title('Filter Map') filter_data = st.sidebar.radio(label='Filter", "\"Total Avarage Price\", which means the average value of the region's house prices", "region's house prices (ZipCode) and the average price of the Season that the", "possible profit if sold at the recommended price.') st.text(\"\") try: if not id_input:", "what can be added in a makeover is the bathroom and the amount", "if filter_data == 'Recommended homes to buy': st.title('Map of all recommended homes for", "options=['Final Reports', 'Maps']) return page_select def page_final_reports(renamed_houses, recommended_houses): # Filter Recommended Houses to", "maps #! App configuration st.set_page_config(layout='wide') @st.cache(allow_output_mutation=True) def load_data(path): data = pd.read_csv(path) return data", "the median price in your region (ZipCode)\"\"\") st.header(\"\"\"The re-sale price of the after-purchased", "selling price of the houses, whether it has been renovated or not, in", "ID.') except: st.error('ERROR: Input value is not a valid ID.') #finally: return None", "biggest correlation with Price and what can be added in a makeover is", "else: st.error( 'Property with this ID is not recommended for purchase or there", "and what can be added in a makeover is the bathroom and the", "variable \"Grid\" (Quality of the building mateirais of the house) equal or greater", "price of the after-purchased homes is based on the various \"Total Avarage Price\",", "ID to search house st.title('House Rocket Analysis') st.title('') st.title(f'There are {renamed_houses.shape[0]} properties available", "the Season that the house was announced. If the purchase price of the", "== '__main__': path = f\"{project_path}/data/interim/renamed_data.csv\" renamed_houses = load_data(path) path = f\"{project_path}/reports/data/final_houses_sale.csv\" recommended_houses =", "st import sys #! Add folder \"src\" as a package path project_path =", "than 8 Houses with condition equal to or greater than 3 Houses priced", "filter_data = st.sidebar.radio(label='Filter Houses', options=[ 'All houses', 'Recommended homes to buy']) # Filters", "re-selling the house if it is renewed. If the house is renovated, the", "the after-sale profit will be 20% higher. \"\"\") st.title(f'After analysis, {recommended_houses.shape[0]} properties are", "been renovated or not, in addition to the possible profit if sold at", "st.markdown('* The variables with the highest positive correlation with Price are \"Grade\" and", "project_path = \"Put/here/the/path/to/the/project's/root/folder/house_rocket_analysis\" sys.path.append(f'{project_path}/src/') import visualization.maps as maps #! 
App configuration st.set_page_config(layout='wide') @st.cache(allow_output_mutation=True)", "= pd.read_csv(path) return data # Pages definition def sidebar(): st.sidebar.title('Select Page') page_select =", "page_select def page_final_reports(renamed_houses, recommended_houses): # Filter Recommended Houses to Buy DataFrame st.sidebar.title('Search for", "house) attribute have the best average price per rank and number of homes.')", "Analysis') st.title('') st.title(f'There are {renamed_houses.shape[0]} properties available for purchase today.') st.dataframe(renamed_houses) st.header(\"Main considerations", "id_input: st.dataframe(recommended_houses) else: if int(id_input) in recommended_houses['ID'].values: st.dataframe(recommended_houses.loc[recommended_houses['ID'] == int(id_input)]) else: st.error( 'Property", "have the best average price per rank and number of homes.') st.markdown('* The", "st.header(\"Main considerations of the analysis.\") st.markdown('* The variables with the highest positive correlation", "# Pages definition def sidebar(): st.sidebar.title('Select Page') page_select = st.sidebar.selectbox( label='', options=['Final Reports',", "width=1200, height=700) if __name__ == '__main__': path = f\"{project_path}/data/interim/renamed_data.csv\" renamed_houses = load_data(path) path", "st.markdown('* The biggest correlation with Price and what can be added in a", "for recommended home for purchase') id_input = str(st.sidebar.text_input(label='Enter the ID')).strip() # Input ID", "Houses priced below the median price in your region (ZipCode)\"\"\") st.header(\"\"\"The re-sale price", "import visualization.maps as maps #! App configuration st.set_page_config(layout='wide') @st.cache(allow_output_mutation=True) def load_data(path): data =", "equal or greater than 8 Houses with condition equal to or greater than", "or greater than 8 Houses with condition equal to or greater than 3", "st.dataframe(recommended_houses.loc[recommended_houses['ID'] == int(id_input)]) else: st.error( 'Property with this ID is not recommended for", "(ZipCode)\"\"\") st.header(\"\"\"The re-sale price of the after-purchased homes is based on the various", "\"Grade\" and \"Sqft living\".') st.markdown('* Houses rated 8 or higher in the \"Grid\"", "and the profit from re-selling the house if it is renewed. If the", "seaborn as sns import streamlit as st import sys #! 
Add folder \"src\"", "of density houses_map = maps.houses_map(data) folium_static(houses_map, width=1200, height=700) # Map with avarage price", "for purchase and re-sale.') st.subheader('New columns have also been added at the end", "with this ID.') except: st.error('ERROR: Input value is not a valid ID.') #finally:", "load_data(path) path = f\"{project_path}/reports/data/final_houses_sale.csv\" recommended_houses = load_data(path) page_select = sidebar() if page_select ==", "selling price will be the purchase price + 30%.\"\"\") st.header(\"\"\"A column has also", "per Region') avg_region = maps.price_per_region(renamed_houses) folium_static(avg_region, width=1200, height=700) if __name__ == '__main__': path", "analysis.\") st.markdown('* The variables with the highest positive correlation with Price are \"Grade\"", "higher than the \"Total Avarage Price\", then the suggested selling price will be", "a makeover is the bathroom and the amount of square feet of the", "region (ZipCode) st.title('Avarage Price per Region') avg_region = maps.price_per_region(renamed_houses) folium_static(avg_region, width=1200, height=700) if", "avg_region = maps.price_per_region(renamed_houses) folium_static(avg_region, width=1200, height=700) if __name__ == '__main__': path = f\"{project_path}/data/interim/renamed_data.csv\"", "return page_select def page_final_reports(renamed_houses, recommended_houses): # Filter Recommended Houses to Buy DataFrame st.sidebar.title('Search", "st.text(\"\") try: if not id_input: st.dataframe(recommended_houses) else: if int(id_input) in recommended_houses['ID'].values: st.dataframe(recommended_houses.loc[recommended_houses['ID'] ==", "higher in the \"Grid\" (Quality of the building mateirais of the house) attribute", "unrenovated homes.') st.markdown('* The biggest correlation with Price and what can be added", "st.dataframe(recommended_houses) else: if int(id_input) in recommended_houses['ID'].values: st.dataframe(recommended_houses.loc[recommended_houses['ID'] == int(id_input)]) else: st.error( 'Property with", "in a makeover is the bathroom and the amount of square feet of", "with grade of variable \"Grid\" (Quality of the building mateirais of the house)", "positive correlation with Price are \"Grade\" and \"Sqft living\".') st.markdown('* Houses rated 8", "suggested selling price will be the purchase price + 30%.\"\"\") st.header(\"\"\"A column has", "# Map with avarage price per region (ZipCode) st.title('Avarage Price per Region') avg_region", "addition to the possible profit if sold at the recommended price.') st.text(\"\") try:", "load_data(path) page_select = sidebar() if page_select == 'Final Reports': page_final_reports(renamed_houses=renamed_houses, recommended_houses=recommended_houses) else: page_maps(renamed_houses=renamed_houses,", "the purchase price of the house is less than the \"Total Avarage Price\",", "based on the various \"Total Avarage Price\", which means the average value of", "filter_data == 'Recommended homes to buy': st.title('Map of all recommended homes for purchase')", "Map with avarage price per region (ZipCode) st.title('Avarage Price per Region') avg_region =", "If the house is renovated, the re-sale price and the after-sale profit will", "(ZipCode) st.title('Avarage Price per Region') avg_region = maps.price_per_region(renamed_houses) folium_static(avg_region, width=1200, height=700) if __name__", "sidebar(): st.sidebar.title('Select Page') page_select = st.sidebar.selectbox( label='', options=['Final Reports', 'Maps']) return 
page_select def", "import streamlit as st import sys #! Add folder \"src\" as a package", "number of homes.') st.markdown('* The average price of renovated homes is 22% higher", "Reports', 'Maps']) return page_select def page_final_reports(renamed_houses, recommended_houses): # Filter Recommended Houses to Buy", "house) equal or greater than 8 Houses with condition equal to or greater", "house is renovated, the re-sale price and the after-sale profit will be 20%", "= \"Put/here/the/path/to/the/project's/root/folder/house_rocket_analysis\" sys.path.append(f'{project_path}/src/') import visualization.maps as maps #! App configuration st.set_page_config(layout='wide') @st.cache(allow_output_mutation=True) def", "purchase price of the house is less than the \"Total Avarage Price\", then", "'Maps']) return page_select def page_final_reports(renamed_houses, recommended_houses): # Filter Recommended Houses to Buy DataFrame", "buy follow the conditions: Places with grade of variable \"Grid\" (Quality of the", "price will be the purchase price + 30%.\"\"\") st.header(\"\"\"A column has also been", "else: if int(id_input) in recommended_houses['ID'].values: st.dataframe(recommended_houses.loc[recommended_houses['ID'] == int(id_input)]) else: st.error( 'Property with this", "the house.') st.markdown('* The best season for re-selling homes is Spring.') st.header( \"\"\"After", "is less than the \"Total Avarage Price\", then the suggested selling price will", "the house) attribute have the best average price per rank and number of", "(Quality of the building mateirais of the house) attribute have the best average", "data = recommended_houses.copy() else: st.title('Map of all available houses') st.header('') data = renamed_houses.copy()", "and re-sale.') st.subheader('New columns have also been added at the end of the", "App configuration st.set_page_config(layout='wide') @st.cache(allow_output_mutation=True) def load_data(path): data = pd.read_csv(path) return data # Pages", "of the region's house prices (ZipCode) and the average price of the Season", "- - st.sidebar.title('Filter Map') filter_data = st.sidebar.radio(label='Filter Houses', options=[ 'All houses', 'Recommended homes", "Pages definition def sidebar(): st.sidebar.title('Select Page') page_select = st.sidebar.selectbox( label='', options=['Final Reports', 'Maps'])", "the building mateirais of the house) attribute have the best average price per", "homes to buy']) # Filters - - if filter_data == 'Recommended homes to", "Filter Recommended Houses to Buy DataFrame st.sidebar.title('Search for recommended home for purchase') id_input", "selling price will be the purchase price + 10%. If the purchase price", "correlation with Price and what can be added in a makeover is the", "buy': st.title('Map of all recommended homes for purchase') st.header('') data = recommended_houses.copy() else:", "the house is higher than the \"Total Avarage Price\", then the suggested selling", "means the average value of the region's house prices (ZipCode) and the average", "average price of the Season that the house was announced. If the purchase", "the table. 
They represent the recommended selling price of the houses, whether it", "for purchase today.') st.dataframe(renamed_houses) st.header(\"Main considerations of the analysis.\") st.markdown('* The variables with", "the purchase price of the house is higher than the \"Total Avarage Price\",", "re-sale price of the after-purchased homes is based on the various \"Total Avarage", "grade of variable \"Grid\" (Quality of the building mateirais of the house) equal", "announced. If the purchase price of the house is higher than the \"Total", "id_input = str(st.sidebar.text_input(label='Enter the ID')).strip() # Input ID to search house st.title('House Rocket", "load_data(path): data = pd.read_csv(path) return data # Pages definition def sidebar(): st.sidebar.title('Select Page')", "except: st.error('ERROR: Input value is not a valid ID.') #finally: return None def", "the average price of the Season that the house was announced. If the", "then the suggested selling price will be the purchase price + 10%. If", "recommended_houses.copy() else: st.title('Map of all available houses') st.header('') data = renamed_houses.copy() # Map", "value is not a valid ID.') #finally: return None def page_maps(renamed_houses, recommended_houses): #", "recommended_houses): # SideBar - - - st.sidebar.title('Filter Map') filter_data = st.sidebar.radio(label='Filter Houses', options=[", "houses', 'Recommended homes to buy']) # Filters - - if filter_data == 'Recommended", "the house if it is renewed. If the house is renovated, the re-sale", "are recommended for purchase and re-sale.') st.subheader('New columns have also been added at", "with this ID is not recommended for purchase or there is no home", "st.title(f'After analysis, {recommended_houses.shape[0]} properties are recommended for purchase and re-sale.') st.subheader('New columns have", "import DataFrameFormatter from streamlit_folium import folium_static import pandas as pd import numpy as", "profit if sold at the recommended price.') st.text(\"\") try: if not id_input: st.dataframe(recommended_houses)", "building mateirais of the house) equal or greater than 8 Houses with condition", "house is higher than the \"Total Avarage Price\", then the suggested selling price", "price in your region (ZipCode)\"\"\") st.header(\"\"\"The re-sale price of the after-purchased homes is", "st.sidebar.radio(label='Filter Houses', options=[ 'All houses', 'Recommended homes to buy']) # Filters - -", "== int(id_input)]) else: st.error( 'Property with this ID is not recommended for purchase", "\"Sqft living\".') st.markdown('* Houses rated 8 or higher in the \"Grid\" (Quality of", "for purchase') st.header('') data = recommended_houses.copy() else: st.title('Map of all available houses') st.header('')", "Price\", then the suggested selling price will be the purchase price + 10%.", "amount of square feet of the house.') st.markdown('* The best season for re-selling", "represent the recommended selling price of the houses, whether it has been renovated", "if __name__ == '__main__': path = f\"{project_path}/data/interim/renamed_data.csv\" renamed_houses = load_data(path) path = f\"{project_path}/reports/data/final_houses_sale.csv\"", "Price are \"Grade\" and \"Sqft living\".') st.markdown('* Houses rated 8 or higher in", "if sold at the recommended price.') st.text(\"\") try: if not id_input: st.dataframe(recommended_houses) else:", "profit will be 20% higher. \"\"\") st.title(f'After analysis, {recommended_houses.shape[0]} properties are recommended for", "as st import sys #! 
#! Libraries
from pandas.io.formats.format import DataFrameFormatter
from streamlit_folium import folium_static
import pandas as pd
import numpy as np
import seaborn as sns
import streamlit as st
import sys

#! Add folder "src" as a package path
project_path = "Put/here/the/path/to/the/project's/root/folder/house_rocket_analysis"
sys.path.append(f'{project_path}/src/')
import visualization.maps as maps

#! App configuration
st.set_page_config(layout='wide')


@st.cache(allow_output_mutation=True)
def load_data(path):
    data = pd.read_csv(path)
    return data


# Pages definition
def sidebar():
    st.sidebar.title('Select Page')
    page_select = st.sidebar.selectbox(
        label='', options=['Final Reports', 'Maps'])
    return page_select


def page_final_reports(renamed_houses, recommended_houses):
    # Filter Recommended Houses to Buy DataFrame
    st.sidebar.title('Search for recommended home for purchase')
    id_input = str(st.sidebar.text_input(label='Enter the ID')).strip()  # Input ID to search house

    st.title('House Rocket Analysis')
    st.title('')
    st.title(f'There are {renamed_houses.shape[0]} properties available for purchase today.')
    st.dataframe(renamed_houses)

    st.header("Main considerations of the analysis.")
    st.markdown('* The variables with the highest positive correlation with Price are "Grade" and "Sqft living".')
    st.markdown('* Houses rated 8 or higher in the "Grid" (Quality of the building mateirais of the house) attribute have the best average price per rank and number of homes.')
    st.markdown('* The average price of renovated homes is 22% higher than unrenovated homes.')
    st.markdown('* The biggest correlation with Price and what can be added in a makeover is the bathroom and the amount of square feet of the house.')
    st.markdown('* The best season for re-selling homes is Spring.')

    st.header(
        """After these analyses, the recommended houses for House Rocket to buy follow the conditions:
        Places with grade of variable "Grid" (Quality of the building mateirais of the house) equal or greater than 8
        Houses with condition equal to or greater than 3
        Houses priced below the median price in your region (ZipCode)""")
    st.header(
        """The re-sale price of the after-purchased homes is based on the various "Total Avarage Price",
        which means the average value of the region's house prices (ZipCode) and the average price of the
        Season that the house was announced. If the purchase price of the house is higher than the
        "Total Avarage Price", then the suggested selling price will be the purchase price + 10%.
        If the purchase price of the house is less than the "Total Avarage Price", then the suggested
        selling price will be the purchase price + 30%.""")
    st.header(
        """A column has also been added in the table representing the recommended re-sale price and the
        profit from re-selling the house if it is renewed. If the house is renovated, the re-sale price
        and the after-sale profit will be 20% higher. """)

    st.title(f'After analysis, {recommended_houses.shape[0]} properties are recommended for purchase and re-sale.')
    st.subheader('New columns have also been added at the end of the table. They represent the recommended selling price of the houses, whether it has been renovated or not, in addition to the possible profit if sold at the recommended price.')
    st.text("")

    try:
        if not id_input:
            st.dataframe(recommended_houses)
        else:
            if int(id_input) in recommended_houses['ID'].values:
                st.dataframe(recommended_houses.loc[recommended_houses['ID'] == int(id_input)])
            else:
                st.error(
                    'Property with this ID is not recommended for purchase or there is no home with this ID.')
    except:
        st.error('ERROR: Input value is not a valid ID.')
    #finally:
    return None


def page_maps(renamed_houses, recommended_houses):
    # SideBar - - -
    st.sidebar.title('Filter Map')
    filter_data = st.sidebar.radio(label='Filter Houses', options=[
        'All houses', 'Recommended homes to buy'])

    # Filters - -
    if filter_data == 'Recommended homes to buy':
        st.title('Map of all recommended homes for purchase')
        st.header('')
        data = recommended_houses.copy()
    else:
        st.title('Map of all available houses')
        st.header('')
        data = renamed_houses.copy()

    # Map of density
    houses_map = maps.houses_map(data)
    folium_static(houses_map, width=1200, height=700)

    # Map with avarage price per region (ZipCode)
    st.title('Avarage Price per Region')
    avg_region = maps.price_per_region(renamed_houses)
    folium_static(avg_region, width=1200, height=700)


if __name__ == '__main__':
    path = f"{project_path}/data/interim/renamed_data.csv"
    renamed_houses = load_data(path)
    path = f"{project_path}/reports/data/final_houses_sale.csv"
    recommended_houses = load_data(path)

    page_select = sidebar()
    if page_select == 'Final Reports':
        page_final_reports(renamed_houses=renamed_houses, recommended_houses=recommended_houses)
    else:
        page_maps(renamed_houses=renamed_houses, recommended_houses=recommended_houses)
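# ----------------------------------------------------------------------
# The headers above describe a pricing rule for the recommended homes:
# sell at purchase price + 10% when the house was bought above the
# regional/seasonal "Total Avarage Price", + 30% when bought below it,
# and 20% higher when the house is renovated.  The helper below is only
# an illustrative sketch of that rule (it is not part of the project's
# pipeline, and the computation behind reports/data/final_houses_sale.csv
# may differ; the "20% higher" clause is read here as applying to the
# re-sale price, with profit derived from it).
def suggested_sale_price(purchase_price, total_average_price, renovated=False):
    """Return (suggested sale price, expected profit) for one house."""
    if purchase_price > total_average_price:
        sale_price = purchase_price * 1.10
    else:
        sale_price = purchase_price * 1.30
    if renovated:
        sale_price *= 1.20  # renovated homes: 20% higher re-sale price
    profit = sale_price - purchase_price
    return sale_price, profit


# e.g. a house bought for 200,000 in a region/season averaging 250,000:
#   suggested_sale_price(200_000, 250_000)        -> (260000.0, 60000.0)
#   suggested_sale_price(200_000, 250_000, True)  -> (312000.0, 112000.0)
# The dashboard itself is served with Streamlit, e.g. `streamlit run app.py`
# from wherever this script lives (file name assumed); the folium maps come
# from the project's visualization.maps module, which is not in this excerpt.
# ----------------------------------------------------------------------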
[ "Any, Dict, List, Type, TypeVar import attr from ..models.severity_response_body import SeverityResponseBody T =", "Dict, List, Type, TypeVar import attr from ..models.severity_response_body import SeverityResponseBody T = TypeVar(\"T\",", "everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]} Attributes: severities (List[SeverityResponseBody]):", "self.severities: severities_item = severities_item_data.to_dict() severities.append(severities_item) field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( {", "return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d =", "really that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}].", "= d return severities_list_response_body @property def additional_keys(self) -> List[str]: return list(self.additional_properties.keys()) def __getitem__(self,", "for severities_item_data in self.severities: severities_item = severities_item_data.to_dict() severities.append(severities_item) field_dict: Dict[str, Any] = {}", "T: d = src_dict.copy() severities = [] _severities = d.pop(\"severities\") for severities_item_data in", "severities_item_data in _severities: severities_item = SeverityResponseBody.from_dict(severities_item_data) severities.append(severities_item) severities_list_response_body = cls( severities=severities, ) severities_list_response_body.additional_properties", "Dict[str, Any]) -> T: d = src_dict.copy() severities = [] _severities = d.pop(\"severities\")", "..models.severity_response_body import SeverityResponseBody T = TypeVar(\"T\", bound=\"SeveritiesListResponseBody\") @attr.s(auto_attribs=True) class SeveritiesListResponseBody: \"\"\" Example: {'severities':", "severities_list_response_body @property def additional_keys(self) -> List[str]: return list(self.additional_properties.keys()) def __getitem__(self, key: str) ->", "'2021-08-17T13:28:57.801578Z'}]} Attributes: severities (List[SeverityResponseBody]): Example: [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad,", "List, Type, TypeVar import attr from ..models.severity_response_body import SeverityResponseBody T = TypeVar(\"T\", bound=\"SeveritiesListResponseBody\")", "Example: [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0',", "return self.additional_properties[key] def __setitem__(self, key: str, value: Any) -> None: self.additional_properties[key] = value", "SeveritiesListResponseBody: \"\"\" Example: {'severities': [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone", "1, 'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone chill\",", "-> None: del self.additional_properties[key] def __contains__(self, key: str) -> bool: return key in", "-> Dict[str, Any]: severities = [] for severities_item_data in self.severities: severities_item = severities_item_data.to_dict()", "self.additional_properties[key] = value def __delitem__(self, key: str) -> None: del self.additional_properties[key] def __contains__(self,", "= severities_item_data.to_dict() severities.append(severities_item) field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( { 
\"severities\": severities,", "def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() severities =", "that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]} Attributes:", "(List[SeverityResponseBody]): Example: [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone chill\", 'id':", "None: del self.additional_properties[key] def __contains__(self, key: str) -> bool: return key in self.additional_properties", "d return severities_list_response_body @property def additional_keys(self) -> List[str]: return list(self.additional_properties.keys()) def __getitem__(self, key:", "TypeVar(\"T\", bound=\"SeveritiesListResponseBody\") @attr.s(auto_attribs=True) class SeveritiesListResponseBody: \"\"\" Example: {'severities': [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not", "Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() severities = [] _severities", "= cls( severities=severities, ) severities_list_response_body.additional_properties = d return severities_list_response_body @property def additional_keys(self) ->", "'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]. \"\"\" severities: List[SeverityResponseBody] additional_properties: Dict[str, Any] =", "really that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]}", "= [] _severities = d.pop(\"severities\") for severities_item_data in _severities: severities_item = SeverityResponseBody.from_dict(severities_item_data) severities.append(severities_item)", "\"\"\" Example: {'severities': [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone chill\",", "'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that", "return severities_list_response_body @property def additional_keys(self) -> List[str]: return list(self.additional_properties.keys()) def __getitem__(self, key: str)", "List[SeverityResponseBody] additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: severities", "_severities = d.pop(\"severities\") for severities_item_data in _severities: severities_item = SeverityResponseBody.from_dict(severities_item_data) severities.append(severities_item) severities_list_response_body =", "{} field_dict.update(self.additional_properties) field_dict.update( { \"severities\": severities, } ) return field_dict @classmethod def from_dict(cls:", "d.pop(\"severities\") for severities_item_data in _severities: severities_item = SeverityResponseBody.from_dict(severities_item_data) severities.append(severities_item) severities_list_response_body = cls( severities=severities,", "severities_item = SeverityResponseBody.from_dict(severities_item_data) severities.append(severities_item) severities_list_response_body = cls( severities=severities, ) severities_list_response_body.additional_properties = d return", "def __delitem__(self, key: str) -> None: del self.additional_properties[key] def __contains__(self, key: str) ->", "chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]. 
\"\"\" severities: List[SeverityResponseBody] additional_properties:", "None: self.additional_properties[key] = value def __delitem__(self, key: str) -> None: del self.additional_properties[key] def", "[] _severities = d.pop(\"severities\") for severities_item_data in _severities: severities_item = SeverityResponseBody.from_dict(severities_item_data) severities.append(severities_item) severities_list_response_body", "Any] = {} field_dict.update(self.additional_properties) field_dict.update( { \"severities\": severities, } ) return field_dict @classmethod", "1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]} Attributes: severities (List[SeverityResponseBody]): Example: [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really", "severities_list_response_body.additional_properties = d return severities_list_response_body @property def additional_keys(self) -> List[str]: return list(self.additional_properties.keys()) def", "severities.append(severities_item) field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( { \"severities\": severities, } )", "@attr.s(auto_attribs=True) class SeveritiesListResponseBody: \"\"\" Example: {'severities': [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that", "'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone", "Any: return self.additional_properties[key] def __setitem__(self, key: str, value: Any) -> None: self.additional_properties[key] =", "severities_item_data in self.severities: severities_item = severities_item_data.to_dict() severities.append(severities_item) field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties)", "to_dict(self) -> Dict[str, Any]: severities = [] for severities_item_data in self.severities: severities_item =", "for severities_item_data in _severities: severities_item = SeverityResponseBody.from_dict(severities_item_data) severities.append(severities_item) severities_list_response_body = cls( severities=severities, )", "severities_item = severities_item_data.to_dict() severities.append(severities_item) field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( { \"severities\":", "str) -> None: del self.additional_properties[key] def __contains__(self, key: str) -> bool: return key", "really that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'},", "'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not", "import SeverityResponseBody T = TypeVar(\"T\", bound=\"SeveritiesListResponseBody\") @attr.s(auto_attribs=True) class SeveritiesListResponseBody: \"\"\" Example: {'severities': [{'created_at':", "list(self.additional_properties.keys()) def __getitem__(self, key: str) -> Any: return self.additional_properties[key] def __setitem__(self, key: str,", "[{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name':", "in _severities: severities_item = SeverityResponseBody.from_dict(severities_item_data) severities.append(severities_item) severities_list_response_body = cls( severities=severities, ) severities_list_response_body.additional_properties =", "= src_dict.copy() severities = [] 
_severities = d.pop(\"severities\") for severities_item_data in _severities: severities_item", "Type, TypeVar import attr from ..models.severity_response_body import SeverityResponseBody T = TypeVar(\"T\", bound=\"SeveritiesListResponseBody\") @attr.s(auto_attribs=True)", "class SeveritiesListResponseBody: \"\"\" Example: {'severities': [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad,", "import attr from ..models.severity_response_body import SeverityResponseBody T = TypeVar(\"T\", bound=\"SeveritiesListResponseBody\") @attr.s(auto_attribs=True) class SeveritiesListResponseBody:", "Any]: severities = [] for severities_item_data in self.severities: severities_item = severities_item_data.to_dict() severities.append(severities_item) field_dict:", "'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]} Attributes: severities (List[SeverityResponseBody]): Example: [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not", "'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone chill\", 'id':", "SeverityResponseBody T = TypeVar(\"T\", bound=\"SeveritiesListResponseBody\") @attr.s(auto_attribs=True) class SeveritiesListResponseBody: \"\"\" Example: {'severities': [{'created_at': '2021-08-17T13:28:57.801578Z',", "field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( { \"severities\": severities, } ) return", "in self.severities: severities_item = severities_item_data.to_dict() severities.append(severities_item) field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update(", "str, value: Any) -> None: self.additional_properties[key] = value def __delitem__(self, key: str) ->", "'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]} Attributes: severities (List[SeverityResponseBody]): Example: [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's", "\"severities\": severities, } ) return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any])", "'2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor',", "-> T: d = src_dict.copy() severities = [] _severities = d.pop(\"severities\") for severities_item_data", "\"\"\" severities: List[SeverityResponseBody] additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str,", "= SeverityResponseBody.from_dict(severities_item_data) severities.append(severities_item) severities_list_response_body = cls( severities=severities, ) severities_list_response_body.additional_properties = d return severities_list_response_body", "that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at':", "'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad,", "severities: List[SeverityResponseBody] additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]:", "value def __delitem__(self, key: str) -> None: del self.additional_properties[key] def __contains__(self, key: str)", "Any) -> None: self.additional_properties[key] = value def __delitem__(self, key: str) -> None: del", "Any]) -> T: d = src_dict.copy() severities = [] _severities = 
d.pop(\"severities\") for", "severities = [] _severities = d.pop(\"severities\") for severities_item_data in _severities: severities_item = SeverityResponseBody.from_dict(severities_item_data)", "\"It's not really that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1,", "Attributes: severities (List[SeverityResponseBody]): Example: [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone", "from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() severities = []", "d = src_dict.copy() severities = [] _severities = d.pop(\"severities\") for severities_item_data in _severities:", "chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's", "from ..models.severity_response_body import SeverityResponseBody T = TypeVar(\"T\", bound=\"SeveritiesListResponseBody\") @attr.s(auto_attribs=True) class SeveritiesListResponseBody: \"\"\" Example:", "'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]. \"\"\" severities: List[SeverityResponseBody] additional_properties: Dict[str,", "Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( { \"severities\": severities, } ) return field_dict", "'01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really", "= TypeVar(\"T\", bound=\"SeveritiesListResponseBody\") @attr.s(auto_attribs=True) class SeveritiesListResponseBody: \"\"\" Example: {'severities': [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's", "Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: severities = [] for", "'updated_at': '2021-08-17T13:28:57.801578Z'}]. 
\"\"\" severities: List[SeverityResponseBody] additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self)", "field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy()", "} ) return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:", "severities=severities, ) severities_list_response_body.additional_properties = d return severities_list_response_body @property def additional_keys(self) -> List[str]: return", "__delitem__(self, key: str) -> None: del self.additional_properties[key] def __contains__(self, key: str) -> bool:", "'updated_at': '2021-08-17T13:28:57.801578Z'}]} Attributes: severities (List[SeverityResponseBody]): Example: [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that", "typing import Any, Dict, List, Type, TypeVar import attr from ..models.severity_response_body import SeverityResponseBody", "additional_keys(self) -> List[str]: return list(self.additional_properties.keys()) def __getitem__(self, key: str) -> Any: return self.additional_properties[key]", "chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]} Attributes: severities (List[SeverityResponseBody]): Example:", "TypeVar import attr from ..models.severity_response_body import SeverityResponseBody T = TypeVar(\"T\", bound=\"SeveritiesListResponseBody\") @attr.s(auto_attribs=True) class", "self.additional_properties[key] def __setitem__(self, key: str, value: Any) -> None: self.additional_properties[key] = value def", "= value def __delitem__(self, key: str) -> None: del self.additional_properties[key] def __contains__(self, key:", "key: str) -> Any: return self.additional_properties[key] def __setitem__(self, key: str, value: Any) ->", "bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]} Attributes: severities", "everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]. \"\"\" severities: List[SeverityResponseBody]", "attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: severities = [] for severities_item_data in", "Dict[str, Any]: severities = [] for severities_item_data in self.severities: severities_item = severities_item_data.to_dict() severities.append(severities_item)", "'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]. 
\"\"\" severities: List[SeverityResponseBody] additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)", "def __getitem__(self, key: str) -> Any: return self.additional_properties[key] def __setitem__(self, key: str, value:", "key: str, value: Any) -> None: self.additional_properties[key] = value def __delitem__(self, key: str)", "{ \"severities\": severities, } ) return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str,", "everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description':", "_severities: severities_item = SeverityResponseBody.from_dict(severities_item_data) severities.append(severities_item) severities_list_response_body = cls( severities=severities, ) severities_list_response_body.additional_properties = d", "from typing import Any, Dict, List, Type, TypeVar import attr from ..models.severity_response_body import", "severities, } ) return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) ->", "Example: {'severities': [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone chill\", 'id':", "factory=dict) def to_dict(self) -> Dict[str, Any]: severities = [] for severities_item_data in self.severities:", "src_dict: Dict[str, Any]) -> T: d = src_dict.copy() severities = [] _severities =", "-> None: self.additional_properties[key] = value def __delitem__(self, key: str) -> None: del self.additional_properties[key]", "SeverityResponseBody.from_dict(severities_item_data) severities.append(severities_item) severities_list_response_body = cls( severities=severities, ) severities_list_response_body.additional_properties = d return severities_list_response_body @property", "'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]. 
\"\"\" severities: List[SeverityResponseBody] additional_properties: Dict[str, Any] = attr.ib(init=False,", "severities (List[SeverityResponseBody]): Example: [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone chill\",", "'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]} Attributes: severities (List[SeverityResponseBody]): Example: [{'created_at':", "field_dict.update( { \"severities\": severities, } ) return field_dict @classmethod def from_dict(cls: Type[T], src_dict:", "= [] for severities_item_data in self.severities: severities_item = severities_item_data.to_dict() severities.append(severities_item) field_dict: Dict[str, Any]", "severities_list_response_body = cls( severities=severities, ) severities_list_response_body.additional_properties = d return severities_list_response_body @property def additional_keys(self)", "bound=\"SeveritiesListResponseBody\") @attr.s(auto_attribs=True) class SeveritiesListResponseBody: \"\"\" Example: {'severities': [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really", ") return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d", "not really that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at':", "return list(self.additional_properties.keys()) def __getitem__(self, key: str) -> Any: return self.additional_properties[key] def __setitem__(self, key:", "Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: severities = []", ") severities_list_response_body.additional_properties = d return severities_list_response_body @property def additional_keys(self) -> List[str]: return list(self.additional_properties.keys())", "bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z',", "severities.append(severities_item) severities_list_response_body = cls( severities=severities, ) severities_list_response_body.additional_properties = d return severities_list_response_body @property def", "@property def additional_keys(self) -> List[str]: return list(self.additional_properties.keys()) def __getitem__(self, key: str) -> Any:", "__setitem__(self, key: str, value: Any) -> None: self.additional_properties[key] = value def __delitem__(self, key:", "= attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: severities = [] for severities_item_data", "'2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0',", "@classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() severities", "severities = [] for severities_item_data in self.severities: severities_item = severities_item_data.to_dict() severities.append(severities_item) field_dict: Dict[str,", "1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]. 
\"\"\" severities: List[SeverityResponseBody] additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def", "field_dict.update(self.additional_properties) field_dict.update( { \"severities\": severities, } ) return field_dict @classmethod def from_dict(cls: Type[T],", "List[str]: return list(self.additional_properties.keys()) def __getitem__(self, key: str) -> Any: return self.additional_properties[key] def __setitem__(self,", "T = TypeVar(\"T\", bound=\"SeveritiesListResponseBody\") @attr.s(auto_attribs=True) class SeveritiesListResponseBody: \"\"\" Example: {'severities': [{'created_at': '2021-08-17T13:28:57.801578Z', 'description':", "'01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]} Attributes: severities (List[SeverityResponseBody]): Example: [{'created_at': '2021-08-17T13:28:57.801578Z',", "key: str) -> None: del self.additional_properties[key] def __contains__(self, key: str) -> bool: return", "'description': \"It's not really that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank':", "def to_dict(self) -> Dict[str, Any]: severities = [] for severities_item_data in self.severities: severities_item", "def additional_keys(self) -> List[str]: return list(self.additional_properties.keys()) def __getitem__(self, key: str) -> Any: return", "'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]} Attributes: severities (List[SeverityResponseBody]): Example: [{'created_at': '2021-08-17T13:28:57.801578Z', 'description':", "additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: severities =", "cls( severities=severities, ) severities_list_response_body.additional_properties = d return severities_list_response_body @property def additional_keys(self) -> List[str]:", "__getitem__(self, key: str) -> Any: return self.additional_properties[key] def __setitem__(self, key: str, value: Any)", "def __setitem__(self, key: str, value: Any) -> None: self.additional_properties[key] = value def __delitem__(self,", "str) -> Any: return self.additional_properties[key] def __setitem__(self, key: str, value: Any) -> None:", "{'severities': [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0',", "that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]. \"\"\"", "[] for severities_item_data in self.severities: severities_item = severities_item_data.to_dict() severities.append(severities_item) field_dict: Dict[str, Any] =", "src_dict.copy() severities = [] _severities = d.pop(\"severities\") for severities_item_data in _severities: severities_item =", "-> List[str]: return list(self.additional_properties.keys()) def __getitem__(self, key: str) -> Any: return self.additional_properties[key] def", "{'created_at': '2021-08-17T13:28:57.801578Z', 'description': \"It's not really that bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name':", "'01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]. \"\"\" severities: List[SeverityResponseBody] additional_properties: Dict[str, Any]", "bad, everyone chill\", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'}]. 
\"\"\" severities:", "severities_item_data.to_dict() severities.append(severities_item) field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( { \"severities\": severities, }", "import Any, Dict, List, Type, TypeVar import attr from ..models.severity_response_body import SeverityResponseBody T", "value: Any) -> None: self.additional_properties[key] = value def __delitem__(self, key: str) -> None:", "attr from ..models.severity_response_body import SeverityResponseBody T = TypeVar(\"T\", bound=\"SeveritiesListResponseBody\") @attr.s(auto_attribs=True) class SeveritiesListResponseBody: \"\"\"", "= {} field_dict.update(self.additional_properties) field_dict.update( { \"severities\": severities, } ) return field_dict @classmethod def", "= d.pop(\"severities\") for severities_item_data in _severities: severities_item = SeverityResponseBody.from_dict(severities_item_data) severities.append(severities_item) severities_list_response_body = cls(", "'2021-08-17T13:28:57.801578Z'}]. \"\"\" severities: List[SeverityResponseBody] additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) ->", "-> Any: return self.additional_properties[key] def __setitem__(self, key: str, value: Any) -> None: self.additional_properties[key]" ]
[ "# when path is an existing file try: f = open(f'{relative_path}', 'r') content", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) elif os.path.isdir(relative_path): # when path is an existing", "'utf-8')) except: # error reading file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8'))", "Server Error\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f'{http_version} 301 Moved Permanently\\r\\nContent-Type: text/html\\r\\nLocation: {path}/\\r\\n\\r\\n', 'utf-8')) elif os.path.isfile(relative_path):", "this will keep running until you # interrupt the program with Ctrl-C server.serve_forever()", "if __name__ == \"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True #", "200 OK\\r\\nContent-Type: text/html\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading file occured self.request.sendall(bytearray(f\"{http_version}", "'/': try: f = open(f'{relative_path}/index.html', 'r') content = f.read() content_length = len(content) response", "governing permissions and # limitations under the License. # # # Furthermore it", "existing file try: f = open(f'{relative_path}', 'r') content = f.read() content_length = len(content)", "run: python freetests.py # try: curl -v -X GET http://127.0.0.1:8080/ class MyWebServer(socketserver.BaseRequestHandler): def", "__name__ == \"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create", "this file except in compliance with the License. # You may obtain a", "server, binding to localhost on port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) #", "# http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try: curl -v -X GET", "<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "PORT), MyWebServer) # Activate the server; this will keep running until you #", "ANY KIND, either express or implied. # See the License for the specific", "Not Allowed\\r\\nContent-Type: text/html\\r\\n\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) except", "\"PUT\", \"DELETE\", \"TRACE\", \"CONNECT\"] # all methods defined in rfc266 other than GET", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "{content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server", "occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else: # not a valid path", "path[-1] == '/': try: f = open(f'{relative_path}/index.html', 'r') content = f.read() content_length =", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server; this will", "= socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server; this will keep running until", "') try: method = request[0] path = request[1] http_version = request[2][:8].strip() # get", "language governing permissions and # limitations under the License. # # # Furthermore", "OF ANY KIND, either express or implied. # See the License for the", "self.data.decode(): self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) elif os.path.isdir(relative_path): # when path", "of the code is Copyright © 2001-2013 Python Software # Foundation; All Rights", "when path is an existing directory if path[-1] == '/': try: f =", "Activate the server; this will keep running until you # interrupt the program", "= path[path.rfind('.') + 1:len(path)] # get file extension response = f'{http_version} 200 OK\\r\\nContent-Type:", "\"HEAD\", \"POST\", \"PUT\", \"DELETE\", \"TRACE\", \"CONNECT\"] # all methods defined in rfc266 other", "os.path.isfile(relative_path): # when path is an existing file try: f = open(f'{relative_path}', 'r')", "Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) elif os.path.isdir(relative_path): # when path is an existing directory", "OK\\r\\nContent-Type: text/{content_type}\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading file occured self.request.sendall(bytearray(f\"{http_version} 500", "permissions and # limitations under the License. 
# # # Furthermore it is", "= request[1] http_version = request[2][:8].strip() # get http version other_methods = [\"OPTIONS\", \"HEAD\",", "existing directory if path[-1] == '/': try: f = open(f'{relative_path}/index.html', 'r') content =", "path is an existing directory if path[-1] == '/': try: f = open(f'{relative_path}/index.html',", "500 Internal Server Error\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f'{http_version} 301 Moved Permanently\\r\\nContent-Type: text/html\\r\\nLocation: {path}/\\r\\n\\r\\n', 'utf-8'))", "close\\r\\n\\r\\n\", 'utf-8')) elif os.path.isdir(relative_path): # when path is an existing directory if path[-1]", "get http version other_methods = [\"OPTIONS\", \"HEAD\", \"POST\", \"PUT\", \"DELETE\", \"TRACE\", \"CONNECT\"] #", "other than GET if method == \"GET\": relative_path = \"./www\" + os.path.normpath(request[1]) #normalize", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "defined in rfc266 other than GET if method == \"GET\": relative_path = \"./www\"", "len(content) content_type = path[path.rfind('.') + 1:len(path)] # get file extension response = f'{http_version}", "path is an existing file try: f = open(f'{relative_path}', 'r') content = f.read()", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "if path[-1] == '/': try: f = open(f'{relative_path}/index.html', 'r') content = f.read() content_length", "is derived from the Python documentation examples thus # some of the code", "on port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server; this", "OK\\r\\nContent-Type: text/html\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading file occured self.request.sendall(bytearray(f\"{http_version} 500", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "\"POST\", \"PUT\", \"DELETE\", \"TRACE\", \"CONNECT\"] # all methods defined in rfc266 other than", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "text/html\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal", "content_length = len(content) response = f'{http_version} 200 OK\\r\\nContent-Type: text/html\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except:", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "== 'HTTP/1.1' and \"Host\" not in self.data.decode(): self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\",", "class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() request = self.data.decode(\"utf-8\").split(' ') try: method", "http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try: curl -v -X GET http://127.0.0.1:8080/", "required by applicable law or agreed to in writing, software # distributed under", "Not Found\\r\\nContent-Type: text/html\\r\\nConnection:Close\\r\\n\\r\\n', 'utf-8')) elif method in other_methods: self.request.sendall(bytearray(f\"{http_version} 405 Method Not Allowed\\r\\nContent-Type:", "applicable law or agreed to in writing, software # distributed under the License", 
"utf-8 import os import socketserver # Copyright 2013 <NAME>, <NAME> # # Licensed", "'utf-8')) else: # not a valid path self.request.sendall(bytearray(f'{http_version} 404 Not Found\\r\\nContent-Type: text/html\\r\\nConnection:Close\\r\\n\\r\\n', 'utf-8'))", "reading file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else: # not a", "or agreed to in writing, software # distributed under the License is distributed", "Internal Server Error\\r\\n\", 'utf-8')) else: # not a valid path self.request.sendall(bytearray(f'{http_version} 404 Not", "= f.read() content_length = len(content) content_type = path[path.rfind('.') + 1:len(path)] # get file", "try: f = open(f'{relative_path}', 'r') content = f.read() content_length = len(content) content_type =", "elif method in other_methods: self.request.sendall(bytearray(f\"{http_version} 405 Method Not Allowed\\r\\nContent-Type: text/html\\r\\n\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f\"{http_version}", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "try: f = open(f'{relative_path}/index.html', 'r') content = f.read() content_length = len(content) response =", "file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f'{http_version} 301 Moved Permanently\\r\\nContent-Type:", "request[2][:8].strip() # get http version other_methods = [\"OPTIONS\", \"HEAD\", \"POST\", \"PUT\", \"DELETE\", \"TRACE\",", "request[0] path = request[1] http_version = request[2][:8].strip() # get http version other_methods =", "= request[2][:8].strip() # get http version other_methods = [\"OPTIONS\", \"HEAD\", \"POST\", \"PUT\", \"DELETE\",", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "directory if path[-1] == '/': try: f = open(f'{relative_path}/index.html', 'r') content = f.read()", "# get file extension response = f'{http_version} 200 OK\\r\\nContent-Type: text/{content_type}\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8'))", "GET http://127.0.0.1:8080/ class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() request = self.data.decode(\"utf-8\").split(' ')", "thus # some of the code is Copyright © 2001-2013 Python Software #", "writing, software # distributed under the License is distributed on an \"AS IS\"", "500 Internal Server Error\\r\\n\", 'utf-8')) else: # not a valid path self.request.sendall(bytearray(f'{http_version} 404", "socketserver.TCPServer.allow_reuse_address = True # Create the server, binding to localhost on port 8080", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "if http_version == 'HTTP/1.1' and \"Host\" not in self.data.decode(): self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type:", "content_type = path[path.rfind('.') + 1:len(path)] # get file extension response = f'{http_version} 200", "True # Create the server, binding to localhost on port 8080 server =", "License. 
# You may obtain a copy of the License at # #", "= request[0] path = request[1] http_version = request[2][:8].strip() # get http version other_methods", "Method Not Allowed\\r\\nContent-Type: text/html\\r\\n\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8'))", "Copyright 2013 <NAME>, <NAME> # # Licensed under the Apache License, Version 2.0", "Internal Server Error\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f'{http_version} 301 Moved Permanently\\r\\nContent-Type: text/html\\r\\nLocation: {path}/\\r\\n\\r\\n', 'utf-8')) elif", "= self.request.recv(1024).strip() request = self.data.decode(\"utf-8\").split(' ') try: method = request[0] path = request[1]", "== '/': try: f = open(f'{relative_path}/index.html', 'r') content = f.read() content_length = len(content)", "compliance with the License. # You may obtain a copy of the License", "when path is an existing file try: f = open(f'{relative_path}', 'r') content =", "= \"./www\" + os.path.normpath(request[1]) #normalize case of pathname if http_version == 'HTTP/1.1' and", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "self.request.sendall(bytearray(response, 'utf-8')) except: # error reading file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\",", "# coding: utf-8 import os import socketserver # Copyright 2013 <NAME>, <NAME> #", "valid path self.request.sendall(bytearray(f'{http_version} 404 Not Found\\r\\nContent-Type: text/html\\r\\nConnection:Close\\r\\n\\r\\n', 'utf-8')) elif method in other_methods: self.request.sendall(bytearray(f\"{http_version}", "'r') content = f.read() content_length = len(content) content_type = path[path.rfind('.') + 1:len(path)] #", "+ os.path.normpath(request[1]) #normalize case of pathname if http_version == 'HTTP/1.1' and \"Host\" not", "if method == \"GET\": relative_path = \"./www\" + os.path.normpath(request[1]) #normalize case of pathname", "not use this file except in compliance with the License. 
# You may", "of pathname if http_version == 'HTTP/1.1' and \"Host\" not in self.data.decode(): self.request.sendall(bytearray(f\"{http_version} 400", "Allowed\\r\\nContent-Type: text/html\\r\\n\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) except IndexError:", "MyWebServer) # Activate the server; this will keep running until you # interrupt", "8080 socketserver.TCPServer.allow_reuse_address = True # Create the server, binding to localhost on port", "License, Version 2.0 (the \"License\"); # you may not use this file except", "405 Method Not Allowed\\r\\nContent-Type: text/html\\r\\n\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\",", "'r') content = f.read() content_length = len(content) response = f'{http_version} 200 OK\\r\\nContent-Type: text/html\\r\\nContent-Length:", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "self.data.decode(\"utf-8\").split(' ') try: method = request[0] path = request[1] http_version = request[2][:8].strip() #", "code is Copyright © 2001-2013 Python Software # Foundation; All Rights Reserved #", "a valid path self.request.sendall(bytearray(f'{http_version} 404 Not Found\\r\\nContent-Type: text/html\\r\\nConnection:Close\\r\\n\\r\\n', 'utf-8')) elif method in other_methods:", "1:len(path)] # get file extension response = f'{http_version} 200 OK\\r\\nContent-Type: text/{content_type}\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response,", "an existing directory if path[-1] == '/': try: f = open(f'{relative_path}/index.html', 'r') content", "Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try: curl -v", "else: self.request.sendall(bytearray(f'{http_version} 301 Moved Permanently\\r\\nContent-Type: text/html\\r\\nLocation: {path}/\\r\\n\\r\\n', 'utf-8')) elif os.path.isfile(relative_path): # when path", "os.path.normpath(request[1]) #normalize case of pathname if http_version == 'HTTP/1.1' and \"Host\" not in", "# you may not use this file except in compliance with the License.", "the License. 
# # # Furthermore it is derived from the Python documentation", "get file extension response = f'{http_version} 200 OK\\r\\nContent-Type: text/{content_type}\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except:", "except IndexError: pass if __name__ == \"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address", "the server, binding to localhost on port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer)", "agreed to in writing, software # distributed under the License is distributed on", "# # # Furthermore it is derived from the Python documentation examples thus", "handle(self): self.data = self.request.recv(1024).strip() request = self.data.decode(\"utf-8\").split(' ') try: method = request[0] path", "some of the code is Copyright © 2001-2013 Python Software # Foundation; All", "except: # error reading file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else:", "= \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create the server, binding to localhost", "(the \"License\"); # you may not use this file except in compliance with", "case of pathname if http_version == 'HTTP/1.1' and \"Host\" not in self.data.decode(): self.request.sendall(bytearray(f\"{http_version}", "# limitations under the License. # # # Furthermore it is derived from", "# Unless required by applicable law or agreed to in writing, software #", "text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) except IndexError: pass if __name__ == \"__main__\": HOST, PORT =", "by applicable law or agreed to in writing, software # distributed under the", "# # Furthermore it is derived from the Python documentation examples thus #", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "file extension response = f'{http_version} 200 OK\\r\\nContent-Type: text/{content_type}\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: #", "= f.read() content_length = len(content) response = f'{http_version} 200 OK\\r\\nContent-Type: text/html\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response,", "PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create the server, binding to", "else: self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) except IndexError: pass if __name__", "Software # Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python", "methods defined in rfc266 other than GET if method == \"GET\": relative_path =", "pass if __name__ == \"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True", "Furthermore it is derived from the Python documentation examples thus # some of", "Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py #", "Moved Permanently\\r\\nContent-Type: text/html\\r\\nLocation: {path}/\\r\\n\\r\\n', 'utf-8')) elif os.path.isfile(relative_path): # when path is an existing", "<NAME>, <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");", "file except in compliance with the License. 
# You may obtain a copy", "method = request[0] path = request[1] http_version = request[2][:8].strip() # get http version", "\"TRACE\", \"CONNECT\"] # all methods defined in rfc266 other than GET if method", "# all methods defined in rfc266 other than GET if method == \"GET\":", "# some of the code is Copyright © 2001-2013 Python Software # Foundation;", "curl -v -X GET http://127.0.0.1:8080/ class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() request", "'utf-8')) else: self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) except IndexError: pass if", "License for the specific language governing permissions and # limitations under the License.", "socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server; this will keep running until you", "to in writing, software # distributed under the License is distributed on an", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "text/html\\r\\nLocation: {path}/\\r\\n\\r\\n', 'utf-8')) elif os.path.isfile(relative_path): # when path is an existing file try:", "# error reading file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f'{http_version}", "other_methods = [\"OPTIONS\", \"HEAD\", \"POST\", \"PUT\", \"DELETE\", \"TRACE\", \"CONNECT\"] # all methods defined", "404 Not Found\\r\\nContent-Type: text/html\\r\\nConnection:Close\\r\\n\\r\\n', 'utf-8')) elif method in other_methods: self.request.sendall(bytearray(f\"{http_version} 405 Method Not", "in rfc266 other than GET if method == \"GET\": relative_path = \"./www\" +", "'utf-8')) elif os.path.isfile(relative_path): # when path is an existing file try: f =", "or implied. # See the License for the specific language governing permissions and", "documentation examples thus # some of the code is Copyright © 2001-2013 Python", "than GET if method == \"GET\": relative_path = \"./www\" + os.path.normpath(request[1]) #normalize case", "f = open(f'{relative_path}', 'r') content = f.read() content_length = len(content) content_type = path[path.rfind('.')", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "{path}/\\r\\n\\r\\n', 'utf-8')) elif os.path.isfile(relative_path): # when path is an existing file try: f", "localhost on port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server;", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "import socketserver # Copyright 2013 <NAME>, <NAME> # # Licensed under the Apache", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "all methods defined in rfc266 other than GET if method == \"GET\": relative_path", "#normalize case of pathname if http_version == 'HTTP/1.1' and \"Host\" not in self.data.decode():", "© 2001-2013 Python Software # Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html #", "text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) elif os.path.isdir(relative_path): # when path is an existing directory if", "Python Software # Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run:", "'utf-8')) elif os.path.isdir(relative_path): # when path is an existing directory if path[-1] ==", "response = f'{http_version} 200 OK\\r\\nContent-Type: text/html\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading", "close\\r\\n\\r\\n\", 'utf-8')) except IndexError: pass if __name__ == \"__main__\": HOST, PORT = \"localhost\",", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "text/html\\r\\nConnection:Close\\r\\n\\r\\n', 'utf-8')) elif method in other_methods: self.request.sendall(bytearray(f\"{http_version} 405 Method Not Allowed\\r\\nContent-Type: text/html\\r\\n\\r\\n\", 'utf-8'))", "an existing file try: f = open(f'{relative_path}', 'r') content = f.read() content_length =", "error reading file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f'{http_version} 301", "= len(content) response = f'{http_version} 200 OK\\r\\nContent-Type: text/html\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: #", "use this file except in compliance with the License. 
# You may obtain", "coding: utf-8 import os import socketserver # Copyright 2013 <NAME>, <NAME> # #", "8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server; this will keep", "self.request.sendall(bytearray(f'{http_version} 301 Moved Permanently\\r\\nContent-Type: text/html\\r\\nLocation: {path}/\\r\\n\\r\\n', 'utf-8')) elif os.path.isfile(relative_path): # when path is", "Server Error\\r\\n\", 'utf-8')) else: # not a valid path self.request.sendall(bytearray(f'{http_version} 404 Not Found\\r\\nContent-Type:", "it is derived from the Python documentation examples thus # some of the", "is Copyright © 2001-2013 Python Software # Foundation; All Rights Reserved # #", "# get http version other_methods = [\"OPTIONS\", \"HEAD\", \"POST\", \"PUT\", \"DELETE\", \"TRACE\", \"CONNECT\"]", "derived from the Python documentation examples thus # some of the code is", "freetests.py # try: curl -v -X GET http://127.0.0.1:8080/ class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "2001-2013 Python Software # Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # #", "= open(f'{relative_path}', 'r') content = f.read() content_length = len(content) content_type = path[path.rfind('.') +", "in self.data.decode(): self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) elif os.path.isdir(relative_path): # when", "version other_methods = [\"OPTIONS\", \"HEAD\", \"POST\", \"PUT\", \"DELETE\", \"TRACE\", \"CONNECT\"] # all methods", "HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create the server, binding", "2.0 (the \"License\"); # you may not use this file except in compliance", "\"Host\" not in self.data.decode(): self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) elif os.path.isdir(relative_path):", "Create the server, binding to localhost on port 8080 server = socketserver.TCPServer((HOST, PORT),", "for the specific language governing permissions and # limitations under the License. #", "self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f'{http_version} 301 Moved Permanently\\r\\nContent-Type: text/html\\r\\nLocation: {path}/\\r\\n\\r\\n',", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "301 Moved Permanently\\r\\nContent-Type: text/html\\r\\nLocation: {path}/\\r\\n\\r\\n', 'utf-8')) elif os.path.isfile(relative_path): # when path is an", "IndexError: pass if __name__ == \"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address =", "os.path.isdir(relative_path): # when path is an existing directory if path[-1] == '/': try:", "\"GET\": relative_path = \"./www\" + os.path.normpath(request[1]) #normalize case of pathname if http_version ==", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. 
# See the License for the specific language governing permissions", "f.read() content_length = len(content) content_type = path[path.rfind('.') + 1:len(path)] # get file extension", "error reading file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else: # not", "self.request.sendall(bytearray(f'{http_version} 404 Not Found\\r\\nContent-Type: text/html\\r\\nConnection:Close\\r\\n\\r\\n', 'utf-8')) elif method in other_methods: self.request.sendall(bytearray(f\"{http_version} 405 Method", "self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else: # not a valid path self.request.sendall(bytearray(f'{http_version}", "# not a valid path self.request.sendall(bytearray(f'{http_version} 404 Not Found\\r\\nContent-Type: text/html\\r\\nConnection:Close\\r\\n\\r\\n', 'utf-8')) elif method", "either express or implied. # See the License for the specific language governing", "not in self.data.decode(): self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) elif os.path.isdir(relative_path): #", "content = f.read() content_length = len(content) response = f'{http_version} 200 OK\\r\\nContent-Type: text/html\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}'", "file try: f = open(f'{relative_path}', 'r') content = f.read() content_length = len(content) content_type", "'utf-8')) elif method in other_methods: self.request.sendall(bytearray(f\"{http_version} 405 Method Not Allowed\\r\\nContent-Type: text/html\\r\\n\\r\\n\", 'utf-8')) else:", "under the License. # # # Furthermore it is derived from the Python", "self.data = self.request.recv(1024).strip() request = self.data.decode(\"utf-8\").split(' ') try: method = request[0] path =", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "pathname if http_version == 'HTTP/1.1' and \"Host\" not in self.data.decode(): self.request.sendall(bytearray(f\"{http_version} 400 Bad", "= [\"OPTIONS\", \"HEAD\", \"POST\", \"PUT\", \"DELETE\", \"TRACE\", \"CONNECT\"] # all methods defined in", "examples thus # some of the code is Copyright © 2001-2013 Python Software", "http version other_methods = [\"OPTIONS\", \"HEAD\", \"POST\", \"PUT\", \"DELETE\", \"TRACE\", \"CONNECT\"] # all", "the Python documentation examples thus # some of the code is Copyright ©", "# # run: python freetests.py # try: curl -v -X GET http://127.0.0.1:8080/ class", "Found\\r\\nContent-Type: text/html\\r\\nConnection:Close\\r\\n\\r\\n', 'utf-8')) elif method in other_methods: self.request.sendall(bytearray(f\"{http_version} 405 Method Not Allowed\\r\\nContent-Type: text/html\\r\\n\\r\\n\",", "import os import socketserver # Copyright 2013 <NAME>, <NAME> # # Licensed under", "is an existing file try: f = open(f'{relative_path}', 'r') content = f.read() content_length", "the License. 
# You may obtain a copy of the License at #", "MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() request = self.data.decode(\"utf-8\").split(' ') try: method =", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "os import socketserver # Copyright 2013 <NAME>, <NAME> # # Licensed under the", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "try: method = request[0] path = request[1] http_version = request[2][:8].strip() # get http", "else: # not a valid path self.request.sendall(bytearray(f'{http_version} 404 Not Found\\r\\nContent-Type: text/html\\r\\nConnection:Close\\r\\n\\r\\n', 'utf-8')) elif", "# Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py", "All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try:", "reading file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f'{http_version} 301 Moved", "f = open(f'{relative_path}/index.html', 'r') content = f.read() content_length = len(content) response = f'{http_version}", "self.request.recv(1024).strip() request = self.data.decode(\"utf-8\").split(' ') try: method = request[0] path = request[1] http_version", "Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) except IndexError: pass if __name__ == \"__main__\": HOST, PORT", "# Copyright 2013 <NAME>, <NAME> # # Licensed under the Apache License, Version", "\"CONNECT\"] # all methods defined in rfc266 other than GET if method ==", "method == \"GET\": relative_path = \"./www\" + os.path.normpath(request[1]) #normalize case of pathname if", "Error\\r\\n\", 'utf-8')) else: # not a valid path self.request.sendall(bytearray(f'{http_version} 404 Not Found\\r\\nContent-Type: text/html\\r\\nConnection:Close\\r\\n\\r\\n',", "with the License. 
# You may obtain a copy of the License at", "[\"OPTIONS\", \"HEAD\", \"POST\", \"PUT\", \"DELETE\", \"TRACE\", \"CONNECT\"] # all methods defined in rfc266", "relative_path = \"./www\" + os.path.normpath(request[1]) #normalize case of pathname if http_version == 'HTTP/1.1'", "open(f'{relative_path}/index.html', 'r') content = f.read() content_length = len(content) response = f'{http_version} 200 OK\\r\\nContent-Type:", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "path[path.rfind('.') + 1:len(path)] # get file extension response = f'{http_version} 200 OK\\r\\nContent-Type: text/{content_type}\\r\\nContent-Length:", "binding to localhost on port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate", "f.read() content_length = len(content) response = f'{http_version} 200 OK\\r\\nContent-Type: text/html\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8'))", "Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) except IndexError: pass if __name__ == \"__main__\": HOST,", "text/{content_type}\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal", "law or agreed to in writing, software # distributed under the License is", "Python documentation examples thus # some of the code is Copyright © 2001-2013", "the License for the specific language governing permissions and # limitations under the", "not a valid path self.request.sendall(bytearray(f'{http_version} 404 Not Found\\r\\nContent-Type: text/html\\r\\nConnection:Close\\r\\n\\r\\n', 'utf-8')) elif method in", "f'{http_version} 200 OK\\r\\nContent-Type: text/html\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading file occured", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "-v -X GET http://127.0.0.1:8080/ class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() request =", "Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try: curl", "in other_methods: self.request.sendall(bytearray(f\"{http_version} 405 Method Not Allowed\\r\\nContent-Type: text/html\\r\\n\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f\"{http_version} 400 Bad", "from the Python documentation examples thus # some of the code is Copyright", "\"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create the server,", "'utf-8')) else: self.request.sendall(bytearray(f'{http_version} 301 Moved Permanently\\r\\nContent-Type: text/html\\r\\nLocation: {path}/\\r\\n\\r\\n', 'utf-8')) elif os.path.isfile(relative_path): # when", "Copyright © 2001-2013 Python Software # Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html", "socketserver # Copyright 2013 <NAME>, <NAME> # # Licensed under the Apache License,", "in compliance with the License. 
# You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "= f'{http_version} 200 OK\\r\\nContent-Type: text/html\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading file", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server; this will keep running", "self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) except IndexError: pass if __name__ ==", "\"./www\" + os.path.normpath(request[1]) #normalize case of pathname if http_version == 'HTTP/1.1' and \"Host\"", "self.request.sendall(bytearray(f\"{http_version} 405 Method Not Allowed\\r\\nContent-Type: text/html\\r\\n\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection:", "# Furthermore it is derived from the Python documentation examples thus # some", "request[1] http_version = request[2][:8].strip() # get http version other_methods = [\"OPTIONS\", \"HEAD\", \"POST\",", "See the License for the specific language governing permissions and # limitations under", "# # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try: curl -v -X", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "other_methods: self.request.sendall(bytearray(f\"{http_version} 405 Method Not Allowed\\r\\nContent-Type: text/html\\r\\n\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type:", "Permanently\\r\\nContent-Type: text/html\\r\\nLocation: {path}/\\r\\n\\r\\n', 'utf-8')) elif os.path.isfile(relative_path): # when path is an existing file", "http://127.0.0.1:8080/ class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() request = self.data.decode(\"utf-8\").split(' ') try:", "# error reading file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else: #", "elif os.path.isfile(relative_path): # when path is an existing file try: f = open(f'{relative_path}',", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "\"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create the server, binding to localhost on", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "and \"Host\" not in self.data.decode(): self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) elif", "'utf-8')) except IndexError: pass if __name__ == \"__main__\": HOST, PORT = \"localhost\", 8080", "extension response = f'{http_version} 200 OK\\r\\nContent-Type: text/{content_type}\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error", "== \"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create the", "== \"GET\": relative_path = \"./www\" + os.path.normpath(request[1]) #normalize case of pathname if http_version", "400 Bad 
Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) elif os.path.isdir(relative_path): # when path is an", "file occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else: # not a valid", "specific language governing permissions and # limitations under the License. # # #", "self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) elif os.path.isdir(relative_path): # when path is", "= f'{http_version} 200 OK\\r\\nContent-Type: text/{content_type}\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading file", "-X GET http://127.0.0.1:8080/ class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() request = self.data.decode(\"utf-8\").split('", "License. # # # Furthermore it is derived from the Python documentation examples", "= len(content) content_type = path[path.rfind('.') + 1:len(path)] # get file extension response =", "def handle(self): self.data = self.request.recv(1024).strip() request = self.data.decode(\"utf-8\").split(' ') try: method = request[0]", "<reponame>vhnguyen0707/CMPUT404-assignment-webserver # coding: utf-8 import os import socketserver # Copyright 2013 <NAME>, <NAME>", "and # limitations under the License. # # # Furthermore it is derived", "Version 2.0 (the \"License\"); # you may not use this file except in", "= open(f'{relative_path}/index.html', 'r') content = f.read() content_length = len(content) response = f'{http_version} 200", "except in compliance with the License. # You may obtain a copy of", "# Create the server, binding to localhost on port 8080 server = socketserver.TCPServer((HOST,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "2013 <NAME>, <NAME> # # Licensed under the Apache License, Version 2.0 (the", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "# Activate the server; this will keep running until you # interrupt the", "'HTTP/1.1' and \"Host\" not in self.data.decode(): self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8'))", "# when path is an existing directory if path[-1] == '/': try: f", "the code is Copyright © 2001-2013 Python Software # Foundation; All Rights Reserved", "server; this will keep running until you # interrupt the program with Ctrl-C", "http_version = request[2][:8].strip() # get http version other_methods = [\"OPTIONS\", \"HEAD\", \"POST\", \"PUT\",", "response = f'{http_version} 200 OK\\r\\nContent-Type: text/{content_type}\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading", "try: curl -v -X GET http://127.0.0.1:8080/ class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip()", "to localhost on port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the", "\"DELETE\", \"TRACE\", \"CONNECT\"] # all methods defined in rfc266 other than GET if", "400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) except IndexError: pass if __name__ == \"__main__\":", "# run: python freetests.py # try: curl -v -X GET http://127.0.0.1:8080/ class MyWebServer(socketserver.BaseRequestHandler):", "f'{http_version} 200 OK\\r\\nContent-Type: text/{content_type}\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading file occured", "method in other_methods: self.request.sendall(bytearray(f\"{http_version} 405 Method Not Allowed\\r\\nContent-Type: text/html\\r\\n\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f\"{http_version} 400", "text/html\\r\\n\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection: close\\r\\n\\r\\n\", 'utf-8')) except IndexError: pass", "http_version == 'HTTP/1.1' and \"Host\" not in self.data.decode(): self.request.sendall(bytearray(f\"{http_version} 400 Bad Request\\r\\nContent-Type: text/html\\r\\nConnection:", "# try: curl -v -X GET http://127.0.0.1:8080/ class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data =", "+ 1:len(path)] # get file extension response = f'{http_version} 200 OK\\r\\nContent-Type: text/{content_type}\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}'", "= True # Create the server, binding to localhost on port 8080 server", "the specific language governing permissions and # limitations under the License. 
# #", "= self.data.decode(\"utf-8\").split(' ') try: method = request[0] path = request[1] http_version = request[2][:8].strip()", "Error\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f'{http_version} 301 Moved Permanently\\r\\nContent-Type: text/html\\r\\nLocation: {path}/\\r\\n\\r\\n', 'utf-8')) elif os.path.isfile(relative_path): #", "200 OK\\r\\nContent-Type: text/{content_type}\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error reading file occured self.request.sendall(bytearray(f\"{http_version}", "is an existing directory if path[-1] == '/': try: f = open(f'{relative_path}/index.html', 'r')", "open(f'{relative_path}', 'r') content = f.read() content_length = len(content) content_type = path[path.rfind('.') + 1:len(path)]", "rfc266 other than GET if method == \"GET\": relative_path = \"./www\" + os.path.normpath(request[1])", "python freetests.py # try: curl -v -X GET http://127.0.0.1:8080/ class MyWebServer(socketserver.BaseRequestHandler): def handle(self):", "elif os.path.isdir(relative_path): # when path is an existing directory if path[-1] == '/':", "content = f.read() content_length = len(content) content_type = path[path.rfind('.') + 1:len(path)] # get", "path self.request.sendall(bytearray(f'{http_version} 404 Not Found\\r\\nContent-Type: text/html\\r\\nConnection:Close\\r\\n\\r\\n', 'utf-8')) elif method in other_methods: self.request.sendall(bytearray(f\"{http_version} 405", "GET if method == \"GET\": relative_path = \"./www\" + os.path.normpath(request[1]) #normalize case of", "len(content) response = f'{http_version} 200 OK\\r\\nContent-Type: text/html\\r\\nContent-Length: {content_length}\\r\\n\\r\\n{content}' self.request.sendall(bytearray(response, 'utf-8')) except: # error", "occured self.request.sendall(bytearray(f\"{http_version} 500 Internal Server Error\\r\\n\", 'utf-8')) else: self.request.sendall(bytearray(f'{http_version} 301 Moved Permanently\\r\\nContent-Type: text/html\\r\\nLocation:", "the server; this will keep running until you # interrupt the program with", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "path = request[1] http_version = request[2][:8].strip() # get http version other_methods = [\"OPTIONS\",", "content_length = len(content) content_type = path[path.rfind('.') + 1:len(path)] # get file extension response", "limitations under the License. # # # Furthermore it is derived from the", "request = self.data.decode(\"utf-8\").split(' ') try: method = request[0] path = request[1] http_version =" ]
[ "= df1 out = df2; by a; fin = \"/usr/test.text\"; quit;\"\"\" class TestDesc(unittest.TestCase):", "18 12:54:30 2014 @author: Chapman \"\"\" import unittest from stan.proc import proc_parse cstr", "on Sat Jan 18 12:54:30 2014 @author: Chapman \"\"\" import unittest from stan.proc", "\"/usr/test.text\"; quit;\"\"\" class TestDesc(unittest.TestCase): def test_dataset_opt(self): self.assertTrue(proc_parse.proc_parse(cstr).strip() == \"df2=describe.describe(data=df1,by='a')\") self.assertTrue(proc_parse.proc_parse(cstr1).strip() == \"df2=describe.describe(data=df1,by='a',fin='/usr/test.text')\") if", "Created on Sat Jan 18 12:54:30 2014 @author: Chapman \"\"\" import unittest from", "class TestDesc(unittest.TestCase): def test_dataset_opt(self): self.assertTrue(proc_parse.proc_parse(cstr).strip() == \"df2=describe.describe(data=df1,by='a')\") self.assertTrue(proc_parse.proc_parse(cstr1).strip() == \"df2=describe.describe(data=df1,by='a',fin='/usr/test.text')\") if __name__ ==", "by a; run;\"\"\" cstr1 = \"\"\"proc describe data = df1 out = df2;", "import unittest from stan.proc import proc_parse cstr = \"\"\"proc describe data = df1", "out = df2; by a; fin = \"/usr/test.text\"; quit;\"\"\" class TestDesc(unittest.TestCase): def test_dataset_opt(self):", "df2; by a; fin = \"/usr/test.text\"; quit;\"\"\" class TestDesc(unittest.TestCase): def test_dataset_opt(self): self.assertTrue(proc_parse.proc_parse(cstr).strip() ==", "= df2; by a; run;\"\"\" cstr1 = \"\"\"proc describe data = df1 out", "a; run;\"\"\" cstr1 = \"\"\"proc describe data = df1 out = df2; by", "fin = \"/usr/test.text\"; quit;\"\"\" class TestDesc(unittest.TestCase): def test_dataset_opt(self): self.assertTrue(proc_parse.proc_parse(cstr).strip() == \"df2=describe.describe(data=df1,by='a')\") self.assertTrue(proc_parse.proc_parse(cstr1).strip() ==", "def test_dataset_opt(self): self.assertTrue(proc_parse.proc_parse(cstr).strip() == \"df2=describe.describe(data=df1,by='a')\") self.assertTrue(proc_parse.proc_parse(cstr1).strip() == \"df2=describe.describe(data=df1,by='a',fin='/usr/test.text')\") if __name__ == '__main__': unittest.main()", "Jan 18 12:54:30 2014 @author: Chapman \"\"\" import unittest from stan.proc import proc_parse", "\"\"\"proc describe data = df1 out = df2; by a; fin = \"/usr/test.text\";", "df2; by a; run;\"\"\" cstr1 = \"\"\"proc describe data = df1 out =", "-*- \"\"\" Created on Sat Jan 18 12:54:30 2014 @author: Chapman \"\"\" import", "Chapman \"\"\" import unittest from stan.proc import proc_parse cstr = \"\"\"proc describe data", "data = df1 out = df2; by a; fin = \"/usr/test.text\"; quit;\"\"\" class", "@author: Chapman \"\"\" import unittest from stan.proc import proc_parse cstr = \"\"\"proc describe", "cstr = \"\"\"proc describe data = df1 out = df2; by a; run;\"\"\"", "# -*- coding: utf-8 -*- \"\"\" Created on Sat Jan 18 12:54:30 2014", "stan.proc import proc_parse cstr = \"\"\"proc describe data = df1 out = df2;", "cstr1 = \"\"\"proc describe data = df1 out = df2; by a; fin", "-*- coding: utf-8 -*- \"\"\" Created on Sat Jan 18 12:54:30 2014 @author:", "proc_parse cstr = \"\"\"proc describe data = df1 out = df2; by a;", "a; fin = \"/usr/test.text\"; quit;\"\"\" class TestDesc(unittest.TestCase): def test_dataset_opt(self): self.assertTrue(proc_parse.proc_parse(cstr).strip() == \"df2=describe.describe(data=df1,by='a')\") self.assertTrue(proc_parse.proc_parse(cstr1).strip()", "TestDesc(unittest.TestCase): def test_dataset_opt(self): self.assertTrue(proc_parse.proc_parse(cstr).strip() == 
\"df2=describe.describe(data=df1,by='a')\") self.assertTrue(proc_parse.proc_parse(cstr1).strip() == \"df2=describe.describe(data=df1,by='a',fin='/usr/test.text')\") if __name__ == '__main__':", "2014 @author: Chapman \"\"\" import unittest from stan.proc import proc_parse cstr = \"\"\"proc", "data = df1 out = df2; by a; run;\"\"\" cstr1 = \"\"\"proc describe", "out = df2; by a; run;\"\"\" cstr1 = \"\"\"proc describe data = df1", "\"\"\"proc describe data = df1 out = df2; by a; run;\"\"\" cstr1 =", "= \"\"\"proc describe data = df1 out = df2; by a; fin =", "= df1 out = df2; by a; run;\"\"\" cstr1 = \"\"\"proc describe data", "describe data = df1 out = df2; by a; fin = \"/usr/test.text\"; quit;\"\"\"", "import proc_parse cstr = \"\"\"proc describe data = df1 out = df2; by", "describe data = df1 out = df2; by a; run;\"\"\" cstr1 = \"\"\"proc", "from stan.proc import proc_parse cstr = \"\"\"proc describe data = df1 out =", "Sat Jan 18 12:54:30 2014 @author: Chapman \"\"\" import unittest from stan.proc import", "quit;\"\"\" class TestDesc(unittest.TestCase): def test_dataset_opt(self): self.assertTrue(proc_parse.proc_parse(cstr).strip() == \"df2=describe.describe(data=df1,by='a')\") self.assertTrue(proc_parse.proc_parse(cstr1).strip() == \"df2=describe.describe(data=df1,by='a',fin='/usr/test.text')\") if __name__", "12:54:30 2014 @author: Chapman \"\"\" import unittest from stan.proc import proc_parse cstr =", "df1 out = df2; by a; fin = \"/usr/test.text\"; quit;\"\"\" class TestDesc(unittest.TestCase): def", "by a; fin = \"/usr/test.text\"; quit;\"\"\" class TestDesc(unittest.TestCase): def test_dataset_opt(self): self.assertTrue(proc_parse.proc_parse(cstr).strip() == \"df2=describe.describe(data=df1,by='a')\")", "\"\"\" Created on Sat Jan 18 12:54:30 2014 @author: Chapman \"\"\" import unittest", "df1 out = df2; by a; run;\"\"\" cstr1 = \"\"\"proc describe data =", "= df2; by a; fin = \"/usr/test.text\"; quit;\"\"\" class TestDesc(unittest.TestCase): def test_dataset_opt(self): self.assertTrue(proc_parse.proc_parse(cstr).strip()", "= \"/usr/test.text\"; quit;\"\"\" class TestDesc(unittest.TestCase): def test_dataset_opt(self): self.assertTrue(proc_parse.proc_parse(cstr).strip() == \"df2=describe.describe(data=df1,by='a')\") self.assertTrue(proc_parse.proc_parse(cstr1).strip() == \"df2=describe.describe(data=df1,by='a',fin='/usr/test.text')\")", "<gh_stars>1-10 # -*- coding: utf-8 -*- \"\"\" Created on Sat Jan 18 12:54:30", "unittest from stan.proc import proc_parse cstr = \"\"\"proc describe data = df1 out", "utf-8 -*- \"\"\" Created on Sat Jan 18 12:54:30 2014 @author: Chapman \"\"\"", "coding: utf-8 -*- \"\"\" Created on Sat Jan 18 12:54:30 2014 @author: Chapman", "run;\"\"\" cstr1 = \"\"\"proc describe data = df1 out = df2; by a;", "\"\"\" import unittest from stan.proc import proc_parse cstr = \"\"\"proc describe data =", "= \"\"\"proc describe data = df1 out = df2; by a; run;\"\"\" cstr1" ]
[ "INSTALLED_APPS = ( 'tests', ) MIDDLEWARE_CLASSES = () DATABASES = { 'default': {", "= 'so-secret' INSTALLED_APPS = ( 'tests', ) MIDDLEWARE_CLASSES = () DATABASES = {", "= () DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3', } }", "SECRET_KEY = 'so-secret' INSTALLED_APPS = ( 'tests', ) MIDDLEWARE_CLASSES = () DATABASES =", "= ( 'tests', ) MIDDLEWARE_CLASSES = () DATABASES = { 'default': { 'ENGINE':", "'tests', ) MIDDLEWARE_CLASSES = () DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME':", "MIDDLEWARE_CLASSES = () DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3', }", "'so-secret' INSTALLED_APPS = ( 'tests', ) MIDDLEWARE_CLASSES = () DATABASES = { 'default':", ") MIDDLEWARE_CLASSES = () DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3',", "( 'tests', ) MIDDLEWARE_CLASSES = () DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3',", "<reponame>jamesturk/django-mergeobject<gh_stars>0 SECRET_KEY = 'so-secret' INSTALLED_APPS = ( 'tests', ) MIDDLEWARE_CLASSES = () DATABASES" ]
[ "break update_pixel(int(math.floor(y)),x,r,g,b,a) y += slope def main(): global IMG # read in the", "x1 == x2: for y in range(min(y1,y2), max(y1,y2)+1): if not in_bounds(Point(x1,y)): break update_pixel(y,x1,r,g,b,a)", "in_bounds(p): global IMG return p.x > 0 and p.x < IMG.shape[1] and p.y", "i in xrange(IMG.shape[0]): for j in xrange(IMG.shape[1]): update_pixel(i,j,0,0,0,0) # draw all the halls", "# draw all the halls for splitLine in splitLines: if splitLine[0] != \"Hallway\":", "line in sys.stdin: splitLines += [line.strip().split(' ')] # start transparent for i in", "IMG.shape[1]*IMG.shape[2]))) file_handle.close() def in_bounds(p): global IMG return p.x > 0 and p.x <", "= float(y2 - y1) / float(x2 - x1) if math.fabs(slope) > 1.0: slope", "/ slope if y1 > y2: y1, y2 = y2, y1 x1, x2", "if math.fabs(slope) > 1.0: slope = 1.0 / slope if y1 > y2:", "for splitLine in splitLines: if splitLine[0] != \"Hallway\": continue point0 = splitLines[int(splitLine[1])] point1", "[line.strip().split(' ')] # start transparent for i in xrange(IMG.shape[0]): for j in xrange(IMG.shape[1]):", "- y1) / float(x2 - x1) if math.fabs(slope) > 1.0: slope = 1.0", "> y2: y1, y2 = y2, y1 x1, x2 = x2, x1 x", "= [] for line in sys.stdin: splitLines += [line.strip().split(' ')] # start transparent", "in sys.stdin: splitLines += [line.strip().split(' ')] # start transparent for i in xrange(IMG.shape[0]):", "file_handle = open(fn, 'wb') w = png.Writer(IMG.shape[1], IMG.shape[0], alpha=True) w.write(file_handle, np.reshape(IMG, (IMG.shape[0], IMG.shape[1]*IMG.shape[2])))", "in xrange(IMG.shape[0]): for j in xrange(IMG.shape[1]): update_pixel(i,j,0,0,0,0) # draw all the halls for", "for y in range(min(y1,y2), max(y1,y2)+1): if not in_bounds(Point(x1,y)): break update_pixel(y,x1,r,g,b,a) return slope =", "<gh_stars>1-10 #!/usr/bin/python2 import imageio import png import numpy as np import sys import", "global IMG return p.x > 0 and p.x < IMG.shape[1] and p.y >", "left to right if x1 > x2: x1, x2 = x2, x1 y1,", "- x1) if math.fabs(slope) > 1.0: slope = 1.0 / slope if y1", "import sys import math from collections import namedtuple if len(sys.argv) != 2: print(\"%s", "to right if x1 > x2: x1, x2 = x2, x1 y1, y2", "not in_bounds(Point(int(x),y)): break update_pixel(y,int(x),r,g,b,a) x += slope return # make it definitely go", "= x2, x1 y1, y2 = y2, y1 y = float(y1) for x", "in range(y1, y2+1): if not in_bounds(Point(int(x),y)): break update_pixel(y,int(x),r,g,b,a) x += slope return #", "p.y < IMG.shape[0] def draw_line(x1,y1,x2,y2,r,g,b,a): # straight line up and down if x1", "!= 2: print(\"%s <campusMap.png>\" % sys.argv[0]) exit() IMG = imageio.imread(sys.argv[1]) Point = namedtuple(\"Point\",[\"x\",\"y\"])", "0 and p.y < IMG.shape[0] def draw_line(x1,y1,x2,y2,r,g,b,a): # straight line up and down", "splitLine[0] != \"Hallway\": continue point0 = splitLines[int(splitLine[1])] point1 = splitLines[int(splitLine[2])] draw_line( \\ int(point0[3]),", "y1) / float(x2 - x1) if math.fabs(slope) > 1.0: slope = 1.0 /", "y2: y1, y2 = y2, y1 x1, x2 = x2, x1 x =", "splitLines += [line.strip().split(' ')] # start transparent for i in xrange(IMG.shape[0]): for j", "IMG return p.x > 0 and p.x < IMG.shape[1] and p.y > 0", "straight line up and down if x1 == x2: for y in range(min(y1,y2),", "y += slope def main(): global IMG # read in the file splitLines", "png import numpy as np import sys import math from collections import namedtuple", "temp[i] def write_png(fn): global IMG file_handle = open(fn, 'wb') w = png.Writer(IMG.shape[1], 
IMG.shape[0],", "and p.y < IMG.shape[0] def draw_line(x1,y1,x2,y2,r,g,b,a): # straight line up and down if", "x2, x1 y1, y2 = y2, y1 y = float(y1) for x in", "> 1.0: slope = 1.0 / slope if y1 > y2: y1, y2", "down if x1 == x2: for y in range(min(y1,y2), max(y1,y2)+1): if not in_bounds(Point(x1,y)):", "# start transparent for i in xrange(IMG.shape[0]): for j in xrange(IMG.shape[1]): update_pixel(i,j,0,0,0,0) #", "= imageio.imread(sys.argv[1]) Point = namedtuple(\"Point\",[\"x\",\"y\"]) def update_pixel(row,col,r,g,b,a): global IMG temp = [r,g,b,a] for", "png.Writer(IMG.shape[1], IMG.shape[0], alpha=True) w.write(file_handle, np.reshape(IMG, (IMG.shape[0], IMG.shape[1]*IMG.shape[2]))) file_handle.close() def in_bounds(p): global IMG return", "and p.y > 0 and p.y < IMG.shape[0] def draw_line(x1,y1,x2,y2,r,g,b,a): # straight line", "line up and down if x1 == x2: for y in range(min(y1,y2), max(y1,y2)+1):", "all the halls for splitLine in splitLines: if splitLine[0] != \"Hallway\": continue point0", "halls for splitLine in splitLines: if splitLine[0] != \"Hallway\": continue point0 = splitLines[int(splitLine[1])]", "namedtuple if len(sys.argv) != 2: print(\"%s <campusMap.png>\" % sys.argv[0]) exit() IMG = imageio.imread(sys.argv[1])", "IMG.shape[0] def draw_line(x1,y1,x2,y2,r,g,b,a): # straight line up and down if x1 == x2:", "w = png.Writer(IMG.shape[1], IMG.shape[0], alpha=True) w.write(file_handle, np.reshape(IMG, (IMG.shape[0], IMG.shape[1]*IMG.shape[2]))) file_handle.close() def in_bounds(p): global", "/ float(x2 - x1) if math.fabs(slope) > 1.0: slope = 1.0 / slope", "x1 > x2: x1, x2 = x2, x1 y1, y2 = y2, y1", "for x in range(x1, x2+1): if not in_bounds(Point(x,int(y))): break update_pixel(int(math.floor(y)),x,r,g,b,a) y += slope", "x2, x1 x = float(x1) for y in range(y1, y2+1): if not in_bounds(Point(int(x),y)):", "def main(): global IMG # read in the file splitLines = [] for", "main(): global IMG # read in the file splitLines = [] for line", "= splitLines[int(splitLine[2])] draw_line( \\ int(point0[3]), int(point0[2]), \\ int(point1[3]), int(point1[2]), \\ 0, 255, 255,", "y1, y2 = y2, y1 x1, x2 = x2, x1 x = float(x1)", "= y2, y1 x1, x2 = x2, x1 x = float(x1) for y", "return slope = float(y2 - y1) / float(x2 - x1) if math.fabs(slope) >", "splitLines = [] for line in sys.stdin: splitLines += [line.strip().split(' ')] # start", "slope = 1.0 / slope if y1 > y2: y1, y2 = y2,", "if not in_bounds(Point(int(x),y)): break update_pixel(y,int(x),r,g,b,a) x += slope return # make it definitely", "x2 = x2, x1 x = float(x1) for y in range(y1, y2+1): if", "IMG.shape[0], alpha=True) w.write(file_handle, np.reshape(IMG, (IMG.shape[0], IMG.shape[1]*IMG.shape[2]))) file_handle.close() def in_bounds(p): global IMG return p.x", "y1 > y2: y1, y2 = y2, y1 x1, x2 = x2, x1", "math from collections import namedtuple if len(sys.argv) != 2: print(\"%s <campusMap.png>\" % sys.argv[0])", "it definitely go left to right if x1 > x2: x1, x2 =", "y = float(y1) for x in range(x1, x2+1): if not in_bounds(Point(x,int(y))): break update_pixel(int(math.floor(y)),x,r,g,b,a)", "for i in xrange(len(temp)): IMG[row,col,i] = temp[i] def write_png(fn): global IMG file_handle =", "imageio.imread(sys.argv[1]) Point = namedtuple(\"Point\",[\"x\",\"y\"]) def update_pixel(row,col,r,g,b,a): global IMG temp = [r,g,b,a] for i", "continue point0 = splitLines[int(splitLine[1])] point1 = splitLines[int(splitLine[2])] draw_line( \\ int(point0[3]), int(point0[2]), \\ int(point1[3]),", "# read in the file splitLines = [] for line in sys.stdin: 
splitLines", "= float(x1) for y in range(y1, y2+1): if not in_bounds(Point(int(x),y)): break update_pixel(y,int(x),r,g,b,a) x", "float(x1) for y in range(y1, y2+1): if not in_bounds(Point(int(x),y)): break update_pixel(y,int(x),r,g,b,a) x +=", "splitLines[int(splitLine[2])] draw_line( \\ int(point0[3]), int(point0[2]), \\ int(point1[3]), int(point1[2]), \\ 0, 255, 255, 255", "= open(fn, 'wb') w = png.Writer(IMG.shape[1], IMG.shape[0], alpha=True) w.write(file_handle, np.reshape(IMG, (IMG.shape[0], IMG.shape[1]*IMG.shape[2]))) file_handle.close()", "transparent for i in xrange(IMG.shape[0]): for j in xrange(IMG.shape[1]): update_pixel(i,j,0,0,0,0) # draw all", "the halls for splitLine in splitLines: if splitLine[0] != \"Hallway\": continue point0 =", "float(y2 - y1) / float(x2 - x1) if math.fabs(slope) > 1.0: slope =", "in range(x1, x2+1): if not in_bounds(Point(x,int(y))): break update_pixel(int(math.floor(y)),x,r,g,b,a) y += slope def main():", "x1 y1, y2 = y2, y1 y = float(y1) for x in range(x1,", "y2 = y2, y1 x1, x2 = x2, x1 x = float(x1) for", "up and down if x1 == x2: for y in range(min(y1,y2), max(y1,y2)+1): if", "slope if y1 > y2: y1, y2 = y2, y1 x1, x2 =", "return # make it definitely go left to right if x1 > x2:", "!= \"Hallway\": continue point0 = splitLines[int(splitLine[1])] point1 = splitLines[int(splitLine[2])] draw_line( \\ int(point0[3]), int(point0[2]),", "splitLines[int(splitLine[1])] point1 = splitLines[int(splitLine[2])] draw_line( \\ int(point0[3]), int(point0[2]), \\ int(point1[3]), int(point1[2]), \\ 0,", "global IMG temp = [r,g,b,a] for i in xrange(len(temp)): IMG[row,col,i] = temp[i] def", "if not in_bounds(Point(x,int(y))): break update_pixel(int(math.floor(y)),x,r,g,b,a) y += slope def main(): global IMG #", "and down if x1 == x2: for y in range(min(y1,y2), max(y1,y2)+1): if not", "sys.stdin: splitLines += [line.strip().split(' ')] # start transparent for i in xrange(IMG.shape[0]): for", "p.x > 0 and p.x < IMG.shape[1] and p.y > 0 and p.y", "in_bounds(Point(int(x),y)): break update_pixel(y,int(x),r,g,b,a) x += slope return # make it definitely go left", "import png import numpy as np import sys import math from collections import", "def in_bounds(p): global IMG return p.x > 0 and p.x < IMG.shape[1] and", "in range(min(y1,y2), max(y1,y2)+1): if not in_bounds(Point(x1,y)): break update_pixel(y,x1,r,g,b,a) return slope = float(y2 -", "draw all the halls for splitLine in splitLines: if splitLine[0] != \"Hallway\": continue", "open(fn, 'wb') w = png.Writer(IMG.shape[1], IMG.shape[0], alpha=True) w.write(file_handle, np.reshape(IMG, (IMG.shape[0], IMG.shape[1]*IMG.shape[2]))) file_handle.close() def", "xrange(len(temp)): IMG[row,col,i] = temp[i] def write_png(fn): global IMG file_handle = open(fn, 'wb') w", "> 0 and p.y < IMG.shape[0] def draw_line(x1,y1,x2,y2,r,g,b,a): # straight line up and", "float(x2 - x1) if math.fabs(slope) > 1.0: slope = 1.0 / slope if", "if splitLine[0] != \"Hallway\": continue point0 = splitLines[int(splitLine[1])] point1 = splitLines[int(splitLine[2])] draw_line( \\", "<campusMap.png>\" % sys.argv[0]) exit() IMG = imageio.imread(sys.argv[1]) Point = namedtuple(\"Point\",[\"x\",\"y\"]) def update_pixel(row,col,r,g,b,a): global", "x2: for y in range(min(y1,y2), max(y1,y2)+1): if not in_bounds(Point(x1,y)): break update_pixel(y,x1,r,g,b,a) return slope", "< IMG.shape[1] and p.y > 0 and p.y < IMG.shape[0] def draw_line(x1,y1,x2,y2,r,g,b,a): #", "if not in_bounds(Point(x1,y)): break update_pixel(y,x1,r,g,b,a) return slope = float(y2 - y1) / 
float(x2", "def write_png(fn): global IMG file_handle = open(fn, 'wb') w = png.Writer(IMG.shape[1], IMG.shape[0], alpha=True)", "1.0 / slope if y1 > y2: y1, y2 = y2, y1 x1,", "update_pixel(y,int(x),r,g,b,a) x += slope return # make it definitely go left to right", "xrange(IMG.shape[0]): for j in xrange(IMG.shape[1]): update_pixel(i,j,0,0,0,0) # draw all the halls for splitLine", "len(sys.argv) != 2: print(\"%s <campusMap.png>\" % sys.argv[0]) exit() IMG = imageio.imread(sys.argv[1]) Point =", "definitely go left to right if x1 > x2: x1, x2 = x2,", "update_pixel(int(math.floor(y)),x,r,g,b,a) y += slope def main(): global IMG # read in the file", "% sys.argv[0]) exit() IMG = imageio.imread(sys.argv[1]) Point = namedtuple(\"Point\",[\"x\",\"y\"]) def update_pixel(row,col,r,g,b,a): global IMG", "x in range(x1, x2+1): if not in_bounds(Point(x,int(y))): break update_pixel(int(math.floor(y)),x,r,g,b,a) y += slope def", "slope = float(y2 - y1) / float(x2 - x1) if math.fabs(slope) > 1.0:", "= png.Writer(IMG.shape[1], IMG.shape[0], alpha=True) w.write(file_handle, np.reshape(IMG, (IMG.shape[0], IMG.shape[1]*IMG.shape[2]))) file_handle.close() def in_bounds(p): global IMG", "> 0 and p.x < IMG.shape[1] and p.y > 0 and p.y <", "0 and p.x < IMG.shape[1] and p.y > 0 and p.y < IMG.shape[0]", "Point = namedtuple(\"Point\",[\"x\",\"y\"]) def update_pixel(row,col,r,g,b,a): global IMG temp = [r,g,b,a] for i in", "[] for line in sys.stdin: splitLines += [line.strip().split(' ')] # start transparent for", "global IMG # read in the file splitLines = [] for line in", "range(min(y1,y2), max(y1,y2)+1): if not in_bounds(Point(x1,y)): break update_pixel(y,x1,r,g,b,a) return slope = float(y2 - y1)", "+= slope return # make it definitely go left to right if x1", "def update_pixel(row,col,r,g,b,a): global IMG temp = [r,g,b,a] for i in xrange(len(temp)): IMG[row,col,i] =", "print(\"%s <campusMap.png>\" % sys.argv[0]) exit() IMG = imageio.imread(sys.argv[1]) Point = namedtuple(\"Point\",[\"x\",\"y\"]) def update_pixel(row,col,r,g,b,a):", "global IMG file_handle = open(fn, 'wb') w = png.Writer(IMG.shape[1], IMG.shape[0], alpha=True) w.write(file_handle, np.reshape(IMG,", "w.write(file_handle, np.reshape(IMG, (IMG.shape[0], IMG.shape[1]*IMG.shape[2]))) file_handle.close() def in_bounds(p): global IMG return p.x > 0", "1.0: slope = 1.0 / slope if y1 > y2: y1, y2 =", "x2 = x2, x1 y1, y2 = y2, y1 y = float(y1) for", "x1 x = float(x1) for y in range(y1, y2+1): if not in_bounds(Point(int(x),y)): break", "range(y1, y2+1): if not in_bounds(Point(int(x),y)): break update_pixel(y,int(x),r,g,b,a) x += slope return # make", "the file splitLines = [] for line in sys.stdin: splitLines += [line.strip().split(' ')]", "np import sys import math from collections import namedtuple if len(sys.argv) != 2:", "collections import namedtuple if len(sys.argv) != 2: print(\"%s <campusMap.png>\" % sys.argv[0]) exit() IMG", "== x2: for y in range(min(y1,y2), max(y1,y2)+1): if not in_bounds(Point(x1,y)): break update_pixel(y,x1,r,g,b,a) return", "y1 x1, x2 = x2, x1 x = float(x1) for y in range(y1,", "for line in sys.stdin: splitLines += [line.strip().split(' ')] # start transparent for i", "x += slope return # make it definitely go left to right if", "in xrange(len(temp)): IMG[row,col,i] = temp[i] def write_png(fn): global IMG file_handle = open(fn, 'wb')", "exit() IMG = imageio.imread(sys.argv[1]) Point = namedtuple(\"Point\",[\"x\",\"y\"]) def update_pixel(row,col,r,g,b,a): global IMG temp =", "draw_line( \\ int(point0[3]), int(point0[2]), \\ 
int(point1[3]), int(point1[2]), \\ 0, 255, 255, 255 \\", "p.x < IMG.shape[1] and p.y > 0 and p.y < IMG.shape[0] def draw_line(x1,y1,x2,y2,r,g,b,a):", "i in xrange(len(temp)): IMG[row,col,i] = temp[i] def write_png(fn): global IMG file_handle = open(fn,", "x = float(x1) for y in range(y1, y2+1): if not in_bounds(Point(int(x),y)): break update_pixel(y,int(x),r,g,b,a)", "x1) if math.fabs(slope) > 1.0: slope = 1.0 / slope if y1 >", "temp = [r,g,b,a] for i in xrange(len(temp)): IMG[row,col,i] = temp[i] def write_png(fn): global", "< IMG.shape[0] def draw_line(x1,y1,x2,y2,r,g,b,a): # straight line up and down if x1 ==", "break update_pixel(y,x1,r,g,b,a) return slope = float(y2 - y1) / float(x2 - x1) if", "j in xrange(IMG.shape[1]): update_pixel(i,j,0,0,0,0) # draw all the halls for splitLine in splitLines:", "return p.x > 0 and p.x < IMG.shape[1] and p.y > 0 and", "IMG = imageio.imread(sys.argv[1]) Point = namedtuple(\"Point\",[\"x\",\"y\"]) def update_pixel(row,col,r,g,b,a): global IMG temp = [r,g,b,a]", "splitLines: if splitLine[0] != \"Hallway\": continue point0 = splitLines[int(splitLine[1])] point1 = splitLines[int(splitLine[2])] draw_line(", "# make it definitely go left to right if x1 > x2: x1,", "right if x1 > x2: x1, x2 = x2, x1 y1, y2 =", "= float(y1) for x in range(x1, x2+1): if not in_bounds(Point(x,int(y))): break update_pixel(int(math.floor(y)),x,r,g,b,a) y", "from collections import namedtuple if len(sys.argv) != 2: print(\"%s <campusMap.png>\" % sys.argv[0]) exit()", "int(point0[2]), \\ int(point1[3]), int(point1[2]), \\ 0, 255, 255, 255 \\ ) write_png(\"halls.png\") main()", "in xrange(IMG.shape[1]): update_pixel(i,j,0,0,0,0) # draw all the halls for splitLine in splitLines: if", "x2+1): if not in_bounds(Point(x,int(y))): break update_pixel(int(math.floor(y)),x,r,g,b,a) y += slope def main(): global IMG", "if x1 == x2: for y in range(min(y1,y2), max(y1,y2)+1): if not in_bounds(Point(x1,y)): break", "make it definitely go left to right if x1 > x2: x1, x2", "sys.argv[0]) exit() IMG = imageio.imread(sys.argv[1]) Point = namedtuple(\"Point\",[\"x\",\"y\"]) def update_pixel(row,col,r,g,b,a): global IMG temp", "x1, x2 = x2, x1 x = float(x1) for y in range(y1, y2+1):", "xrange(IMG.shape[1]): update_pixel(i,j,0,0,0,0) # draw all the halls for splitLine in splitLines: if splitLine[0]", "slope def main(): global IMG # read in the file splitLines = []", "\\ int(point0[3]), int(point0[2]), \\ int(point1[3]), int(point1[2]), \\ 0, 255, 255, 255 \\ )", "import imageio import png import numpy as np import sys import math from", "y in range(y1, y2+1): if not in_bounds(Point(int(x),y)): break update_pixel(y,int(x),r,g,b,a) x += slope return", "np.reshape(IMG, (IMG.shape[0], IMG.shape[1]*IMG.shape[2]))) file_handle.close() def in_bounds(p): global IMG return p.x > 0 and", "2: print(\"%s <campusMap.png>\" % sys.argv[0]) exit() IMG = imageio.imread(sys.argv[1]) Point = namedtuple(\"Point\",[\"x\",\"y\"]) def", "IMG[row,col,i] = temp[i] def write_png(fn): global IMG file_handle = open(fn, 'wb') w =", "math.fabs(slope) > 1.0: slope = 1.0 / slope if y1 > y2: y1,", "'wb') w = png.Writer(IMG.shape[1], IMG.shape[0], alpha=True) w.write(file_handle, np.reshape(IMG, (IMG.shape[0], IMG.shape[1]*IMG.shape[2]))) file_handle.close() def in_bounds(p):", "x2: x1, x2 = x2, x1 y1, y2 = y2, y1 y =", "point0 = splitLines[int(splitLine[1])] point1 = splitLines[int(splitLine[2])] draw_line( \\ int(point0[3]), int(point0[2]), \\ int(point1[3]), int(point1[2]),", "[r,g,b,a] for i in xrange(len(temp)): IMG[row,col,i] = 
temp[i] def write_png(fn): global IMG file_handle", "not in_bounds(Point(x1,y)): break update_pixel(y,x1,r,g,b,a) return slope = float(y2 - y1) / float(x2 -", "if y1 > y2: y1, y2 = y2, y1 x1, x2 = x2,", "in_bounds(Point(x,int(y))): break update_pixel(int(math.floor(y)),x,r,g,b,a) y += slope def main(): global IMG # read in", "#!/usr/bin/python2 import imageio import png import numpy as np import sys import math", "p.y > 0 and p.y < IMG.shape[0] def draw_line(x1,y1,x2,y2,r,g,b,a): # straight line up", "import numpy as np import sys import math from collections import namedtuple if", "int(point0[3]), int(point0[2]), \\ int(point1[3]), int(point1[2]), \\ 0, 255, 255, 255 \\ ) write_png(\"halls.png\")", "y2 = y2, y1 y = float(y1) for x in range(x1, x2+1): if", "in_bounds(Point(x1,y)): break update_pixel(y,x1,r,g,b,a) return slope = float(y2 - y1) / float(x2 - x1)", "y2, y1 y = float(y1) for x in range(x1, x2+1): if not in_bounds(Point(x,int(y))):", "range(x1, x2+1): if not in_bounds(Point(x,int(y))): break update_pixel(int(math.floor(y)),x,r,g,b,a) y += slope def main(): global", "y1 y = float(y1) for x in range(x1, x2+1): if not in_bounds(Point(x,int(y))): break", "y2, y1 x1, x2 = x2, x1 x = float(x1) for y in", "+= [line.strip().split(' ')] # start transparent for i in xrange(IMG.shape[0]): for j in", "def draw_line(x1,y1,x2,y2,r,g,b,a): # straight line up and down if x1 == x2: for", "sys import math from collections import namedtuple if len(sys.argv) != 2: print(\"%s <campusMap.png>\"", "for j in xrange(IMG.shape[1]): update_pixel(i,j,0,0,0,0) # draw all the halls for splitLine in", "= 1.0 / slope if y1 > y2: y1, y2 = y2, y1", "file_handle.close() def in_bounds(p): global IMG return p.x > 0 and p.x < IMG.shape[1]", "update_pixel(i,j,0,0,0,0) # draw all the halls for splitLine in splitLines: if splitLine[0] !=", "\"Hallway\": continue point0 = splitLines[int(splitLine[1])] point1 = splitLines[int(splitLine[2])] draw_line( \\ int(point0[3]), int(point0[2]), \\", "import math from collections import namedtuple if len(sys.argv) != 2: print(\"%s <campusMap.png>\" %", "namedtuple(\"Point\",[\"x\",\"y\"]) def update_pixel(row,col,r,g,b,a): global IMG temp = [r,g,b,a] for i in xrange(len(temp)): IMG[row,col,i]", "import namedtuple if len(sys.argv) != 2: print(\"%s <campusMap.png>\" % sys.argv[0]) exit() IMG =", "max(y1,y2)+1): if not in_bounds(Point(x1,y)): break update_pixel(y,x1,r,g,b,a) return slope = float(y2 - y1) /", "# straight line up and down if x1 == x2: for y in", "IMG temp = [r,g,b,a] for i in xrange(len(temp)): IMG[row,col,i] = temp[i] def write_png(fn):", "')] # start transparent for i in xrange(IMG.shape[0]): for j in xrange(IMG.shape[1]): update_pixel(i,j,0,0,0,0)", "y in range(min(y1,y2), max(y1,y2)+1): if not in_bounds(Point(x1,y)): break update_pixel(y,x1,r,g,b,a) return slope = float(y2", "float(y1) for x in range(x1, x2+1): if not in_bounds(Point(x,int(y))): break update_pixel(int(math.floor(y)),x,r,g,b,a) y +=", "= x2, x1 x = float(x1) for y in range(y1, y2+1): if not", "in splitLines: if splitLine[0] != \"Hallway\": continue point0 = splitLines[int(splitLine[1])] point1 = splitLines[int(splitLine[2])]", "and p.x < IMG.shape[1] and p.y > 0 and p.y < IMG.shape[0] def", "slope return # make it definitely go left to right if x1 >", "IMG.shape[1] and p.y > 0 and p.y < IMG.shape[0] def draw_line(x1,y1,x2,y2,r,g,b,a): # straight", "file splitLines = [] for line in sys.stdin: splitLines += [line.strip().split(' ')] #", "update_pixel(row,col,r,g,b,a): global IMG temp = [r,g,b,a] for i 
in xrange(len(temp)): IMG[row,col,i] = temp[i]", "draw_line(x1,y1,x2,y2,r,g,b,a): # straight line up and down if x1 == x2: for y", "= y2, y1 y = float(y1) for x in range(x1, x2+1): if not", "(IMG.shape[0], IMG.shape[1]*IMG.shape[2]))) file_handle.close() def in_bounds(p): global IMG return p.x > 0 and p.x", "if x1 > x2: x1, x2 = x2, x1 y1, y2 = y2,", "IMG file_handle = open(fn, 'wb') w = png.Writer(IMG.shape[1], IMG.shape[0], alpha=True) w.write(file_handle, np.reshape(IMG, (IMG.shape[0],", "for i in xrange(IMG.shape[0]): for j in xrange(IMG.shape[1]): update_pixel(i,j,0,0,0,0) # draw all the", "if len(sys.argv) != 2: print(\"%s <campusMap.png>\" % sys.argv[0]) exit() IMG = imageio.imread(sys.argv[1]) Point", "go left to right if x1 > x2: x1, x2 = x2, x1", "+= slope def main(): global IMG # read in the file splitLines =", "imageio import png import numpy as np import sys import math from collections", "start transparent for i in xrange(IMG.shape[0]): for j in xrange(IMG.shape[1]): update_pixel(i,j,0,0,0,0) # draw", "for y in range(y1, y2+1): if not in_bounds(Point(int(x),y)): break update_pixel(y,int(x),r,g,b,a) x += slope", "IMG # read in the file splitLines = [] for line in sys.stdin:", "point1 = splitLines[int(splitLine[2])] draw_line( \\ int(point0[3]), int(point0[2]), \\ int(point1[3]), int(point1[2]), \\ 0, 255,", "x1, x2 = x2, x1 y1, y2 = y2, y1 y = float(y1)", "write_png(fn): global IMG file_handle = open(fn, 'wb') w = png.Writer(IMG.shape[1], IMG.shape[0], alpha=True) w.write(file_handle,", "numpy as np import sys import math from collections import namedtuple if len(sys.argv)", "y1, y2 = y2, y1 y = float(y1) for x in range(x1, x2+1):", "not in_bounds(Point(x,int(y))): break update_pixel(int(math.floor(y)),x,r,g,b,a) y += slope def main(): global IMG # read", "update_pixel(y,x1,r,g,b,a) return slope = float(y2 - y1) / float(x2 - x1) if math.fabs(slope)", "> x2: x1, x2 = x2, x1 y1, y2 = y2, y1 y", "= namedtuple(\"Point\",[\"x\",\"y\"]) def update_pixel(row,col,r,g,b,a): global IMG temp = [r,g,b,a] for i in xrange(len(temp)):", "= splitLines[int(splitLine[1])] point1 = splitLines[int(splitLine[2])] draw_line( \\ int(point0[3]), int(point0[2]), \\ int(point1[3]), int(point1[2]), \\", "alpha=True) w.write(file_handle, np.reshape(IMG, (IMG.shape[0], IMG.shape[1]*IMG.shape[2]))) file_handle.close() def in_bounds(p): global IMG return p.x >", "read in the file splitLines = [] for line in sys.stdin: splitLines +=", "as np import sys import math from collections import namedtuple if len(sys.argv) !=", "= [r,g,b,a] for i in xrange(len(temp)): IMG[row,col,i] = temp[i] def write_png(fn): global IMG", "in the file splitLines = [] for line in sys.stdin: splitLines += [line.strip().split('", "y2+1): if not in_bounds(Point(int(x),y)): break update_pixel(y,int(x),r,g,b,a) x += slope return # make it", "splitLine in splitLines: if splitLine[0] != \"Hallway\": continue point0 = splitLines[int(splitLine[1])] point1 =", "break update_pixel(y,int(x),r,g,b,a) x += slope return # make it definitely go left to", "= temp[i] def write_png(fn): global IMG file_handle = open(fn, 'wb') w = png.Writer(IMG.shape[1]," ]
[ "plt.plot(rewards) plt.xlabel('number of games played') plt.ylabel('reward received per game') plt.subplot(2,1,2) plt.plot(steps) plt.xlabel('number of", "= open(filename,'r') text = f.read() f.close() rewards = [] steps = [] for", "text = f.read() f.close() rewards = [] steps = [] for line in", "= ['SmallAgent/2018-01-15 13:12:18.774147'] rewards,steps = loadFiles(['./SmallAgent/29-01-2018']) rewards = Average(rewards,10) steps = Average(steps,10) plotResults(rewards,steps,\"./test.png\")", "[] for f in files: r,s = loadFile(f) rewards.extend(r) steps.extend(s) return rewards,steps, def", "return rewards,steps def loadFiles(files): rewards = [] steps = [] for f in", "= ['LargeAgent/2018-01-15 11:50:29.380284'] SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147'] rewards,steps = loadFiles(['./SmallAgent/29-01-2018']) rewards = Average(rewards,10)", "plt.xlabel('number of games played') plt.ylabel('reward received per game') plt.subplot(2,1,2) plt.plot(steps) plt.xlabel('number of games", "loadFile(f) rewards.extend(r) steps.extend(s) return rewards,steps, def plotResults(rewards,steps,outputFile): plt.subplot(2,1,1) plt.plot(rewards) plt.xlabel('number of games played')", "game') plt.savefig(outputFile) def Average(rewards,n): return [np.mean(rewards[i:i+n]) for i in range(len(rewards)-n)] if(__name__ == \"__main__\"):", "games played') plt.ylabel('number of actions taken per game') plt.savefig(outputFile) def Average(rewards,n): return [np.mean(rewards[i:i+n])", "= [] steps = [] for f in files: r,s = loadFile(f) rewards.extend(r)", "range(len(rewards)-n)] if(__name__ == \"__main__\"): LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284'] SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147'] rewards,steps", "2): rewards.append(float(pieces[0])) steps.append(int(pieces[1])) return rewards,steps def loadFiles(files): rewards = [] steps = []", "of games played') plt.ylabel('number of actions taken per game') plt.savefig(outputFile) def Average(rewards,n): return", "i in range(len(rewards)-n)] if(__name__ == \"__main__\"): LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284'] SmallAgent = ['SmallAgent/2018-01-15", "return rewards,steps, def plotResults(rewards,steps,outputFile): plt.subplot(2,1,1) plt.plot(rewards) plt.xlabel('number of games played') plt.ylabel('reward received per", "files: r,s = loadFile(f) rewards.extend(r) steps.extend(s) return rewards,steps, def plotResults(rewards,steps,outputFile): plt.subplot(2,1,1) plt.plot(rewards) plt.xlabel('number", "games played') plt.ylabel('reward received per game') plt.subplot(2,1,2) plt.plot(steps) plt.xlabel('number of games played') plt.ylabel('number", "[] steps = [] for line in text.split('\\n'): pieces = line.split(',') if(len(pieces) ==", "== 2): rewards.append(float(pieces[0])) steps.append(int(pieces[1])) return rewards,steps def loadFiles(files): rewards = [] steps =", "r,s = loadFile(f) rewards.extend(r) steps.extend(s) return rewards,steps, def plotResults(rewards,steps,outputFile): plt.subplot(2,1,1) plt.plot(rewards) plt.xlabel('number of", "= [] for line in text.split('\\n'): pieces = line.split(',') if(len(pieces) == 2): rewards.append(float(pieces[0]))", "def loadFiles(files): rewards = [] steps = [] for f in files: r,s", "received per game') plt.subplot(2,1,2) plt.plot(steps) plt.xlabel('number of games played') plt.ylabel('number of actions taken", "plt.savefig(outputFile) def Average(rewards,n): return [np.mean(rewards[i:i+n]) for i in range(len(rewards)-n)] if(__name__ == \"__main__\"): LargeAgent", "= [] for f 
in files: r,s = loadFile(f) rewards.extend(r) steps.extend(s) return rewards,steps,", "LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284'] SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147'] rewards,steps = loadFiles(['./SmallAgent/29-01-2018']) rewards =", "f.read() f.close() rewards = [] steps = [] for line in text.split('\\n'): pieces", "pyplot as plt def loadFile(filename): f = open(filename,'r') text = f.read() f.close() rewards", "played') plt.ylabel('number of actions taken per game') plt.savefig(outputFile) def Average(rewards,n): return [np.mean(rewards[i:i+n]) for", "matplotlib import pyplot as plt def loadFile(filename): f = open(filename,'r') text = f.read()", "import numpy as np from matplotlib import pyplot as plt def loadFile(filename): f", "open(filename,'r') text = f.read() f.close() rewards = [] steps = [] for line", "in range(len(rewards)-n)] if(__name__ == \"__main__\"): LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284'] SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147']", "f.close() rewards = [] steps = [] for line in text.split('\\n'): pieces =", "return [np.mean(rewards[i:i+n]) for i in range(len(rewards)-n)] if(__name__ == \"__main__\"): LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284']", "of games played') plt.ylabel('reward received per game') plt.subplot(2,1,2) plt.plot(steps) plt.xlabel('number of games played')", "for line in text.split('\\n'): pieces = line.split(',') if(len(pieces) == 2): rewards.append(float(pieces[0])) steps.append(int(pieces[1])) return", "plotResults(rewards,steps,outputFile): plt.subplot(2,1,1) plt.plot(rewards) plt.xlabel('number of games played') plt.ylabel('reward received per game') plt.subplot(2,1,2) plt.plot(steps)", "\"__main__\"): LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284'] SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147'] rewards,steps = loadFiles(['./SmallAgent/29-01-2018']) rewards", "import pyplot as plt def loadFile(filename): f = open(filename,'r') text = f.read() f.close()", "plt.plot(steps) plt.xlabel('number of games played') plt.ylabel('number of actions taken per game') plt.savefig(outputFile) def", "steps.append(int(pieces[1])) return rewards,steps def loadFiles(files): rewards = [] steps = [] for f", "actions taken per game') plt.savefig(outputFile) def Average(rewards,n): return [np.mean(rewards[i:i+n]) for i in range(len(rewards)-n)]", "== \"__main__\"): LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284'] SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147'] rewards,steps = loadFiles(['./SmallAgent/29-01-2018'])", "SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147'] rewards,steps = loadFiles(['./SmallAgent/29-01-2018']) rewards = Average(rewards,10) steps = Average(steps,10)", "plt.subplot(2,1,2) plt.plot(steps) plt.xlabel('number of games played') plt.ylabel('number of actions taken per game') plt.savefig(outputFile)", "plt.ylabel('reward received per game') plt.subplot(2,1,2) plt.plot(steps) plt.xlabel('number of games played') plt.ylabel('number of actions", "game') plt.subplot(2,1,2) plt.plot(steps) plt.xlabel('number of games played') plt.ylabel('number of actions taken per game')", "= loadFile(f) rewards.extend(r) steps.extend(s) return rewards,steps, def plotResults(rewards,steps,outputFile): plt.subplot(2,1,1) plt.plot(rewards) plt.xlabel('number of games", "= f.read() f.close() rewards = [] steps = [] for line in text.split('\\n'):", "plt def loadFile(filename): f = open(filename,'r') text = f.read() f.close() rewards = []", "plt.xlabel('number of games played') 
plt.ylabel('number of actions taken per game') plt.savefig(outputFile) def Average(rewards,n):", "plt.subplot(2,1,1) plt.plot(rewards) plt.xlabel('number of games played') plt.ylabel('reward received per game') plt.subplot(2,1,2) plt.plot(steps) plt.xlabel('number", "line in text.split('\\n'): pieces = line.split(',') if(len(pieces) == 2): rewards.append(float(pieces[0])) steps.append(int(pieces[1])) return rewards,steps", "= [] steps = [] for line in text.split('\\n'): pieces = line.split(',') if(len(pieces)", "[] steps = [] for f in files: r,s = loadFile(f) rewards.extend(r) steps.extend(s)", "rewards,steps def loadFiles(files): rewards = [] steps = [] for f in files:", "numpy as np from matplotlib import pyplot as plt def loadFile(filename): f =", "as plt def loadFile(filename): f = open(filename,'r') text = f.read() f.close() rewards =", "rewards,steps, def plotResults(rewards,steps,outputFile): plt.subplot(2,1,1) plt.plot(rewards) plt.xlabel('number of games played') plt.ylabel('reward received per game')", "in files: r,s = loadFile(f) rewards.extend(r) steps.extend(s) return rewards,steps, def plotResults(rewards,steps,outputFile): plt.subplot(2,1,1) plt.plot(rewards)", "def loadFile(filename): f = open(filename,'r') text = f.read() f.close() rewards = [] steps", "np from matplotlib import pyplot as plt def loadFile(filename): f = open(filename,'r') text", "['LargeAgent/2018-01-15 11:50:29.380284'] SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147'] rewards,steps = loadFiles(['./SmallAgent/29-01-2018']) rewards = Average(rewards,10) steps", "in text.split('\\n'): pieces = line.split(',') if(len(pieces) == 2): rewards.append(float(pieces[0])) steps.append(int(pieces[1])) return rewards,steps def", "steps.extend(s) return rewards,steps, def plotResults(rewards,steps,outputFile): plt.subplot(2,1,1) plt.plot(rewards) plt.xlabel('number of games played') plt.ylabel('reward received", "rewards.append(float(pieces[0])) steps.append(int(pieces[1])) return rewards,steps def loadFiles(files): rewards = [] steps = [] for", "steps = [] for f in files: r,s = loadFile(f) rewards.extend(r) steps.extend(s) return", "if(__name__ == \"__main__\"): LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284'] SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147'] rewards,steps =", "rewards = [] steps = [] for f in files: r,s = loadFile(f)", "rewards.extend(r) steps.extend(s) return rewards,steps, def plotResults(rewards,steps,outputFile): plt.subplot(2,1,1) plt.plot(rewards) plt.xlabel('number of games played') plt.ylabel('reward", "def plotResults(rewards,steps,outputFile): plt.subplot(2,1,1) plt.plot(rewards) plt.xlabel('number of games played') plt.ylabel('reward received per game') plt.subplot(2,1,2)", "Average(rewards,n): return [np.mean(rewards[i:i+n]) for i in range(len(rewards)-n)] if(__name__ == \"__main__\"): LargeAgent = ['LargeAgent/2018-01-15", "plt.ylabel('number of actions taken per game') plt.savefig(outputFile) def Average(rewards,n): return [np.mean(rewards[i:i+n]) for i", "if(len(pieces) == 2): rewards.append(float(pieces[0])) steps.append(int(pieces[1])) return rewards,steps def loadFiles(files): rewards = [] steps", "per game') plt.subplot(2,1,2) plt.plot(steps) plt.xlabel('number of games played') plt.ylabel('number of actions taken per", "text.split('\\n'): pieces = line.split(',') if(len(pieces) == 2): rewards.append(float(pieces[0])) steps.append(int(pieces[1])) return rewards,steps def loadFiles(files):", "loadFile(filename): f = open(filename,'r') text = f.read() f.close() 
rewards = [] steps =", "rewards = [] steps = [] for line in text.split('\\n'): pieces = line.split(',')", "pieces = line.split(',') if(len(pieces) == 2): rewards.append(float(pieces[0])) steps.append(int(pieces[1])) return rewards,steps def loadFiles(files): rewards", "taken per game') plt.savefig(outputFile) def Average(rewards,n): return [np.mean(rewards[i:i+n]) for i in range(len(rewards)-n)] if(__name__", "def Average(rewards,n): return [np.mean(rewards[i:i+n]) for i in range(len(rewards)-n)] if(__name__ == \"__main__\"): LargeAgent =", "line.split(',') if(len(pieces) == 2): rewards.append(float(pieces[0])) steps.append(int(pieces[1])) return rewards,steps def loadFiles(files): rewards = []", "steps = [] for line in text.split('\\n'): pieces = line.split(',') if(len(pieces) == 2):", "loadFiles(files): rewards = [] steps = [] for f in files: r,s =", "from matplotlib import pyplot as plt def loadFile(filename): f = open(filename,'r') text =", "f in files: r,s = loadFile(f) rewards.extend(r) steps.extend(s) return rewards,steps, def plotResults(rewards,steps,outputFile): plt.subplot(2,1,1)", "of actions taken per game') plt.savefig(outputFile) def Average(rewards,n): return [np.mean(rewards[i:i+n]) for i in", "per game') plt.savefig(outputFile) def Average(rewards,n): return [np.mean(rewards[i:i+n]) for i in range(len(rewards)-n)] if(__name__ ==", "f = open(filename,'r') text = f.read() f.close() rewards = [] steps = []", "11:50:29.380284'] SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147'] rewards,steps = loadFiles(['./SmallAgent/29-01-2018']) rewards = Average(rewards,10) steps =", "as np from matplotlib import pyplot as plt def loadFile(filename): f = open(filename,'r')", "for f in files: r,s = loadFile(f) rewards.extend(r) steps.extend(s) return rewards,steps, def plotResults(rewards,steps,outputFile):", "[] for line in text.split('\\n'): pieces = line.split(',') if(len(pieces) == 2): rewards.append(float(pieces[0])) steps.append(int(pieces[1]))", "played') plt.ylabel('reward received per game') plt.subplot(2,1,2) plt.plot(steps) plt.xlabel('number of games played') plt.ylabel('number of", "for i in range(len(rewards)-n)] if(__name__ == \"__main__\"): LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284'] SmallAgent =", "= line.split(',') if(len(pieces) == 2): rewards.append(float(pieces[0])) steps.append(int(pieces[1])) return rewards,steps def loadFiles(files): rewards =", "[np.mean(rewards[i:i+n]) for i in range(len(rewards)-n)] if(__name__ == \"__main__\"): LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284'] SmallAgent" ]
[]
[ "torch import numpy as np from traffic_lights.data.constants import CLASS_LABEL_MAP from traffic_lights.lib.engine import train_one_epoch", "= get_model(num_classes) model.to(device) epochs = parameterization[\"num_epochs\"] learning_rate = parameterization[\"lr\"] optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)", "data_loader_train, device, epoch, print_freq=1000 ) evaluation = evaluate(model, data_loader_val, device, validation_ground_truth) precisions =", "CLASS_LABEL_MAP from traffic_lights.lib.engine import train_one_epoch from .model import get_model, evaluate # TODO: add", "TODO: look at amount of parameters y so much def train( parameterization, num_classes,", "in range(epochs): train_one_epoch( model, optimizer, data_loader_train, device, epoch, print_freq=1000 ) evaluation = evaluate(model,", "train_one_epoch from .model import get_model, evaluate # TODO: add eval boolean flag, which", "import numpy as np from traffic_lights.data.constants import CLASS_LABEL_MAP from traffic_lights.lib.engine import train_one_epoch from", "look at amount of parameters y so much def train( parameterization, num_classes, device,", "device, epoch, print_freq=1000 ) evaluation = evaluate(model, data_loader_val, device, validation_ground_truth) precisions = [", "lr=learning_rate) print(\"Using lr={} and num_epochs={}\".format(learning_rate, epochs)) for epoch in range(epochs): train_one_epoch( model, optimizer,", "train_one_epoch( model, optimizer, data_loader_train, device, epoch, print_freq=1000 ) evaluation = evaluate(model, data_loader_val, device,", "= parameterization[\"num_epochs\"] learning_rate = parameterization[\"lr\"] optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) print(\"Using lr={} and num_epochs={}\".format(learning_rate,", "] mean_average_precision = np.sum(precisions) / len(CLASS_LABEL_MAP) print(\"mAP:\", mean_average_precision) torch.save(model, \"model_lr_{}_epochs_{}.pth\".format(learning_rate, epochs)) return mean_average_precision", "epoch in range(epochs): train_one_epoch( model, optimizer, data_loader_train, device, epoch, print_freq=1000 ) evaluation =", "parameterization[\"num_epochs\"] learning_rate = parameterization[\"lr\"] optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) print(\"Using lr={} and num_epochs={}\".format(learning_rate, epochs))", "evaluation ] mean_average_precision = np.sum(precisions) / len(CLASS_LABEL_MAP) print(\"mAP:\", mean_average_precision) torch.save(model, \"model_lr_{}_epochs_{}.pth\".format(learning_rate, epochs)) return", "model = get_model(num_classes) model.to(device) epochs = parameterization[\"num_epochs\"] learning_rate = parameterization[\"lr\"] optimizer = torch.optim.Adam(model.parameters(),", "from traffic_lights.lib.engine import train_one_epoch from .model import get_model, evaluate # TODO: add eval", "learning_rate = parameterization[\"lr\"] optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) print(\"Using lr={} and num_epochs={}\".format(learning_rate, epochs)) for", "eval stuff if wanted # TODO: look at amount of parameters y so", "model.to(device) epochs = parameterization[\"num_epochs\"] learning_rate = parameterization[\"lr\"] optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) print(\"Using lr={}", "y so much def train( parameterization, num_classes, device, data_loader_train, data_loader_val, validation_ground_truth, ): model", "metric[\"AP\"] for metric in evaluation ] mean_average_precision = np.sum(precisions) / len(CLASS_LABEL_MAP) 
print(\"mAP:\", mean_average_precision)", "parameterization, num_classes, device, data_loader_train, data_loader_val, validation_ground_truth, ): model = get_model(num_classes) model.to(device) epochs =", "traffic_lights.data.constants import CLASS_LABEL_MAP from traffic_lights.lib.engine import train_one_epoch from .model import get_model, evaluate #", "from traffic_lights.data.constants import CLASS_LABEL_MAP from traffic_lights.lib.engine import train_one_epoch from .model import get_model, evaluate", "import torch import numpy as np from traffic_lights.data.constants import CLASS_LABEL_MAP from traffic_lights.lib.engine import", "wanted # TODO: look at amount of parameters y so much def train(", "print_freq=1000 ) evaluation = evaluate(model, data_loader_val, device, validation_ground_truth) precisions = [ 0 if", "evaluate # TODO: add eval boolean flag, which reports eval stuff if wanted", "data_loader_val, validation_ground_truth, ): model = get_model(num_classes) model.to(device) epochs = parameterization[\"num_epochs\"] learning_rate = parameterization[\"lr\"]", "at amount of parameters y so much def train( parameterization, num_classes, device, data_loader_train,", "# TODO: look at amount of parameters y so much def train( parameterization,", "evaluate(model, data_loader_val, device, validation_ground_truth) precisions = [ 0 if np.isnan(metric[\"AP\"]) else metric[\"AP\"] for", "): model = get_model(num_classes) model.to(device) epochs = parameterization[\"num_epochs\"] learning_rate = parameterization[\"lr\"] optimizer =", "get_model, evaluate # TODO: add eval boolean flag, which reports eval stuff if", "= evaluate(model, data_loader_val, device, validation_ground_truth) precisions = [ 0 if np.isnan(metric[\"AP\"]) else metric[\"AP\"]", "numpy as np from traffic_lights.data.constants import CLASS_LABEL_MAP from traffic_lights.lib.engine import train_one_epoch from .model", "get_model(num_classes) model.to(device) epochs = parameterization[\"num_epochs\"] learning_rate = parameterization[\"lr\"] optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) print(\"Using", "= parameterization[\"lr\"] optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) print(\"Using lr={} and num_epochs={}\".format(learning_rate, epochs)) for epoch", "boolean flag, which reports eval stuff if wanted # TODO: look at amount", "of parameters y so much def train( parameterization, num_classes, device, data_loader_train, data_loader_val, validation_ground_truth,", "stuff if wanted # TODO: look at amount of parameters y so much", "validation_ground_truth) precisions = [ 0 if np.isnan(metric[\"AP\"]) else metric[\"AP\"] for metric in evaluation", "num_epochs={}\".format(learning_rate, epochs)) for epoch in range(epochs): train_one_epoch( model, optimizer, data_loader_train, device, epoch, print_freq=1000", "parameters y so much def train( parameterization, num_classes, device, data_loader_train, data_loader_val, validation_ground_truth, ):", "# TODO: add eval boolean flag, which reports eval stuff if wanted #", "else metric[\"AP\"] for metric in evaluation ] mean_average_precision = np.sum(precisions) / len(CLASS_LABEL_MAP) print(\"mAP:\",", "<reponame>aidandunlop/traffic_light_recognition<filename>traffic_lights/training/train.py import torch import numpy as np from traffic_lights.data.constants import CLASS_LABEL_MAP from traffic_lights.lib.engine", "precisions = [ 0 if np.isnan(metric[\"AP\"]) else metric[\"AP\"] for metric in evaluation ]", "traffic_lights.lib.engine import 
train_one_epoch from .model import get_model, evaluate # TODO: add eval boolean", "which reports eval stuff if wanted # TODO: look at amount of parameters", "device, data_loader_train, data_loader_val, validation_ground_truth, ): model = get_model(num_classes) model.to(device) epochs = parameterization[\"num_epochs\"] learning_rate", "train( parameterization, num_classes, device, data_loader_train, data_loader_val, validation_ground_truth, ): model = get_model(num_classes) model.to(device) epochs", "from .model import get_model, evaluate # TODO: add eval boolean flag, which reports", "print(\"Using lr={} and num_epochs={}\".format(learning_rate, epochs)) for epoch in range(epochs): train_one_epoch( model, optimizer, data_loader_train,", "import get_model, evaluate # TODO: add eval boolean flag, which reports eval stuff", "data_loader_val, device, validation_ground_truth) precisions = [ 0 if np.isnan(metric[\"AP\"]) else metric[\"AP\"] for metric", "evaluation = evaluate(model, data_loader_val, device, validation_ground_truth) precisions = [ 0 if np.isnan(metric[\"AP\"]) else", ".model import get_model, evaluate # TODO: add eval boolean flag, which reports eval", "flag, which reports eval stuff if wanted # TODO: look at amount of", "so much def train( parameterization, num_classes, device, data_loader_train, data_loader_val, validation_ground_truth, ): model =", "in evaluation ] mean_average_precision = np.sum(precisions) / len(CLASS_LABEL_MAP) print(\"mAP:\", mean_average_precision) torch.save(model, \"model_lr_{}_epochs_{}.pth\".format(learning_rate, epochs))", "for epoch in range(epochs): train_one_epoch( model, optimizer, data_loader_train, device, epoch, print_freq=1000 ) evaluation", "= [ 0 if np.isnan(metric[\"AP\"]) else metric[\"AP\"] for metric in evaluation ] mean_average_precision", "amount of parameters y so much def train( parameterization, num_classes, device, data_loader_train, data_loader_val,", "reports eval stuff if wanted # TODO: look at amount of parameters y", "for metric in evaluation ] mean_average_precision = np.sum(precisions) / len(CLASS_LABEL_MAP) print(\"mAP:\", mean_average_precision) torch.save(model,", "import CLASS_LABEL_MAP from traffic_lights.lib.engine import train_one_epoch from .model import get_model, evaluate # TODO:", ") evaluation = evaluate(model, data_loader_val, device, validation_ground_truth) precisions = [ 0 if np.isnan(metric[\"AP\"])", "torch.optim.Adam(model.parameters(), lr=learning_rate) print(\"Using lr={} and num_epochs={}\".format(learning_rate, epochs)) for epoch in range(epochs): train_one_epoch( model,", "num_classes, device, data_loader_train, data_loader_val, validation_ground_truth, ): model = get_model(num_classes) model.to(device) epochs = parameterization[\"num_epochs\"]", "much def train( parameterization, num_classes, device, data_loader_train, data_loader_val, validation_ground_truth, ): model = get_model(num_classes)", "range(epochs): train_one_epoch( model, optimizer, data_loader_train, device, epoch, print_freq=1000 ) evaluation = evaluate(model, data_loader_val,", "np from traffic_lights.data.constants import CLASS_LABEL_MAP from traffic_lights.lib.engine import train_one_epoch from .model import get_model,", "and num_epochs={}\".format(learning_rate, epochs)) for epoch in range(epochs): train_one_epoch( model, optimizer, data_loader_train, device, epoch,", "epoch, print_freq=1000 ) evaluation = evaluate(model, data_loader_val, device, validation_ground_truth) precisions = [ 0", "metric in evaluation ] 
mean_average_precision = np.sum(precisions) / len(CLASS_LABEL_MAP) print(\"mAP:\", mean_average_precision) torch.save(model, \"model_lr_{}_epochs_{}.pth\".format(learning_rate,", "eval boolean flag, which reports eval stuff if wanted # TODO: look at", "device, validation_ground_truth) precisions = [ 0 if np.isnan(metric[\"AP\"]) else metric[\"AP\"] for metric in", "optimizer, data_loader_train, device, epoch, print_freq=1000 ) evaluation = evaluate(model, data_loader_val, device, validation_ground_truth) precisions", "TODO: add eval boolean flag, which reports eval stuff if wanted # TODO:", "model, optimizer, data_loader_train, device, epoch, print_freq=1000 ) evaluation = evaluate(model, data_loader_val, device, validation_ground_truth)", "epochs = parameterization[\"num_epochs\"] learning_rate = parameterization[\"lr\"] optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) print(\"Using lr={} and", "np.isnan(metric[\"AP\"]) else metric[\"AP\"] for metric in evaluation ] mean_average_precision = np.sum(precisions) / len(CLASS_LABEL_MAP)", "0 if np.isnan(metric[\"AP\"]) else metric[\"AP\"] for metric in evaluation ] mean_average_precision = np.sum(precisions)", "= torch.optim.Adam(model.parameters(), lr=learning_rate) print(\"Using lr={} and num_epochs={}\".format(learning_rate, epochs)) for epoch in range(epochs): train_one_epoch(", "if np.isnan(metric[\"AP\"]) else metric[\"AP\"] for metric in evaluation ] mean_average_precision = np.sum(precisions) /", "def train( parameterization, num_classes, device, data_loader_train, data_loader_val, validation_ground_truth, ): model = get_model(num_classes) model.to(device)", "optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) print(\"Using lr={} and num_epochs={}\".format(learning_rate, epochs)) for epoch in range(epochs):", "parameterization[\"lr\"] optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) print(\"Using lr={} and num_epochs={}\".format(learning_rate, epochs)) for epoch in", "if wanted # TODO: look at amount of parameters y so much def", "import train_one_epoch from .model import get_model, evaluate # TODO: add eval boolean flag,", "data_loader_train, data_loader_val, validation_ground_truth, ): model = get_model(num_classes) model.to(device) epochs = parameterization[\"num_epochs\"] learning_rate =", "[ 0 if np.isnan(metric[\"AP\"]) else metric[\"AP\"] for metric in evaluation ] mean_average_precision =", "epochs)) for epoch in range(epochs): train_one_epoch( model, optimizer, data_loader_train, device, epoch, print_freq=1000 )", "validation_ground_truth, ): model = get_model(num_classes) model.to(device) epochs = parameterization[\"num_epochs\"] learning_rate = parameterization[\"lr\"] optimizer", "add eval boolean flag, which reports eval stuff if wanted # TODO: look", "as np from traffic_lights.data.constants import CLASS_LABEL_MAP from traffic_lights.lib.engine import train_one_epoch from .model import", "lr={} and num_epochs={}\".format(learning_rate, epochs)) for epoch in range(epochs): train_one_epoch( model, optimizer, data_loader_train, device," ]
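A minimal usage sketch for the train() function above. The parameterization keys ("lr", "num_epochs") are the two values train() actually reads; everything else here (the run_training wrapper, the batch sizes, the collate_fn, and the dataset/ground-truth objects) is an assumption for illustration and not part of the original file.

# Hypothetical driver for train(); datasets and validation_ground_truth are
# assumed to be built elsewhere in the project.
import torch

def run_training(dataset_train, dataset_val, validation_ground_truth, num_classes):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Detection-style loaders: batches are tuples of (images, targets).
    collate = lambda batch: tuple(zip(*batch))
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, batch_size=2, shuffle=True, collate_fn=collate)
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, batch_size=1, shuffle=False, collate_fn=collate)
    parameterization = {"lr": 1e-4, "num_epochs": 10}  # the keys train() reads
    return train(parameterization, num_classes, device,
                 data_loader_train, data_loader_val, validation_ground_truth)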
[ "len(tmp[start_col:])) catergoryCount[3] = catergoryCount[3] + 1 elif rankOrder <= 59999 and rankOrder >=", "weight='bold') else: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning", "horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white',", "labels = plt.xticks() # remove the first location to get proper heatmap tick", "cofpos = int(cofheight / 2 + topcof) unbpos = int(unbheight / 2 +", "colors from color list my_cmap = mcolors.LinearSegmentedColormap.from_list( name='white_sth', colors=c, N=len(levs) - 1,) #", "categories[3] / totalsites * data01.shape[0] cofheight = categories[4] / totalsites * data01.shape[0] unbheight", "cols with equal intervals col_delete = np.linspace(0, N - 1, num=col_delete_num, dtype=int) #", "row_num = data0.shape[0] if col_num == -999: col_num = data0.shape[1] # rebin data0", "this helps to maintain the ticks to be odd ax = plt.axes([0, 0,", "in rec.split('\\t')] sites = sites + 1 if generateColorbar == '1': rankOrder =", "from matplotlib.ticker import (AutoMinorLocator, MultipleLocator) import numpy as np matplotlib.use('Agg') \"\"\" Program to", "horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white',", "plt.yticks([]) # to increase the width of the plot borders plt.setp(ax.spines.values(), linewidth=2) #", "positions for the values print(\"rp: {}, stm: {}, ess : {}, cof :", "1 elif rankOrder <= 29999 and rankOrder >= 20000: dataGenes.append([2] * len(tmp[start_col:])) catergoryCount[1]", "ax.xaxis.set_minor_locator(AutoMinorLocator(2)) ax.tick_params(which='major', length=10, width=2, color='black') ax.tick_params(which='minor', length=6, width=2, color='black') # Draw a horizontal", "horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white',", "plot borders plt.setp(ax.spines.values(), linewidth=2) # calculate how long the color box should be", "unbpos = int(unbheight / 2 + topunb) # positions for the values print(\"rp:", "= np.linspace(0, M - 1, num=row_delete_num, dtype=int) # sort the random selected deleted", "value if min(categories) == categories[5]: if categories[5] != 0: plt.text(25, unbpos, categories[5], horizontalalignment='center',", "color='white', rotation=90, weight='bold') # removing all the borders and frame for item in", "data0.shape[0] if col_num == -999: col_num = data0.shape[1] # rebin data0 (compresses the", "= categories[2] / totalsites * data01.shape[0] cycheight = categories[3] / totalsites * data01.shape[0]", "\"cofheight: {}, unbheight : {}\".format(unbheight, cofheight) # now calculate the \"top\" location of", "# Assigning the rotation based on minimum value if min(categories) == categories[2]: if", "and applying an arbitrary offset rppos = int(rpheight / 2) stmpos = int(stmheight", "filename') def cli(tagpileup_cdt, threshold_file, color, height, width, title, xlabel, ticks, dpi, colorbar, out):", "ax.xaxis.set_major_locator(MultipleLocator(locaters)) # get the initial ticks locs, labels = plt.xticks() # remove 
the", "compare the heatmap matrix a_compress = a.reshape((m, int(M / m), n, int(N /", "col_delete = np.linspace(0, N - 1, num=col_delete_num, dtype=int) # sort the random selected", "{} openfile = open(threshold_file, 'r').readlines() for line in openfile: line = line.strip() temp", "sites in the heatmap for rec in data: tmp = [(x.strip()) for x", "N = a.shape # compare the heatmap matrix a_compress = a.reshape((m, int(M /", "each by setting up a ratio: (this site)/(total sites) = (height of unknown", "= plt.figure(figsize=(col_num / 96, row_num / 96), dpi=300) # remove margins , #", "plot heatmap # little trick to create custom tick labels. # [ only", "RP, SAGA and TFIID catergoryCount = [0, 0, 0, 0, 0, 0] sites", "'--help']) @click.command(options_metavar='<options>', context_settings=CONTEXT_SETTINGS) @click.argument('tagpileup-cdt', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,)) @click.argument('threshold-file', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,))", "a heatmap from tagPileUp tabular file and contrast Threshold file. \"\"\" def rebin(a,", "remove columns M, N = a.shape # compare the heatmap matrix a_compress =", "rgb to hex (since matplotlib doesn't support 0-255 format for colors) s =", "# positions for the values print(\"rp: {}, stm: {}, ess : {}, cof", "= categories[5] / totalsites * data01.shape[0] # print \"cofheight: {}, unbheight : {}\".format(unbheight,", ": {} \\n length_locs : {} \\n labels : {} \\n length_labels:{}\\n\".format( locs,", "rankOrder <= 19999: dataGenes.append([1] * len(tmp[start_col:])) catergoryCount[0] = catergoryCount[0] + 1 elif rankOrder", "default='0,0,0', prompt=True, show_default='0,0,0', help='Plot Color') @click.option('-t', '--title', metavar=\"<string>\", default=' ', prompt=True, show_default=' ',", "!= 0: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else: plt.text(25, rppos,", "xlabel, heatmapTitle, generateColorbar): data = open(input_file, 'r') if header == 'T': data.readline() data0", "as np matplotlib.use('Agg') \"\"\" Program to Create a heatmap from tagPileUp tabular file", "1 # get deleted rows plus position # get deleted rows plus position", "totalsites * data01.shape[0] stmheight = categories[1] / totalsites * data01.shape[0] srgheight = categories[2]", "+ ticks labels[mid] = \"0\" labels[len(labels) - 1] = ticks # display the", "{}, trna : {}\".format( rppos, stmpos, srgpos, cycpos, cofpos, unbpos)) # The default", "plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else: plt.text(25, rppos, categories[0], horizontalalignment='center',", "a.reshape((m, int(M / m), n, int(N / n))).mean(3).mean(1) return np.array(a_compress) def plot_heatmap(data01, c,", "row_delete, axis=0) # random remove rows if col_delete_num > 0: # select deleted", "deleted cols plus position # get deleted cols plus position (top +1; end", "be odd ax = plt.axes([0, 0, 1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', vmin=lower_lim, vmax=upper_lim,", "unbheight = categories[5] / totalsites * data01.shape[0] # print \"cofheight: {}, unbheight :", "srgpos, cycpos, cofpos, unbpos)) # The default transform specifies that text is in", "borders plt.setp(ax.spines.values(), linewidth=2) plt.savefig(out_file_name, bbox_inches='tight', pad_inches=0.05, facecolor=None, dpi=ddpi) def plot_colorbar(data01, c, 
out_file_name, row_num,", "create custom tick labels. # [ only works if the difference between col", "tick mark value') @click.option('-d', '--dpi', metavar=\"<int>\", type=int, default=100, prompt=True, show_default='100', help='Plot pixel density')", "# get deleted cols plus position (top +1; end -1) col_delete_plus1 = np.append(", "n np.random.seed(seed=0) if row_delete_num > 0: # select deleted rows with equal intervals", "the gene colorbar (0: No, 1: Yes)\") @click.option('-o', '--out', metavar=\"<string>\", default='Heatmap.png', prompt=True, show_default='Heatmap.png',", "# compare the heatmap matrix a_compress = a.reshape((m, int(M / m), n, int(N", "\"\"\" Creates YEP Style All Feature heatmap containing genecategories. \\b Generates Colorbar for", "def load_Data(input_file, out_file, upper_lim, lower_lim, color, header, start_col, row_num, col_num, ticks, ddpi, xlabel,", "= mcolors.LinearSegmentedColormap.from_list( name='white_sth', colors=c, N=len(levs) - 1,) # initialize figure plt.figure(figsize=(col_num / 96,", "data # to store counts for RP, SAGA and TFIID catergoryCount = [0,", "open(input_file, 'r') if header == 'T': data.readline() data0 = [] dataGenes = []", "= a.shape m, n = new_shape if m >= M: # repeat rows", "def plot_heatmap(data01, c, out_file_name, upper_lim, lower_lim, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, sites):", "mean a[:, col_delete_plus1] = ( a[:, col_delete] + a[:, col_delete_plus1]) / 2 a", "= sum(categories) rpheight = categories[0] / totalsites * data01.shape[0] stmheight = categories[1] /", "/ 2 a = np.delete(a, row_delete, axis=0) # random remove rows if col_delete_num", "(300,150) & (300,100) etc] # calculate the major tick locations locaters = col_num", "= rpheight topsrg = topstm + stmheight topcyc = topsrg + srgheight topcof", "the next cols by mean a[:, col_delete_plus1] = ( a[:, col_delete] + a[:,", "= float(params['upper_threshold']) lower_lim = int(params['lower_threshold']) header = params['header'] start_col = int(params['start_col']) load_Data(tagpileup_cdt, out,", "the ticks to be odd ax = plt.axes([0, 0, 1, 1]) plt.imshow(data01, cmap=my_cmap,", "and set it to zero, since ax is helping to make sure there", "range(100) assert len(levs) % 2 == 0, 'N levels must be even.' 
#", "# initialize figure fig = plt.figure(figsize=(col_num / 96, row_num / 96), dpi=300) #", "len(catergoryCount)): if catergoryCount[i] != 0: colors.append(mycolors[i]) plot_colorbar(dataGenes, colors, \"colorbar.png\", 900, 35, catergoryCount) CONTEXT_SETTINGS", "each box, each top should be the ending position of the previous box", "= np.linspace(0, N - 1, num=col_delete_num, dtype=int) # sort the random selected deleted", "rotation based on minimum value if min(categories) == categories[2]: if categories[2] != 0:", "data0 = np.array(data0, dtype=float) print(\"# sites in the heatmap\", sites) # creating the", "def cli(tagpileup_cdt, threshold_file, color, height, width, title, xlabel, ticks, dpi, colorbar, out): \"\"\"", "0: colors.append(mycolors[i]) plot_colorbar(dataGenes, colors, \"colorbar.png\", 900, 35, catergoryCount) CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.command(options_metavar='<options>',", "M: # repeat rows in data matrix a = np.repeat(a, math.ceil(float(m) / M),", "for (300,150) & (300,100) etc] # calculate the major tick locations locaters =", "col_num = data0.shape[1] # rebin data0 (compresses the data using treeView compression algorithm)", "= int(len(labels) // 2) labels[0] = \"-\" + ticks labels[mid] = \"0\" labels[len(labels)", "hard-coded the width for colorbar(50) dataGenes = rebin(dataGenes, (row_num, 50)) elif row_num <", "generateColorbar == '1': rankOrder = int(rec.split(\"\\t\")[0]) if rankOrder <= 19999: dataGenes.append([1] * len(tmp[start_col:]))", "data.readline() data0 = [] dataGenes = [] # to store colorbar data #", "show_default=' ', help='Plot Title') @click.option('-xl', '--xlabel', metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Label", "the rotation based on minimum value if min(categories) == categories[1]: if categories[1] !=", "(0: No, 1: Yes)\") @click.option('-o', '--out', metavar=\"<string>\", default='Heatmap.png', prompt=True, show_default='Heatmap.png', help='output filename') def", "color='white', weight='bold') else: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', rotation=90, weight='bold') #", "'r') if header == 'T': data.readline() data0 = [] dataGenes = [] #", "value if min(categories) == categories[4]: if categories[4] != 0: plt.text(25, cofpos, categories[4], horizontalalignment='center',", "= \"{:,}\".format(sites) + \" sites\" plt.ylabel(ylabel, fontsize=14) plt.title(heatmapTitle, fontsize=18) # to increase the", "# The default transform specifies that text is in data co-ordinates, that is", "2) labels[0] = \"-\" + ticks labels[mid] = \"0\" labels[len(labels) - 1] =", "categories[4], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning the rotation based on", "resolve_path=True, file_okay=True, dir_okay=False,)) @click.argument('threshold-file', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,)) @click.option('-ph', '--height', metavar=\"<int>\", type=int, default=700,", "\\ 1 # get deleted cols plus position # get deleted cols plus", "import matplotlib.colors as mcolors import matplotlib.pyplot as plt from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)", "colors=c, N=len(levs) - 1,) # initialize figure fig = plt.figure(figsize=(col_num / 96, row_num", "dpi, colorbar, out): \"\"\" Creates YEP Style All Feature heatmap containing genecategories. 
\\b", "= np.delete(a, row_delete, axis=0) # random remove rows if col_delete_num > 0: #", "0: # select deleted rows with equal intervals row_delete = np.linspace(0, M -", "print \"cofheight: {}, unbheight : {}\".format(unbheight, cofheight) # now calculate the \"top\" location", "{}\".format( rppos, stmpos, srgpos, cycpos, cofpos, unbpos)) # The default transform specifies that", "(300,100) etc] # calculate the major tick locations locaters = col_num // 4", "i in range(0, len(catergoryCount)): if catergoryCount[i] != 0: colors.append(mycolors[i]) plot_colorbar(dataGenes, colors, \"colorbar.png\", 900,", "catergoryCount[5] + 1 data0.append(tmp[start_col:]) data0 = np.array(data0, dtype=float) print(\"# sites in the heatmap\",", "sites in the heatmap\", sites) # creating the np-array to plot the colorbar", "metavar=\"<string>\", default='0,0,0', prompt=True, show_default='0,0,0', help='Plot Color') @click.option('-t', '--title', metavar=\"<string>\", default=' ', prompt=True, show_default='", "labels[mid] = \"0\" labels[len(labels) - 1] = ticks # display the new ticks", "* len(tmp[start_col:])) catergoryCount[4] = catergoryCount[4] + 1 elif rankOrder <= 219999 and rankOrder", "srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center',", "cof : {}, unb : {}, trna : {}\".format( rppos, stmpos, srgpos, cycpos,", "line.split(\":\") if temp[0] not in params.keys(): params[temp[0]] = temp[1] print(\" \\n Parameters for", "numbers in the colored boxes and applying an arbitrary offset rppos = int(rpheight", "plt.xticks([]) plt.yticks([]) # to increase the width of the plot borders plt.setp(ax.spines.values(), linewidth=2)", "topcof = topcyc + cycheight topunb = topcof + cofheight # find the", "len(locs), labels, len(labels))) plt.yticks([]) plt.xlabel(xlabel, fontsize=14) ylabel = \"{:,}\".format(sites) + \" sites\" plt.ylabel(ylabel,", "N = a.shape m, n = new_shape row_delete_num = M % m col_delete_num", "= new_shape if m >= M: # repeat rows in data matrix a", "(row_num, 50)) elif row_num < data0.shape[0]: data0 = rebin(data0, (row_num, data0.shape[1])) if generateColorbar", "in the colored boxes and applying an arbitrary offset rppos = int(rpheight /", "the previous box topstm = rpheight topsrg = topstm + stmheight topcyc =", ">= 30000: dataGenes.append([3] * len(tmp[start_col:])) catergoryCount[2] = catergoryCount[2] + 1 elif rankOrder <=", "ticks # display the new ticks plt.xticks(locs, labels, fontsize=14) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) ax.tick_params(which='major', length=10, width=2,", "# calculate the major tick locations locaters = col_num // 4 ax.xaxis.set_major_locator(MultipleLocator(locaters)) #", "np.append( np.append(row_delete[0] + 1, row_delete_plus1), row_delete[-1] - 1) # put the info of", "plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=13, color='black', weight='bold') else: plt.text(25, stmpos, categories[1], horizontalalignment='center',", "in data matrix a = np.repeat(a, math.ceil(float(m) / M), axis=0) M, N =", "= int(cofheight / 2 + topcof) unbpos = int(unbheight / 2 + topunb)", "= params['header'] start_col = int(params['start_col']) load_Data(tagpileup_cdt, out, upper_lim, lower_lim, color, header, start_col, height,", "1, num=col_delete_num, dtype=int) # sort the random selected deleted col ids col_delete =", "rebin(data0, 
(data0.shape[0], col_num)) if generateColorbar == '1': dataGenes = rebin(dataGenes, (data0.shape[0], 50)) #", "color.split(\",\") color = '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2])) c = [\"white\", color] # generate heatmap", "plt.setp(ax.spines.values(), linewidth=2) # calculate how long the color box should be for each", "topsrg = topstm + stmheight topcyc = topsrg + srgheight topcof = topcyc", "data0.shape[1])) if generateColorbar == '1': dataGenes = rebin(dataGenes, (row_num, 50)) elif col_num <", "1,) # initialize figure plt.figure(figsize=(col_num / 96, row_num / 96), dpi=96) # remove", "['#ff2600', '#ffd54f', '#43a047', '#0096ff', '#9437ff', '#9e9e9e'] colors = [] # deciding colors based", "length_labels:{}\\n\".format( locs, len(locs), labels, len(labels))) plt.yticks([]) plt.xlabel(xlabel, fontsize=14) ylabel = \"{:,}\".format(sites) + \"", "colorbar, out): \"\"\" Creates YEP Style All Feature heatmap containing genecategories. \\b Generates", "plot heatmap plt.xticks([]) plt.yticks([]) # to increase the width of the plot borders", "categories[2], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning the rotation based on", "be for each by setting up a ratio: (this site)/(total sites) = (height", "the mid value and set it to zero, since ax is helping to", "line through the midpoint. plt.axvline(color='black', linestyle='--', x=locs[mid], linewidth=2) print(\"\\n DEBUG INFO \\n locs", "col_num < data0.shape[1]: data0 = rebin(data0, (data0.shape[0], col_num)) if generateColorbar == '1': dataGenes", "for item in [fig, ax]: item.patch.set_visible(False) # saving the file plt.savefig(out_file_name, bbox_inches='tight', facecolor=None,", "- 1, num=col_delete_num, dtype=int) # sort the random selected deleted col ids col_delete", "helping to make sure there are odd number of ticks. mid = int(len(labels)", "cmap=my_cmap, interpolation='nearest', aspect='auto') # plot heatmap plt.xticks([]) plt.yticks([]) # to increase the width", "\\n locs : {} \\n length_locs : {} \\n labels : {} \\n", "with equal intervals row_delete = np.linspace(0, M - 1, num=row_delete_num, dtype=int) # sort", "50)) elif row_num < data0.shape[0]: data0 = rebin(data0, (row_num, data0.shape[1])) if generateColorbar ==", "Height') @click.option('-pw', '--width', metavar=\"<int>\", type=int, default=300, prompt=True, show_default='True', help='Plot Width') @click.option('-c', '--color', metavar=\"<string>\",", "sites = sites + 1 if generateColorbar == '1': rankOrder = int(rec.split(\"\\t\")[0]) if", "len(tmp[start_col:])) catergoryCount[4] = catergoryCount[4] + 1 elif rankOrder <= 219999 and rankOrder >=", "0: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cofpos, categories[4],", "to Create a heatmap from tagPileUp tabular file and contrast Threshold file. 
\"\"\"", "generateColorbar): data = open(input_file, 'r') if header == 'T': data.readline() data0 = []", "'#43a047', '#0096ff', '#9437ff', '#9e9e9e'] colors = [] # deciding colors based on the", "site)/(total sites) = (height of unknown box)/(feature box height) totalsites = sum(categories) rpheight", "etc] # calculate the major tick locations locaters = col_num // 4 ax.xaxis.set_major_locator(MultipleLocator(locaters))", "# remove margins , # this helps to maintain the ticks to be", "on minimum value if min(categories) == categories[0]: if categories[0] != 0: plt.text(25, rppos,", "unbheight : {}\".format(unbheight, cofheight) # now calculate the \"top\" location of each box,", "saving the file plt.savefig(out_file_name, bbox_inches='tight', facecolor=None, dpi=300) def load_Data(input_file, out_file, upper_lim, lower_lim, color,", "into the next cols by mean a[:, col_delete_plus1] = ( a[:, col_delete] +", "value if min(categories) == categories[2]: if categories[2] != 0: plt.text(25, srgpos, categories[2], horizontalalignment='center',", "tick labels. # [ only works if the difference between col and row", "for line in openfile: line = line.strip() temp = line.split(\":\") if temp[0] not", "position (top +1; end -1) row_delete_plus1 = np.append( np.append(row_delete[0] + 1, row_delete_plus1), row_delete[-1]", "to increase the width of the plot borders plt.setp(ax.spines.values(), linewidth=2) # calculate how", "for RP, SAGA and TFIID catergoryCount = [0, 0, 0, 0, 0, 0]", "- 1,) # initialize figure plt.figure(figsize=(col_num / 96, row_num / 96), dpi=96) #", "(row_num, data0.shape[1])) if generateColorbar == '1': dataGenes = rebin(dataGenes, (row_num, 50)) elif col_num", "horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white',", "Assigning the rotation based on minimum value if min(categories) == categories[2]: if categories[2]", "ticks to be odd ax = plt.axes([0, 0, 1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest',", "fontsize=13, color='white', weight='bold') else: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold')", "width for colorbar(50) dataGenes = rebin(dataGenes, (row_num, 50)) elif row_num < data0.shape[0]: data0", "if generateColorbar == '1': # i have hard-coded the width for colorbar(50) dataGenes", "to be odd ax = plt.axes([0, 0, 1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', vmin=lower_lim,", "columns M, N = a.shape # compare the heatmap matrix a_compress = a.reshape((m,", "== categories[4]: if categories[4] != 0: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white',", "in data: tmp = [(x.strip()) for x in rec.split('\\t')] sites = sites +", "plt from matplotlib.ticker import (AutoMinorLocator, MultipleLocator) import numpy as np matplotlib.use('Agg') \"\"\" Program", "100), fails for (300,150) & (300,100) etc] # calculate the major tick locations", "box)/(feature box height) totalsites = sum(categories) rpheight = categories[0] / totalsites * data01.shape[0]", "algorithm) if row_num < data0.shape[0] and col_num < data0.shape[1]: data0 = rebin(data0, (row_num,", "weight='bold') else: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', 
rotation=90, weight='bold') # Assigning", "0: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=13, color='black', weight='bold') else: plt.text(25, stmpos, categories[1],", "colors) s = color.split(\",\") color = '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2])) c = [\"white\", color]", "file_okay=True, dir_okay=False,)) @click.option('-ph', '--height', metavar=\"<int>\", type=int, default=700, prompt=True, show_default='True', help='Plot Height') @click.option('-pw', '--width',", "col_num, ticks, ddpi, xlabel, heatmapTitle, generateColorbar): data = open(input_file, 'r') if header ==", "topstm = rpheight topsrg = topstm + stmheight topcyc = topsrg + srgheight", "rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center',", "96, row_num / 96), dpi=300) # remove margins , # this helps to", "(since matplotlib doesn't support 0-255 format for colors) s = color.split(\",\") color =", "ticks. mid = int(len(labels) // 2) labels[0] = \"-\" + ticks labels[mid] =", "35, catergoryCount) CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.command(options_metavar='<options>', context_settings=CONTEXT_SETTINGS) @click.argument('tagpileup-cdt', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,))", "random selected deleted row ids row_delete = np.sort(row_delete) row_delete_plus1 = row_delete[1:-1] + \\", "default='0', help=\"Generate the gene colorbar (0: No, 1: Yes)\") @click.option('-o', '--out', metavar=\"<string>\", default='Heatmap.png',", "out, upper_lim, lower_lim, color, header, start_col, height, width, ticks, dpi, xlabel, title, colorbar)", "the colored boxes and applying an arbitrary offset rppos = int(rpheight / 2)", "np.array(dataGenes, dtype=float) print(\"catergoryCount : {}\".format(catergoryCount)) if row_num == -999: row_num = data0.shape[0] if", "1, num=row_delete_num, dtype=int) # sort the random selected deleted row ids row_delete =", "for the values print(\"rp: {}, stm: {}, ess : {}, cof : {},", "dpi=300) # remove margins , # this helps to maintain the ticks to", "and frame for item in [fig, ax]: item.patch.set_visible(False) # saving the file plt.savefig(out_file_name,", "# get deleted rows plus position (top +1; end -1) row_delete_plus1 = np.append(", "<= 39999 and rankOrder >= 30000: dataGenes.append([3] * len(tmp[start_col:])) catergoryCount[2] = catergoryCount[2] +", "previous box topstm = rpheight topsrg = topstm + stmheight topcyc = topsrg", "the heatmap matrix a_compress = a.reshape((m, int(M / m), n, int(N / n))).mean(3).mean(1)", "= int(stmheight / 2 + topstm) srgpos = int(srgheight / 2 + topsrg)", "checking if we need to plot the color bar if generateColorbar == '1':", "<= 29999 and rankOrder >= 20000: dataGenes.append([2] * len(tmp[start_col:])) catergoryCount[1] = catergoryCount[1] +", "TFIID catergoryCount = [0, 0, 0, 0, 0, 0] sites = 0 #", "division import math import pprint import click import matplotlib import matplotlib.colors as mcolors", "0 # to calculate the # of sites in the heatmap for rec", "selected deleted row ids row_delete = np.sort(row_delete) row_delete_plus1 = row_delete[1:-1] + \\ 1", "[] # to store colorbar data # to store counts for RP, SAGA", "19999: dataGenes.append([1] * len(tmp[start_col:])) catergoryCount[0] = catergoryCount[0] + 1 elif rankOrder <= 29999", "contrast 
Threshold file. \"\"\" def rebin(a, new_shape): M, N = a.shape m, n", "int(params['lower_threshold']) header = params['header'] start_col = int(params['start_col']) load_Data(tagpileup_cdt, out, upper_lim, lower_lim, color, header,", "categories[2] != 0: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25,", "by mean a[row_delete_plus1, :] = ( a[row_delete, :] + a[row_delete_plus1, :]) / 2", ">= 50000: dataGenes.append([5] * len(tmp[start_col:])) catergoryCount[4] = catergoryCount[4] + 1 elif rankOrder <=", "cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cycpos, categories[3], horizontalalignment='center', verticalalignment='center',", "xlabel, heatmapTitle, sites): # initialize color levs = range(100) assert len(levs) % 2", "dir_okay=False,)) @click.argument('threshold-file', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,)) @click.option('-ph', '--height', metavar=\"<int>\", type=int, default=700, prompt=True, show_default='True',", "get deleted cols plus position # get deleted cols plus position (top +1;", "# now calculate the \"top\" location of each box, each top should be", "print(\"\\n DEBUG INFO \\n locs : {} \\n length_locs : {} \\n labels", "upper_lim = float(params['upper_threshold']) lower_lim = int(params['lower_threshold']) header = params['header'] start_col = int(params['start_col']) load_Data(tagpileup_cdt,", "= rebin(dataGenes, (row_num, 50)) elif row_num < data0.shape[0]: data0 = rebin(data0, (row_num, data0.shape[1]))", "prompt=True, show_default='Heatmap.png', help='output filename') def cli(tagpileup_cdt, threshold_file, color, height, width, title, xlabel, ticks,", "a = np.repeat(a, math.ceil(float(m) / M), axis=0) M, N = a.shape m, n", "= plt.axes([0, 0, 1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', vmin=lower_lim, vmax=upper_lim, aspect='auto') # plot", "cli(tagpileup_cdt, threshold_file, color, height, width, title, xlabel, ticks, dpi, colorbar, out): \"\"\" Creates", "20000: dataGenes.append([2] * len(tmp[start_col:])) catergoryCount[1] = catergoryCount[1] + 1 elif rankOrder <= 39999", "odd ax = plt.axes([0, 0, 1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', aspect='auto') # plot", "900, 35, catergoryCount) CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.command(options_metavar='<options>', context_settings=CONTEXT_SETTINGS) @click.argument('tagpileup-cdt', type=click.Path(exists=True, resolve_path=True, file_okay=True,", "= plt.xticks() # remove the first location to get proper heatmap tick position.", "on datapoint in (x,y) like a graph # Assigning the rotation based on", "* len(tmp[start_col:])) catergoryCount[2] = catergoryCount[2] + 1 elif rankOrder <= 49999 and rankOrder", "# random remove rows if col_delete_num > 0: # select deleted cols with", "row ids row_delete = np.sort(row_delete) row_delete_plus1 = row_delete[1:-1] + \\ 1 # get", "rankOrder >= 40000: dataGenes.append([4] * len(tmp[start_col:])) catergoryCount[3] = catergoryCount[3] + 1 elif rankOrder", "minimum value if min(categories) == categories[2]: if categories[2] != 0: plt.text(25, srgpos, categories[2],", "ticks, dpi, colorbar, out): \"\"\" Creates YEP Style All Feature heatmap containing genecategories.", "= np.sort(row_delete) row_delete_plus1 = row_delete[1:-1] + \\ 1 # get deleted rows plus", "rotation based 
on minimum value if min(categories) == categories[3]: if categories[3] != 0:", "(data0.shape[0], col_num)) if generateColorbar == '1': dataGenes = rebin(dataGenes, (data0.shape[0], 50)) # set", "the heatmap for rec in data: tmp = [(x.strip()) for x in rec.split('\\t')]", "- 1,) # initialize figure fig = plt.figure(figsize=(col_num / 96, row_num / 96),", "colorbar data # to store counts for RP, SAGA and TFIID catergoryCount =", "lower_lim, color, header, start_col, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, generateColorbar): data =", "row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, generateColorbar): data = open(input_file, 'r') if header", "metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Label under X-axis') @click.option('-k', '--ticks', metavar=\"<string>\", default='2',", "elif rankOrder <= 219999 and rankOrder >= 210000: dataGenes.append([6] * len(tmp[start_col:])) catergoryCount[5] =", "row_num / 96), dpi=300) # remove margins , # this helps to maintain", "= [] # deciding colors based on the catergory values. for i in", "@click.option('-xl', '--xlabel', metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Label under X-axis') @click.option('-k', '--ticks',", "heatmap from tagPileUp tabular file and contrast Threshold file. \"\"\" def rebin(a, new_shape):", "my_cmap = mcolors.LinearSegmentedColormap.from_list( name='white_sth', colors=c, N=len(levs) - 1,) # initialize figure fig =", "list my_cmap = mcolors.LinearSegmentedColormap.from_list( name='white_sth', colors=c, N=len(levs) - 1,) # initialize figure plt.figure(figsize=(col_num", "0, 0] sites = 0 # to calculate the # of sites in", "all the borders and frame for item in [fig, ax]: item.patch.set_visible(False) # saving", "rankOrder <= 39999 and rankOrder >= 30000: dataGenes.append([3] * len(tmp[start_col:])) catergoryCount[2] = catergoryCount[2]", "row_delete = np.sort(row_delete) row_delete_plus1 = row_delete[1:-1] + \\ 1 # get deleted rows", "open(threshold_file, 'r').readlines() for line in openfile: line = line.strip() temp = line.split(\":\") if", "deleted col ids col_delete = np.sort(col_delete) col_delete_plus1 = col_delete[1:-1] + \\ 1 #", "to store counts for RP, SAGA and TFIID catergoryCount = [0, 0, 0,", "'#9e9e9e'] colors = [] # deciding colors based on the catergory values. 
for", "heatmap\", sites) # creating the np-array to plot the colorbar dataGenes = np.array(dataGenes,", "/ 2 + topcof) unbpos = int(unbheight / 2 + topunb) # positions", "print(\" \\n Parameters for the heatmap\") pprint.pprint(params) upper_lim = float(params['upper_threshold']) lower_lim = int(params['lower_threshold'])", "* data01.shape[0] cycheight = categories[3] / totalsites * data01.shape[0] cofheight = categories[4] /", "# rebin data0 (compresses the data using treeView compression algorithm) if row_num <", "n = new_shape row_delete_num = M % m col_delete_num = N % n", "'#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2])) c = [\"white\", color] # generate heatmap plot_heatmap(data0, c, out_file,", "catergoryCount[1] = catergoryCount[1] + 1 elif rankOrder <= 39999 and rankOrder >= 30000:", "categories[0] / totalsites * data01.shape[0] stmheight = categories[1] / totalsites * data01.shape[0] srgheight", "intervals col_delete = np.linspace(0, N - 1, num=col_delete_num, dtype=int) # sort the random", "initialize color levs = range(100) assert len(levs) % 2 == 0, 'N levels", "the rotation based on minimum value if min(categories) == categories[5]: if categories[5] !=", "30000: dataGenes.append([3] * len(tmp[start_col:])) catergoryCount[2] = catergoryCount[2] + 1 elif rankOrder <= 49999", "import division import math import pprint import click import matplotlib import matplotlib.colors as", "to plot the color bar if generateColorbar == '1': print(\"Creating the colobar\") mycolors", "2 + topstm) srgpos = int(srgheight / 2 + topsrg) cycpos = int(cycheight", "@click.option('-pw', '--width', metavar=\"<int>\", type=int, default=300, prompt=True, show_default='True', help='Plot Width') @click.option('-c', '--color', metavar=\"<string>\", default='0,0,0',", "topcof + cofheight # find the actual position of the numbers by centering", "% n np.random.seed(seed=0) if row_delete_num > 0: # select deleted rows with equal", "totalsites * data01.shape[0] srgheight = categories[2] / totalsites * data01.shape[0] cycheight = categories[3]", "# to increase the width of the plot borders plt.setp(ax.spines.values(), linewidth=2) plt.savefig(out_file_name, bbox_inches='tight',", "if categories[5] != 0: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else:", "the colorbar dataGenes = np.array(dataGenes, dtype=float) print(\"catergoryCount : {}\".format(catergoryCount)) if row_num == -999:", "'--color', metavar=\"<string>\", default='0,0,0', prompt=True, show_default='0,0,0', help='Plot Color') @click.option('-t', '--title', metavar=\"<string>\", default=' ', prompt=True,", "get proper heatmap tick position. 
locs = np.delete(locs, 0) labels.pop() # find the", "a[:, col_delete] + a[:, col_delete_plus1]) / 2 a = np.delete(a, col_delete, axis=1) #", "+ a[row_delete_plus1, :]) / 2 a = np.delete(a, row_delete, axis=0) # random remove", "// 4 ax.xaxis.set_major_locator(MultipleLocator(locaters)) # get the initial ticks locs, labels = plt.xticks() #", "plot the colorbar dataGenes = np.array(dataGenes, dtype=float) print(\"catergoryCount : {}\".format(catergoryCount)) if row_num ==", "box height) totalsites = sum(categories) rpheight = categories[0] / totalsites * data01.shape[0] stmheight", "elif rankOrder <= 49999 and rankOrder >= 40000: dataGenes.append([4] * len(tmp[start_col:])) catergoryCount[3] =", "import pprint import click import matplotlib import matplotlib.colors as mcolors import matplotlib.pyplot as", "color='black', weight='bold') else: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=16, color='black', rotation=90, weight='bold') #", "location to get proper heatmap tick position. locs = np.delete(locs, 0) labels.pop() #", "0] sites = 0 # to calculate the # of sites in the", "new_shape): M, N = a.shape m, n = new_shape if m >= M:", "trna : {}\".format( rppos, stmpos, srgpos, cycpos, cofpos, unbpos)) # The default transform", "mcolors.LinearSegmentedColormap.from_list( name='white_sth', colors=c, N=len(levs) - 1,) # initialize figure fig = plt.figure(figsize=(col_num /", "# select deleted cols with equal intervals col_delete = np.linspace(0, N - 1,", "catergoryCount = [0, 0, 0, 0, 0, 0] sites = 0 # to", "= rebin(data0, (row_num, data0.shape[1])) if generateColorbar == '1': dataGenes = rebin(dataGenes, (row_num, 50))", "'--title', metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Plot Title') @click.option('-xl', '--xlabel', metavar=\"<string>\", default='", "fontsize=13, color='white', weight='bold') else: plt.text(25, cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold')", "based on minimum value if min(categories) == categories[0]: if categories[0] != 0: plt.text(25,", "of the plot borders plt.setp(ax.spines.values(), linewidth=2) plt.savefig(out_file_name, bbox_inches='tight', pad_inches=0.05, facecolor=None, dpi=ddpi) def plot_colorbar(data01,", "+ 1 elif rankOrder <= 49999 and rankOrder >= 40000: dataGenes.append([4] * len(tmp[start_col:]))", "if temp[0] not in params.keys(): params[temp[0]] = temp[1] print(\" \\n Parameters for the", "plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning the rotation", "@click.argument('threshold-file', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,)) @click.option('-ph', '--height', metavar=\"<int>\", type=int, default=700, prompt=True, show_default='True', help='Plot", "== -999: col_num = data0.shape[1] # rebin data0 (compresses the data using treeView", "header = params['header'] start_col = int(params['start_col']) load_Data(tagpileup_cdt, out, upper_lim, lower_lim, color, header, start_col,", "the new ticks plt.xticks(locs, labels, fontsize=14) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) ax.tick_params(which='major', length=10, width=2, color='black') ax.tick_params(which='minor', length=6,", "length=6, width=2, color='black') # Draw a horizontal line through the midpoint. 
plt.axvline(color='black', linestyle='--',", "'#9437ff', '#9e9e9e'] colors = [] # deciding colors based on the catergory values.", "int(rec.split(\"\\t\")[0]) if rankOrder <= 19999: dataGenes.append([1] * len(tmp[start_col:])) catergoryCount[0] = catergoryCount[0] + 1", "default transform specifies that text is in data co-ordinates, that is even though", "help='Plot Width') @click.option('-c', '--color', metavar=\"<string>\", default='0,0,0', prompt=True, show_default='0,0,0', help='Plot Color') @click.option('-t', '--title', metavar=\"<string>\",", "m, n = new_shape row_delete_num = M % m col_delete_num = N %", "my_cmap = mcolors.LinearSegmentedColormap.from_list( name='white_sth', colors=c, N=len(levs) - 1,) # initialize figure plt.figure(figsize=(col_num /", "int(params['start_col']) load_Data(tagpileup_cdt, out, upper_lim, lower_lim, color, header, start_col, height, width, ticks, dpi, xlabel,", "1) # put the info of deleted cols into the next cols by", "file. \"\"\" def rebin(a, new_shape): M, N = a.shape m, n = new_shape", "support 0-255 format for colors) s = color.split(\",\") color = '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2]))", "and rankOrder >= 210000: dataGenes.append([6] * len(tmp[start_col:])) catergoryCount[5] = catergoryCount[5] + 1 data0.append(tmp[start_col:])", "2) stmpos = int(stmheight / 2 + topstm) srgpos = int(srgheight / 2", "sites = 0 # to calculate the # of sites in the heatmap", "= int(rec.split(\"\\t\")[0]) if rankOrder <= 19999: dataGenes.append([1] * len(tmp[start_col:])) catergoryCount[0] = catergoryCount[0] +", "= topstm + stmheight topcyc = topsrg + srgheight topcof = topcyc +", "categories[2] / totalsites * data01.shape[0] cycheight = categories[3] / totalsites * data01.shape[0] cofheight", "col_num)) if generateColorbar == '1': dataGenes = rebin(dataGenes, (data0.shape[0], 50)) # set color", "equal intervals row_delete = np.linspace(0, M - 1, num=row_delete_num, dtype=int) # sort the", "rpheight topsrg = topstm + stmheight topcyc = topsrg + srgheight topcof =", "maintain the ticks to be odd ax = plt.axes([0, 0, 1, 1]) plt.imshow(data01,", "= np.array(dataGenes, dtype=float) print(\"catergoryCount : {}\".format(catergoryCount)) if row_num == -999: row_num = data0.shape[0]", "fontsize=14) ylabel = \"{:,}\".format(sites) + \" sites\" plt.ylabel(ylabel, fontsize=14) plt.title(heatmapTitle, fontsize=18) # to", "YEP Style All Feature heatmap containing genecategories. 
\\b Generates Colorbar for the gene", "color, height, width, title, xlabel, ticks, dpi, colorbar, out): \"\"\" Creates YEP Style", "are plotted based on datapoint in (x,y) like a graph # Assigning the", "figure fig = plt.figure(figsize=(col_num / 96, row_num / 96), dpi=300) # remove margins", "Yes)\") @click.option('-o', '--out', metavar=\"<string>\", default='Heatmap.png', prompt=True, show_default='Heatmap.png', help='output filename') def cli(tagpileup_cdt, threshold_file, color,", "/ 2 + topsrg) cycpos = int(cycheight / 2 + topcyc) cofpos =", "s = color.split(\",\") color = '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2])) c = [\"white\", color] #", "ax = plt.axes([0, 0, 1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', aspect='auto') # plot heatmap", "locs : {} \\n length_locs : {} \\n labels : {} \\n length_labels:{}\\n\".format(", "help='Label under X-axis') @click.option('-k', '--ticks', metavar=\"<string>\", default='2', prompt=True, show_default='2', help='X-axis tick mark value')", "totalsites * data01.shape[0] cofheight = categories[4] / totalsites * data01.shape[0] unbheight = categories[5]", "= topcof + cofheight # find the actual position of the numbers by", "calculate how long the color box should be for each by setting up", "ddpi, xlabel, heatmapTitle, sites): # initialize color levs = range(100) assert len(levs) %", "+ cofheight # find the actual position of the numbers by centering the", "type=int, default=300, prompt=True, show_default='True', help='Plot Width') @click.option('-c', '--color', metavar=\"<string>\", default='0,0,0', prompt=True, show_default='0,0,0', help='Plot", "xlabel, ticks, dpi, colorbar, out): \"\"\" Creates YEP Style All Feature heatmap containing", "data: tmp = [(x.strip()) for x in rec.split('\\t')] sites = sites + 1", "a[:, col_delete_plus1]) / 2 a = np.delete(a, col_delete, axis=1) # random remove columns", "@click.option('-ph', '--height', metavar=\"<int>\", type=int, default=700, prompt=True, show_default='True', help='Plot Height') @click.option('-pw', '--width', metavar=\"<int>\", type=int,", "cols plus position # get deleted cols plus position (top +1; end -1)", "= [0, 0, 0, 0, 0, 0] sites = 0 # to calculate", "= catergoryCount[1] + 1 elif rankOrder <= 39999 and rankOrder >= 30000: dataGenes.append([3]", "np.delete(a, col_delete, axis=1) # random remove columns M, N = a.shape # compare", "rppos, stmpos, srgpos, cycpos, cofpos, unbpos)) # The default transform specifies that text", "\"-\" + ticks labels[mid] = \"0\" labels[len(labels) - 1] = ticks # display", "- 1) # put the info of deleted rows into the next rows", "color='black') # Draw a horizontal line through the midpoint. 
plt.axvline(color='black', linestyle='--', x=locs[mid], linewidth=2)", "= [\"white\", color] # generate heatmap plot_heatmap(data0, c, out_file, upper_lim, lower_lim, row_num, col_num,", "colors.append(mycolors[i]) plot_colorbar(dataGenes, colors, \"colorbar.png\", 900, 35, catergoryCount) CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.command(options_metavar='<options>', context_settings=CONTEXT_SETTINGS)", "verticalalignment='center', fontsize=10, color='white', weight='bold') else: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', rotation=90,", "header, start_col, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, generateColorbar): data = open(input_file, 'r')", "plt.figure(figsize=(col_num / 96, row_num / 96), dpi=96) # remove margins , # this", "Colorbar for the gene categories. \"\"\" click.echo('\\n' + '.' * 50) params =", "'.' * 50) params = {} openfile = open(threshold_file, 'r').readlines() for line in", "matplotlib doesn't support 0-255 format for colors) s = color.split(\",\") color = '#{:02X}{:02X}{:02X}'.format(int(s[0]),", "# get deleted rows plus position # get deleted rows plus position (top", "plus position (top +1; end -1) col_delete_plus1 = np.append( np.append(col_delete[0] + 1, col_delete_plus1),", "metavar=\"<string>\", default='Heatmap.png', prompt=True, show_default='Heatmap.png', help='output filename') def cli(tagpileup_cdt, threshold_file, color, height, width, title,", "219999 and rankOrder >= 210000: dataGenes.append([6] * len(tmp[start_col:])) catergoryCount[5] = catergoryCount[5] + 1", "data0.shape[1]: data0 = rebin(data0, (data0.shape[0], col_num)) if generateColorbar == '1': dataGenes = rebin(dataGenes,", "labels, len(labels))) plt.yticks([]) plt.xlabel(xlabel, fontsize=14) ylabel = \"{:,}\".format(sites) + \" sites\" plt.ylabel(ylabel, fontsize=14)", "load_Data(tagpileup_cdt, out, upper_lim, lower_lim, color, header, start_col, height, width, ticks, dpi, xlabel, title,", "# this helps to maintain the ticks to be odd ax = plt.axes([0,", "if rankOrder <= 19999: dataGenes.append([1] * len(tmp[start_col:])) catergoryCount[0] = catergoryCount[0] + 1 elif", "color = '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2])) c = [\"white\", color] # generate heatmap plot_heatmap(data0,", "'N levels must be even.' 
# select colors from color list my_cmap =", "matplotlib.colors as mcolors import matplotlib.pyplot as plt from matplotlib.ticker import (AutoMinorLocator, MultipleLocator) import", "plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', rotation=90, weight='bold') # removing all the", "end -1) col_delete_plus1 = np.append( np.append(col_delete[0] + 1, col_delete_plus1), col_delete[-1] - 1) #", "== '1': rankOrder = int(rec.split(\"\\t\")[0]) if rankOrder <= 19999: dataGenes.append([1] * len(tmp[start_col:])) catergoryCount[0]", "plt.title(heatmapTitle, fontsize=18) # to increase the width of the plot borders plt.setp(ax.spines.values(), linewidth=2)", "4 ax.xaxis.set_major_locator(MultipleLocator(locaters)) # get the initial ticks locs, labels = plt.xticks() # remove", "ax]: item.patch.set_visible(False) # saving the file plt.savefig(out_file_name, bbox_inches='tight', facecolor=None, dpi=300) def load_Data(input_file, out_file,", "generateColorbar == '1': dataGenes = rebin(dataGenes, (row_num, 50)) elif col_num < data0.shape[1]: data0", "fontsize=10, color='white', weight='bold') # Assigning the rotation based on minimum value if min(categories)", "the major tick locations locaters = col_num // 4 ax.xaxis.set_major_locator(MultipleLocator(locaters)) # get the", "the rotation based on minimum value if min(categories) == categories[2]: if categories[2] !=", "vmin=lower_lim, vmax=upper_lim, aspect='auto') # plot heatmap # little trick to create custom tick", "and rankOrder >= 20000: dataGenes.append([2] * len(tmp[start_col:])) catergoryCount[1] = catergoryCount[1] + 1 elif", "categories[1], horizontalalignment='center', verticalalignment='center', fontsize=16, color='black', rotation=90, weight='bold') # Assigning the rotation based on", "into the next rows by mean a[row_delete_plus1, :] = ( a[row_delete, :] +", "color='black', rotation=90, weight='bold') # Assigning the rotation based on minimum value if min(categories)", "categories[4] / totalsites * data01.shape[0] unbheight = categories[5] / totalsites * data01.shape[0] #", "* len(tmp[start_col:])) catergoryCount[0] = catergoryCount[0] + 1 elif rankOrder <= 29999 and rankOrder", "horizontalalignment='center', verticalalignment='center', fontsize=16, color='black', rotation=90, weight='bold') # Assigning the rotation based on minimum", "= rebin(data0, (data0.shape[0], col_num)) if generateColorbar == '1': dataGenes = rebin(dataGenes, (data0.shape[0], 50))", "a.shape # compare the heatmap matrix a_compress = a.reshape((m, int(M / m), n,", "col_delete, axis=1) # random remove columns M, N = a.shape # compare the", "= catergoryCount[2] + 1 elif rankOrder <= 49999 and rankOrder >= 40000: dataGenes.append([4]", ">= 40000: dataGenes.append([4] * len(tmp[start_col:])) catergoryCount[3] = catergoryCount[3] + 1 elif rankOrder <=", "next rows by mean a[row_delete_plus1, :] = ( a[row_delete, :] + a[row_delete_plus1, :])", "* data01.shape[0] cofheight = categories[4] / totalsites * data01.shape[0] unbheight = categories[5] /", "+ topunb) # positions for the values print(\"rp: {}, stm: {}, ess :", "categories[1], horizontalalignment='center', verticalalignment='center', fontsize=13, color='black', weight='bold') else: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=16,", "rows by mean a[row_delete_plus1, :] = ( a[row_delete, :] + a[row_delete_plus1, :]) /", "set color here # convert rgb to hex (since matplotlib doesn't support 
0-255", "deleted cols plus position (top +1; end -1) col_delete_plus1 = np.append( np.append(col_delete[0] +", "plt.yticks([]) plt.xlabel(xlabel, fontsize=14) ylabel = \"{:,}\".format(sites) + \" sites\" plt.ylabel(ylabel, fontsize=14) plt.title(heatmapTitle, fontsize=18)", "& (300,100) etc] # calculate the major tick locations locaters = col_num //", "based on minimum value if min(categories) == categories[5]: if categories[5] != 0: plt.text(25,", "offset rppos = int(rpheight / 2) stmpos = int(stmheight / 2 + topstm)", "the rotation based on minimum value if min(categories) == categories[4]: if categories[4] !=", "\\n Parameters for the heatmap\") pprint.pprint(params) upper_lim = float(params['upper_threshold']) lower_lim = int(params['lower_threshold']) header", "= np.append( np.append(col_delete[0] + 1, col_delete_plus1), col_delete[-1] - 1) # put the info", "= plt.axes([0, 0, 1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', aspect='auto') # plot heatmap plt.xticks([])", "np.append( np.append(col_delete[0] + 1, col_delete_plus1), col_delete[-1] - 1) # put the info of", "# display the new ticks plt.xticks(locs, labels, fontsize=14) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) ax.tick_params(which='major', length=10, width=2, color='black')", "out_file, upper_lim, lower_lim, color, header, start_col, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, generateColorbar):", "in [fig, ax]: item.patch.set_visible(False) # saving the file plt.savefig(out_file_name, bbox_inches='tight', facecolor=None, dpi=300) def", "verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning the rotation based on minimum value", "deleted rows with equal intervals row_delete = np.linspace(0, M - 1, num=row_delete_num, dtype=int)", "a[:, col_delete_plus1] = ( a[:, col_delete] + a[:, col_delete_plus1]) / 2 a =", "fontsize=16, color='white', rotation=90, weight='bold') # Assigning the rotation based on minimum value if", "M - 1, num=row_delete_num, dtype=int) # sort the random selected deleted row ids", "matplotlib.pyplot as plt from matplotlib.ticker import (AutoMinorLocator, MultipleLocator) import numpy as np matplotlib.use('Agg')", "dpi=ddpi) def plot_colorbar(data01, c, out_file_name, row_num, col_num, categories): # initialize color levs =", "Assigning the rotation based on minimum value if min(categories) == categories[1]: if categories[1]", "interpolation='nearest', aspect='auto') # plot heatmap plt.xticks([]) plt.yticks([]) # to increase the width of", "new_shape row_delete_num = M % m col_delete_num = N % n np.random.seed(seed=0) if", "dpi=96) # remove margins , # this helps to maintain the ticks to", "linestyle='--', x=locs[mid], linewidth=2) print(\"\\n DEBUG INFO \\n locs : {} \\n length_locs :", "and TFIID catergoryCount = [0, 0, 0, 0, 0, 0] sites = 0", "pixel density') @click.option('-cb', '--colorbar', type=click.Choice(['0', '1'], case_sensitive=False), prompt=True, default='0', help=\"Generate the gene colorbar", "how long the color box should be for each by setting up a", "# plot heatmap # little trick to create custom tick labels. 
# [", "fontsize=10, color='white', weight='bold') else: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') #", "# calculate how long the color box should be for each by setting", "the random selected deleted row ids row_delete = np.sort(row_delete) row_delete_plus1 = row_delete[1:-1] +", "upper_lim, lower_lim, color, header, start_col, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, generateColorbar): data", "lower_lim, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, sites): # initialize color levs =", "up a ratio: (this site)/(total sites) = (height of unknown box)/(feature box height)", "the width for colorbar(50) dataGenes = rebin(dataGenes, (row_num, 50)) elif row_num < data0.shape[0]:", "numpy as np matplotlib.use('Agg') \"\"\" Program to Create a heatmap from tagPileUp tabular", "col_delete = np.sort(col_delete) col_delete_plus1 = col_delete[1:-1] + \\ 1 # get deleted cols", "for rec in data: tmp = [(x.strip()) for x in rec.split('\\t')] sites =", "help='Plot Height') @click.option('-pw', '--width', metavar=\"<int>\", type=int, default=300, prompt=True, show_default='True', help='Plot Width') @click.option('-c', '--color',", "arbitrary offset rppos = int(rpheight / 2) stmpos = int(stmheight / 2 +", "stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=16, color='black', rotation=90, weight='bold') # Assigning the rotation based", "= '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2])) c = [\"white\", color] # generate heatmap plot_heatmap(data0, c,", "import numpy as np matplotlib.use('Agg') \"\"\" Program to Create a heatmap from tagPileUp", "the info of deleted cols into the next cols by mean a[:, col_delete_plus1]", ": {}, trna : {}\".format( rppos, stmpos, srgpos, cycpos, cofpos, unbpos)) # The", "categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', rotation=90, weight='bold') # removing all the borders and", "plt.imshow(data01, cmap=my_cmap, interpolation='nearest', aspect='auto') # plot heatmap plt.xticks([]) plt.yticks([]) # to increase the", "position of the numbers by centering the numbers in the colored boxes and", "= np.repeat(a, math.ceil(float(m) / M), axis=0) M, N = a.shape m, n =", "# Assigning the rotation based on minimum value if min(categories) == categories[3]: if", "categories[4] != 0: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25,", "frame for item in [fig, ax]: item.patch.set_visible(False) # saving the file plt.savefig(out_file_name, bbox_inches='tight',", "@click.option('-k', '--ticks', metavar=\"<string>\", default='2', prompt=True, show_default='2', help='X-axis tick mark value') @click.option('-d', '--dpi', metavar=\"<int>\",", "totalsites * data01.shape[0] # print \"cofheight: {}, unbheight : {}\".format(unbheight, cofheight) # now", "X-axis') @click.option('-k', '--ticks', metavar=\"<string>\", default='2', prompt=True, show_default='2', help='X-axis tick mark value') @click.option('-d', '--dpi',", "cofheight # find the actual position of the numbers by centering the numbers", "cofpos, unbpos)) # The default transform specifies that text is in data co-ordinates,", "categories): # initialize color levs = range(100) assert len(levs) % 2 == 0,", "n))).mean(3).mean(1) return np.array(a_compress) def plot_heatmap(data01, c, out_file_name, upper_lim, lower_lim, 
row_num, col_num, ticks, ddpi,", ": {}\".format( rppos, stmpos, srgpos, cycpos, cofpos, unbpos)) # The default transform specifies", "rankOrder = int(rec.split(\"\\t\")[0]) if rankOrder <= 19999: dataGenes.append([1] * len(tmp[start_col:])) catergoryCount[0] = catergoryCount[0]", "prompt=True, show_default='True', help='Plot Width') @click.option('-c', '--color', metavar=\"<string>\", default='0,0,0', prompt=True, show_default='0,0,0', help='Plot Color') @click.option('-t',", "int(N / n))).mean(3).mean(1) return np.array(a_compress) def plot_heatmap(data01, c, out_file_name, upper_lim, lower_lim, row_num, col_num,", "data0 = rebin(data0, (data0.shape[0], col_num)) if generateColorbar == '1': dataGenes = rebin(dataGenes, (data0.shape[0],", "N - 1, num=col_delete_num, dtype=int) # sort the random selected deleted col ids", "[fig, ax]: item.patch.set_visible(False) # saving the file plt.savefig(out_file_name, bbox_inches='tight', facecolor=None, dpi=300) def load_Data(input_file,", "stmpos = int(stmheight / 2 + topstm) srgpos = int(srgheight / 2 +", "1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', aspect='auto') # plot heatmap plt.xticks([]) plt.yticks([]) # to", "2 a = np.delete(a, row_delete, axis=0) # random remove rows if col_delete_num >", "math import pprint import click import matplotlib import matplotlib.colors as mcolors import matplotlib.pyplot", "'--dpi', metavar=\"<int>\", type=int, default=100, prompt=True, show_default='100', help='Plot pixel density') @click.option('-cb', '--colorbar', type=click.Choice(['0', '1'],", "long the color box should be for each by setting up a ratio:", "col_delete_num > 0: # select deleted cols with equal intervals col_delete = np.linspace(0,", "rec.split('\\t')] sites = sites + 1 if generateColorbar == '1': rankOrder = int(rec.split(\"\\t\")[0])", "heatmap containing genecategories. \\b Generates Colorbar for the gene categories. \"\"\" click.echo('\\n' +", "start_col, height, width, ticks, dpi, xlabel, title, colorbar) click.echo('\\n' + '.' * 50)", "categories[5]: if categories[5] != 0: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold')", "tagPileUp tabular file and contrast Threshold file. 
\"\"\" def rebin(a, new_shape): M, N", "> 0: # select deleted cols with equal intervals col_delete = np.linspace(0, N", "= int(cycheight / 2 + topcyc) cofpos = int(cofheight / 2 + topcof)", "store counts for RP, SAGA and TFIID catergoryCount = [0, 0, 0, 0,", "1 data0.append(tmp[start_col:]) data0 = np.array(data0, dtype=float) print(\"# sites in the heatmap\", sites) #", "format for colors) s = color.split(\",\") color = '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2])) c =", "prompt=True, show_default=' ', help='Label under X-axis') @click.option('-k', '--ticks', metavar=\"<string>\", default='2', prompt=True, show_default='2', help='X-axis", "the random selected deleted col ids col_delete = np.sort(col_delete) col_delete_plus1 = col_delete[1:-1] +", "deleted cols into the next cols by mean a[:, col_delete_plus1] = ( a[:,", "file_okay=True, dir_okay=False,)) @click.argument('threshold-file', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,)) @click.option('-ph', '--height', metavar=\"<int>\", type=int, default=700, prompt=True,", "levs = range(100) assert len(levs) % 2 == 0, 'N levels must be", "{}\".format(unbheight, cofheight) # now calculate the \"top\" location of each box, each top", "import (AutoMinorLocator, MultipleLocator) import numpy as np matplotlib.use('Agg') \"\"\" Program to Create a", "matplotlib.ticker import (AutoMinorLocator, MultipleLocator) import numpy as np matplotlib.use('Agg') \"\"\" Program to Create", "+ 1 elif rankOrder <= 59999 and rankOrder >= 50000: dataGenes.append([5] * len(tmp[start_col:]))", "= catergoryCount[0] + 1 elif rankOrder <= 29999 and rankOrder >= 20000: dataGenes.append([2]", "else: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning the", "+ topcof) unbpos = int(unbheight / 2 + topunb) # positions for the", "odd ax = plt.axes([0, 0, 1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', vmin=lower_lim, vmax=upper_lim, aspect='auto')", "np.delete(a, row_delete, axis=0) # random remove rows if col_delete_num > 0: # select", "* data01.shape[0] stmheight = categories[1] / totalsites * data01.shape[0] srgheight = categories[2] /", "find the mid value and set it to zero, since ax is helping", "+ '.' * 50) params = {} openfile = open(threshold_file, 'r').readlines() for line", "the # image is compressed , the point are plotted based on datapoint", "2 a = np.delete(a, col_delete, axis=1) # random remove columns M, N =", "transform specifies that text is in data co-ordinates, that is even though the", "# Assigning the rotation based on minimum value if min(categories) == categories[0]: if", "M, N = a.shape m, n = new_shape if m >= M: #", "(row_num, col_num)) if generateColorbar == '1': # i have hard-coded the width for", "'T': data.readline() data0 = [] dataGenes = [] # to store colorbar data", "intervals row_delete = np.linspace(0, M - 1, num=row_delete_num, dtype=int) # sort the random", "hex (since matplotlib doesn't support 0-255 format for colors) s = color.split(\",\") color", "to make sure there are odd number of ticks. 
def plot_heatmap(data01, c, out_file_name, upper_lim, lower_lim, row_num,
                 col_num, ticks, ddpi, xlabel, heatmapTitle, sites):
    # initialize color
    levs = range(100)
    assert len(levs) % 2 == 0, 'N levels must be even.'
    # select colors from color list
    my_cmap = mcolors.LinearSegmentedColormap.from_list(
        name='white_sth', colors=c, N=len(levs) - 1,)
    # initialize figure
    plt.figure(figsize=(col_num / 96, row_num / 96), dpi=96)
    # remove margins; this helps to keep the tick count odd
    ax = plt.axes([0, 0, 1, 1])
    # plot heatmap
    plt.imshow(data01, cmap=my_cmap, interpolation='nearest',
               vmin=lower_lim, vmax=upper_lim, aspect='auto')
    # little trick to create custom tick labels:
    # [only works if the difference between col and row is 100
    # (cols - rows = 100); fails for (300,150) & (300,100) etc.]
    # calculate the major tick locations
    locaters = col_num // 4
    ax.xaxis.set_major_locator(MultipleLocator(locaters))
    # get the initial ticks
    locs, labels = plt.xticks()
    # remove the first location to get the proper heatmap tick position
    locs = np.delete(locs, 0)
    labels.pop()
    # find the mid value and set it to zero, since ax is helping to make
    # sure there is an odd number of ticks
    mid = int(len(labels) // 2)
    labels[0] = "-" + ticks
    labels[mid] = "0"
    labels[len(labels) - 1] = ticks
    # display the new ticks
    plt.xticks(locs, labels, fontsize=14)
    ax.xaxis.set_minor_locator(AutoMinorLocator(2))
    ax.tick_params(which='major', length=10, width=2, color='black')
    ax.tick_params(which='minor', length=6, width=2, color='black')
    # draw a vertical dashed line through the midpoint
    plt.axvline(color='black', linestyle='--', x=locs[mid], linewidth=2)
    print("\n DEBUG INFO \n locs : {} \n length_locs : {} \n labels : {} \n length_labels:{}\n".format(
        locs, len(locs), labels, len(labels)))
    plt.yticks([])
    plt.xlabel(xlabel, fontsize=14)
    ylabel = "{:,}".format(sites) + " sites"
    plt.ylabel(ylabel, fontsize=14)
    plt.title(heatmapTitle, fontsize=18)
    # increase the width of the plot borders
    plt.setp(ax.spines.values(), linewidth=2)
    plt.savefig(out_file_name, bbox_inches='tight', pad_inches=0.05,
                facecolor=None, dpi=ddpi)
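
# Usage sketch (hypothetical values, not from the original pipeline): render a
# 200x300 random matrix with a white-to-red ramp. The 300x200 shape keeps the
# cols - rows = 100 relationship that the tick-label trick above relies on.
def _plot_heatmap_example():
    demo = np.random.rand(200, 300)
    plot_heatmap(demo, ["white", "#ff2600"], "demo_heatmap.png",
                 upper_lim=1.0, lower_lim=0, row_num=200, col_num=300,
                 ticks="2", ddpi=100, xlabel="Distance from feature (kb)",
                 heatmapTitle="demo", sites=200)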
metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Plot Title')", "weight='bold') else: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=16, color='black', rotation=90, weight='bold') # Assigning", "1) # put the info of deleted rows into the next rows by", "plus position # get deleted cols plus position (top +1; end -1) col_delete_plus1", "position. locs = np.delete(locs, 0) labels.pop() # find the mid value and set", "labels[len(labels) - 1] = ticks # display the new ticks plt.xticks(locs, labels, fontsize=14)", "fontsize=14) plt.title(heatmapTitle, fontsize=18) # to increase the width of the plot borders plt.setp(ax.spines.values(),", "Style All Feature heatmap containing genecategories. \\b Generates Colorbar for the gene categories.", "the heatmap\") pprint.pprint(params) upper_lim = float(params['upper_threshold']) lower_lim = int(params['lower_threshold']) header = params['header'] start_col", "labels, fontsize=14) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) ax.tick_params(which='major', length=10, width=2, color='black') ax.tick_params(which='minor', length=6, width=2, color='black') # Draw", "= row_delete[1:-1] + \\ 1 # get deleted rows plus position # get", "0, 1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', vmin=lower_lim, vmax=upper_lim, aspect='auto') # plot heatmap #", "ticks plt.xticks(locs, labels, fontsize=14) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) ax.tick_params(which='major', length=10, width=2, color='black') ax.tick_params(which='minor', length=6, width=2, color='black')", "/ 96, row_num / 96), dpi=300) # remove margins , # this helps", "topstm) srgpos = int(srgheight / 2 + topsrg) cycpos = int(cycheight / 2", "a = np.delete(a, col_delete, axis=1) # random remove columns M, N = a.shape", "to calculate the # of sites in the heatmap for rec in data:", "96), dpi=96) # remove margins , # this helps to maintain the ticks", "catergoryCount) CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.command(options_metavar='<options>', context_settings=CONTEXT_SETTINGS) @click.argument('tagpileup-cdt', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,)) @click.argument('threshold-file',", "metavar=\"<int>\", type=int, default=300, prompt=True, show_default='True', help='Plot Width') @click.option('-c', '--color', metavar=\"<string>\", default='0,0,0', prompt=True, show_default='0,0,0',", "Assigning the rotation based on minimum value if min(categories) == categories[0]: if categories[0]", "# to increase the width of the plot borders plt.setp(ax.spines.values(), linewidth=2) # calculate", "row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, sites) # checking if we need to", "== categories[0]: if categories[0] != 0: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white',", "heatmap # little trick to create custom tick labels. # [ only works", "dataGenes = rebin(dataGenes, (row_num, 50)) elif row_num < data0.shape[0]: data0 = rebin(data0, (row_num,", "totalsites = sum(categories) rpheight = categories[0] / totalsites * data01.shape[0] stmheight = categories[1]", "else: plt.text(25, cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning the", "catergory values. 
for i in range(0, len(catergoryCount)): if catergoryCount[i] != 0: colors.append(mycolors[i]) plot_colorbar(dataGenes,", "width=2, color='black') # Draw a horizontal line through the midpoint. plt.axvline(color='black', linestyle='--', x=locs[mid],", "{} \\n length_labels:{}\\n\".format( locs, len(locs), labels, len(labels))) plt.yticks([]) plt.xlabel(xlabel, fontsize=14) ylabel = \"{:,}\".format(sites)", "ticks locs, labels = plt.xticks() # remove the first location to get proper", "INFO \\n locs : {} \\n length_locs : {} \\n labels : {}", "xlabel, heatmapTitle, sites) # checking if we need to plot the color bar", "for colors) s = color.split(\",\") color = '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2])) c = [\"white\",", "gene colorbar (0: No, 1: Yes)\") @click.option('-o', '--out', metavar=\"<string>\", default='Heatmap.png', prompt=True, show_default='Heatmap.png', help='output", "'--xlabel', metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Label under X-axis') @click.option('-k', '--ticks', metavar=\"<string>\",", "// 2) labels[0] = \"-\" + ticks labels[mid] = \"0\" labels[len(labels) - 1]", "- 1) # put the info of deleted cols into the next cols", "text is in data co-ordinates, that is even though the # image is", "topsrg) cycpos = int(cycheight / 2 + topcyc) cofpos = int(cofheight / 2", "labels.pop() # find the mid value and set it to zero, since ax", "elif rankOrder <= 29999 and rankOrder >= 20000: dataGenes.append([2] * len(tmp[start_col:])) catergoryCount[1] =", "1 elif rankOrder <= 219999 and rankOrder >= 210000: dataGenes.append([6] * len(tmp[start_col:])) catergoryCount[5]", "a = np.delete(a, row_delete, axis=0) # random remove rows if col_delete_num > 0:", "* len(tmp[start_col:])) catergoryCount[5] = catergoryCount[5] + 1 data0.append(tmp[start_col:]) data0 = np.array(data0, dtype=float) print(\"#", "= ( a[:, col_delete] + a[:, col_delete_plus1]) / 2 a = np.delete(a, col_delete,", "min(categories) == categories[0]: if categories[0] != 0: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10,", "- 1, num=row_delete_num, dtype=int) # sort the random selected deleted row ids row_delete", "% 2 == 0, 'N levels must be even.' 
# select colors from", "c, out_file_name, row_num, col_num, categories): # initialize color levs = range(100) assert len(levs)", "dataGenes = rebin(dataGenes, (data0.shape[0], 50)) # set color here # convert rgb to", "len(tmp[start_col:])) catergoryCount[1] = catergoryCount[1] + 1 elif rankOrder <= 39999 and rankOrder >=", "the # of sites in the heatmap for rec in data: tmp =", "+ 1 elif rankOrder <= 39999 and rankOrder >= 30000: dataGenes.append([3] * len(tmp[start_col:]))", "data0.append(tmp[start_col:]) data0 = np.array(data0, dtype=float) print(\"# sites in the heatmap\", sites) # creating", "cols plus position (top +1; end -1) col_delete_plus1 = np.append( np.append(col_delete[0] + 1,", "by centering the numbers in the colored boxes and applying an arbitrary offset", "srgheight = categories[2] / totalsites * data01.shape[0] cycheight = categories[3] / totalsites *", "repeat rows in data matrix a = np.repeat(a, math.ceil(float(m) / M), axis=0) M,", "counts for RP, SAGA and TFIID catergoryCount = [0, 0, 0, 0, 0,", "rpheight = categories[0] / totalsites * data01.shape[0] stmheight = categories[1] / totalsites *", "value if min(categories) == categories[3]: if categories[3] != 0: plt.text(25, cycpos, categories[3], horizontalalignment='center',", "the rotation based on minimum value if min(categories) == categories[0]: if categories[0] !=", "50)) elif col_num < data0.shape[1]: data0 = rebin(data0, (data0.shape[0], col_num)) if generateColorbar ==", "matrix a_compress = a.reshape((m, int(M / m), n, int(N / n))).mean(3).mean(1) return np.array(a_compress)", "col_num, categories): # initialize color levs = range(100) assert len(levs) % 2 ==", "2 + topunb) # positions for the values print(\"rp: {}, stm: {}, ess", "= categories[3] / totalsites * data01.shape[0] cofheight = categories[4] / totalsites * data01.shape[0]", "like a graph # Assigning the rotation based on minimum value if min(categories)", "matrix a = np.repeat(a, math.ceil(float(m) / M), axis=0) M, N = a.shape m,", "if generateColorbar == '1': dataGenes = rebin(dataGenes, (data0.shape[0], 50)) # set color here", "ax is helping to make sure there are odd number of ticks. 
mid", ":] = ( a[row_delete, :] + a[row_delete_plus1, :]) / 2 a = np.delete(a,", "# plot heatmap plt.xticks([]) plt.yticks([]) # to increase the width of the plot", "# select colors from color list my_cmap = mcolors.LinearSegmentedColormap.from_list( name='white_sth', colors=c, N=len(levs) -", "is compressed , the point are plotted based on datapoint in (x,y) like", "/ 2 + topunb) # positions for the values print(\"rp: {}, stm: {},", "'1'], case_sensitive=False), prompt=True, default='0', help=\"Generate the gene colorbar (0: No, 1: Yes)\") @click.option('-o',", "unknown box)/(feature box height) totalsites = sum(categories) rpheight = categories[0] / totalsites *", "of sites in the heatmap for rec in data: tmp = [(x.strip()) for", "rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') # Assigning the rotation based on", "(AutoMinorLocator, MultipleLocator) import numpy as np matplotlib.use('Agg') \"\"\" Program to Create a heatmap", "print(\"rp: {}, stm: {}, ess : {}, cof : {}, unb : {},", "% m col_delete_num = N % n np.random.seed(seed=0) if row_delete_num > 0: #", "1 elif rankOrder <= 59999 and rankOrder >= 50000: dataGenes.append([5] * len(tmp[start_col:])) catergoryCount[4]", "catergoryCount[i] != 0: colors.append(mycolors[i]) plot_colorbar(dataGenes, colors, \"colorbar.png\", 900, 35, catergoryCount) CONTEXT_SETTINGS = dict(help_option_names=['-h',", "metavar=\"<int>\", type=int, default=700, prompt=True, show_default='True', help='Plot Height') @click.option('-pw', '--width', metavar=\"<int>\", type=int, default=300, prompt=True,", "= catergoryCount[5] + 1 data0.append(tmp[start_col:]) data0 = np.array(data0, dtype=float) print(\"# sites in the", "categories[1]: if categories[1] != 0: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=13, color='black', weight='bold')", "2 + topsrg) cycpos = int(cycheight / 2 + topcyc) cofpos = int(cofheight", "calculate the major tick locations locaters = col_num // 4 ax.xaxis.set_major_locator(MultipleLocator(locaters)) # get", "row_num < data0.shape[0]: data0 = rebin(data0, (row_num, data0.shape[1])) if generateColorbar == '1': dataGenes", "'--out', metavar=\"<string>\", default='Heatmap.png', prompt=True, show_default='Heatmap.png', help='output filename') def cli(tagpileup_cdt, threshold_file, color, height, width,", "major tick locations locaters = col_num // 4 ax.xaxis.set_major_locator(MultipleLocator(locaters)) # get the initial", "end -1) row_delete_plus1 = np.append( np.append(row_delete[0] + 1, row_delete_plus1), row_delete[-1] - 1) #", "= \"0\" labels[len(labels) - 1] = ticks # display the new ticks plt.xticks(locs,", "the \"top\" location of each box, each top should be the ending position", "= int(unbheight / 2 + topunb) # positions for the values print(\"rp: {},", "though the # image is compressed , the point are plotted based on", "__future__ import division import math import pprint import click import matplotlib import matplotlib.colors", "= catergoryCount[4] + 1 elif rankOrder <= 219999 and rankOrder >= 210000: dataGenes.append([6]", "boxes and applying an arbitrary offset rppos = int(rpheight / 2) stmpos =", "!= 0: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cofpos,", "position # get deleted rows plus position (top +1; end -1) row_delete_plus1 =", "remove the first location to get proper heatmap tick 
position. locs = np.delete(locs,", "value and set it to zero, since ax is helping to make sure", "= topsrg + srgheight topcof = topcyc + cycheight topunb = topcof +", "210000: dataGenes.append([6] * len(tmp[start_col:])) catergoryCount[5] = catergoryCount[5] + 1 data0.append(tmp[start_col:]) data0 = np.array(data0,", "new_shape if m >= M: # repeat rows in data matrix a =", "position (top +1; end -1) col_delete_plus1 = np.append( np.append(col_delete[0] + 1, col_delete_plus1), col_delete[-1]", "= line.strip() temp = line.split(\":\") if temp[0] not in params.keys(): params[temp[0]] = temp[1]", "ids col_delete = np.sort(col_delete) col_delete_plus1 = col_delete[1:-1] + \\ 1 # get deleted", "in range(0, len(catergoryCount)): if catergoryCount[i] != 0: colors.append(mycolors[i]) plot_colorbar(dataGenes, colors, \"colorbar.png\", 900, 35,", ">= 210000: dataGenes.append([6] * len(tmp[start_col:])) catergoryCount[5] = catergoryCount[5] + 1 data0.append(tmp[start_col:]) data0 =", "= data0.shape[0] if col_num == -999: col_num = data0.shape[1] # rebin data0 (compresses", "/ n))).mean(3).mean(1) return np.array(a_compress) def plot_heatmap(data01, c, out_file_name, upper_lim, lower_lim, row_num, col_num, ticks,", "a ratio: (this site)/(total sites) = (height of unknown box)/(feature box height) totalsites", "line = line.strip() temp = line.split(\":\") if temp[0] not in params.keys(): params[temp[0]] =", "/ totalsites * data01.shape[0] stmheight = categories[1] / totalsites * data01.shape[0] srgheight =", "of unknown box)/(feature box height) totalsites = sum(categories) rpheight = categories[0] / totalsites", "+ 1 elif rankOrder <= 219999 and rankOrder >= 210000: dataGenes.append([6] * len(tmp[start_col:]))", "= 100), fails for (300,150) & (300,100) etc] # calculate the major tick", "@click.option('-t', '--title', metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Plot Title') @click.option('-xl', '--xlabel', metavar=\"<string>\",", "borders plt.setp(ax.spines.values(), linewidth=2) # calculate how long the color box should be for", "/ totalsites * data01.shape[0] srgheight = categories[2] / totalsites * data01.shape[0] cycheight =", "N = a.shape m, n = new_shape if m >= M: # repeat", "default=' ', prompt=True, show_default=' ', help='Label under X-axis') @click.option('-k', '--ticks', metavar=\"<string>\", default='2', prompt=True,", "generateColorbar == '1': dataGenes = rebin(dataGenes, (data0.shape[0], 50)) # set color here #", "categories[5] != 0: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else: plt.text(25,", "rankOrder >= 30000: dataGenes.append([3] * len(tmp[start_col:])) catergoryCount[2] = catergoryCount[2] + 1 elif rankOrder", "have hard-coded the width for colorbar(50) dataGenes = rebin(dataGenes, (row_num, 50)) elif row_num", "m, n = new_shape if m >= M: # repeat rows in data", "\\b Generates Colorbar for the gene categories. \"\"\" click.echo('\\n' + '.' * 50)", "if categories[2] != 0: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else:", "header, start_col, height, width, ticks, dpi, xlabel, title, colorbar) click.echo('\\n' + '.' 
*", "@click.argument('tagpileup-cdt', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,)) @click.argument('threshold-file', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,)) @click.option('-ph', '--height', metavar=\"<int>\",", "= np.delete(locs, 0) labels.pop() # find the mid value and set it to", "== categories[3]: if categories[3] != 0: plt.text(25, cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white',", "verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90,", "/ totalsites * data01.shape[0] cycheight = categories[3] / totalsites * data01.shape[0] cofheight =", ">= M: # repeat rows in data matrix a = np.repeat(a, math.ceil(float(m) /", "using treeView compression algorithm) if row_num < data0.shape[0] and col_num < data0.shape[1]: data0", "= 0 # to calculate the # of sites in the heatmap for", ": {} \\n length_labels:{}\\n\".format( locs, len(locs), labels, len(labels))) plt.yticks([]) plt.xlabel(xlabel, fontsize=14) ylabel =", "# set color here # convert rgb to hex (since matplotlib doesn't support", "initialize figure fig = plt.figure(figsize=(col_num / 96, row_num / 96), dpi=300) # remove", "* data01.shape[0] srgheight = categories[2] / totalsites * data01.shape[0] cycheight = categories[3] /", "== '1': # i have hard-coded the width for colorbar(50) dataGenes = rebin(dataGenes,", "rankOrder <= 29999 and rankOrder >= 20000: dataGenes.append([2] * len(tmp[start_col:])) catergoryCount[1] = catergoryCount[1]", "of each box, each top should be the ending position of the previous", "', help='Plot Title') @click.option('-xl', '--xlabel', metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Label under", "+ 1 data0.append(tmp[start_col:]) data0 = np.array(data0, dtype=float) print(\"# sites in the heatmap\", sites)", "based on minimum value if min(categories) == categories[2]: if categories[2] != 0: plt.text(25,", "the difference between col and row is 100 (cols - rows = 100),", "image is compressed , the point are plotted based on datapoint in (x,y)", "/ 2 + topcyc) cofpos = int(cofheight / 2 + topcof) unbpos =", "if generateColorbar == '1': dataGenes = rebin(dataGenes, (row_num, 50)) elif col_num < data0.shape[1]:", "horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', rotation=90, weight='bold') # removing all the borders and frame", "weight='bold') else: plt.text(25, cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning", "'r').readlines() for line in openfile: line = line.strip() temp = line.split(\":\") if temp[0]", "is 100 (cols - rows = 100), fails for (300,150) & (300,100) etc]", "since ax is helping to make sure there are odd number of ticks.", "color, header, start_col, height, width, ticks, dpi, xlabel, title, colorbar) click.echo('\\n' + '.'", "temp = line.split(\":\") if temp[0] not in params.keys(): params[temp[0]] = temp[1] print(\" \\n", "in data co-ordinates, that is even though the # image is compressed ,", "dtype=float) print(\"# sites in the heatmap\", sites) # creating the np-array to plot", "2 + topcyc) cofpos = int(cofheight / 2 + topcof) unbpos = int(unbheight", "equal intervals col_delete = np.linspace(0, N - 1, num=col_delete_num, dtype=int) # sort 
the", "num=col_delete_num, dtype=int) # sort the random selected deleted col ids col_delete = np.sort(col_delete)", "\"\"\" Program to Create a heatmap from tagPileUp tabular file and contrast Threshold", "data using treeView compression algorithm) if row_num < data0.shape[0] and col_num < data0.shape[1]:", "now calculate the \"top\" location of each box, each top should be the", "row_num == -999: row_num = data0.shape[0] if col_num == -999: col_num = data0.shape[1]", "< data0.shape[0]: data0 = rebin(data0, (row_num, data0.shape[1])) if generateColorbar == '1': dataGenes =", "totalsites * data01.shape[0] unbheight = categories[5] / totalsites * data01.shape[0] # print \"cofheight:", "if min(categories) == categories[2]: if categories[2] != 0: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center',", "minimum value if min(categories) == categories[3]: if categories[3] != 0: plt.text(25, cycpos, categories[3],", "axis=0) M, N = a.shape m, n = new_shape row_delete_num = M %", "random selected deleted col ids col_delete = np.sort(col_delete) col_delete_plus1 = col_delete[1:-1] + \\", "rec in data: tmp = [(x.strip()) for x in rec.split('\\t')] sites = sites", "plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning the rotation", "gene categories. \"\"\" click.echo('\\n' + '.' * 50) params = {} openfile =", "ids row_delete = np.sort(row_delete) row_delete_plus1 = row_delete[1:-1] + \\ 1 # get deleted", "+ \" sites\" plt.ylabel(ylabel, fontsize=14) plt.title(heatmapTitle, fontsize=18) # to increase the width of", "categories[5] / totalsites * data01.shape[0] # print \"cofheight: {}, unbheight : {}\".format(unbheight, cofheight)", "minimum value if min(categories) == categories[0]: if categories[0] != 0: plt.text(25, rppos, categories[0],", "should be the ending position of the previous box topstm = rpheight topsrg", "stmpos, srgpos, cycpos, cofpos, unbpos)) # The default transform specifies that text is", "verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90,", "co-ordinates, that is even though the # image is compressed , the point", "categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10,", "axis=1) # random remove columns M, N = a.shape # compare the heatmap", "select deleted rows with equal intervals row_delete = np.linspace(0, M - 1, num=row_delete_num,", "weight='bold') else: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') # Assigning the", "col_delete_plus1 = np.append( np.append(col_delete[0] + 1, col_delete_plus1), col_delete[-1] - 1) # put the", "= M % m col_delete_num = N % n np.random.seed(seed=0) if row_delete_num >", "horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') # Assigning the rotation based on minimum value", "unb : {}, trna : {}\".format( rppos, stmpos, srgpos, cycpos, cofpos, unbpos)) #", "ess : {}, cof : {}, unb : {}, trna : {}\".format( rppos,", "(cols - rows = 100), fails for (300,150) & (300,100) etc] # calculate", "plus position # get deleted rows plus position (top 
+1; end -1) row_delete_plus1", "setting up a ratio: (this site)/(total sites) = (height of unknown box)/(feature box", "out_file, upper_lim, lower_lim, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, sites) # checking if", "the colobar\") mycolors = ['#ff2600', '#ffd54f', '#43a047', '#0096ff', '#9437ff', '#9e9e9e'] colors = []", "if m >= M: # repeat rows in data matrix a = np.repeat(a,", "the gene categories. \"\"\" click.echo('\\n' + '.' * 50) params = {} openfile", "prompt=True, default='0', help=\"Generate the gene colorbar (0: No, 1: Yes)\") @click.option('-o', '--out', metavar=\"<string>\",", "graph # Assigning the rotation based on minimum value if min(categories) == categories[0]:", "data01.shape[0] cycheight = categories[3] / totalsites * data01.shape[0] cofheight = categories[4] / totalsites", "# get the initial ticks locs, labels = plt.xticks() # remove the first", "Assigning the rotation based on minimum value if min(categories) == categories[4]: if categories[4]", "else: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=16, color='black', rotation=90, weight='bold') # Assigning the", "'#0096ff', '#9437ff', '#9e9e9e'] colors = [] # deciding colors based on the catergory", "<= 49999 and rankOrder >= 40000: dataGenes.append([4] * len(tmp[start_col:])) catergoryCount[3] = catergoryCount[3] +", "= a.shape # compare the heatmap matrix a_compress = a.reshape((m, int(M / m),", "default='2', prompt=True, show_default='2', help='X-axis tick mark value') @click.option('-d', '--dpi', metavar=\"<int>\", type=int, default=100, prompt=True,", "SAGA and TFIID catergoryCount = [0, 0, 0, 0, 0, 0] sites =", "Title') @click.option('-xl', '--xlabel', metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Label under X-axis') @click.option('-k',", "# remove the first location to get proper heatmap tick position. locs =", "# to store counts for RP, SAGA and TFIID catergoryCount = [0, 0,", "the file plt.savefig(out_file_name, bbox_inches='tight', facecolor=None, dpi=300) def load_Data(input_file, out_file, upper_lim, lower_lim, color, header,", "'--colorbar', type=click.Choice(['0', '1'], case_sensitive=False), prompt=True, default='0', help=\"Generate the gene colorbar (0: No, 1:", "select colors from color list my_cmap = mcolors.LinearSegmentedColormap.from_list( name='white_sth', colors=c, N=len(levs) - 1,)", "between col and row is 100 (cols - rows = 100), fails for", "context_settings=CONTEXT_SETTINGS) @click.argument('tagpileup-cdt', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,)) @click.argument('threshold-file', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,)) @click.option('-ph', '--height',", "+ \\ 1 # get deleted cols plus position # get deleted cols", "color='white', weight='bold') else: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') # Assigning", "select deleted cols with equal intervals col_delete = np.linspace(0, N - 1, num=col_delete_num,", "removing all the borders and frame for item in [fig, ax]: item.patch.set_visible(False) #", "even.' 
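
# Equivalent layout computation as a loop (sketch, not used by the script):
# each category's box height is its share of the total sites scaled to the
# colorbar height, each box starts where the previous one ends, and each
# label sits at the centre of its box.
def _category_layout(categories, plot_height):
    total = sum(categories)
    heights = [cat / total * plot_height for cat in categories]
    tops = np.cumsum([0.0] + heights[:-1])  # running start positions
    centers = [int(h / 2 + t) for h, t in zip(heights, tops)]
    return heights, tops, centers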
def load_Data(input_file, out_file, upper_lim, lower_lim, color, header,
              start_col, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle,
              generateColorbar):
    data = open(input_file, 'r')
    if header == 'T':
        data.readline()
    data0 = []
    dataGenes = []  # to store colorbar data
    # to store counts for RP, SAGA and TFIID categories
    catergoryCount = [0, 0, 0, 0, 0, 0]
    # to count the # of sites in the heatmap
    sites = 0
    for rec in data:
        tmp = [(x.strip()) for x in rec.split('\t')]
        sites = sites + 1
        if generateColorbar == '1':
            # NOTE: which column holds the rank order is an assumption here;
            # it could not be recovered from this copy of the script.
            rankOrder = int(tmp[0])
            if rankOrder <= 19999:
                dataGenes.append([1] * len(tmp[start_col:]))
                catergoryCount[0] = catergoryCount[0] + 1
            elif rankOrder <= 29999 and rankOrder >= 20000:
                dataGenes.append([2] * len(tmp[start_col:]))
                catergoryCount[1] = catergoryCount[1] + 1
            elif rankOrder <= 39999 and rankOrder >= 30000:
                dataGenes.append([3] * len(tmp[start_col:]))
                catergoryCount[2] = catergoryCount[2] + 1
            elif rankOrder <= 49999 and rankOrder >= 40000:
                dataGenes.append([4] * len(tmp[start_col:]))
                catergoryCount[3] = catergoryCount[3] + 1
            elif rankOrder <= 59999 and rankOrder >= 50000:
                dataGenes.append([5] * len(tmp[start_col:]))
                catergoryCount[4] = catergoryCount[4] + 1
            elif rankOrder <= 219999 and rankOrder >= 210000:
                dataGenes.append([6] * len(tmp[start_col:]))
                catergoryCount[5] = catergoryCount[5] + 1
        data0.append(tmp[start_col:])
    data0 = np.array(data0, dtype=float)
    print("# sites in the heatmap", sites)
    # create the np-array to plot the colorbar
    dataGenes = np.array(dataGenes, dtype=float)
    print("catergoryCount : {}".format(catergoryCount))
    if row_num == -999:
        row_num = data0.shape[0]
    if col_num == -999:
        col_num = data0.shape[1]
    # rebin data0 (compresses the data using the treeView compression algorithm)
    if row_num < data0.shape[0] and col_num < data0.shape[1]:
        data0 = rebin(data0, (row_num, col_num))
        if generateColorbar == '1':
            # the width for the colorbar (50) is hard-coded
            dataGenes = rebin(dataGenes, (row_num, 50))
    elif row_num < data0.shape[0]:
        data0 = rebin(data0, (row_num, data0.shape[1]))
        if generateColorbar == '1':
            dataGenes = rebin(dataGenes, (row_num, 50))
    elif col_num < data0.shape[1]:
        data0 = rebin(data0, (data0.shape[0], col_num))
        if generateColorbar == '1':
            dataGenes = rebin(dataGenes, (data0.shape[0], 50))
    # set color here: convert rgb to hex
    # (matplotlib doesn't support the 0-255 format for colors)
    s = color.split(",")
    color = '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2]))
    c = ["white", color]
    # generate heatmap
    plot_heatmap(data0, c, out_file, upper_lim, lower_lim, row_num, col_num,
                 ticks, ddpi, xlabel, heatmapTitle, sites)
    # check whether we need to plot the color bar
    if generateColorbar == '1':
        print("Creating the colorbar")
        mycolors = ['#ff2600', '#ffd54f', '#43a047',
                    '#0096ff', '#9437ff', '#9e9e9e']
        colors = []
        # decide colors based on the category counts
        for i in range(0, len(catergoryCount)):
            if catergoryCount[i] != 0:
                colors.append(mycolors[i])
        plot_colorbar(dataGenes, colors, "colorbar.png", 900, 35,
                      catergoryCount)
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])


@click.command(options_metavar='<options>', context_settings=CONTEXT_SETTINGS)
@click.argument('tagpileup-cdt', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,))
@click.argument('threshold-file', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,))
@click.option('-ph', '--height', metavar="<int>", type=int, default=700, prompt=True, show_default='True', help='Plot Height')
@click.option('-pw', '--width', metavar="<int>", type=int, default=300, prompt=True, show_default='True', help='Plot Width')
@click.option('-c', '--color', metavar="<string>", default='0,0,0', prompt=True, show_default='0,0,0', help='Plot Color')
@click.option('-t', '--title', metavar="<string>", default=' ', prompt=True, show_default=' ', help='Plot Title')
@click.option('-xl', '--xlabel', metavar="<string>", default=' ', prompt=True, show_default=' ', help='Label under X-axis')
@click.option('-k', '--ticks', metavar="<string>", default='2', prompt=True, show_default='2', help='X-axis tick mark value')
@click.option('-d', '--dpi', metavar="<int>", type=int, default=100, prompt=True, show_default='100', help='Plot pixel density')
@click.option('-cb', '--colorbar', type=click.Choice(['0', '1'], case_sensitive=False), prompt=True, default='0', help="Generate the gene colorbar (0: No, 1: Yes)")
@click.option('-o', '--out', metavar="<string>", default='Heatmap.png', prompt=True, show_default='Heatmap.png', help='output filename')
def cli(tagpileup_cdt, threshold_file, color, height, width, title, xlabel,
        ticks, dpi, colorbar, out):
    """ Creates YEP Style All Feature heatmap containing gene categories.

    \b
    Generates Colorbar for the gene categories.
    """
    click.echo('\n' + '.' * 50)
    params = {}
    openfile = open(threshold_file, 'r').readlines()
    for line in openfile:
        line = line.strip()
        temp = line.split(":")
        if temp[0] not in params.keys():
            params[temp[0]] = temp[1]
    print(" \n Parameters for the heatmap")
    pprint.pprint(params)
    upper_lim = float(params['upper_threshold'])
    lower_lim = int(params['lower_threshold'])
    header = params['header']
    start_col = int(params['start_col'])
    load_Data(tagpileup_cdt, out, upper_lim, lower_lim, color, header,
              start_col, height, width, ticks, dpi, xlabel, title, colorbar)
    click.echo('\n' + '.' * 50)
# removing all the borders", "return np.array(a_compress) def plot_heatmap(data01, c, out_file_name, upper_lim, lower_lim, row_num, col_num, ticks, ddpi, xlabel,", "# sort the random selected deleted row ids row_delete = np.sort(row_delete) row_delete_plus1 =", "to maintain the ticks to be odd ax = plt.axes([0, 0, 1, 1])", "color, header, start_col, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, generateColorbar): data = open(input_file,", "* data01.shape[0] unbheight = categories[5] / totalsites * data01.shape[0] # print \"cofheight: {},", "type=int, default=100, prompt=True, show_default='100', help='Plot pixel density') @click.option('-cb', '--colorbar', type=click.Choice(['0', '1'], case_sensitive=False), prompt=True,", "start_col = int(params['start_col']) load_Data(tagpileup_cdt, out, upper_lim, lower_lim, color, header, start_col, height, width, ticks,", "catergoryCount[2] = catergoryCount[2] + 1 elif rankOrder <= 49999 and rankOrder >= 40000:", "only works if the difference between col and row is 100 (cols -", "= int(params['start_col']) load_Data(tagpileup_cdt, out, upper_lim, lower_lim, color, header, start_col, height, width, ticks, dpi,", "DEBUG INFO \\n locs : {} \\n length_locs : {} \\n labels :", "= [] dataGenes = [] # to store colorbar data # to store", "0) labels.pop() # find the mid value and set it to zero, since", "plt.xlabel(xlabel, fontsize=14) ylabel = \"{:,}\".format(sites) + \" sites\" plt.ylabel(ylabel, fontsize=14) plt.title(heatmapTitle, fontsize=18) #", "box, each top should be the ending position of the previous box topstm", "print(\"catergoryCount : {}\".format(catergoryCount)) if row_num == -999: row_num = data0.shape[0] if col_num ==", "data matrix a = np.repeat(a, math.ceil(float(m) / M), axis=0) M, N = a.shape", "tabular file and contrast Threshold file. \"\"\" def rebin(a, new_shape): M, N =", "M, N = a.shape m, n = new_shape row_delete_num = M % m", "sort the random selected deleted row ids row_delete = np.sort(row_delete) row_delete_plus1 = row_delete[1:-1]", "-1) row_delete_plus1 = np.append( np.append(row_delete[0] + 1, row_delete_plus1), row_delete[-1] - 1) # put", "help='X-axis tick mark value') @click.option('-d', '--dpi', metavar=\"<int>\", type=int, default=100, prompt=True, show_default='100', help='Plot pixel", "cols into the next cols by mean a[:, col_delete_plus1] = ( a[:, col_delete]", "upper_lim, lower_lim, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, sites): # initialize color levs", "categories. \"\"\" click.echo('\\n' + '.' 
* 50) params = {} openfile = open(threshold_file,", "if header == 'T': data.readline() data0 = [] dataGenes = [] # to", "import matplotlib import matplotlib.colors as mcolors import matplotlib.pyplot as plt from matplotlib.ticker import", "selected deleted col ids col_delete = np.sort(col_delete) col_delete_plus1 = col_delete[1:-1] + \\ 1", "+1; end -1) col_delete_plus1 = np.append( np.append(col_delete[0] + 1, col_delete_plus1), col_delete[-1] - 1)", "M), axis=0) M, N = a.shape m, n = new_shape row_delete_num = M", "unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center',", "1 elif rankOrder <= 39999 and rankOrder >= 30000: dataGenes.append([3] * len(tmp[start_col:])) catergoryCount[2]", "on minimum value if min(categories) == categories[4]: if categories[4] != 0: plt.text(25, cofpos,", "type=int, default=700, prompt=True, show_default='True', help='Plot Height') @click.option('-pw', '--width', metavar=\"<int>\", type=int, default=300, prompt=True, show_default='True',", "pprint.pprint(params) upper_lim = float(params['upper_threshold']) lower_lim = int(params['lower_threshold']) header = params['header'] start_col = int(params['start_col'])", "col_delete_num = N % n np.random.seed(seed=0) if row_delete_num > 0: # select deleted", "{}, cof : {}, unb : {}, trna : {}\".format( rppos, stmpos, srgpos,", "margins , # this helps to maintain the ticks to be odd ax", "categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10,", "# [ only works if the difference between col and row is 100", "convert rgb to hex (since matplotlib doesn't support 0-255 format for colors) s", "def rebin(a, new_shape): M, N = a.shape m, n = new_shape if m", "an arbitrary offset rppos = int(rpheight / 2) stmpos = int(stmheight / 2", "fontsize=10, color='white', weight='bold') else: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', rotation=90, weight='bold')", "elif rankOrder <= 59999 and rankOrder >= 50000: dataGenes.append([5] * len(tmp[start_col:])) catergoryCount[4] =", "if we need to plot the color bar if generateColorbar == '1': print(\"Creating", "49999 and rankOrder >= 40000: dataGenes.append([4] * len(tmp[start_col:])) catergoryCount[3] = catergoryCount[3] + 1", "sites) # checking if we need to plot the color bar if generateColorbar", "(x,y) like a graph # Assigning the rotation based on minimum value if", "ticks labels[mid] = \"0\" labels[len(labels) - 1] = ticks # display the new", "row_delete[1:-1] + \\ 1 # get deleted rows plus position # get deleted", "if min(categories) == categories[1]: if categories[1] != 0: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center',", "weight='bold') else: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', rotation=90, weight='bold') # removing", "cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center',", "= rebin(dataGenes, (row_num, 50)) elif col_num < data0.shape[1]: data0 = rebin(data0, 
(data0.shape[0], col_num))", "M, N = a.shape # compare the heatmap matrix a_compress = a.reshape((m, int(M", "the midpoint. plt.axvline(color='black', linestyle='--', x=locs[mid], linewidth=2) print(\"\\n DEBUG INFO \\n locs : {}", "\"\"\" click.echo('\\n' + '.' * 50) params = {} openfile = open(threshold_file, 'r').readlines()", "{} \\n length_locs : {} \\n labels : {} \\n length_labels:{}\\n\".format( locs, len(locs),", "plus position (top +1; end -1) row_delete_plus1 = np.append( np.append(row_delete[0] + 1, row_delete_plus1),", "'1': rankOrder = int(rec.split(\"\\t\")[0]) if rankOrder <= 19999: dataGenes.append([1] * len(tmp[start_col:])) catergoryCount[0] =", "as mcolors import matplotlib.pyplot as plt from matplotlib.ticker import (AutoMinorLocator, MultipleLocator) import numpy", "All Feature heatmap containing genecategories. \\b Generates Colorbar for the gene categories. \"\"\"", "in the heatmap for rec in data: tmp = [(x.strip()) for x in", "help='Plot Title') @click.option('-xl', '--xlabel', metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Label under X-axis')", "default=300, prompt=True, show_default='True', help='Plot Width') @click.option('-c', '--color', metavar=\"<string>\", default='0,0,0', prompt=True, show_default='0,0,0', help='Plot Color')", "plot_colorbar(data01, c, out_file_name, row_num, col_num, categories): # initialize color levs = range(100) assert", "data0 = [] dataGenes = [] # to store colorbar data # to", "plt.xticks(locs, labels, fontsize=14) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) ax.tick_params(which='major', length=10, width=2, color='black') ax.tick_params(which='minor', length=6, width=2, color='black') #", "== 'T': data.readline() data0 = [] dataGenes = [] # to store colorbar", "if generateColorbar == '1': print(\"Creating the colobar\") mycolors = ['#ff2600', '#ffd54f', '#43a047', '#0096ff',", "> 0: # select deleted rows with equal intervals row_delete = np.linspace(0, M", "labels. # [ only works if the difference between col and row is", "\"\"\" def rebin(a, new_shape): M, N = a.shape m, n = new_shape if", "data0 = rebin(data0, (row_num, col_num)) if generateColorbar == '1': # i have hard-coded", "'--height', metavar=\"<int>\", type=int, default=700, prompt=True, show_default='True', help='Plot Height') @click.option('-pw', '--width', metavar=\"<int>\", type=int, default=300,", "0-255 format for colors) s = color.split(\",\") color = '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2])) c", "verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90,", "* len(tmp[start_col:])) catergoryCount[1] = catergoryCount[1] + 1 elif rankOrder <= 39999 and rankOrder", "compressed , the point are plotted based on datapoint in (x,y) like a", "== categories[1]: if categories[1] != 0: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=13, color='black',", "locs, len(locs), labels, len(labels))) plt.yticks([]) plt.xlabel(xlabel, fontsize=14) ylabel = \"{:,}\".format(sites) + \" sites\"", "print(\"Creating the colobar\") mycolors = ['#ff2600', '#ffd54f', '#43a047', '#0096ff', '#9437ff', '#9e9e9e'] colors =", "rotation based on minimum value if min(categories) == categories[5]: if categories[5] != 0:", "0, 'N levels must be even.' 
# select colors from color list my_cmap", "row_delete_plus1 = np.append( np.append(row_delete[0] + 1, row_delete_plus1), row_delete[-1] - 1) # put the", "float(params['upper_threshold']) lower_lim = int(params['lower_threshold']) header = params['header'] start_col = int(params['start_col']) load_Data(tagpileup_cdt, out, upper_lim,", "on minimum value if min(categories) == categories[2]: if categories[2] != 0: plt.text(25, srgpos,", "heatmap plot_heatmap(data0, c, out_file, upper_lim, lower_lim, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, sites)", "2 + topcof) unbpos = int(unbheight / 2 + topunb) # positions for", "heatmap for rec in data: tmp = [(x.strip()) for x in rec.split('\\t')] sites", "'#ffd54f', '#43a047', '#0096ff', '#9437ff', '#9e9e9e'] colors = [] # deciding colors based on", "= (height of unknown box)/(feature box height) totalsites = sum(categories) rpheight = categories[0]", "stmheight topcyc = topsrg + srgheight topcof = topcyc + cycheight topunb =", "cycpos = int(cycheight / 2 + topcyc) cofpos = int(cofheight / 2 +", "based on datapoint in (x,y) like a graph # Assigning the rotation based", "plt.savefig(out_file_name, bbox_inches='tight', facecolor=None, dpi=300) def load_Data(input_file, out_file, upper_lim, lower_lim, color, header, start_col, row_num,", "data0.shape[1]: data0 = rebin(data0, (row_num, col_num)) if generateColorbar == '1': # i have", "mcolors.LinearSegmentedColormap.from_list( name='white_sth', colors=c, N=len(levs) - 1,) # initialize figure plt.figure(figsize=(col_num / 96, row_num", "# checking if we need to plot the color bar if generateColorbar ==", "file and contrast Threshold file. \"\"\" def rebin(a, new_shape): M, N = a.shape", "0: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else: plt.text(25, rppos, categories[0],", "the initial ticks locs, labels = plt.xticks() # remove the first location to", "= ( a[row_delete, :] + a[row_delete_plus1, :]) / 2 a = np.delete(a, row_delete,", "by setting up a ratio: (this site)/(total sites) = (height of unknown box)/(feature", "[\"white\", color] # generate heatmap plot_heatmap(data0, c, out_file, upper_lim, lower_lim, row_num, col_num, ticks,", "the actual position of the numbers by centering the numbers in the colored", ": {}, cof : {}, unb : {}, trna : {}\".format( rppos, stmpos,", "int(s[2])) c = [\"white\", color] # generate heatmap plot_heatmap(data0, c, out_file, upper_lim, lower_lim,", "increase the width of the plot borders plt.setp(ax.spines.values(), linewidth=2) plt.savefig(out_file_name, bbox_inches='tight', pad_inches=0.05, facecolor=None,", "mycolors = ['#ff2600', '#ffd54f', '#43a047', '#0096ff', '#9437ff', '#9e9e9e'] colors = [] # deciding", "of ticks. 
mid = int(len(labels) // 2) labels[0] = \"-\" + ticks labels[mid]", "color='black') ax.tick_params(which='minor', length=6, width=2, color='black') # Draw a horizontal line through the midpoint.", "/ 2 a = np.delete(a, col_delete, axis=1) # random remove columns M, N", "lower_lim = int(params['lower_threshold']) header = params['header'] start_col = int(params['start_col']) load_Data(tagpileup_cdt, out, upper_lim, lower_lim,", "colobar\") mycolors = ['#ff2600', '#ffd54f', '#43a047', '#0096ff', '#9437ff', '#9e9e9e'] colors = [] #", "59999 and rankOrder >= 50000: dataGenes.append([5] * len(tmp[start_col:])) catergoryCount[4] = catergoryCount[4] + 1", "tick locations locaters = col_num // 4 ax.xaxis.set_major_locator(MultipleLocator(locaters)) # get the initial ticks", "/ 2 + topstm) srgpos = int(srgheight / 2 + topsrg) cycpos =", "random remove columns M, N = a.shape # compare the heatmap matrix a_compress", "# put the info of deleted rows into the next rows by mean", "topcyc = topsrg + srgheight topcof = topcyc + cycheight topunb = topcof", "/ 96, row_num / 96), dpi=96) # remove margins , # this helps", "def plot_colorbar(data01, c, out_file_name, row_num, col_num, categories): # initialize color levs = range(100)", "verticalalignment='center', fontsize=13, color='black', weight='bold') else: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=16, color='black', rotation=90,", "horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white',", "is helping to make sure there are odd number of ticks. mid =", "\" sites\" plt.ylabel(ylabel, fontsize=14) plt.title(heatmapTitle, fontsize=18) # to increase the width of the", "= mcolors.LinearSegmentedColormap.from_list( name='white_sth', colors=c, N=len(levs) - 1,) # initialize figure fig = plt.figure(figsize=(col_num", "1, col_delete_plus1), col_delete[-1] - 1) # put the info of deleted cols into", "even though the # image is compressed , the point are plotted based", "case_sensitive=False), prompt=True, default='0', help=\"Generate the gene colorbar (0: No, 1: Yes)\") @click.option('-o', '--out',", "next cols by mean a[:, col_delete_plus1] = ( a[:, col_delete] + a[:, col_delete_plus1])", "{}, stm: {}, ess : {}, cof : {}, unb : {}, trna", "# to store colorbar data # to store counts for RP, SAGA and", "the width of the plot borders plt.setp(ax.spines.values(), linewidth=2) # calculate how long the", "!= 0: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=13, color='black', weight='bold') else: plt.text(25, stmpos,", "[(x.strip()) for x in rec.split('\\t')] sites = sites + 1 if generateColorbar ==", "== categories[5]: if categories[5] != 0: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white',", "header == 'T': data.readline() data0 = [] dataGenes = [] # to store", "levels must be even.' # select colors from color list my_cmap = mcolors.LinearSegmentedColormap.from_list(", "len(levs) % 2 == 0, 'N levels must be even.' # select colors", "containing genecategories. \\b Generates Colorbar for the gene categories. 
\"\"\" click.echo('\\n' + '.'", "that is even though the # image is compressed , the point are", "actual position of the numbers by centering the numbers in the colored boxes", "0: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, srgpos, categories[2],", "params = {} openfile = open(threshold_file, 'r').readlines() for line in openfile: line =", "< data0.shape[1]: data0 = rebin(data0, (row_num, col_num)) if generateColorbar == '1': # i", "x=locs[mid], linewidth=2) print(\"\\n DEBUG INFO \\n locs : {} \\n length_locs : {}", "Feature heatmap containing genecategories. \\b Generates Colorbar for the gene categories. \"\"\" click.echo('\\n'", "heatmap matrix a_compress = a.reshape((m, int(M / m), n, int(N / n))).mean(3).mean(1) return", "stm: {}, ess : {}, cof : {}, unb : {}, trna :", "col_num < data0.shape[1]: data0 = rebin(data0, (row_num, col_num)) if generateColorbar == '1': #", "= categories[4] / totalsites * data01.shape[0] unbheight = categories[5] / totalsites * data01.shape[0]", "plotted based on datapoint in (x,y) like a graph # Assigning the rotation", "row_delete_num = M % m col_delete_num = N % n np.random.seed(seed=0) if row_delete_num", "for the gene categories. \"\"\" click.echo('\\n' + '.' * 50) params = {}", "mid = int(len(labels) // 2) labels[0] = \"-\" + ticks labels[mid] = \"0\"", "len(tmp[start_col:])) catergoryCount[5] = catergoryCount[5] + 1 data0.append(tmp[start_col:]) data0 = np.array(data0, dtype=float) print(\"# sites", "import math import pprint import click import matplotlib import matplotlib.colors as mcolors import", "get deleted rows plus position (top +1; end -1) row_delete_plus1 = np.append( np.append(row_delete[0]", "catergoryCount[4] = catergoryCount[4] + 1 elif rankOrder <= 219999 and rankOrder >= 210000:", "num=row_delete_num, dtype=int) # sort the random selected deleted row ids row_delete = np.sort(row_delete)", "axis=0) # random remove rows if col_delete_num > 0: # select deleted cols", "if row_delete_num > 0: # select deleted rows with equal intervals row_delete =", "on minimum value if min(categories) == categories[3]: if categories[3] != 0: plt.text(25, cycpos,", "rankOrder >= 210000: dataGenes.append([6] * len(tmp[start_col:])) catergoryCount[5] = catergoryCount[5] + 1 data0.append(tmp[start_col:]) data0", "the width of the plot borders plt.setp(ax.spines.values(), linewidth=2) plt.savefig(out_file_name, bbox_inches='tight', pad_inches=0.05, facecolor=None, dpi=ddpi)", "cycheight topunb = topcof + cofheight # find the actual position of the", "= catergoryCount[3] + 1 elif rankOrder <= 59999 and rankOrder >= 50000: dataGenes.append([5]", ": {}\".format(catergoryCount)) if row_num == -999: row_num = data0.shape[0] if col_num == -999:", "in params.keys(): params[temp[0]] = temp[1] print(\" \\n Parameters for the heatmap\") pprint.pprint(params) upper_lim", "horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning the rotation based on minimum", "np.append(row_delete[0] + 1, row_delete_plus1), row_delete[-1] - 1) # put the info of deleted", "if categories[0] != 0: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else:", "Color') @click.option('-t', '--title', metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Plot Title') @click.option('-xl', '--xlabel',", ", # this helps 
to maintain the ticks to be odd ax =", "srgheight topcof = topcyc + cycheight topunb = topcof + cofheight # find", "plot_colorbar(dataGenes, colors, \"colorbar.png\", 900, 35, catergoryCount) CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.command(options_metavar='<options>', context_settings=CONTEXT_SETTINGS) @click.argument('tagpileup-cdt',", "sites): # initialize color levs = range(100) assert len(levs) % 2 == 0,", "(this site)/(total sites) = (height of unknown box)/(feature box height) totalsites = sum(categories)", "length=10, width=2, color='black') ax.tick_params(which='minor', length=6, width=2, color='black') # Draw a horizontal line through", "type=click.Choice(['0', '1'], case_sensitive=False), prompt=True, default='0', help=\"Generate the gene colorbar (0: No, 1: Yes)\")", "plt.ylabel(ylabel, fontsize=14) plt.title(heatmapTitle, fontsize=18) # to increase the width of the plot borders", "generate heatmap plot_heatmap(data0, c, out_file, upper_lim, lower_lim, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle,", "if min(categories) == categories[5]: if categories[5] != 0: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center',", "if the difference between col and row is 100 (cols - rows =", "[] dataGenes = [] # to store colorbar data # to store counts", "{} \\n labels : {} \\n length_labels:{}\\n\".format( locs, len(locs), labels, len(labels))) plt.yticks([]) plt.xlabel(xlabel,", "linewidth=2) plt.savefig(out_file_name, bbox_inches='tight', pad_inches=0.05, facecolor=None, dpi=ddpi) def plot_colorbar(data01, c, out_file_name, row_num, col_num, categories):", "here # convert rgb to hex (since matplotlib doesn't support 0-255 format for", "Create a heatmap from tagPileUp tabular file and contrast Threshold file. 
\"\"\" def", "plt.setp(ax.spines.values(), linewidth=2) plt.savefig(out_file_name, bbox_inches='tight', pad_inches=0.05, facecolor=None, dpi=ddpi) def plot_colorbar(data01, c, out_file_name, row_num, col_num,", "row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, sites): # initialize color levs = range(100)", "= ['#ff2600', '#ffd54f', '#43a047', '#0096ff', '#9437ff', '#9e9e9e'] colors = [] # deciding colors", "in (x,y) like a graph # Assigning the rotation based on minimum value", "cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning the rotation based", "+ topsrg) cycpos = int(cycheight / 2 + topcyc) cofpos = int(cofheight /", "topsrg + srgheight topcof = topcyc + cycheight topunb = topcof + cofheight", "vmax=upper_lim, aspect='auto') # plot heatmap # little trick to create custom tick labels.", "srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning the rotation based", "col_num)) if generateColorbar == '1': # i have hard-coded the width for colorbar(50)", "fontsize=16, color='black', rotation=90, weight='bold') # Assigning the rotation based on minimum value if", "based on minimum value if min(categories) == categories[3]: if categories[3] != 0: plt.text(25,", "+ 1 elif rankOrder <= 29999 and rankOrder >= 20000: dataGenes.append([2] * len(tmp[start_col:]))", "* len(tmp[start_col:])) catergoryCount[3] = catergoryCount[3] + 1 elif rankOrder <= 59999 and rankOrder", "rows plus position # get deleted rows plus position (top +1; end -1)", "np.delete(locs, 0) labels.pop() # find the mid value and set it to zero,", "/ totalsites * data01.shape[0] unbheight = categories[5] / totalsites * data01.shape[0] # print", "# removing all the borders and frame for item in [fig, ax]: item.patch.set_visible(False)", "to be odd ax = plt.axes([0, 0, 1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', aspect='auto')", "get deleted cols plus position (top +1; end -1) col_delete_plus1 = np.append( np.append(col_delete[0]", "odd number of ticks. mid = int(len(labels) // 2) labels[0] = \"-\" +", "value') @click.option('-d', '--dpi', metavar=\"<int>\", type=int, default=100, prompt=True, show_default='100', help='Plot pixel density') @click.option('-cb', '--colorbar',", "np.random.seed(seed=0) if row_delete_num > 0: # select deleted rows with equal intervals row_delete", "minimum value if min(categories) == categories[5]: if categories[5] != 0: plt.text(25, unbpos, categories[5],", "based on minimum value if min(categories) == categories[1]: if categories[1] != 0: plt.text(25,", "rankOrder >= 20000: dataGenes.append([2] * len(tmp[start_col:])) catergoryCount[1] = catergoryCount[1] + 1 elif rankOrder", "rotation=90, weight='bold') # Assigning the rotation based on minimum value if min(categories) ==", "deleted rows into the next rows by mean a[row_delete_plus1, :] = ( a[row_delete,", "rotation based on minimum value if min(categories) == categories[1]: if categories[1] != 0:", "are odd number of ticks. 
mid = int(len(labels) // 2) labels[0] = \"-\"", "col_num == -999: col_num = data0.shape[1] # rebin data0 (compresses the data using", "out_file_name, upper_lim, lower_lim, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, sites): # initialize color", "/ M), axis=0) M, N = a.shape m, n = new_shape row_delete_num =", "type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,)) @click.option('-ph', '--height', metavar=\"<int>\", type=int, default=700, prompt=True, show_default='True', help='Plot Height')", "int(rpheight / 2) stmpos = int(stmheight / 2 + topstm) srgpos = int(srgheight", "96, row_num / 96), dpi=96) # remove margins , # this helps to", "borders and frame for item in [fig, ax]: item.patch.set_visible(False) # saving the file", "rebin(dataGenes, (data0.shape[0], 50)) # set color here # convert rgb to hex (since", "based on minimum value if min(categories) == categories[4]: if categories[4] != 0: plt.text(25,", "data01.shape[0] stmheight = categories[1] / totalsites * data01.shape[0] srgheight = categories[2] / totalsites", "categories[3], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=16,", "row_num < data0.shape[0] and col_num < data0.shape[1]: data0 = rebin(data0, (row_num, col_num)) if", "item.patch.set_visible(False) # saving the file plt.savefig(out_file_name, bbox_inches='tight', facecolor=None, dpi=300) def load_Data(input_file, out_file, upper_lim,", "/ 96), dpi=96) # remove margins , # this helps to maintain the", "deciding colors based on the catergory values. for i in range(0, len(catergoryCount)): if", "m >= M: # repeat rows in data matrix a = np.repeat(a, math.ceil(float(m)", ", the point are plotted based on datapoint in (x,y) like a graph", "default='Heatmap.png', prompt=True, show_default='Heatmap.png', help='output filename') def cli(tagpileup_cdt, threshold_file, color, height, width, title, xlabel,", "point are plotted based on datapoint in (x,y) like a graph # Assigning", "prompt=True, show_default='0,0,0', help='Plot Color') @click.option('-t', '--title', metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Plot", "= N % n np.random.seed(seed=0) if row_delete_num > 0: # select deleted rows", "< data0.shape[1]: data0 = rebin(data0, (data0.shape[0], col_num)) if generateColorbar == '1': dataGenes =", "click import matplotlib import matplotlib.colors as mcolors import matplotlib.pyplot as plt from matplotlib.ticker", "col_delete_plus1 = col_delete[1:-1] + \\ 1 # get deleted cols plus position #", "= temp[1] print(\" \\n Parameters for the heatmap\") pprint.pprint(params) upper_lim = float(params['upper_threshold']) lower_lim", "', help='Label under X-axis') @click.option('-k', '--ticks', metavar=\"<string>\", default='2', prompt=True, show_default='2', help='X-axis tick mark", "to create custom tick labels. 
# [ only works if the difference between", "col_delete] + a[:, col_delete_plus1]) / 2 a = np.delete(a, col_delete, axis=1) # random", "labels : {} \\n length_labels:{}\\n\".format( locs, len(locs), labels, len(labels))) plt.yticks([]) plt.xlabel(xlabel, fontsize=14) ylabel", "top should be the ending position of the previous box topstm = rpheight", "= np.delete(a, col_delete, axis=1) # random remove columns M, N = a.shape #", "item in [fig, ax]: item.patch.set_visible(False) # saving the file plt.savefig(out_file_name, bbox_inches='tight', facecolor=None, dpi=300)", "colors=c, N=len(levs) - 1,) # initialize figure plt.figure(figsize=(col_num / 96, row_num / 96),", "applying an arbitrary offset rppos = int(rpheight / 2) stmpos = int(stmheight /", "- rows = 100), fails for (300,150) & (300,100) etc] # calculate the", "categories[0] != 0: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') else: plt.text(25,", "= np.sort(col_delete) col_delete_plus1 = col_delete[1:-1] + \\ 1 # get deleted cols plus", "difference between col and row is 100 (cols - rows = 100), fails", "metavar=\"<string>\", default=' ', prompt=True, show_default=' ', help='Plot Title') @click.option('-xl', '--xlabel', metavar=\"<string>\", default=' ',", "to hex (since matplotlib doesn't support 0-255 format for colors) s = color.split(\",\")", "make sure there are odd number of ticks. mid = int(len(labels) // 2)", "row_delete = np.linspace(0, M - 1, num=row_delete_num, dtype=int) # sort the random selected", "color list my_cmap = mcolors.LinearSegmentedColormap.from_list( name='white_sth', colors=c, N=len(levs) - 1,) # initialize figure", "default=100, prompt=True, show_default='100', help='Plot pixel density') @click.option('-cb', '--colorbar', type=click.Choice(['0', '1'], case_sensitive=False), prompt=True, default='0',", "box should be for each by setting up a ratio: (this site)/(total sites)", "rows if col_delete_num > 0: # select deleted cols with equal intervals col_delete", "Draw a horizontal line through the midpoint. plt.axvline(color='black', linestyle='--', x=locs[mid], linewidth=2) print(\"\\n DEBUG", "# deciding colors based on the catergory values. 
for i in range(0, len(catergoryCount)):", "a[row_delete, :] + a[row_delete_plus1, :]) / 2 a = np.delete(a, row_delete, axis=0) #", "= np.append( np.append(row_delete[0] + 1, row_delete_plus1), row_delete[-1] - 1) # put the info", "min(categories) == categories[1]: if categories[1] != 0: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=13,", "# put the info of deleted cols into the next cols by mean", "categories[4]: if categories[4] != 0: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold')", "values print(\"rp: {}, stm: {}, ess : {}, cof : {}, unb :", "categories[1] / totalsites * data01.shape[0] srgheight = categories[2] / totalsites * data01.shape[0] cycheight", "temp[0] not in params.keys(): params[temp[0]] = temp[1] print(\" \\n Parameters for the heatmap\")", "colorbar dataGenes = np.array(dataGenes, dtype=float) print(\"catergoryCount : {}\".format(catergoryCount)) if row_num == -999: row_num", "catergoryCount[0] = catergoryCount[0] + 1 elif rankOrder <= 29999 and rankOrder >= 20000:", "if categories[1] != 0: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=13, color='black', weight='bold') else:", "elif col_num < data0.shape[1]: data0 = rebin(data0, (data0.shape[0], col_num)) if generateColorbar == '1':", "= {} openfile = open(threshold_file, 'r').readlines() for line in openfile: line = line.strip()", "pprint import click import matplotlib import matplotlib.colors as mcolors import matplotlib.pyplot as plt", "len(labels))) plt.yticks([]) plt.xlabel(xlabel, fontsize=14) ylabel = \"{:,}\".format(sites) + \" sites\" plt.ylabel(ylabel, fontsize=14) plt.title(heatmapTitle,", "1,) # initialize figure fig = plt.figure(figsize=(col_num / 96, row_num / 96), dpi=300)", "len(tmp[start_col:])) catergoryCount[0] = catergoryCount[0] + 1 elif rankOrder <= 29999 and rankOrder >=", "# of sites in the heatmap for rec in data: tmp = [(x.strip())", "sites\" plt.ylabel(ylabel, fontsize=14) plt.title(heatmapTitle, fontsize=18) # to increase the width of the plot", "proper heatmap tick position. locs = np.delete(locs, 0) labels.pop() # find the mid", "* 50) params = {} openfile = open(threshold_file, 'r').readlines() for line in openfile:", "color='white', weight='bold') # Assigning the rotation based on minimum value if min(categories) ==", "set it to zero, since ax is helping to make sure there are", "# Assigning the rotation based on minimum value if min(categories) == categories[1]: if", "on the catergory values. for i in range(0, len(catergoryCount)): if catergoryCount[i] != 0:", "find the actual position of the numbers by centering the numbers in the", "+ 1 if generateColorbar == '1': rankOrder = int(rec.split(\"\\t\")[0]) if rankOrder <= 19999:", "sure there are odd number of ticks. 
mid = int(len(labels) // 2) labels[0]", "cmap=my_cmap, interpolation='nearest', vmin=lower_lim, vmax=upper_lim, aspect='auto') # plot heatmap # little trick to create", "cycheight = categories[3] / totalsites * data01.shape[0] cofheight = categories[4] / totalsites *", "\"colorbar.png\", 900, 35, catergoryCount) CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.command(options_metavar='<options>', context_settings=CONTEXT_SETTINGS) @click.argument('tagpileup-cdt', type=click.Path(exists=True, resolve_path=True,", "@click.option('-c', '--color', metavar=\"<string>\", default='0,0,0', prompt=True, show_default='0,0,0', help='Plot Color') @click.option('-t', '--title', metavar=\"<string>\", default=' ',", "col ids col_delete = np.sort(col_delete) col_delete_plus1 = col_delete[1:-1] + \\ 1 # get", "<= 59999 and rankOrder >= 50000: dataGenes.append([5] * len(tmp[start_col:])) catergoryCount[4] = catergoryCount[4] +", "int(M / m), n, int(N / n))).mean(3).mean(1) return np.array(a_compress) def plot_heatmap(data01, c, out_file_name,", "= sites + 1 if generateColorbar == '1': rankOrder = int(rec.split(\"\\t\")[0]) if rankOrder", "rankOrder <= 219999 and rankOrder >= 210000: dataGenes.append([6] * len(tmp[start_col:])) catergoryCount[5] = catergoryCount[5]", "# Draw a horizontal line through the midpoint. plt.axvline(color='black', linestyle='--', x=locs[mid], linewidth=2) print(\"\\n", "weight='bold') # removing all the borders and frame for item in [fig, ax]:", "horizontalalignment='center', verticalalignment='center', fontsize=13, color='black', weight='bold') else: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=16, color='black',", "{}, ess : {}, cof : {}, unb : {}, trna : {}\".format(", "#!/usr/bin/python from __future__ import division import math import pprint import click import matplotlib", "colors, \"colorbar.png\", 900, 35, catergoryCount) CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.command(options_metavar='<options>', context_settings=CONTEXT_SETTINGS) @click.argument('tagpileup-cdt', type=click.Path(exists=True,", "aspect='auto') # plot heatmap plt.xticks([]) plt.yticks([]) # to increase the width of the", "= a.shape m, n = new_shape row_delete_num = M % m col_delete_num =", "dataGenes.append([5] * len(tmp[start_col:])) catergoryCount[4] = catergoryCount[4] + 1 elif rankOrder <= 219999 and", "# little trick to create custom tick labels. # [ only works if", "'1': dataGenes = rebin(dataGenes, (row_num, 50)) elif col_num < data0.shape[1]: data0 = rebin(data0,", "from tagPileUp tabular file and contrast Threshold file. \"\"\" def rebin(a, new_shape): M,", "if row_num < data0.shape[0] and col_num < data0.shape[1]: data0 = rebin(data0, (row_num, col_num))", "a_compress = a.reshape((m, int(M / m), n, int(N / n))).mean(3).mean(1) return np.array(a_compress) def", "calculate the \"top\" location of each box, each top should be the ending", "rebin(data0, (row_num, data0.shape[1])) if generateColorbar == '1': dataGenes = rebin(dataGenes, (row_num, 50)) elif", "# sort the random selected deleted col ids col_delete = np.sort(col_delete) col_delete_plus1 =", "number of ticks. mid = int(len(labels) // 2) labels[0] = \"-\" + ticks", "generateColorbar == '1': # i have hard-coded the width for colorbar(50) dataGenes =", "1, row_delete_plus1), row_delete[-1] - 1) # put the info of deleted rows into", "+ a[:, col_delete_plus1]) / 2 a = np.delete(a, col_delete, axis=1) # random remove", "values. 
for i in range(0, len(catergoryCount)): if catergoryCount[i] != 0: colors.append(mycolors[i]) plot_colorbar(dataGenes, colors,", "-1) col_delete_plus1 = np.append( np.append(col_delete[0] + 1, col_delete_plus1), col_delete[-1] - 1) # put", "'1': dataGenes = rebin(dataGenes, (data0.shape[0], 50)) # set color here # convert rgb", "# print \"cofheight: {}, unbheight : {}\".format(unbheight, cofheight) # now calculate the \"top\"", "min(categories) == categories[2]: if categories[2] != 0: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=13,", "np.array(a_compress) def plot_heatmap(data01, c, out_file_name, upper_lim, lower_lim, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle,", "min(categories) == categories[5]: if categories[5] != 0: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10,", "help='output filename') def cli(tagpileup_cdt, threshold_file, color, height, width, title, xlabel, ticks, dpi, colorbar,", "Generates Colorbar for the gene categories. \"\"\" click.echo('\\n' + '.' * 50) params", "box topstm = rpheight topsrg = topstm + stmheight topcyc = topsrg +", "= rebin(dataGenes, (data0.shape[0], 50)) # set color here # convert rgb to hex", "upper_lim, lower_lim, color, header, start_col, height, width, ticks, dpi, xlabel, title, colorbar) click.echo('\\n'", "dataGenes.append([3] * len(tmp[start_col:])) catergoryCount[2] = catergoryCount[2] + 1 elif rankOrder <= 49999 and", "matplotlib.use('Agg') \"\"\" Program to Create a heatmap from tagPileUp tabular file and contrast", "cycpos, cofpos, unbpos)) # The default transform specifies that text is in data", "-999: row_num = data0.shape[0] if col_num == -999: col_num = data0.shape[1] # rebin", "ylabel = \"{:,}\".format(sites) + \" sites\" plt.ylabel(ylabel, fontsize=14) plt.title(heatmapTitle, fontsize=18) # to increase", "ending position of the previous box topstm = rpheight topsrg = topstm +", "else: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning the", "height) totalsites = sum(categories) rpheight = categories[0] / totalsites * data01.shape[0] stmheight =", "data01.shape[0] srgheight = categories[2] / totalsites * data01.shape[0] cycheight = categories[3] / totalsites", "help=\"Generate the gene colorbar (0: No, 1: Yes)\") @click.option('-o', '--out', metavar=\"<string>\", default='Heatmap.png', prompt=True,", "mcolors import matplotlib.pyplot as plt from matplotlib.ticker import (AutoMinorLocator, MultipleLocator) import numpy as", "plot_heatmap(data0, c, out_file, upper_lim, lower_lim, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, sites) #", "deleted cols with equal intervals col_delete = np.linspace(0, N - 1, num=col_delete_num, dtype=int)", "of the plot borders plt.setp(ax.spines.values(), linewidth=2) # calculate how long the color box", "plt.savefig(out_file_name, bbox_inches='tight', pad_inches=0.05, facecolor=None, dpi=ddpi) def plot_colorbar(data01, c, out_file_name, row_num, col_num, categories): #", "!= 0: plt.text(25, cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cycpos,", "= [(x.strip()) for x in rec.split('\\t')] sites = sites + 1 if generateColorbar", "categories[0]: if categories[0] != 0: plt.text(25, rppos, categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, 
color='white', weight='bold')", "catergoryCount[4] + 1 elif rankOrder <= 219999 and rankOrder >= 210000: dataGenes.append([6] *", "heatmap tick position. locs = np.delete(locs, 0) labels.pop() # find the mid value", "weight='bold') # Assigning the rotation based on minimum value if min(categories) == categories[1]:", "is in data co-ordinates, that is even though the # image is compressed", "if generateColorbar == '1': rankOrder = int(rec.split(\"\\t\")[0]) if rankOrder <= 19999: dataGenes.append([1] *", "remove rows if col_delete_num > 0: # select deleted cols with equal intervals", "doesn't support 0-255 format for colors) s = color.split(\",\") color = '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]),", "heatmap\") pprint.pprint(params) upper_lim = float(params['upper_threshold']) lower_lim = int(params['lower_threshold']) header = params['header'] start_col =", "data01.shape[0] unbheight = categories[5] / totalsites * data01.shape[0] # print \"cofheight: {}, unbheight", "heatmapTitle, sites): # initialize color levs = range(100) assert len(levs) % 2 ==", "The default transform specifies that text is in data co-ordinates, that is even", "= open(input_file, 'r') if header == 'T': data.readline() data0 = [] dataGenes =", "# random remove columns M, N = a.shape # compare the heatmap matrix", "linewidth=2) print(\"\\n DEBUG INFO \\n locs : {} \\n length_locs : {} \\n", "to zero, since ax is helping to make sure there are odd number", "in the heatmap\", sites) # creating the np-array to plot the colorbar dataGenes", "a graph # Assigning the rotation based on minimum value if min(categories) ==", "= categories[0] / totalsites * data01.shape[0] stmheight = categories[1] / totalsites * data01.shape[0]", "{}\".format(catergoryCount)) if row_num == -999: row_num = data0.shape[0] if col_num == -999: col_num", "initialize figure plt.figure(figsize=(col_num / 96, row_num / 96), dpi=96) # remove margins ,", "topunb) # positions for the values print(\"rp: {}, stm: {}, ess : {},", "plt.text(25, cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') # Assigning the rotation", "data0 = rebin(data0, (row_num, data0.shape[1])) if generateColorbar == '1': dataGenes = rebin(dataGenes, (row_num,", "value if min(categories) == categories[0]: if categories[0] != 0: plt.text(25, rppos, categories[0], horizontalalignment='center',", "'--ticks', metavar=\"<string>\", default='2', prompt=True, show_default='2', help='X-axis tick mark value') @click.option('-d', '--dpi', metavar=\"<int>\", type=int,", "verticalalignment='center', fontsize=10, color='white', weight='bold') # Assigning the rotation based on minimum value if", "m col_delete_num = N % n np.random.seed(seed=0) if row_delete_num > 0: # select", "# creating the np-array to plot the colorbar dataGenes = np.array(dataGenes, dtype=float) print(\"catergoryCount", "colors based on the catergory values. for i in range(0, len(catergoryCount)): if catergoryCount[i]", "min(categories) == categories[3]: if categories[3] != 0: plt.text(25, cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=13,", "weight='bold') # Assigning the rotation based on minimum value if min(categories) == categories[2]:", "M % m col_delete_num = N % n np.random.seed(seed=0) if row_delete_num > 0:", "assert len(levs) % 2 == 0, 'N levels must be even.' 
# select", "prompt=True, show_default='2', help='X-axis tick mark value') @click.option('-d', '--dpi', metavar=\"<int>\", type=int, default=100, prompt=True, show_default='100',", "Threshold file. \"\"\" def rebin(a, new_shape): M, N = a.shape m, n =", "= categories[1] / totalsites * data01.shape[0] srgheight = categories[2] / totalsites * data01.shape[0]", "= [] # to store colorbar data # to store counts for RP,", "and rankOrder >= 40000: dataGenes.append([4] * len(tmp[start_col:])) catergoryCount[3] = catergoryCount[3] + 1 elif", "locations locaters = col_num // 4 ax.xaxis.set_major_locator(MultipleLocator(locaters)) # get the initial ticks locs,", "# generate heatmap plot_heatmap(data0, c, out_file, upper_lim, lower_lim, row_num, col_num, ticks, ddpi, xlabel,", "plt.axvline(color='black', linestyle='--', x=locs[mid], linewidth=2) print(\"\\n DEBUG INFO \\n locs : {} \\n length_locs", "dataGenes = np.array(dataGenes, dtype=float) print(\"catergoryCount : {}\".format(catergoryCount)) if row_num == -999: row_num =", "color='white', weight='bold') else: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') #", "fontsize=14) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) ax.tick_params(which='major', length=10, width=2, color='black') ax.tick_params(which='minor', length=6, width=2, color='black') # Draw a", "= new_shape row_delete_num = M % m col_delete_num = N % n np.random.seed(seed=0)", "be even.' # select colors from color list my_cmap = mcolors.LinearSegmentedColormap.from_list( name='white_sth', colors=c,", "1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', vmin=lower_lim, vmax=upper_lim, aspect='auto') # plot heatmap # little", "col and row is 100 (cols - rows = 100), fails for (300,150)", "rotation based on minimum value if min(categories) == categories[4]: if categories[4] != 0:", "linewidth=2) # calculate how long the color box should be for each by", "-999: col_num = data0.shape[1] # rebin data0 (compresses the data using treeView compression", "{}, unbheight : {}\".format(unbheight, cofheight) # now calculate the \"top\" location of each", "0, 0, 0, 0, 0] sites = 0 # to calculate the #", "== 0, 'N levels must be even.' 
# select colors from color list", "1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', vmin=lower_lim, vmax=upper_lim, aspect='auto') # plot heatmap # little trick", "+ \\ 1 # get deleted rows plus position # get deleted rows", "width=2, color='black') ax.tick_params(which='minor', length=6, width=2, color='black') # Draw a horizontal line through the", ": {}, unb : {}, trna : {}\".format( rppos, stmpos, srgpos, cycpos, cofpos,", "bbox_inches='tight', facecolor=None, dpi=300) def load_Data(input_file, out_file, upper_lim, lower_lim, color, header, start_col, row_num, col_num,", "color='white', weight='bold') else: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=16, color='white', rotation=90, weight='bold') #", "ticks, ddpi, xlabel, heatmapTitle, sites) # checking if we need to plot the", "rebin(a, new_shape): M, N = a.shape m, n = new_shape if m >=", "totalsites * data01.shape[0] cycheight = categories[3] / totalsites * data01.shape[0] cofheight = categories[4]", "topstm + stmheight topcyc = topsrg + srgheight topcof = topcyc + cycheight", "+ 1, row_delete_plus1), row_delete[-1] - 1) # put the info of deleted rows", "catergoryCount[1] + 1 elif rankOrder <= 39999 and rankOrder >= 30000: dataGenes.append([3] *", "= ticks # display the new ticks plt.xticks(locs, labels, fontsize=14) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) ax.tick_params(which='major', length=10,", "under X-axis') @click.option('-k', '--ticks', metavar=\"<string>\", default='2', prompt=True, show_default='2', help='X-axis tick mark value') @click.option('-d',", "color here # convert rgb to hex (since matplotlib doesn't support 0-255 format", "for the heatmap\") pprint.pprint(params) upper_lim = float(params['upper_threshold']) lower_lim = int(params['lower_threshold']) header = params['header']", "width of the plot borders plt.setp(ax.spines.values(), linewidth=2) # calculate how long the color", "categories[1] != 0: plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=13, color='black', weight='bold') else: plt.text(25,", "rankOrder <= 59999 and rankOrder >= 50000: dataGenes.append([5] * len(tmp[start_col:])) catergoryCount[4] = catergoryCount[4]", "sites + 1 if generateColorbar == '1': rankOrder = int(rec.split(\"\\t\")[0]) if rankOrder <=", "col_delete_plus1), col_delete[-1] - 1) # put the info of deleted cols into the", "= line.split(\":\") if temp[0] not in params.keys(): params[temp[0]] = temp[1] print(\" \\n Parameters", "of the previous box topstm = rpheight topsrg = topstm + stmheight topcyc", "!= 0: plt.text(25, srgpos, categories[2], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, srgpos,", "plot the color bar if generateColorbar == '1': print(\"Creating the colobar\") mycolors =", "dataGenes.append([2] * len(tmp[start_col:])) catergoryCount[1] = catergoryCount[1] + 1 elif rankOrder <= 39999 and", "facecolor=None, dpi=300) def load_Data(input_file, out_file, upper_lim, lower_lim, color, header, start_col, row_num, col_num, ticks,", "# initialize color levs = range(100) assert len(levs) % 2 == 0, 'N", "N % n np.random.seed(seed=0) if row_delete_num > 0: # select deleted rows with", "position # get deleted cols plus position (top +1; end -1) col_delete_plus1 =", "dataGenes.append([6] * len(tmp[start_col:])) catergoryCount[5] = catergoryCount[5] + 1 data0.append(tmp[start_col:]) data0 = np.array(data0, dtype=float)", "it to 
zero, since ax is helping to make sure there are odd", "catergoryCount[2] + 1 elif rankOrder <= 49999 and rankOrder >= 40000: dataGenes.append([4] *", "mid value and set it to zero, since ax is helping to make", "@click.option('-o', '--out', metavar=\"<string>\", default='Heatmap.png', prompt=True, show_default='Heatmap.png', help='output filename') def cli(tagpileup_cdt, threshold_file, color, height,", "+1; end -1) row_delete_plus1 = np.append( np.append(row_delete[0] + 1, row_delete_plus1), row_delete[-1] - 1)", "+ topstm) srgpos = int(srgheight / 2 + topsrg) cycpos = int(cycheight /", "import click import matplotlib import matplotlib.colors as mcolors import matplotlib.pyplot as plt from", "from __future__ import division import math import pprint import click import matplotlib import", "sites) # creating the np-array to plot the colorbar dataGenes = np.array(dataGenes, dtype=float)", "= col_num // 4 ax.xaxis.set_major_locator(MultipleLocator(locaters)) # get the initial ticks locs, labels =", "plt.text(25, stmpos, categories[1], horizontalalignment='center', verticalalignment='center', fontsize=16, color='black', rotation=90, weight='bold') # Assigning the rotation", "<reponame>CEGRcode/exo #!/usr/bin/python from __future__ import division import math import pprint import click import", "data0.shape[0] and col_num < data0.shape[1]: data0 = rebin(data0, (row_num, col_num)) if generateColorbar ==", "rankOrder <= 49999 and rankOrder >= 40000: dataGenes.append([4] * len(tmp[start_col:])) catergoryCount[3] = catergoryCount[3]", "rotation based on minimum value if min(categories) == categories[0]: if categories[0] != 0:", "get the initial ticks locs, labels = plt.xticks() # remove the first location", "prompt=True, show_default='True', help='Plot Height') @click.option('-pw', '--width', metavar=\"<int>\", type=int, default=300, prompt=True, show_default='True', help='Plot Width')", "N=len(levs) - 1,) # initialize figure fig = plt.figure(figsize=(col_num / 96, row_num /", "rebin(data0, (row_num, col_num)) if generateColorbar == '1': # i have hard-coded the width", "1] = ticks # display the new ticks plt.xticks(locs, labels, fontsize=14) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) ax.tick_params(which='major',", "Program to Create a heatmap from tagPileUp tabular file and contrast Threshold file.", "= int(rpheight / 2) stmpos = int(stmheight / 2 + topstm) srgpos =", "verticalalignment='center', fontsize=10, color='white', rotation=90, weight='bold') # removing all the borders and frame for", "(compresses the data using treeView compression algorithm) if row_num < data0.shape[0] and col_num", "info of deleted rows into the next rows by mean a[row_delete_plus1, :] =", "for colorbar(50) dataGenes = rebin(dataGenes, (row_num, 50)) elif row_num < data0.shape[0]: data0 =", "elif rankOrder <= 39999 and rankOrder >= 30000: dataGenes.append([3] * len(tmp[start_col:])) catergoryCount[2] =", "\\n length_labels:{}\\n\".format( locs, len(locs), labels, len(labels))) plt.yticks([]) plt.xlabel(xlabel, fontsize=14) ylabel = \"{:,}\".format(sites) +", "categories[0], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', weight='bold') # Assigning the rotation based on minimum", "the plot borders plt.setp(ax.spines.values(), linewidth=2) # calculate how long the color box should", "# select deleted rows with equal intervals row_delete = np.linspace(0, M - 1,", "np.sort(col_delete) col_delete_plus1 = col_delete[1:-1] + \\ 1 # get deleted cols plus position", "info of deleted 
cols into the next cols by mean a[:, col_delete_plus1] =", "deleted rows plus position # get deleted rows plus position (top +1; end", "as plt from matplotlib.ticker import (AutoMinorLocator, MultipleLocator) import numpy as np matplotlib.use('Agg') \"\"\"", "mean a[row_delete_plus1, :] = ( a[row_delete, :] + a[row_delete_plus1, :]) / 2 a", "a.shape m, n = new_shape row_delete_num = M % m col_delete_num = N", "m), n, int(N / n))).mean(3).mean(1) return np.array(a_compress) def plot_heatmap(data01, c, out_file_name, upper_lim, lower_lim,", "the rotation based on minimum value if min(categories) == categories[3]: if categories[3] !=", "row_delete_plus1), row_delete[-1] - 1) # put the info of deleted rows into the", "0, 0, 0, 0] sites = 0 # to calculate the # of", "sites) = (height of unknown box)/(feature box height) totalsites = sum(categories) rpheight =", "not in params.keys(): params[temp[0]] = temp[1] print(\" \\n Parameters for the heatmap\") pprint.pprint(params)", "np matplotlib.use('Agg') \"\"\" Program to Create a heatmap from tagPileUp tabular file and", "0, 0, 0] sites = 0 # to calculate the # of sites", "color levs = range(100) assert len(levs) % 2 == 0, 'N levels must", "rows = 100), fails for (300,150) & (300,100) etc] # calculate the major", "col_num, ticks, ddpi, xlabel, heatmapTitle, sites) # checking if we need to plot", "\"0\" labels[len(labels) - 1] = ticks # display the new ticks plt.xticks(locs, labels,", "datapoint in (x,y) like a graph # Assigning the rotation based on minimum", "the numbers in the colored boxes and applying an arbitrary offset rppos =", "if categories[4] != 0: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else:", "print(\"# sites in the heatmap\", sites) # creating the np-array to plot the", "int(stmheight / 2 + topstm) srgpos = int(srgheight / 2 + topsrg) cycpos", "cols by mean a[:, col_delete_plus1] = ( a[:, col_delete] + a[:, col_delete_plus1]) /", "1 if generateColorbar == '1': rankOrder = int(rec.split(\"\\t\")[0]) if rankOrder <= 19999: dataGenes.append([1]", "catergoryCount[0] + 1 elif rankOrder <= 29999 and rankOrder >= 20000: dataGenes.append([2] *", "plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cofpos, categories[4], horizontalalignment='center',", "plt.axes([0, 0, 1, 1]) plt.imshow(data01, cmap=my_cmap, interpolation='nearest', vmin=lower_lim, vmax=upper_lim, aspect='auto') # plot heatmap", "N=len(levs) - 1,) # initialize figure plt.figure(figsize=(col_num / 96, row_num / 96), dpi=96)", "= color.split(\",\") color = '#{:02X}{:02X}{:02X}'.format(int(s[0]), int(s[1]), int(s[2])) c = [\"white\", color] # generate", "heatmapTitle, generateColorbar): data = open(input_file, 'r') if header == 'T': data.readline() data0 =", "row_delete_num > 0: # select deleted rows with equal intervals row_delete = np.linspace(0,", "np.array(data0, dtype=float) print(\"# sites in the heatmap\", sites) # creating the np-array to", "(top +1; end -1) col_delete_plus1 = np.append( np.append(col_delete[0] + 1, col_delete_plus1), col_delete[-1] -", "categories[4], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cofpos, categories[4], horizontalalignment='center', verticalalignment='center', fontsize=16,", "# image is compressed , the point are plotted based on datapoint in", "0: plt.text(25, 
cycpos, categories[3], horizontalalignment='center', verticalalignment='center', fontsize=13, color='white', weight='bold') else: plt.text(25, cycpos, categories[3],", "[0, 0, 0, 0, 0, 0] sites = 0 # to calculate the", "title, xlabel, ticks, dpi, colorbar, out): \"\"\" Creates YEP Style All Feature heatmap", "# initialize figure plt.figure(figsize=(col_num / 96, row_num / 96), dpi=96) # remove margins", "to store colorbar data # to store counts for RP, SAGA and TFIID", "heatmapTitle, sites) # checking if we need to plot the color bar if", "params[temp[0]] = temp[1] print(\" \\n Parameters for the heatmap\") pprint.pprint(params) upper_lim = float(params['upper_threshold'])", "be the ending position of the previous box topstm = rpheight topsrg =", "= rebin(data0, (row_num, col_num)) if generateColorbar == '1': # i have hard-coded the", "the values print(\"rp: {}, stm: {}, ess : {}, cof : {}, unb", "dataGenes.append([4] * len(tmp[start_col:])) catergoryCount[3] = catergoryCount[3] + 1 elif rankOrder <= 59999 and", "else: plt.text(25, unbpos, categories[5], horizontalalignment='center', verticalalignment='center', fontsize=10, color='white', rotation=90, weight='bold') # removing all", "upper_lim, lower_lim, row_num, col_num, ticks, ddpi, xlabel, heatmapTitle, sites) # checking if we", "name='white_sth', colors=c, N=len(levs) - 1,) # initialize figure fig = plt.figure(figsize=(col_num / 96,", "get deleted rows plus position # get deleted rows plus position (top +1;", "int(cycheight / 2 + topcyc) cofpos = int(cofheight / 2 + topcof) unbpos", "lower_lim, color, header, start_col, height, width, ticks, dpi, xlabel, title, colorbar) click.echo('\\n' +", "rows into the next rows by mean a[row_delete_plus1, :] = ( a[row_delete, :]", "col_delete[1:-1] + \\ 1 # get deleted cols plus position # get deleted" ]
import pyshark


class Audio_Scraper:
    def __init__(self, pcap, filter, outfile):
        self.pcap = pcap
        self.filter = filter
        self.outfile = outfile

    def scraper(self):
        rtp_list = []
        pcap_file = self.pcap
        out_file = self.outfile
        print("Scraping: " + pcap_file)
        filter_type = self.filter
        cap = pyshark.FileCapture(pcap_file, display_filter=filter_type)
        raw_audio = open(out_file, 'wb')
        for i in cap:
            try:
                rtp = i[3]
                # data = rtp.get_field_value('DATA')
                data = rtp.payload
                if ":" in data:
                    print(data)
                    rtp_list.append(data.split(":"))
            except Exception:
                # packet has no RTP layer/payload, skip it
                pass
        # join the colon-separated hex bytes of each payload and write them out as raw audio
        for rtp_packet in rtp_list:
            packet = " ".join(rtp_packet)
            print(packet)
            audio = bytearray.fromhex(packet)
            raw_audio.write(audio)
        print("\nFinished outputting raw audio: %s" % out_file)

# pcap_test = Audio_Scraper("my.pcap", "rtp", "my_audio.raw").scraper()
from __future__ import print_function
import time
import avacloud_client_python
from avacloud_client_python.rest import ApiException
import requests
import os
import json

client_id = 'use_your_own_value'
client_secret = '<PASSWORD>_your_own_value'
url = 'https://identity.dangl-it.com/connect/token'
payload = {'grant_type': 'client_credentials', 'scope': 'avacloud'}
response = requests.post(url, data=payload, auth=(client_id, client_secret))
access_token = response.json()['access_token']

# Configure OAuth2 access token for authorization: Dangl.Identity
configuration = avacloud_client_python.Configuration()
configuration.access_token = access_token

# Here, a very small project is created and saved as GAEB file
try:
    ava_api_instance = avacloud_client_python.AvaConversionApi(avacloud_client_python.ApiClient(configuration))
    ava_project = json.loads("""{
        "projectInformation": {
            "itemNumberSchema": {
                "tiers": [
                    { "length": 2, "tierType": "Group" },
                    { "length": 2, "tierType": "Group" },
                    { "length": 4, "tierType": "Position" }
                ]
            }
        },
        "serviceSpecifications": [{
            "projectTaxRate": 0.19,
            "elements": [{
                "elementTypeDiscriminator": "ServiceSpecificationGroupDto",
                "shortText": "Parent Group",
                "itemNumber": { "stringRepresentation": "01." },
                "elements": [{
                    "elementTypeDiscriminator": "ServiceSpecificationGroupDto",
                    "shortText": "Sub Group",
                    "itemNumber": { "stringRepresentation": "01.02." },
                    "elements": [{
                        "elementTypeDiscriminator": "PositionDto",
                        "shortText": "Hello Position!",
                        "itemNumber": { "stringRepresentation": "01.02.0500" },
                        "quantityOverride": 10,
                        "unitPriceOverride": 5
                    }]
                }]
            }]
        }]
    }""")
    # See https://github.com/swagger-api/swagger-codegen/issues/2305 for more info about why you should use _preload_content=False
    # If the _preload_content parameter is not set to False, the binary response content (file) will be
    # attempted to be decoded as UTF8 string, this would lead to an error. Instead, the raw response should be used
    api_response = ava_api_instance.ava_conversion_convert_to_gaeb(
        ava_project,
        destination_gaeb_type='GaebXml_V3_2',
        target_exchange_phase_transform='Grant',
        _preload_content=False)
    with open("./NewProject.X86", "wb") as gaeb_file:
        gaeb_file.write(api_response.data)
except ApiException as e:
    print("Exception when calling AvaConversionApi->ava_conversion_convert_to_gaeb: %s\n" % e)
from __future__ import absolute_import
import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from autograd.scipy.special import beta, psi

cdf = primitive(scipy.stats.beta.cdf)
logpdf = primitive(scipy.stats.beta.logpdf)
pdf = primitive(scipy.stats.beta.pdf)

def grad_beta_logpdf_arg0(x, a, b):
    return (1 + a * (x - 1) + x * (b - 2)) / (x * (x - 1))

def grad_beta_logpdf_arg1(x, a, b):
    return np.log(x) - psi(a) + psi(a + b)

def grad_beta_logpdf_arg2(x, a, b):
    return np.log1p(-x) - psi(b) + psi(a + b)

defvjp(cdf,
       lambda ans, x, a, b: unbroadcast_f(x, lambda g: g * np.power(x, a - 1) * np.power(1 - x, b - 1) / beta(a, b)),
       argnums=[0])
defvjp(logpdf,
       lambda ans, x, a, b: unbroadcast_f(x, lambda g: g * grad_beta_logpdf_arg0(x, a, b)),
       lambda ans, x, a, b: unbroadcast_f(a, lambda g: g * grad_beta_logpdf_arg1(x, a, b)),
       lambda ans, x, a, b: unbroadcast_f(b, lambda g: g * grad_beta_logpdf_arg2(x, a, b)))
defvjp(pdf,
       lambda ans, x, a, b: unbroadcast_f(x, lambda g: g * ans * grad_beta_logpdf_arg0(x, a, b)),
       lambda ans, x, a, b: unbroadcast_f(a, lambda g: g * ans * grad_beta_logpdf_arg1(x, a, b)),
       lambda ans, x, a, b: unbroadcast_f(b, lambda g: g * ans * grad_beta_logpdf_arg2(x, a, b)))
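Assuming the file above matches autograd's shipped beta module, a minimal usage sketch of what these registered VJPs enable is differentiating the wrapped beta log-density with autograd.grad; the evaluation point (0.3, 2.0, 5.0) is an arbitrary choice for illustration, not taken from the original file.

from autograd import grad
from autograd.scipy.stats import beta

# d/dx log Beta(x; a, b): autograd routes this through the VJP registered for argument 0 above
dlogpdf_dx = grad(beta.logpdf, 0)

x, a, b = 0.3, 2.0, 5.0
print(dlogpdf_dx(x, a, b))
# For comparison, the closed form used by grad_beta_logpdf_arg0 above
print((1 + a * (x - 1) + x * (b - 2)) / (x * (x - 1)))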
rapidtest cases for a binary-tree preorder traversal Solution:

from rapidtest import Test, Case, TreeNode
from solutions.binary_tree_preorder_traversal import Solution

with Test(Solution) as test:
    Case(TreeNode.from_string('[1,null,2,3]'), result=[1, 2, 3])
    Case(TreeNode.from_string('[]'), result=[])
    Case(TreeNode.from_string('[1]'), result=[1])
    Case(TreeNode.from_string('[1,2]'), result=[1, 2])
    Case(TreeNode.from_string('[1,2]'), result=[1, 2])
    Case(TreeNode.from_string(
        '[1,2,null,4,5,null,6,2,null,6,8,4,null,1,2,4,null,6,8,0,9,null,7,5,4,null,3,null,2,3]'),
        result=[1, 2, 4, 6, 6, 1, 0, 3, 9, 2, 7, 8, 4, 5, 4, 5, 2, 4, 6, 3, 8, 2])
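The solutions.binary_tree_preorder_traversal module itself is not part of this listing; a minimal sketch of a Solution that would satisfy these cases, assuming the conventional preorderTraversal(root) entry point and a TreeNode with val/left/right attributes, is an iterative preorder traversal:

class Solution:
    def preorderTraversal(self, root):
        # Preorder: visit the node, then its left subtree, then its right subtree.
        # Pushing right before left makes the stack yield left children first.
        result, stack = [], [root] if root else []
        while stack:
            node = stack.pop()
            result.append(node.val)
            if node.right:
                stack.append(node.right)
            if node.left:
                stack.append(node.left)
        return result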
microcosm_pubsub chain assignment statements (assign, assign_constant, assign_function, extract):

"""
assign("foo.bar").to("baz")
assign_constant(1).to("qux")

"""
from inspect import getfullargspec

from microcosm_pubsub.chain.exceptions import AttributeNotFound


class Reference:
    def __init__(self, name):
        self.parts = name.split(".")

    @property
    def key(self):
        return self.parts[0]

    def __call__(self, context):
        value = context[self.key]
        for part in self.parts[1:]:
            if hasattr(value, part):
                value = getattr(value, part)
            else:
                try:
                    value = value[part]
                except KeyError:
                    raise AttributeNotFound(self.parts[0], part)
        return value


class Constant:
    def __init__(self, value):
        self.value = value

    def __call__(self, context):
        return self.value


class Function:
    def __init__(self, func):
        self.func = func
        try:
            self.args = getfullargspec(func).args
        except TypeError:
            # NB: getfullargspec fails for builtins like 'dict'
            #
            # And oddly `inspect.isbuiltin` doesn't work as expected either. We could instead
            # check `func.__module__ == 'builtins'` but that feels more fragile than just assuming
            # an error indicates a builtin... and that a builtin won't take our 'context' argument
            # in any useful way.
            #
            # See: https://bugs.python.org/issue1748064
            self.args = ()

    def __call__(self, context):
        if self.args:
            return self.func(context)
        else:
            return self.func()


class AssignStatement:
    """
    Assign `this` value as `that`.

    """
    def __init__(self, this, that=None):
        self.this = this
        self.that = that

    def to(self, that):
        self.that = that
        return self

    @property
    def name(self):
        return self.that

    def __str__(self):
        return f"assign_{self.name}"

    def __call__(self, context):
        value = self.this(context)
        context[self.name] = value
        return value


def assign(this):
    return AssignStatement(Reference(this))


def assign_constant(this):
    return AssignStatement(Constant(this))


def assign_function(this):
    return AssignStatement(Function(this))


def extract(name, key, key_property=None):
    """
    Extract an argument from a context to another context key.

    :param name: new context key
    :param key: old context key
    :param key_property: property of the context key

    """
    if key_property:
        key = ".".join([key, key_property])
    return AssignStatement(Reference(key), name)
Alembic migration creating the ips, tokens and association tables:

"""create tokens table

Revision ID: 1<PASSWORD>
Revises:
Create Date: 2020-12-12 01:44:28.195736

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Table, Column, Integer, String, Boolean, DateTime, ForeignKey
from sqlalchemy.engine.reflection import Inspector
from flask_sqlalchemy import SQLAlchemy

# revision identifiers, used by Alembic.
revision = '1<PASSWORD>'
down_revision = None
branch_labels = None
depends_on = None

db = SQLAlchemy()


def upgrade():
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    tables = inspector.get_table_names()
    if 'ips' not in tables:
        op.create_table(
            'ips',
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('address', sa.String(255), nullable=True)
        )
    if 'tokens' not in tables:
        op.create_table(
            'tokens',
            sa.Column('name', String(255), primary_key=True),
            sa.Column('expiration_date', DateTime, nullable=True),
            sa.Column('max_usage', Integer, default=1),
            sa.Column('used', Integer, default=0),
            sa.Column('disabled', Boolean, default=False),
            sa.Column('ips', Integer, ForeignKey('association.id'))
        )
    else:
        try:
            with op.batch_alter_table('tokens') as batch_op:
                batch_op.alter_column('ex_date', new_column_name='expiration_date', nullable=True)
                batch_op.alter_column('one_time', new_column_name='max_usage')
                batch_op.add_column(
                    Column('disabled', Boolean, default=False)
                )
        except KeyError:
            pass
    if 'association' not in tables:
        op.create_table(
            'association', db.Model.metadata,
            Column('ips', String, ForeignKey('ips.address'), primary_key=True),
            Column('tokens', Integer, ForeignKey('tokens.name'), primary_key=True)
        )
    op.execute("update tokens set expiration_date=null where expiration_date='None'")


def downgrade():
    op.alter_column('tokens', 'expiration_date', new_column_name='ex_date')
    op.alter_column('tokens', 'max_usage', new_column_name='one_time')
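For reference, a hedged sketch of applying this migration from Python rather than the alembic CLI (the alembic.ini path is an assumption about the project layout; running `alembic upgrade head` on the command line is equivalent):

from alembic import command
from alembic.config import Config

alembic_cfg = Config("alembic.ini")     # assumed location of the Alembic config
command.upgrade(alembic_cfg, "head")    # runs upgrade() above
# command.downgrade(alembic_cfg, "-1")  # would run downgrade() to revert one revision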
pytest fixtures for the nozama.cloudsearch test suite (console logger, MongoDB reset, Elasticsearch reset):

# -*- coding: utf-8 -*-
"""
"""
import logging

import pytest


@pytest.fixture(scope='session')
def logger(request):
    """Set up a root logger showing all entries in the console.
    """
    log = logging.getLogger()
    hdlr = logging.StreamHandler()
    fmt = '%(asctime)s %(name)s %(levelname)s %(message)s'
    formatter = logging.Formatter(fmt)
    hdlr.setFormatter(formatter)
    log.addHandler(hdlr)
    log.setLevel(logging.DEBUG)
    log.propagate = False
    return log


@pytest.fixture(scope='function')
def mongodb(request):
    """Set up a mongo connection reset and ready to roll.
    """
    from nozama.cloudsearch.data import db
    db.init(dict(db_name='unittesting-db'))
    db.db().hard_reset()


@pytest.fixture(scope='function')
def elastic(request):
    """Set up an elasticsearch connection reset and ready to roll.

    This will attempt to connect to the default elasticsearch instance
    on http://localhost:9200. It's not configurable yet.

    """
    from nozama.cloudsearch.data.db import init_es, get_es
    init_es(dict(es_namespace="ut_"))
\"\"\" from nozama.cloudsearch.data.db import init_es, get_es init_es(dict(es_namespace=\"ut_\")) get_es().hard_reset()", "import logging import pytest @pytest.fixture(scope='session') def logger(request): \"\"\"Set up a root logger showing", "coding: utf-8 -*- \"\"\" \"\"\" import logging import pytest @pytest.fixture(scope='session') def logger(request): \"\"\"Set", "hdlr.setFormatter(formatter) log.addHandler(hdlr) log.setLevel(logging.DEBUG) log.propagate = False return log @pytest.fixture(scope='function') def mongodb(request): \"\"\"Set up", "<filename>nozama-cloudsearch-data/nozama/cloudsearch/data/tests/conftest.py # -*- coding: utf-8 -*- \"\"\" \"\"\" import logging import pytest @pytest.fixture(scope='session')", "all entries in the console. \"\"\" log = logging.getLogger() hdlr = logging.StreamHandler() fmt", "\"\"\"Set up a elasticsearch connection reset and ready to roll. This will attempt", "-*- coding: utf-8 -*- \"\"\" \"\"\" import logging import pytest @pytest.fixture(scope='session') def logger(request):", "@pytest.fixture(scope='function') def mongodb(request): \"\"\"Set up a mongo connection reset and ready to roll.", "'%(asctime)s %(name)s %(levelname)s %(message)s' formatter = logging.Formatter(fmt) hdlr.setFormatter(formatter) log.addHandler(hdlr) log.setLevel(logging.DEBUG) log.propagate = False", "\"\"\"Set up a mongo connection reset and ready to roll. \"\"\" from nozama.cloudsearch.data", "False return log @pytest.fixture(scope='function') def mongodb(request): \"\"\"Set up a mongo connection reset and", "elasticsearch connection reset and ready to roll. This will attempt to connect to", "connection reset and ready to roll. \"\"\" from nozama.cloudsearch.data import db db.init(dict(db_name='unittesting-db')) db.db().hard_reset()", "log.addHandler(hdlr) log.setLevel(logging.DEBUG) log.propagate = False return log @pytest.fixture(scope='function') def mongodb(request): \"\"\"Set up a", "db.db().hard_reset() @pytest.fixture(scope='function') def elastic(request): \"\"\"Set up a elasticsearch connection reset and ready to", "pytest @pytest.fixture(scope='session') def logger(request): \"\"\"Set up a root logger showing all entries in", "def logger(request): \"\"\"Set up a root logger showing all entries in the console." ]
[ "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "= [] # test FileContains test_file = '/etc/profile' if FileContains('/IDontExist', re.compile(r\"a\")): failure_list.append(\"FileContains Failed:", "!= 2 | package_results[1] == False: failure_list.append(\"%s not installed: GetDEBPackageInfo returns %s\" %", "['jdk-ge', '1.6.0-0']]; invalid_test_packages = [['apache-ge-devel9', '2.2.2'], ['apache-ge-devel', '10.2.2.1'], ['j9dk-ge', '1.6.0-1'], ['jdk-ge', '1.99.0-0']]; for", "%s\" % uses_rpm) # test GetDEBPackageInfo for non-RPM systems if UsesRPM() == False:", "did not find PROFILE in /etc/hostname\") if FileContains(test_file, re.compile(r\"not anything here\")): failure_list.append(\"FileContains Failed:", "invalid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]): failure_list.append(\"Passed test that should fail: %s\" % (package_list)) print(\"\\n\\nTests", "== False: failure_list.append(\"Failed test that should pass: %s\" % (package_list)) print(\"Test is now", "['apache-ge-devel', '10.2.2.1'], ['j9dk-ge', '1.6.0-1'], ['jdk-ge', '1.99.0-0']]; for package_list in valid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1])", "this file except in compliance with the License. # You may obtain a", "FAILURES\" % len(failure_list)) for s in failure_list: print(s) else: print(\"\\n\\nSUCCESS: All tests succeeded!\")", "failure_list.append(\"FileContains Failed: found garbage search string in /etc/hostname\") # test UsesRPM print(\"Basic checks", "PROFILE in /etc/hostname\") if FileContains(test_file, re.compile(r\"not anything here\")): failure_list.append(\"FileContains Failed: found garbage search", "ANY KIND, either express or implied. # See the License for the specific", "failure_list.append(\"Failed test that should pass: %s\" % (package_list)) print(\"Test is now looking for", "the License. # import sys import os import re from packageUtils import IsPackageVersionSufficient", "% (package_name, package_results)) # test Package check valid_test_packages = [['apache-ge-devel', '2.2.2'], ['apache-ge-devel', '2.2.2.1'],", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "'1.6.0-1'], ['jdk-ge', '1.6.0-0']]; invalid_test_packages = [['apache-ge-devel9', '2.2.2'], ['apache-ge-devel', '10.2.2.1'], ['j9dk-ge', '1.6.0-1'], ['jdk-ge', '1.99.0-0']];", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "that should pass: %s\" % (package_list)) print(\"Test is now looking for invalid packages", "import UsesRPM from packageUtils import FileContains from packageUtils import GetDEBPackageInfo failure_list = []", "import FileContains from packageUtils import GetDEBPackageInfo failure_list = [] # test FileContains test_file", "looking for invalid packages (error messages expected until tests are complete).\\n\\n\") for package_list", "OF ANY KIND, either express or implied. 
# See the License for the", "FileContains(test_file, re.compile(r\"PROFILE\")) == False: failure_list.append(\"FileContains Failed: did not find PROFILE in /etc/hostname\") if", "/etc/hostname\") if FileContains(test_file, re.compile(r\"not anything here\")): failure_list.append(\"FileContains Failed: found garbage search string in", "False: package_name = \"gdal-ge\" package_results = GetDEBPackageInfo (package_name) if len(package_results) != 2 |", "FileContains from packageUtils import GetDEBPackageInfo failure_list = [] # test FileContains test_file =", "\"uses RPM\" print(\"This machine %s\" % uses_rpm) # test GetDEBPackageInfo for non-RPM systems", "now looking for invalid packages (error messages expected until tests are complete).\\n\\n\") for", "% (package_list)) print(\"\\n\\nTests complete.\\n\\n\") if len(failure_list) > 0: print(\"\\n\\n%s TEST FAILURES\" % len(failure_list))", "sure these coincide with your current system.\\n\\n\") uses_rpm = \"does not use RPM\"", "\"does not use RPM\" if UsesRPM(): uses_rpm = \"uses RPM\" print(\"This machine %s\"", "should fail: %s\" % (package_list)) print(\"\\n\\nTests complete.\\n\\n\") if len(failure_list) > 0: print(\"\\n\\n%s TEST", "'2.2.2'], ['apache-ge-devel', '10.2.2.1'], ['j9dk-ge', '1.6.0-1'], ['jdk-ge', '1.99.0-0']]; for package_list in valid_test_packages: if IsPackageVersionSufficient(package_list[0],", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "packageUtils import IsPackageVersionSufficient from packageUtils import UsesRPM from packageUtils import FileContains from packageUtils", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "permissions and # limitations under the License. # import sys import os import", "Ubuntu vs RPM\\nMake sure these coincide with your current system.\\n\\n\") uses_rpm = \"does", "failure_list.append(\"FileContains Failed: did not find PROFILE in /etc/hostname\") if FileContains(test_file, re.compile(r\"not anything here\")):", "non-RPM systems if UsesRPM() == False: package_name = \"gdal-ge\" package_results = GetDEBPackageInfo (package_name)", "%s\" % (package_list)) print(\"\\n\\nTests complete.\\n\\n\") if len(failure_list) > 0: print(\"\\n\\n%s TEST FAILURES\" %", "file\") if FileContains(test_file, re.compile(r\"PROFILE\")) == False: failure_list.append(\"FileContains Failed: did not find PROFILE in", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "GetDEBPackageInfo (package_name) if len(package_results) != 2 | package_results[1] == False: failure_list.append(\"%s not installed:", "= \"gdal-ge\" package_results = GetDEBPackageInfo (package_name) if len(package_results) != 2 | package_results[1] ==", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "[['apache-ge-devel9', '2.2.2'], ['apache-ge-devel', '10.2.2.1'], ['j9dk-ge', '1.6.0-1'], ['jdk-ge', '1.99.0-0']]; for package_list in valid_test_packages: if", "machine %s\" % uses_rpm) # test GetDEBPackageInfo for non-RPM systems if UsesRPM() ==", "required by applicable law or agreed to in writing, software # distributed under", "# test FileContains test_file = '/etc/profile' if FileContains('/IDontExist', re.compile(r\"a\")): failure_list.append(\"FileContains Failed: returned true", "fail: %s\" % (package_list)) print(\"\\n\\nTests complete.\\n\\n\") if len(failure_list) > 0: 
print(\"\\n\\n%s TEST FAILURES\"", "applicable law or agreed to in writing, software # distributed under the License", "from packageUtils import UsesRPM from packageUtils import FileContains from packageUtils import GetDEBPackageInfo failure_list", "[] # test FileContains test_file = '/etc/profile' if FileContains('/IDontExist', re.compile(r\"a\")): failure_list.append(\"FileContains Failed: returned", "in invalid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]): failure_list.append(\"Passed test that should fail: %s\" % (package_list))", "%s\" % (package_list)) print(\"Test is now looking for invalid packages (error messages expected", "or agreed to in writing, software # distributed under the License is distributed", "print(\"Test is now looking for invalid packages (error messages expected until tests are", "package_results = GetDEBPackageInfo (package_name) if len(package_results) != 2 | package_results[1] == False: failure_list.append(\"%s", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "len(failure_list) > 0: print(\"\\n\\n%s TEST FAILURES\" % len(failure_list)) for s in failure_list: print(s)", "'1.6.0-1'], ['jdk-ge', '1.99.0-0']]; for package_list in valid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]) == False: failure_list.append(\"Failed", "'10.2.2.1'], ['j9dk-ge', '1.6.0-1'], ['jdk-ge', '1.99.0-0']]; for package_list in valid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]) ==", "re.compile(r\"PROFILE\")) == False: failure_list.append(\"FileContains Failed: did not find PROFILE in /etc/hostname\") if FileContains(test_file,", "print(\"This machine %s\" % uses_rpm) # test GetDEBPackageInfo for non-RPM systems if UsesRPM()", "for package_list in valid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]) == False: failure_list.append(\"Failed test that should", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "License. # You may obtain a copy of the License at # #", "if FileContains('/IDontExist', re.compile(r\"a\")): failure_list.append(\"FileContains Failed: returned true for non-existing file\") if FileContains(test_file, re.compile(r\"PROFILE\"))", "FileContains test_file = '/etc/profile' if FileContains('/IDontExist', re.compile(r\"a\")): failure_list.append(\"FileContains Failed: returned true for non-existing", "uses_rpm) # test GetDEBPackageInfo for non-RPM systems if UsesRPM() == False: package_name =", "valid_test_packages = [['apache-ge-devel', '2.2.2'], ['apache-ge-devel', '2.2.2.1'], ['jdk-ge', '1.6.0-1'], ['jdk-ge', '1.6.0-0']]; invalid_test_packages = [['apache-ge-devel9',", "(package_name) if len(package_results) != 2 | package_results[1] == False: failure_list.append(\"%s not installed: GetDEBPackageInfo", "os import re from packageUtils import IsPackageVersionSufficient from packageUtils import UsesRPM from packageUtils", "compliance with the License. 
# You may obtain a copy of the License", "are complete).\\n\\n\") for package_list in invalid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]): failure_list.append(\"Passed test that should", "TEST FAILURES\" % len(failure_list)) for s in failure_list: print(s) else: print(\"\\n\\nSUCCESS: All tests", "GetDEBPackageInfo failure_list = [] # test FileContains test_file = '/etc/profile' if FileContains('/IDontExist', re.compile(r\"a\")):", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "= [['apache-ge-devel9', '2.2.2'], ['apache-ge-devel', '10.2.2.1'], ['j9dk-ge', '1.6.0-1'], ['jdk-ge', '1.99.0-0']]; for package_list in valid_test_packages:", "complete).\\n\\n\") for package_list in invalid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]): failure_list.append(\"Passed test that should fail:", "under the License. # import sys import os import re from packageUtils import", "\"gdal-ge\" package_results = GetDEBPackageInfo (package_name) if len(package_results) != 2 | package_results[1] == False:", "package_results)) # test Package check valid_test_packages = [['apache-ge-devel', '2.2.2'], ['apache-ge-devel', '2.2.2.1'], ['jdk-ge', '1.6.0-1'],", "should pass: %s\" % (package_list)) print(\"Test is now looking for invalid packages (error", "(package_name, package_results)) # test Package check valid_test_packages = [['apache-ge-devel', '2.2.2'], ['apache-ge-devel', '2.2.2.1'], ['jdk-ge',", "License. # import sys import os import re from packageUtils import IsPackageVersionSufficient from", "not use this file except in compliance with the License. # You may", "found garbage search string in /etc/hostname\") # test UsesRPM print(\"Basic checks for Ubuntu", "-*- # # Copyright 2017 Google Inc. 
# # Licensed under the Apache", "test that should pass: %s\" % (package_list)) print(\"Test is now looking for invalid", "[['apache-ge-devel', '2.2.2'], ['apache-ge-devel', '2.2.2.1'], ['jdk-ge', '1.6.0-1'], ['jdk-ge', '1.6.0-0']]; invalid_test_packages = [['apache-ge-devel9', '2.2.2'], ['apache-ge-devel',", "License, Version 2.0 (the \"License\"); # you may not use this file except", "uses_rpm = \"uses RPM\" print(\"This machine %s\" % uses_rpm) # test GetDEBPackageInfo for", "messages expected until tests are complete).\\n\\n\") for package_list in invalid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]):", "FileContains(test_file, re.compile(r\"not anything here\")): failure_list.append(\"FileContains Failed: found garbage search string in /etc/hostname\") #", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "packageUtils import UsesRPM from packageUtils import FileContains from packageUtils import GetDEBPackageInfo failure_list =", "['jdk-ge', '1.6.0-1'], ['jdk-ge', '1.6.0-0']]; invalid_test_packages = [['apache-ge-devel9', '2.2.2'], ['apache-ge-devel', '10.2.2.1'], ['j9dk-ge', '1.6.0-1'], ['jdk-ge',", "for non-RPM systems if UsesRPM() == False: package_name = \"gdal-ge\" package_results = GetDEBPackageInfo", "packages (error messages expected until tests are complete).\\n\\n\") for package_list in invalid_test_packages: if", "(package_list)) print(\"Test is now looking for invalid packages (error messages expected until tests", "vs RPM\\nMake sure these coincide with your current system.\\n\\n\") uses_rpm = \"does not", "print(\"\\n\\n%s TEST FAILURES\" % len(failure_list)) for s in failure_list: print(s) else: print(\"\\n\\nSUCCESS: All", "not installed: GetDEBPackageInfo returns %s\" % (package_name, package_results)) # test Package check valid_test_packages", "0: print(\"\\n\\n%s TEST FAILURES\" % len(failure_list)) for s in failure_list: print(s) else: print(\"\\n\\nSUCCESS:", "# you may not use this file except in compliance with the License.", "agreed to in writing, software # distributed under the License is distributed on", "for non-existing file\") if FileContains(test_file, re.compile(r\"PROFILE\")) == False: failure_list.append(\"FileContains Failed: did not find", "that should fail: %s\" % (package_list)) print(\"\\n\\nTests complete.\\n\\n\") if len(failure_list) > 0: print(\"\\n\\n%s", "(the \"License\"); # you may not use this file except in compliance with", "here\")): failure_list.append(\"FileContains Failed: found garbage search string in /etc/hostname\") # test UsesRPM print(\"Basic", "test_file = '/etc/profile' if FileContains('/IDontExist', re.compile(r\"a\")): failure_list.append(\"FileContains Failed: returned true for non-existing file\")", "# Unless required by applicable law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "check valid_test_packages = [['apache-ge-devel', '2.2.2'], ['apache-ge-devel', '2.2.2.1'], ['jdk-ge', '1.6.0-1'], ['jdk-ge', '1.6.0-0']]; invalid_test_packages =", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "re.compile(r\"not anything here\")): failure_list.append(\"FileContains Failed: found garbage search string in /etc/hostname\") # test", "'/etc/profile' if FileContains('/IDontExist', re.compile(r\"a\")): failure_list.append(\"FileContains Failed: returned true for non-existing file\") if FileContains(test_file,", "> 0: print(\"\\n\\n%s TEST FAILURES\" % 
len(failure_list)) for s in failure_list: print(s) else:", "Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "in /etc/hostname\") # test UsesRPM print(\"Basic checks for Ubuntu vs RPM\\nMake sure these", "Failed: did not find PROFILE in /etc/hostname\") if FileContains(test_file, re.compile(r\"not anything here\")): failure_list.append(\"FileContains", "<reponame>ezeeyahoo/earthenterprise #-*- Python -*- # # Copyright 2017 Google Inc. # # Licensed", "for invalid packages (error messages expected until tests are complete).\\n\\n\") for package_list in", "['j9dk-ge', '1.6.0-1'], ['jdk-ge', '1.99.0-0']]; for package_list in valid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]) == False:", "file except in compliance with the License. # You may obtain a copy", "true for non-existing file\") if FileContains(test_file, re.compile(r\"PROFILE\")) == False: failure_list.append(\"FileContains Failed: did not", "from packageUtils import FileContains from packageUtils import GetDEBPackageInfo failure_list = [] # test", "% (package_list)) print(\"Test is now looking for invalid packages (error messages expected until", "package_list in invalid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]): failure_list.append(\"Passed test that should fail: %s\" %", "License for the specific language governing permissions and # limitations under the License.", "to in writing, software # distributed under the License is distributed on an", "implied. # See the License for the specific language governing permissions and #", "(error messages expected until tests are complete).\\n\\n\") for package_list in invalid_test_packages: if IsPackageVersionSufficient(package_list[0],", "\"License\"); # you may not use this file except in compliance with the", "import re from packageUtils import IsPackageVersionSufficient from packageUtils import UsesRPM from packageUtils import", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "re from packageUtils import IsPackageVersionSufficient from packageUtils import UsesRPM from packageUtils import FileContains", "packageUtils import GetDEBPackageInfo failure_list = [] # test FileContains test_file = '/etc/profile' if", "valid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]) == False: failure_list.append(\"Failed test that should pass: %s\" %", "or implied. # See the License for the specific language governing permissions and", "= \"does not use RPM\" if UsesRPM(): uses_rpm = \"uses RPM\" print(\"This machine", "installed: GetDEBPackageInfo returns %s\" % (package_name, package_results)) # test Package check valid_test_packages =", "from packageUtils import GetDEBPackageInfo failure_list = [] # test FileContains test_file = '/etc/profile'", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "| package_results[1] == False: failure_list.append(\"%s not installed: GetDEBPackageInfo returns %s\" % (package_name, package_results))", "systems if UsesRPM() == False: package_name = \"gdal-ge\" package_results = GetDEBPackageInfo (package_name) if", "in writing, software # distributed under the License is distributed on an \"AS", "%s\" % (package_name, package_results)) # test Package check valid_test_packages = [['apache-ge-devel', '2.2.2'], ['apache-ge-devel',", "coincide with your current system.\\n\\n\") uses_rpm = \"does not use RPM\" if UsesRPM():", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "not use RPM\" if UsesRPM(): uses_rpm = \"uses RPM\" print(\"This machine %s\" %", "# # Copyright 2017 Google Inc. # # Licensed under the Apache License,", "failure_list = [] # test FileContains test_file = '/etc/profile' if FileContains('/IDontExist', re.compile(r\"a\")): failure_list.append(\"FileContains", "if len(failure_list) > 0: print(\"\\n\\n%s TEST FAILURES\" % len(failure_list)) for s in failure_list:", "package_list[1]) == False: failure_list.append(\"Failed test that should pass: %s\" % (package_list)) print(\"Test is", "# limitations under the License. # import sys import os import re from", "for Ubuntu vs RPM\\nMake sure these coincide with your current system.\\n\\n\") uses_rpm =", "UsesRPM from packageUtils import FileContains from packageUtils import GetDEBPackageInfo failure_list = [] #", "Package check valid_test_packages = [['apache-ge-devel', '2.2.2'], ['apache-ge-devel', '2.2.2.1'], ['jdk-ge', '1.6.0-1'], ['jdk-ge', '1.6.0-0']]; invalid_test_packages", "test UsesRPM print(\"Basic checks for Ubuntu vs RPM\\nMake sure these coincide with your", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "'1.99.0-0']]; for package_list in valid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]) == False: failure_list.append(\"Failed test that", "specific language governing permissions and # limitations under the License. # import sys", "is now looking for invalid packages (error messages expected until tests are complete).\\n\\n\")", "['apache-ge-devel', '2.2.2.1'], ['jdk-ge', '1.6.0-1'], ['jdk-ge', '1.6.0-0']]; invalid_test_packages = [['apache-ge-devel9', '2.2.2'], ['apache-ge-devel', '10.2.2.1'], ['j9dk-ge',", "test FileContains test_file = '/etc/profile' if FileContains('/IDontExist', re.compile(r\"a\")): failure_list.append(\"FileContains Failed: returned true for", "use this file except in compliance with the License. 
# You may obtain", "Failed: returned true for non-existing file\") if FileContains(test_file, re.compile(r\"PROFILE\")) == False: failure_list.append(\"FileContains Failed:", "/etc/hostname\") # test UsesRPM print(\"Basic checks for Ubuntu vs RPM\\nMake sure these coincide", "FileContains('/IDontExist', re.compile(r\"a\")): failure_list.append(\"FileContains Failed: returned true for non-existing file\") if FileContains(test_file, re.compile(r\"PROFILE\")) ==", "GetDEBPackageInfo returns %s\" % (package_name, package_results)) # test Package check valid_test_packages = [['apache-ge-devel',", "garbage search string in /etc/hostname\") # test UsesRPM print(\"Basic checks for Ubuntu vs", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "failure_list.append(\"Passed test that should fail: %s\" % (package_list)) print(\"\\n\\nTests complete.\\n\\n\") if len(failure_list) >", "Google Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "(package_list)) print(\"\\n\\nTests complete.\\n\\n\") if len(failure_list) > 0: print(\"\\n\\n%s TEST FAILURES\" % len(failure_list)) for", "2.0 (the \"License\"); # you may not use this file except in compliance", "IsPackageVersionSufficient(package_list[0], package_list[1]) == False: failure_list.append(\"Failed test that should pass: %s\" % (package_list)) print(\"Test", "for the specific language governing permissions and # limitations under the License. #", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "if FileContains(test_file, re.compile(r\"PROFILE\")) == False: failure_list.append(\"FileContains Failed: did not find PROFILE in /etc/hostname\")", "if len(package_results) != 2 | package_results[1] == False: failure_list.append(\"%s not installed: GetDEBPackageInfo returns", "'1.6.0-0']]; invalid_test_packages = [['apache-ge-devel9', '2.2.2'], ['apache-ge-devel', '10.2.2.1'], ['j9dk-ge', '1.6.0-1'], ['jdk-ge', '1.99.0-0']]; for package_list", "# # Unless required by applicable law or agreed to in writing, software", "language governing permissions and # limitations under the License. # import sys import", "if UsesRPM(): uses_rpm = \"uses RPM\" print(\"This machine %s\" % uses_rpm) # test", "system.\\n\\n\") uses_rpm = \"does not use RPM\" if UsesRPM(): uses_rpm = \"uses RPM\"", "express or implied. # See the License for the specific language governing permissions", "if FileContains(test_file, re.compile(r\"not anything here\")): failure_list.append(\"FileContains Failed: found garbage search string in /etc/hostname\")", "% uses_rpm) # test GetDEBPackageInfo for non-RPM systems if UsesRPM() == False: package_name", "Failed: found garbage search string in /etc/hostname\") # test UsesRPM print(\"Basic checks for", "False: failure_list.append(\"FileContains Failed: did not find PROFILE in /etc/hostname\") if FileContains(test_file, re.compile(r\"not anything", "either express or implied. 
# See the License for the specific language governing", "anything here\")): failure_list.append(\"FileContains Failed: found garbage search string in /etc/hostname\") # test UsesRPM", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "these coincide with your current system.\\n\\n\") uses_rpm = \"does not use RPM\" if", "your current system.\\n\\n\") uses_rpm = \"does not use RPM\" if UsesRPM(): uses_rpm =", "RPM\" print(\"This machine %s\" % uses_rpm) # test GetDEBPackageInfo for non-RPM systems if", "IsPackageVersionSufficient(package_list[0], package_list[1]): failure_list.append(\"Passed test that should fail: %s\" % (package_list)) print(\"\\n\\nTests complete.\\n\\n\") if", "if IsPackageVersionSufficient(package_list[0], package_list[1]): failure_list.append(\"Passed test that should fail: %s\" % (package_list)) print(\"\\n\\nTests complete.\\n\\n\")", "GetDEBPackageInfo for non-RPM systems if UsesRPM() == False: package_name = \"gdal-ge\" package_results =", "not find PROFILE in /etc/hostname\") if FileContains(test_file, re.compile(r\"not anything here\")): failure_list.append(\"FileContains Failed: found", "= [['apache-ge-devel', '2.2.2'], ['apache-ge-devel', '2.2.2.1'], ['jdk-ge', '1.6.0-1'], ['jdk-ge', '1.6.0-0']]; invalid_test_packages = [['apache-ge-devel9', '2.2.2'],", "the License. # You may obtain a copy of the License at #", "uses_rpm = \"does not use RPM\" if UsesRPM(): uses_rpm = \"uses RPM\" print(\"This", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "in /etc/hostname\") if FileContains(test_file, re.compile(r\"not anything here\")): failure_list.append(\"FileContains Failed: found garbage search string", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "until tests are complete).\\n\\n\") for package_list in invalid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]): failure_list.append(\"Passed test", "invalid packages (error messages expected until tests are complete).\\n\\n\") for package_list in invalid_test_packages:", "# test Package check valid_test_packages = [['apache-ge-devel', '2.2.2'], ['apache-ge-devel', '2.2.2.1'], ['jdk-ge', '1.6.0-1'], ['jdk-ge',", "import sys import os import re from packageUtils import IsPackageVersionSufficient from packageUtils import", "RPM\\nMake sure these coincide with your current system.\\n\\n\") uses_rpm = \"does not use", "returned true for non-existing file\") if FileContains(test_file, re.compile(r\"PROFILE\")) == False: failure_list.append(\"FileContains Failed: did", "packageUtils import FileContains from packageUtils import GetDEBPackageInfo failure_list = [] # test FileContains", "if IsPackageVersionSufficient(package_list[0], package_list[1]) == False: failure_list.append(\"Failed test that should pass: %s\" % (package_list))", "with your current system.\\n\\n\") uses_rpm = \"does not use RPM\" if UsesRPM(): uses_rpm", "#-*- Python -*- # # Copyright 2017 Google Inc. # # Licensed under", "complete.\\n\\n\") if len(failure_list) > 0: print(\"\\n\\n%s TEST FAILURES\" % len(failure_list)) for s in", "RPM\" if UsesRPM(): uses_rpm = \"uses RPM\" print(\"This machine %s\" % uses_rpm) #", "== False: package_name = \"gdal-ge\" package_results = GetDEBPackageInfo (package_name) if len(package_results) != 2", "with the License. 
# You may obtain a copy of the License at", "2 | package_results[1] == False: failure_list.append(\"%s not installed: GetDEBPackageInfo returns %s\" % (package_name,", "# test GetDEBPackageInfo for non-RPM systems if UsesRPM() == False: package_name = \"gdal-ge\"", "UsesRPM() == False: package_name = \"gdal-ge\" package_results = GetDEBPackageInfo (package_name) if len(package_results) !=", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "package_list in valid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]) == False: failure_list.append(\"Failed test that should pass:", "2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the", "False: failure_list.append(\"Failed test that should pass: %s\" % (package_list)) print(\"Test is now looking", "package_list[1]): failure_list.append(\"Passed test that should fail: %s\" % (package_list)) print(\"\\n\\nTests complete.\\n\\n\") if len(failure_list)", "string in /etc/hostname\") # test UsesRPM print(\"Basic checks for Ubuntu vs RPM\\nMake sure", "current system.\\n\\n\") uses_rpm = \"does not use RPM\" if UsesRPM(): uses_rpm = \"uses", "returns %s\" % (package_name, package_results)) # test Package check valid_test_packages = [['apache-ge-devel', '2.2.2'],", "import GetDEBPackageInfo failure_list = [] # test FileContains test_file = '/etc/profile' if FileContains('/IDontExist',", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= '/etc/profile' if FileContains('/IDontExist', re.compile(r\"a\")): failure_list.append(\"FileContains Failed: returned true for non-existing file\") if", "# test UsesRPM print(\"Basic checks for Ubuntu vs RPM\\nMake sure these coincide with", "import os import re from packageUtils import IsPackageVersionSufficient from packageUtils import UsesRPM from", "expected until tests are complete).\\n\\n\") for package_list in invalid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]): failure_list.append(\"Passed", "checks for Ubuntu vs RPM\\nMake sure these coincide with your current system.\\n\\n\") uses_rpm", "find PROFILE in /etc/hostname\") if FileContains(test_file, re.compile(r\"not anything here\")): failure_list.append(\"FileContains Failed: found garbage", "in compliance with the License. # You may obtain a copy of the", "'2.2.2'], ['apache-ge-devel', '2.2.2.1'], ['jdk-ge', '1.6.0-1'], ['jdk-ge', '1.6.0-0']]; invalid_test_packages = [['apache-ge-devel9', '2.2.2'], ['apache-ge-devel', '10.2.2.1'],", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "tests are complete).\\n\\n\") for package_list in invalid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]): failure_list.append(\"Passed test that", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "Copyright 2017 Google Inc. 
# # Licensed under the Apache License, Version 2.0", "See the License for the specific language governing permissions and # limitations under", "UsesRPM print(\"Basic checks for Ubuntu vs RPM\\nMake sure these coincide with your current", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "len(package_results) != 2 | package_results[1] == False: failure_list.append(\"%s not installed: GetDEBPackageInfo returns %s\"", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "['jdk-ge', '1.99.0-0']]; for package_list in valid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]) == False: failure_list.append(\"Failed test", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "test Package check valid_test_packages = [['apache-ge-devel', '2.2.2'], ['apache-ge-devel', '2.2.2.1'], ['jdk-ge', '1.6.0-1'], ['jdk-ge', '1.6.0-0']];", "invalid_test_packages = [['apache-ge-devel9', '2.2.2'], ['apache-ge-devel', '10.2.2.1'], ['j9dk-ge', '1.6.0-1'], ['jdk-ge', '1.99.0-0']]; for package_list in", "print(\"Basic checks for Ubuntu vs RPM\\nMake sure these coincide with your current system.\\n\\n\")", "from packageUtils import IsPackageVersionSufficient from packageUtils import UsesRPM from packageUtils import FileContains from", "# import sys import os import re from packageUtils import IsPackageVersionSufficient from packageUtils", "in valid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]) == False: failure_list.append(\"Failed test that should pass: %s\"", "print(\"\\n\\nTests complete.\\n\\n\") if len(failure_list) > 0: print(\"\\n\\n%s TEST FAILURES\" % len(failure_list)) for s", "failure_list.append(\"FileContains Failed: returned true for non-existing file\") if FileContains(test_file, re.compile(r\"PROFILE\")) == False: failure_list.append(\"FileContains", "package_name = \"gdal-ge\" package_results = GetDEBPackageInfo (package_name) if len(package_results) != 2 | package_results[1]", "== False: failure_list.append(\"%s not installed: GetDEBPackageInfo returns %s\" % (package_name, package_results)) # test", "the specific language governing permissions and # limitations under the License. # import", "use RPM\" if UsesRPM(): uses_rpm = \"uses RPM\" print(\"This machine %s\" % uses_rpm)", "pass: %s\" % (package_list)) print(\"Test is now looking for invalid packages (error messages", "limitations under the License. # import sys import os import re from packageUtils", "Python -*- # # Copyright 2017 Google Inc. # # Licensed under the", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "# Copyright 2017 Google Inc. # # Licensed under the Apache License, Version", "and # limitations under the License. # import sys import os import re", "re.compile(r\"a\")): failure_list.append(\"FileContains Failed: returned true for non-existing file\") if FileContains(test_file, re.compile(r\"PROFILE\")) == False:", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "governing permissions and # limitations under the License. 
# import sys import os", "if UsesRPM() == False: package_name = \"gdal-ge\" package_results = GetDEBPackageInfo (package_name) if len(package_results)", "non-existing file\") if FileContains(test_file, re.compile(r\"PROFILE\")) == False: failure_list.append(\"FileContains Failed: did not find PROFILE", "sys import os import re from packageUtils import IsPackageVersionSufficient from packageUtils import UsesRPM", "test GetDEBPackageInfo for non-RPM systems if UsesRPM() == False: package_name = \"gdal-ge\" package_results", "for package_list in invalid_test_packages: if IsPackageVersionSufficient(package_list[0], package_list[1]): failure_list.append(\"Passed test that should fail: %s\"", "= \"uses RPM\" print(\"This machine %s\" % uses_rpm) # test GetDEBPackageInfo for non-RPM", "False: failure_list.append(\"%s not installed: GetDEBPackageInfo returns %s\" % (package_name, package_results)) # test Package", "IsPackageVersionSufficient from packageUtils import UsesRPM from packageUtils import FileContains from packageUtils import GetDEBPackageInfo", "failure_list.append(\"%s not installed: GetDEBPackageInfo returns %s\" % (package_name, package_results)) # test Package check", "package_results[1] == False: failure_list.append(\"%s not installed: GetDEBPackageInfo returns %s\" % (package_name, package_results)) #", "'2.2.2.1'], ['jdk-ge', '1.6.0-1'], ['jdk-ge', '1.6.0-0']]; invalid_test_packages = [['apache-ge-devel9', '2.2.2'], ['apache-ge-devel', '10.2.2.1'], ['j9dk-ge', '1.6.0-1'],", "UsesRPM(): uses_rpm = \"uses RPM\" print(\"This machine %s\" % uses_rpm) # test GetDEBPackageInfo", "= GetDEBPackageInfo (package_name) if len(package_results) != 2 | package_results[1] == False: failure_list.append(\"%s not", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "search string in /etc/hostname\") # test UsesRPM print(\"Basic checks for Ubuntu vs RPM\\nMake", "test that should fail: %s\" % (package_list)) print(\"\\n\\nTests complete.\\n\\n\") if len(failure_list) > 0:", "== False: failure_list.append(\"FileContains Failed: did not find PROFILE in /etc/hostname\") if FileContains(test_file, re.compile(r\"not", "import IsPackageVersionSufficient from packageUtils import UsesRPM from packageUtils import FileContains from packageUtils import" ]
[ "0:2]).cpu().detach().numpy() # disparity -1 cv_gt[:, 1:, 1] = cos(ref_feature[:, :, 1:], sec_features[:, :,", "Unless required by applicable law or agreed to in writing, software # distributed", "-1 cv_gt[:, 1:, 1] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy()", "positive disparities \"\"\" # create reference and secondary features ref_feature = torch.randn((64, 4,", "functions to test the cost volume create by mc_cnn \"\"\" import unittest import", "y_ref_patch = 5 patch_size = 5 gt_ref_patch = self.ref_img_0[ y_ref_patch - patch_size :", "-1 cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -4, -1) # Check if the calculated cost", ":, :, 0:1]).cpu().detach().numpy() # disparity -2 cv_gt[:, 2:, 2] = self.sad_cost(ref_feature[:, :, :,", "= img_sec(x-d,y) disp = -1 x_sec_pos_patch = x_ref_patch - disp y_sec_pos_patch = 5", "disparity 2 cv_gt[:, :2, 4] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :,", "{ \"scale\": 0.8, \"hscale\": 0.8, \"hshear\": 0.1, \"trans\": 0, \"rotate\": 28, \"brightness\": 1.3,", "test the function MiddleburyGenerator \"\"\" # Script use to create images_middlebury and samples_middlebury", "secondary features ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64) sec_features = torch.randn((1, 112,", ":2], sec_features[:, :, :, 2:4]).cpu().detach().numpy() # The minus sign converts the similarity score", "+ dataset_neg y_sec_neg_patch = 7 gt_sec_neg_patch = self.sec_img_2[ y_sec_neg_patch - patch_size : y_sec_neg_patch", "changing the name here loses the reference to the actual name of the", "# Script use to create images_middlebury and samples_middlebury : # shape 2, 13,", "+ patch_size + 1, ] gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0) # Check", "reference to the actual name of the checked function def test_MiddleburyGenerator(self): \"\"\" test", "acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -4, -1, self.sad_cost) # Check if the calculated cost volume is", "to prepare the test fixture \"\"\" self.ref_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) self.sec_img_0", "np.tile(np.arange(13), (13, 1)) - 1 img_file = h5py.File('images_dfc.hdf5', 'w') img_file.create_dataset(str(0), data=image_pairs_0) img_file.create_dataset(str(1), data=image_pairs_1)", "-1 cv_gt[:, 1:, 3] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy() # The", "-4 # all nan # disparity -3 cv_gt[:, 3:, 1] = cos(ref_feature[:, :,", "test fixture \"\"\" self.ref_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) self.sec_img_0 = np.tile(np.arange(13, dtype=np.float32),", "limitations under the License. 
# \"\"\" This module contains functions to test the", "\"scale\": 0.8, \"hscale\": 0.8, \"hshear\": 0.1, \"trans\": 0, \"rotate\": 28, \"brightness\": 1.3, \"contrast\":", "pylint: disable=pointless-string-statement \"\"\" # Script use to create images_middlebury and samples_middlebury : #", "sampl_file = h5py.File('sample_middlebury.hdf5', 'w') # disparity of image_pairs_0 x0 = np.array([[0., 5., 6.,", "1)) + 1 image_pairs_1 = np.zeros((1, 2, 13, 13)) image_pairs_1[0, 0, :, :]", "col, disp) cv_gt = np.full((4, 4, 4), np.nan) # disparity -4 # all", "+ patch_size + 1, x_ref_patch - patch_size : x_ref_patch + patch_size + 1,", "3:, 1] = cos(ref_feature[:, :, 3:], sec_features[:, :, 0:1]).cpu().detach().numpy() # disparity -2 cv_gt[:,", "disparity -1 cv_gt[:, 1:, 3] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy() #", "\"d_vtrans\": 1, \"d_rotate\": 3, \"d_brightness\": 0.7, \"d_contrast\": 1.1, }, } training_loader = MiddleburyGenerator(\"tests/sample_middlebury.hdf5\",", "1:, 3] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy() # The", ":, :] = np.tile(np.arange(13), (13, 1)) image_pairs_1[1, :, :] = np.tile(np.arange(13), (13, 1))", "# disparity 0 cv_gt[:, :, 2] = self.sad_cost(ref_feature[:, :, :, :], sec_features[:, :,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "sampl_file = h5py.File('sample_dfc.hdf5', 'w') # disparity of image_pairs_0 x0 = np.array([[0., 5., 6.,", "sampl_file.create_dataset(str(0), data=x0) sampl_file.create_dataset(str(1), data=x1) \"\"\" # Positive disparity cfg = { \"data_augmentation\": False,", "image_pairs_0[0, 0, :, :] = np.tile(np.arange(13), (13, 1)) # right image_pairs_0[0, 1, :,", "image_pairs_1[0, 0, :, :] = np.tile(np.arange(13), (13, 1)) image_pairs_1[0, 1, :, :] =", "acc = AccMcCnnInfer() # Because input shape of nn.Conv2d is (Batch_size, Channel, H,", "= 7 gt_sec_neg_patch = self.sec_img_2[ y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size +", "1 dimensions cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -2, 2, self.sad_cost) # Check if the", "cv_gt[:, :3, 3] = self.sad_cost(ref_feature[:, :, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy() #", "volume is equal to the ground truth (same shape and all elements equals)", "= np.tile(np.arange(13), (13, 1)) - 1 img_file = h5py.File('images_middlebury.hdf5', 'w') img_0 = [image_pairs_0]", ":, 3:], sec_features[:, :, 0:1]).cpu().detach().numpy() # disparity -2 cv_gt[:, 2:, 2] = cos(ref_feature[:,", "np.array([[ 1., 7., 5., -1.], [ 0., 0., 0., 0.]]) sampl_file.create_dataset(str(0), data=x0) sampl_file.create_dataset(str(1),", "0] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2,", ":, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy() # disparity 3 cv_gt[:, :1, 2]", "= x_ref_patch - disp + dataset_neg y_sec_neg_patch = 5 gt_sec_neg_patch = self.sec_img_0[ y_sec_neg_patch", "(same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) # pylint: disable=invalid-name #", "1)) - 1 img_file = h5py.File('images_dfc.hdf5', 'w') img_file.create_dataset(str(0), data=image_pairs_0) img_file.create_dataset(str(1), data=image_pairs_1) sampl_file =", "+ patch_size + 1, x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,", "ground truth (same shape and all elements equals) np.testing.assert_array_equal(patch, gt_path) # pylint: disable=invalid-name", "dimension = left 
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA_MCCNN
#
#     https://github.com/CNES/Pandora_MCCNN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions to test the cost volume created by mc_cnn
"""

import unittest

import numpy as np
import torch
import torch.nn as nn

from mc_cnn.run import computes_cost_volume_mc_cnn_fast
from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer
from mc_cnn.dataset_generator.middlebury_generator import MiddleburyGenerator
from mc_cnn.dataset_generator.datas_fusion_contest_generator import DataFusionContestGenerator


# pylint: disable=no-self-use
class TestMCCNN(unittest.TestCase):
    """
    TestMCCNN class allows to test the cost volume created by mc_cnn
    """

    def setUp(self):
        """
        Method called to prepare the test fixture
        """
        self.ref_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1))
        self.sec_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) + 1
        self.ref_img_1 = np.tile(np.arange(13, dtype=np.float32), (13, 1))
        self.sec_img_2 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) - 1

    def test_computes_cost_volume_mc_cnn_fast(self):
        """
        Test the computes_cost_volume_mc_cnn_fast function
        """
        # Create reference and secondary features
        ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
        sec_features = torch.randn((64, 4, 4), dtype=torch.float64)

        cos = nn.CosineSimilarity(dim=0, eps=1e-6)

        # Create the ground truth cost volume (row, col, disp)
        cv_gt = np.full((4, 4, 5), np.nan)

        # disparity -2
        cv_gt[:, 2:, 0] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy()
        # disparity -1
        cv_gt[:, 1:, 1] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy()
        # disparity 0
        cv_gt[:, :, 2] = cos(ref_feature[:, :, :], sec_features[:, :, :]).cpu().detach().numpy()
        # disparity 1
        cv_gt[:, :3, 3] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy()
        # disparity 2
        cv_gt[:, :2, 4] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy()

        # The minus sign converts the similarity score to a matching cost
        cv_gt *= -1

        cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -2, 2)

        # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equal)
        np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)

    def test_computes_cost_volume_mc_cnn_fast_negative_disp(self):
        """
        Test the computes_cost_volume_mc_cnn_fast function with negative disparities
        """
        # Create reference and secondary features
        ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
        sec_features = torch.randn((64, 4, 4), dtype=torch.float64)

        cos = nn.CosineSimilarity(dim=0, eps=1e-6)

        # Create the ground truth cost volume (row, col, disp)
        cv_gt = np.full((4, 4, 4), np.nan)

        # disparity -4 : all nan
        # disparity -3
        cv_gt[:, 3:, 1] = cos(ref_feature[:, :, 3:], sec_features[:, :, 0:1]).cpu().detach().numpy()
        # disparity -2
        cv_gt[:, 2:, 2] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy()
        # disparity -1
        cv_gt[:, 1:, 3] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy()

        # The minus sign converts the similarity score to a matching cost
        cv_gt *= -1

        cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -4, -1)

        # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equal)
        np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)

    def test_computes_cost_volume_mc_cnn_fast_positive_disp(self):
        """
        Test the computes_cost_volume_mc_cnn_fast function with positive disparities
        """
        # Create reference and secondary features
        ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
        sec_features = torch.randn((64, 4, 4), dtype=torch.float64)

        cos = nn.CosineSimilarity(dim=0, eps=1e-6)

        # Create the ground truth cost volume (row, col, disp)
        cv_gt = np.full((4, 4, 4), np.nan)

        # disparity 1
        cv_gt[:, :3, 0] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy()
        # disparity 2
        cv_gt[:, :2, 1] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy()
        # disparity 3
        cv_gt[:, :1, 2] = cos(ref_feature[:, :, :1], sec_features[:, :, 3:]).cpu().detach().numpy()
        # disparity 4 : all nan

        # The minus sign converts the similarity score to a matching cost
        cv_gt *= -1

        cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, 1, 4)

        # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equal)
        np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)

    def sad_cost(self, ref_features, sec_features):
        """
        Useful to test the computes_cost_volume_mc_cnn_accurate function
        """
        return torch.sum(abs(ref_features[0, :, :, :] - sec_features[0, :, :, :]), dim=0)

    def test_computes_cost_volume_mc_cnn_accurate(self):
        """
        Test the computes_cost_volume_mc_cnn_accurate function
        """
        # Create reference and secondary features
        ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64)
        sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64)

        # Create the ground truth cost volume (row, col, disp)
        cv_gt = np.full((4, 4, 5), np.nan)

        # disparity -2
        cv_gt[:, 2:, 0] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :, :, 0:2]).cpu().detach().numpy()
        # disparity -1
        cv_gt[:, 1:, 1] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy()
        # disparity 0
        cv_gt[:, :, 2] = self.sad_cost(ref_feature[:, :, :, :], sec_features[:, :, :, :]).cpu().detach().numpy()
        # disparity 1
        cv_gt[:, :3, 3] = self.sad_cost(ref_feature[:, :, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy()
        # disparity 2
        cv_gt[:, :2, 4] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy()

        # The minus sign converts the similarity score to a matching cost
        cv_gt *= -1

        acc = AccMcCnnInfer()
        # Because the input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimension
        cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -2, 2, self.sad_cost)

        # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equal)
        np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)

    def test_computes_cost_volume_mc_cnn_accuratenegative_disp(self):
        """
        Test the computes_cost_volume_mc_cnn_accurate function with negative disparities
        """
        # Create reference and secondary features
        ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64)
        sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64)

        # Create the ground truth cost volume (row, col, disp)
        cv_gt = np.full((4, 4, 4), np.nan)

        # disparity -4 : all nan
        # disparity -3
        cv_gt[:, 3:, 1] = self.sad_cost(ref_feature[:, :, :, 3:], sec_features[:, :, :, 0:1]).cpu().detach().numpy()
        # disparity -2
        cv_gt[:, 2:, 2] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :, :, 0:2]).cpu().detach().numpy()
        # disparity -1
        cv_gt[:, 1:, 3] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy()

        # The minus sign converts the similarity score to a matching cost
        cv_gt *= -1

        acc = AccMcCnnInfer()
        # Because the input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimension
        cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -4, -1, self.sad_cost)

        # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equal)
        np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)

    def test_computes_cost_volume_mc_cnn_accurate_positive_disp(self):
        """
        Test the computes_cost_volume_mc_cnn_accurate function with positive disparities
        """
        # Create reference and secondary features
        ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64)
        sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64)

        # Create the ground truth cost volume (row, col, disp)
        cv_gt = np.full((4, 4, 4), np.nan)

        # disparity 1
        cv_gt[:, :3, 0] = self.sad_cost(ref_feature[:, :, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy()
        # disparity 2
        cv_gt[:, :2, 1] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy()
        # disparity 3
        cv_gt[:, :1, 2] = self.sad_cost(ref_feature[:, :, :, :1], sec_features[:, :, :, 3:]).cpu().detach().numpy()
        # disparity 4 : all nan

        # The minus sign converts the similarity score to a matching cost
        cv_gt *= -1

        acc = AccMcCnnInfer()
        # Because the input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimension
        cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, 1, 4, self.sad_cost)

        # Check if the calculated cost volume is equal to the ground truth (same shape and all elements equal)
        np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)

    # pylint: disable=invalid-name
    # -> because changing the name here loses the reference to the actual name of the checked function
    def test_MiddleburyGenerator(self):
        """
        Test the function MiddleburyGenerator
        """
        # Script used to create images_middlebury and samples_middlebury :
        # pylint: disable=pointless-string-statement
        """
        # shape 2, 13, 13 : 2 = left and right images, row, col
        image_pairs_0 = np.zeros((2, 13, 13))
        # left
        image_pairs_0[0, :, :] = np.tile(np.arange(13), (13, 1))
        # right
        image_pairs_0[1, :, :] = np.tile(np.arange(13), (13, 1)) + 1

        image_pairs_1 = np.zeros((2, 13, 13))
        image_pairs_1[0, :, :] = np.tile(np.arange(13), (13, 1))
        image_pairs_1[1, :, :] = np.tile(np.arange(13), (13, 1)) - 1

        img_file = h5py.File('images_middlebury.hdf5', 'w')
        img_0 = [image_pairs_0]
        grp = img_file.create_group(str(0))
        # 1 illumination
        for light in range(len(img_0)):
            dset = grp.create_dataset(str(light), data=img_0[light])

        img_1 = [image_pairs_1]
        grp = img_file.create_group(str(1))
        for light in range(len(img_1)):
            dset = grp.create_dataset(str(light), data=img_1[light])

        sampl_file = h5py.File('sample_middlebury.hdf5', 'w')
        # disparity of image_pairs_0
        x0 = np.array([[0., 5., 6., 1.],
                       [0., 7., 7., 1.]])
        # disparity of image_pairs_1
        x1 = np.array([[1., 7., 5., -1.],
                       [0., 0., 0., 0.]])
        sampl_file.create_dataset(str(0), data=x0)
        sampl_file.create_dataset(str(1), data=x1)
        """
        # Positive disparity
        cfg = {
            "data_augmentation": False,
            "dataset_neg_low": 1,
            "dataset_neg_high": 1,
            "dataset_pos": 0,
            "augmentation_param": {
                "vertical_disp": 0,
                "scale": 0.8,
                "hscale": 0.8,
                "hshear": 0.1,
                "trans": 0,
                "rotate": 28,
                "brightness": 1.3,
                "contrast": 1.1,
                "d_hscale": 0.9,
                "d_hshear": 0.3,
                "d_vtrans": 1,
                "d_rotate": 3,
                "d_brightness": 0.7,
                "d_contrast": 1.1,
            },
        }
        training_loader = MiddleburyGenerator("tests/sample_middlebury.hdf5", "tests/images_middlebury.hdf5", cfg)

        # Patch of shape 3, 11, 11
        # with the first dimension = left patch, right positive patch, right negative patch
        patch = training_loader.__getitem__(0)

        x_ref_patch = 6
        y_ref_patch = 5
        patch_size = 5
        gt_ref_patch = self.ref_img_0[
            y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
            x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
        ]

        # disp = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
        disp = 1
        x_sec_pos_patch = x_ref_patch - disp
        y_sec_pos_patch = 5
        gt_sec_pos_patch = self.sec_img_0[
            y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
            x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
        ]

        # dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
        dataset_neg = 1
        x_sec_neg_patch = x_ref_patch - disp + dataset_neg
        y_sec_neg_patch = 5
        gt_sec_neg_patch = self.sec_img_0[
            y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
            x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
        ]

        gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)

        # Check if the calculated patch is equal to the ground truth (same shape and all elements equal)
        np.testing.assert_array_equal(patch, gt_path)

        # Negative disparity
        patch = training_loader.__getitem__(2)

        x_ref_patch = 5
        y_ref_patch = 7
        patch_size = 5
        gt_ref_patch = self.ref_img_1[
            y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
            x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
        ]

        # disp = -1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
        disp = -1
        x_sec_pos_patch = x_ref_patch - disp
        y_sec_pos_patch = 7
        gt_sec_pos_patch = self.sec_img_2[
            y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
            x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
        ]

        # dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
        dataset_neg = 1
        x_sec_neg_patch = x_ref_patch - disp + dataset_neg
        y_sec_neg_patch = 7
        gt_sec_neg_patch = self.sec_img_2[
            y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
            x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
        ]

        gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)

        # Check if the calculated patch is equal to the ground truth (same shape and all elements equal)
        np.testing.assert_array_equal(patch, gt_path)

    # pylint: disable=invalid-name
    # -> because changing the name here loses the reference to the actual name of the checked function
    def test_DataFusionContestGenerator(self):
        """
        Test the function DataFusionContestGenerator
        """
        # Script used to create images_dfc and samples_dfc :
        # pylint: disable=pointless-string-statement
        """
        # shape 1, 2, 13, 13 : 1 exposures, 2 = left and right images
        image_pairs_0 = np.zeros((1, 2, 13, 13))
        # left
        image_pairs_0[0, 0, :, :] = np.tile(np.arange(13), (13, 1))
        # right
        image_pairs_0[0, 1, :, :] = np.tile(np.arange(13), (13, 1)) + 1

        image_pairs_1 = np.zeros((1, 2, 13, 13))
        image_pairs_1[0, 0, :, :] = np.tile(np.arange(13), (13, 1))
        image_pairs_1[0, 1, :, :] = np.tile(np.arange(13), (13, 1)) - 1

        img_file = h5py.File('images_dfc.hdf5', 'w')
        img_file.create_dataset(str(0), data=image_pairs_0)
        img_file.create_dataset(str(1), data=image_pairs_1)

        sampl_file = h5py.File('sample_dfc.hdf5', 'w')
        # disparity of image_pairs_0
        x0 = np.array([[0., 5., 6., 1.],
                       [0., 7., 7., 1.]])
        # disparity of image_pairs_1
        x1 = np.array([[1., 7., 5., -1.],
                       [0., 0., 0., 0.]])
        sampl_file.create_dataset(str(0), data=x0)
        sampl_file.create_dataset(str(1), data=x1)
        """
        # Positive disparity
        cfg = {
            "data_augmentation": False,
            "dataset_neg_low": 1,
            "dataset_neg_high": 1,
            "dataset_pos": 0,
            "vertical_disp": 0,
            "augmentation_param": {
                "scale": 0.8,
                "hscale": 0.8,
                "hshear": 0.1,
                "trans": 0,
                "rotate": 28,
                "brightness": 1.3,
                "contrast": 1.1,
                "d_hscale": 0.9,
                "d_hshear": 0.3,
                "d_vtrans": 1,
                "d_rotate": 3,
                "d_brightness": 0.7,
                "d_contrast": 1.1,
            },
        }
        training_loader = DataFusionContestGenerator("tests/sample_dfc.hdf5", "tests/images_dfc.hdf5", cfg)

        # Patch of shape 3, 11, 11
        # with the first dimension = left patch, right positive patch, right negative patch
        patch = training_loader.__getitem__(0)

        x_ref_patch = 6
        y_ref_patch = 5
        patch_size = 5
        gt_ref_patch = self.ref_img_0[
            y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
            x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
        ]

        # disp = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
        disp = 1
        x_sec_pos_patch = x_ref_patch - disp
        y_sec_pos_patch = 5
        gt_sec_pos_patch = self.sec_img_0[
            y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
            x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
        ]

        # dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
        dataset_neg = 1
        x_sec_neg_patch = x_ref_patch - disp + dataset_neg
        y_sec_neg_patch = 5
        gt_sec_neg_patch = self.sec_img_0[
            y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
            x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
        ]

        gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)

        # Check if the calculated patch is equal to the ground truth (same shape and all elements equal)
        np.testing.assert_array_equal(patch, gt_path)


if __name__ == "__main__":
    unittest.main()
self.sad_cost(ref_feature[:,", "# https://github.com/CNES/Pandora_MCCNN # # Licensed under the Apache License, Version 2.0 (the \"License\");", "left image_pairs_0[0, :, :] = np.tile(np.arange(13), (13, 1)) # right image_pairs_0[1, :, :]", "dataset_neg y_sec_neg_patch = 5 gt_sec_neg_patch = self.sec_img_0[ y_sec_neg_patch - patch_size : y_sec_neg_patch +", "python # coding: utf8 # # Copyright (c) 2021 Centre National d'Etudes Spatiales", "sec_features[:, :, 2:4]).cpu().detach().numpy() # disparity 3 cv_gt[:, :1, 2] = cos(ref_feature[:, :, :1],", "# disparity -2 cv_gt[:, 2:, 0] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :,", "Test the computes_cost_volume_mc_cnn_fast function with positive disparities \"\"\" # create reference and secondary", "gt_sec_neg_patch = self.sec_img_2[ y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1, x_sec_neg_patch", "0., 0.]]) sampl_file.create_dataset(str(0), data=x0) sampl_file.create_dataset(str(1), data=x1) \"\"\" # Positive disparity cfg = {", "img_sec(x-d,y) disp = 1 x_sec_pos_patch = x_ref_patch - disp y_sec_pos_patch = 5 gt_sec_pos_patch", "sec_features, -2, 2) # Check if the calculated cost volume is equal to", "disparity 2 cv_gt[:, :2, 1] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :,", "test the function DataFusionContestGenerator \"\"\" # pylint: disable=pointless-string-statement \"\"\" # Script use to", "to test the cost volume create by mc_cnn \"\"\" import unittest import numpy", ":, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy() # The minus sign converts the", "cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, 1, 4, self.sad_cost) # Check if the calculated cost", "the ground truth cost volume (row, col, disp) cv_gt = np.full((4, 4, 5),", "images, row, col image_pairs_0 = np.zeros((2, 13, 13)) # left image_pairs_0[0, :, :]", "= self.sec_img_0[ y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1, x_sec_pos_patch -", "gt_ref_patch = self.ref_img_0[ y_ref_patch - patch_size : y_ref_patch + patch_size + 1, x_ref_patch", ": # pylint: disable=pointless-string-statement \"\"\" # shape 1, 2, 13, 13 : 1", "to the ground truth (same shape and all elements equals) np.testing.assert_array_equal(patch, gt_path) if", "equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_fast_positive_disp(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_fast function with", ":, :, :]).cpu().detach().numpy() # disparity 1 cv_gt[:, :3, 3] = self.sad_cost(ref_feature[:, :, :,", "- patch_size : x_sec_pos_patch + patch_size + 1, ] # dataset_neg_low & dataset_neg_high", "img_file = h5py.File('images_middlebury.hdf5', 'w') img_0 = [image_pairs_0] grp = img_file.create_group(str(0)) # 1 illumination", "x_ref_patch + patch_size + 1, ] # disp = 1, with middlebury image", "ground truth (same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) # pylint:", "1)) # right image_pairs_0[1, :, :] = np.tile(np.arange(13), (13, 1)) + 1 image_pairs_1", "= img_file.create_group(str(0)) # 1 illumination for light in range(len(img_0)): dset = grp.create_dataset(str(light), data=img_0[light])", "pylint: disable=invalid-name # -> because changing the name here loses the reference to", "Channel, H, W), we add 1 dimensions cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -4, -1,", "image_pairs_0 x0 = np.array([[0., 5., 6., 1.], [0., 7., 7., 1.]]) # disparity", "and samples_middlebury : # pylint: 
disable=pointless-string-statement \"\"\" # shape 1, 2, 13, 13", "nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimensions cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature,", "DataFusionContestGenerator \"\"\" # pylint: disable=pointless-string-statement \"\"\" # Script use to create images_middlebury and", "patch patch = training_loader.__getitem__(0) x_ref_patch = 6 y_ref_patch = 5 patch_size = 5", "# -> because changing the name here loses the reference to the actual", "- disp + dataset_neg y_sec_neg_patch = 7 gt_sec_neg_patch = self.sec_img_2[ y_sec_neg_patch - patch_size", "def sad_cost(self, ref_features, sec_features): \"\"\" Useful to test the computes_cost_volume_mc_cnn_accurate function \"\"\" return", "Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1", "torch import torch.nn as nn from mc_cnn.run import computes_cost_volume_mc_cnn_fast from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer", "\"dataset_neg_high\": 1, \"dataset_pos\": 0, \"augmentation_param\": { \"vertical_disp\": 0, \"scale\": 0.8, \"hscale\": 0.8, \"hshear\":", "\"\"\" self.ref_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) self.sec_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1))", "(13, 1)) self.sec_img_2 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) - 1 def test_computes_cost_volume_mc_cnn_fast(self): \"\"\"", "\"\"\" \" Test the computes_cost_volume_mc_cnn_fast function with positive disparities \"\"\" # create reference", "truth cost volume (row, col, disp) cv_gt = np.full((4, 4, 4), np.nan) #", "image_pairs_1 x1 = np.array([[ 1., 7., 5., -1.] [ 0., 0., 0., 0.]])", "= 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) disp = 1 x_sec_pos_patch", "= 5 gt_sec_neg_patch = self.sec_img_0[ y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size +", ":, 0:1]).cpu().detach().numpy() # disparity -2 cv_gt[:, 2:, 2] = cos(ref_feature[:, :, 2:], sec_features[:,", ":, :] = np.tile(np.arange(13), (13, 1)) - 1 img_file = h5py.File('images_middlebury.hdf5', 'w') img_0", "x1 = np.array([[ 1., 7., 5., -1.], [ 0., 0., 0., 0.]]) sampl_file.create_dataset(str(0),", "1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) dataset_neg = 1 x_sec_neg_patch =", "# disp = -1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) disp =", "acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -2, 2, self.sad_cost) # Check if the calculated cost volume is", "1, \"dataset_pos\": 0, \"vertical_disp\": 0, \"augmentation_param\": { \"scale\": 0.8, \"hscale\": 0.8, \"hshear\": 0.1,", "ground truth (same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_accuratenegative_disp(self):", "13)) image_pairs_1[0, :, :] = np.tile(np.arange(13), (13, 1)) image_pairs_1[1, :, :] = np.tile(np.arange(13),", "3, \"d_brightness\": 0.7, \"d_contrast\": 1.1, }, } training_loader = DataFusionContestGenerator(\"tests/sample_dfc.hdf5\", \"tests/images_dfc.hdf5\", cfg) #", "sec_features[:, :, :, 2:4]).cpu().detach().numpy() # The minus sign converts the similarity score to", "score to a matching cost cv_gt *= -1 acc = AccMcCnnInfer() # Because", "Check if the calculated patch is equal to the ground truth (same shape", "disable=no-self-use class TestMCCNN(unittest.TestCase): \"\"\" TestMCCNN class allows to test the cost volume create", "sec_features[:, :, 2:4]).cpu().detach().numpy() # The minus sign converts the similarity score to a", "1)) self.sec_img_0 = 
np.tile(np.arange(13, dtype=np.float32), (13, 1)) + 1 self.ref_img_1 = np.tile(np.arange(13, dtype=np.float32),", "image_pairs_0 = np.zeros((1, 2, 13, 13)) # left image_pairs_0[0, 0, :, :] =", "= 1 x_sec_pos_patch = x_ref_patch - disp y_sec_pos_patch = 5 gt_sec_pos_patch = self.sec_img_0[", "truth (same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_accuratenegative_disp(self): \"\"\"", ":3], sec_features[:, :, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2, 4] = self.sad_cost(ref_feature[:,", "similarity score to a matching cost cv_gt *= -1 acc = AccMcCnnInfer() #", "function DataFusionContestGenerator \"\"\" # pylint: disable=pointless-string-statement \"\"\" # Script use to create images_middlebury", "2 cv_gt[:, :2, 1] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy()", "matching cost cv_gt *= -1 acc = AccMcCnnInfer() # Because input shape of", "= self.sad_cost(ref_feature[:, :, :, :], sec_features[:, :, :, :]).cpu().detach().numpy() # disparity 1 cv_gt[:,", "and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_accuratenegative_disp(self): \"\"\" \" Test the", "5 gt_ref_patch = self.ref_img_0[ y_ref_patch - patch_size : y_ref_patch + patch_size + 1,", "truth (same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_fast_positive_disp(self): \"\"\"", "[image_pairs_0] grp = img_file.create_group(str(0)) # 1 illumination for light in range(len(img_0)): dset =", "\"d_contrast\": 1.1, }, } training_loader = DataFusionContestGenerator(\"tests/sample_dfc.hdf5\", \"tests/images_dfc.hdf5\", cfg) # Patch of shape", "OF ANY KIND, either express or implied. # See the License for the", "image convention img_ref(x,y) = img_sec(x-d,y) disp = 1 x_sec_pos_patch = x_ref_patch - disp", "np.nan) # disparity 1 cv_gt[:, :3, 0] = cos(ref_feature[:, :, :3], sec_features[:, :,", "name of the checked function def test_DataFusionContestGenerator(self): \"\"\" test the function DataFusionContestGenerator \"\"\"", "= img_file.create_group(str(1)) for light in range(len(img_1)): dset = grp.create_dataset(str(light), data=img_1[light]) sampl_file = h5py.File('sample_middlebury.hdf5',", ":, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy() # disparity 0 cv_gt[:, :, 2]", "dimensions cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, 1, 4, self.sad_cost) # Check if the calculated", "[image_pairs_1] grp = img_file.create_group(str(1)) for light in range(len(img_1)): dset = grp.create_dataset(str(light), data=img_1[light]) sampl_file", "utf8 # # Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES). # #", "np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def sad_cost(self, ref_features, sec_features): \"\"\" Useful to test the computes_cost_volume_mc_cnn_accurate", "1.] 
[0., 7., 7., 1.]]) # disparity of image_pairs_1 x1 = np.array([[ 1.,", "5 gt_sec_pos_patch = self.sec_img_0[ y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,", "volume (row, col, disp) cv_gt = np.full((4, 4, 5), np.nan) # disparity -2", "cost volume create by mc_cnn \"\"\" def setUp(self): \"\"\" Method called to prepare", "illumination for light in range(len(img_0)): dset = grp.create_dataset(str(light), data=img_0[light]) img_1 = [image_pairs_1] grp", "= grp.create_dataset(str(light), data=img_1[light]) sampl_file = h5py.File('sample_middlebury.hdf5', 'w') # disparity of image_pairs_0 x0 =", "all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_accuratenegative_disp(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_accurate", "to a matching cost cv_gt *= -1 cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -2, 2)", "# Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add", "0:1]).cpu().detach().numpy() # disparity -2 cv_gt[:, 2:, 2] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:,", "of image_pairs_0 x0 = np.array([[0., 5., 6., 1.] [0., 7., 7., 1.]]) #", "Positive disparity cfg = { \"data_augmentation\": False, \"dataset_neg_low\": 1, \"dataset_neg_high\": 1, \"dataset_pos\": 0,", ":3, 0] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:,", "Spatiales (CNES). # # This file is part of PANDORA_MCCNN # # https://github.com/CNES/Pandora_MCCNN", "(row, col, disp) cv_gt = np.full((4, 4, 5), np.nan) # disparity -2 cv_gt[:,", "positive disparities \"\"\" # create reference and secondary features ref_feature = torch.randn((1, 112,", "W), we add 1 dimensions cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -2, 2, self.sad_cost) #", "\"\"\" def setUp(self): \"\"\" Method called to prepare the test fixture \"\"\" self.ref_img_0", "actual name of the checked function def test_DataFusionContestGenerator(self): \"\"\" test the function DataFusionContestGenerator", "img_ref(x,y) = img_sec(x-d,y) disp = 1 x_sec_pos_patch = x_ref_patch - disp y_sec_pos_patch =", "+ 1, x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1, ] #", "= np.full((4, 4, 4), np.nan) # disparity 1 cv_gt[:, :3, 0] = cos(ref_feature[:,", "0.7, \"d_contrast\": 1.1, }, } training_loader = MiddleburyGenerator(\"tests/sample_middlebury.hdf5\", \"tests/images_middlebury.hdf5\", cfg) # Patch of", "= np.tile(np.arange(13, dtype=np.float32), (13, 1)) self.sec_img_2 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) - 1", "import computes_cost_volume_mc_cnn_fast from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer from mc_cnn.dataset_generator.middlebury_generator import MiddleburyGenerator from mc_cnn.dataset_generator.datas_fusion_contest_generator import", "x_sec_neg_patch + patch_size + 1, ] gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0) #", ":, :, :] - sec_features[0, :, :, :]), dim=0) def test_computes_cost_volume_mc_cnn_accurate(self): \"\"\" \"", "\"\"\" # create reference and secondary features ref_feature = torch.randn((1, 112, 4, 4),", "\"dataset_neg_low\": 1, \"dataset_neg_high\": 1, \"dataset_pos\": 0, \"vertical_disp\": 0, \"augmentation_param\": { \"scale\": 0.8, \"hscale\":", "the actual name of the checked function def test_DataFusionContestGenerator(self): \"\"\" test the function", "all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def 
test_computes_cost_volume_mc_cnn_fast_negative_disp(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_fast", "= 6 y_ref_patch = 5 patch_size = 5 gt_ref_patch = self.ref_img_0[ y_ref_patch -", "(Batch_size, Channel, H, W), we add 1 dimensions cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -2,", "13 : 2 = left and right images, row, col image_pairs_0 = np.zeros((2,", "\"\"\" return torch.sum(abs(ref_features[0, :, :, :] - sec_features[0, :, :, :]), dim=0) def", "ground truth (same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_accurate_positive_disp(self):", "or agreed to in writing, software # distributed under the License is distributed", "loses the reference to the actual name of the checked function def test_DataFusionContestGenerator(self):", ": # shape 2, 13, 13 : 2 = left and right images,", "the reference to the actual name of the checked function def test_MiddleburyGenerator(self): \"\"\"", "shape 2, 13, 13 : 2 = left and right images, row, col", "np.tile(np.arange(13, dtype=np.float32), (13, 1)) self.sec_img_2 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) - 1 def", "+ patch_size + 1, ] # dataset_neg_low & dataset_neg_high = 1, with middlebury", "matching cost cv_gt *= -1 cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, 1, 4) # Check", "self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy() # The minus sign converts", "H, W), we add 1 dimensions cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, 1, 4, self.sad_cost)", "ground truth cost volume (row, col, disp) cv_gt = np.full((4, 4, 4), np.nan)", "sec_features[:, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2, 4] = cos(ref_feature[:, :, :2],", "# disparity 3 cv_gt[:, :1, 2] = cos(ref_feature[:, :, :1], sec_features[:, :, 3:]).cpu().detach().numpy()", "truth (same shape and all elements equals) np.testing.assert_array_equal(patch, gt_path) # negative disparity patch", "+ 1, ] gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0) # Check if the", "input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimensions", "# disp = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) disp =", ":, :, :], sec_features[:, :, :, :]).cpu().detach().numpy() # disparity 1 cv_gt[:, :3, 3]", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "img_file.create_group(str(1)) for light in range(len(img_1)): dset = grp.create_dataset(str(light), data=img_1[light]) sampl_file = h5py.File('sample_middlebury.hdf5', 'w')", "# all nan # disparity -3 cv_gt[:, 3:, 1] = self.sad_cost(ref_feature[:, :, :,", "0:3]).cpu().detach().numpy() # The minus sign converts the similarity score to a matching cost", ":] = np.tile(np.arange(13), (13, 1)) - 1 img_file = h5py.File('images_dfc.hdf5', 'w') img_file.create_dataset(str(0), data=image_pairs_0)", "test_computes_cost_volume_mc_cnn_fast_negative_disp(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_fast function with negative disparities \"\"\" # create", "cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy() # disparity 3 cv_gt[:, :1, 2] =", "License. 
# You may obtain a copy of the License at # #", "self.sec_img_2 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) - 1 def test_computes_cost_volume_mc_cnn_fast(self): \"\"\" \" Test", "cost cv_gt *= -1 cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -2, 2) # Check if", "= acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -4, -1, self.sad_cost) # Check if the calculated cost volume", "sampl_file.create_dataset(str(1), data=x1) \"\"\" # Positive disparity cfg = { \"data_augmentation\": False, \"dataset_neg_low\": 1,", "np.array([[0., 5., 6., 1.], [0., 7., 7., 1.]]) # disparity of image_pairs_1 x1", "cv_gt *= -1 cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -2, 2) # Check if the", "patch_size : y_sec_pos_patch + patch_size + 1, x_sec_pos_patch - patch_size : x_sec_pos_patch +", "rtol=1e-05) def test_computes_cost_volume_mc_cnn_accurate_positive_disp(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_accurate function with positive disparities \"\"\"", "add 1 dimensions cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, 1, 4, self.sad_cost) # Check if", "x_ref_patch + patch_size + 1, ] # disp = -1, with middlebury image", "3] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy() # The minus", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "x0 = np.array([[0., 5., 6., 1.], [0., 7., 7., 1.]]) # disparity of", "self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy() # The minus sign converts", "- sec_features[0, :, :, :]), dim=0) def test_computes_cost_volume_mc_cnn_accurate(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_accurate", "cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_accuratenegative_disp(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_accurate function with negative disparities", "2:4]).cpu().detach().numpy() # disparity 3 cv_gt[:, :1, 2] = self.sad_cost(ref_feature[:, :, :, :1], sec_features[:,", "function \"\"\" # create reference and secondary features ref_feature = torch.randn((1, 112, 4,", "self.sad_cost(ref_feature[:, :, :, 3:], sec_features[:, :, :, 0:1]).cpu().detach().numpy() # disparity -2 cv_gt[:, 2:,", "With the firt dimension = left patch, right positive patch, right negative patch", "disparity -2 cv_gt[:, 2:, 2] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :, :,", "DataFusionContestGenerator # pylint: disable=no-self-use class TestMCCNN(unittest.TestCase): \"\"\" TestMCCNN class allows to test the", "volume create by mc_cnn \"\"\" def setUp(self): \"\"\" Method called to prepare the", "1] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy() # disparity 3", "License, Version 2.0 (the \"License\"); # you may not use this file except", "cos(ref_feature[:, :, :], sec_features[:, :, :]).cpu().detach().numpy() # disparity 1 cv_gt[:, :3, 3] =", "0, \"rotate\": 28, \"brightness\": 1.3, \"contrast\": 1.1, \"d_hscale\": 0.9, \"d_hshear\": 0.3, \"d_vtrans\": 1,", "7 gt_sec_pos_patch = self.sec_img_2[ y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,", "= cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy() # disparity 3 cv_gt[:, :1, 2]", "the ground truth (same shape and all elements equals) np.testing.assert_array_equal(patch, gt_path) # pylint:", "image_pairs_0 = np.zeros((2, 13, 13)) # left image_pairs_0[0, :, :] = np.tile(np.arange(13), (13,", ":] = 
np.tile(np.arange(13), (13, 1)) + 1 image_pairs_1 = np.zeros((2, 13, 13)) image_pairs_1[0,", "-1 acc = AccMcCnnInfer() # Because input shape of nn.Conv2d is (Batch_size, Channel,", "disparity 4 # all nan # The minus sign converts the similarity score", "= np.tile(np.arange(13), (13, 1)) + 1 image_pairs_1 = np.zeros((2, 13, 13)) image_pairs_1[0, :,", "use to create images_middlebury and samples_middlebury : # shape 2, 13, 13 :", "# disparity 1 cv_gt[:, :3, 3] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy()", "volume (row, col, disp) cv_gt = np.full((4, 4, 4), np.nan) # disparity -4", "if the calculated patch is equal to the ground truth (same shape and", "1] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy() # disparity 0 cv_gt[:, :,", ":2, 1] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy() # disparity 3 cv_gt[:,", "13)) # left image_pairs_0[0, :, :] = np.tile(np.arange(13), (13, 1)) # right image_pairs_0[1,", "\"\"\" # Script use to create images_middlebury and samples_middlebury : # shape 2,", "self.sad_cost(ref_feature[:, :, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2,", ":, :3], sec_features[:, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2, 4] = cos(ref_feature[:,", "the specific language governing permissions and # limitations under the License. # \"\"\"", "sec_features = torch.randn((64, 4, 4), dtype=torch.float64) cos = nn.CosineSimilarity(dim=0, eps=1e-6) # Create the", "cv_gt = np.full((4, 4, 4), np.nan) # disparity -4 # all nan #", "13, 13 : 1 exposures, 2 = left and right images image_pairs_0 =", "} training_loader = MiddleburyGenerator(\"tests/sample_middlebury.hdf5\", \"tests/images_middlebury.hdf5\", cfg) # Patch of shape 3, 11, 11", "cv_gt *= -1 cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -4, -1) # Check if the", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "3, \"d_brightness\": 0.7, \"d_contrast\": 1.1, }, } training_loader = MiddleburyGenerator(\"tests/sample_middlebury.hdf5\", \"tests/images_middlebury.hdf5\", cfg) #", "-1, self.sad_cost) # Check if the calculated cost volume is equal to the", "and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def sad_cost(self, ref_features, sec_features): \"\"\" Useful", ": 2 = left and right images, row, col image_pairs_0 = np.zeros((2, 13,", "patch, right negative patch patch = training_loader.__getitem__(0) x_ref_patch = 6 y_ref_patch = 5", "2] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy() # disparity -1 cv_gt[:, 1:,", "patch_size + 1, x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1, ]", "= 5 gt_ref_patch = self.ref_img_0[ y_ref_patch - patch_size : y_ref_patch + patch_size +", "= h5py.File('sample_dfc.hdf5', 'w') # disparity of image_pairs_0 x0 = np.array([[0., 5., 6., 1.],", ":, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2, 1]", "and secondary features ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64) sec_features = torch.randn((1,", "rtol=1e-05) def test_computes_cost_volume_mc_cnn_accuratenegative_disp(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_accurate function with negative disparities \"\"\"", "np.tile(np.arange(13), (13, 1)) + 1 image_pairs_1 = np.zeros((1, 2, 13, 13)) image_pairs_1[0, 0,", ": 1 exposures, 2 = left and right images image_pairs_0 = np.zeros((1, 2,", "13, 13)) 
image_pairs_1[0, 0, :, :] = np.tile(np.arange(13), (13, 1)) image_pairs_1[0, 1, :,", "under the License. # \"\"\" This module contains functions to test the cost", "sec_features, -2, 2, self.sad_cost) # Check if the calculated cost volume is equal", "np.tile(np.arange(13), (13, 1)) # right image_pairs_0[1, :, :] = np.tile(np.arange(13), (13, 1)) +", "sec_features[:, :, :, 3:]).cpu().detach().numpy() # disparity 4 # all nan # The minus", "img_ref(x,y) = img_sec(x-d,y) dataset_neg = 1 x_sec_neg_patch = x_ref_patch - disp + dataset_neg", "patch_size + 1, x_ref_patch - patch_size : x_ref_patch + patch_size + 1, ]", "2:], sec_features[:, :, 0:2]).cpu().detach().numpy() # disparity -1 cv_gt[:, 1:, 3] = cos(ref_feature[:, :,", "disparity 3 cv_gt[:, :1, 2] = self.sad_cost(ref_feature[:, :, :, :1], sec_features[:, :, :,", "-4, -1) # Check if the calculated cost volume is equal to the", "disable=invalid-name # -> because changing the name here loses the reference to the", ":, :] = np.tile(np.arange(13), (13, 1)) + 1 image_pairs_1 = np.zeros((1, 2, 13,", "and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_fast_negative_disp(self): \"\"\" \" Test the", "or implied. # See the License for the specific language governing permissions and", "= cos(ref_feature[:, :, :1], sec_features[:, :, 3:]).cpu().detach().numpy() # disparity 4 # all nan", "np import torch import torch.nn as nn from mc_cnn.run import computes_cost_volume_mc_cnn_fast from mc_cnn.model.mc_cnn_accurate", "h5py.File('images_dfc.hdf5', 'w') img_file.create_dataset(str(0), data=image_pairs_0) img_file.create_dataset(str(1), data=image_pairs_1) sampl_file = h5py.File('sample_dfc.hdf5', 'w') # disparity of", "y_ref_patch + patch_size + 1, x_ref_patch - patch_size : x_ref_patch + patch_size +", "sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64) # Create the ground truth cost", "(13, 1)) + 1 self.ref_img_1 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) self.sec_img_2 = np.tile(np.arange(13,", "2:, 0] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy() # disparity -1 cv_gt[:,", "# disparity 2 cv_gt[:, :2, 4] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :,", "# left image_pairs_0[0, 0, :, :] = np.tile(np.arange(13), (13, 1)) # right image_pairs_0[0,", "= self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy() # disparity 0 cv_gt[:,", "1 x_sec_pos_patch = x_ref_patch - disp y_sec_pos_patch = 5 gt_sec_pos_patch = self.sec_img_0[ y_sec_pos_patch", "3] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2,", "eps=1e-6) # Create the ground truth cost volume (row, col, disp) cv_gt =", "1:, 1] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy() # disparity 0 cv_gt[:,", "np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_fast_negative_disp(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_fast function with negative", "grp = img_file.create_group(str(1)) for light in range(len(img_1)): dset = grp.create_dataset(str(light), data=img_1[light]) sampl_file =", "3 cv_gt[:, :1, 2] = cos(ref_feature[:, :, :1], sec_features[:, :, 3:]).cpu().detach().numpy() # disparity", "images_middlebury and samples_middlebury : # pylint: disable=pointless-string-statement \"\"\" # shape 1, 2, 13,", "\"augmentation_param\": { \"scale\": 0.8, \"hscale\": 0.8, \"hshear\": 0.1, \"trans\": 0, \"rotate\": 28, 
\"brightness\":", "convention img_ref(x,y) = img_sec(x-d,y) disp = -1 x_sec_pos_patch = x_ref_patch - disp y_sec_pos_patch", "cfg) # Patch of shape 3, 11, 11 # With the firt dimension", "7., 5., -1.] [ 0., 0., 0., 0.]]) sampl_file.create_dataset(str(0), data=x0) sampl_file.create_dataset(str(1), data=x1) \"\"\"", "# disparity of image_pairs_0 x0 = np.array([[0., 5., 6., 1.] [0., 7., 7.,", "disp = -1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) disp = -1", "test the cost volume create by mc_cnn \"\"\" def setUp(self): \"\"\" Method called", "and secondary features ref_feature = torch.randn((64, 4, 4), dtype=torch.float64) sec_features = torch.randn((64, 4,", "(same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_accuratenegative_disp(self): \"\"\" \"", "Test the computes_cost_volume_mc_cnn_fast function with negative disparities \"\"\" # create reference and secondary", "part of PANDORA_MCCNN # # https://github.com/CNES/Pandora_MCCNN # # Licensed under the Apache License,", "equal to the ground truth (same shape and all elements equals) np.testing.assert_array_equal(patch, gt_path)", "4, self.sad_cost) # Check if the calculated cost volume is equal to the", "\"dataset_pos\": 0, \"augmentation_param\": { \"vertical_disp\": 0, \"scale\": 0.8, \"hscale\": 0.8, \"hshear\": 0.1, \"trans\":", "the ground truth (same shape and all elements equals) np.testing.assert_array_equal(patch, gt_path) # negative", "matching cost cv_gt *= -1 cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -4, -1) # Check", "disp y_sec_pos_patch = 7 gt_sec_pos_patch = self.sec_img_2[ y_sec_pos_patch - patch_size : y_sec_pos_patch +", "] gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0) # Check if the calculated patch", "H, W), we add 1 dimensions cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -2, 2, self.sad_cost)", "elements equals) np.testing.assert_array_equal(patch, gt_path) # negative disparity patch = training_loader.__getitem__(2) x_ref_patch = 5", "use this file except in compliance with the License. # You may obtain", "cv_gt[:, 2:, 2] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy() # disparity -1", "data=img_1[light]) sampl_file = h5py.File('sample_middlebury.hdf5', 'w') # disparity of image_pairs_0 x0 = np.array([[0., 5.,", "= self.sad_cost(ref_feature[:, :, :, 3:], sec_features[:, :, :, 0:1]).cpu().detach().numpy() # disparity -2 cv_gt[:,", "1, :, :] = np.tile(np.arange(13), (13, 1)) + 1 image_pairs_1 = np.zeros((1, 2,", "(same shape and all elements equals) np.testing.assert_array_equal(patch, gt_path) if __name__ == \"__main__\": unittest.main()", "2] = cos(ref_feature[:, :, :1], sec_features[:, :, 3:]).cpu().detach().numpy() # disparity 4 # all", "the checked function def test_MiddleburyGenerator(self): \"\"\" test the function MiddleburyGenerator \"\"\" # Script", "reference to the actual name of the checked function def test_DataFusionContestGenerator(self): \"\"\" test", "reference and secondary features ref_feature = torch.randn((64, 4, 4), dtype=torch.float64) sec_features = torch.randn((64,", "= np.full((4, 4, 4), np.nan) # disparity -4 # all nan # disparity", "5 patch_size = 5 gt_ref_patch = self.ref_img_0[ y_ref_patch - patch_size : y_ref_patch +", ":, 2] = cos(ref_feature[:, :, :], sec_features[:, :, :]).cpu().detach().numpy() # disparity 1 cv_gt[:,", "= np.array([[0., 5., 6., 1.] 
[0., 7., 7., 1.]]) # disparity of image_pairs_1", ":] = np.tile(np.arange(13), (13, 1)) # right image_pairs_0[1, :, :] = np.tile(np.arange(13), (13,", "col, disp) cv_gt = np.full((4, 4, 4), np.nan) # disparity 1 cv_gt[:, :3,", "with positive disparities \"\"\" # create reference and secondary features ref_feature = torch.randn((1,", "training_loader.__getitem__(2) x_ref_patch = 5 y_ref_patch = 7 patch_size = 5 gt_ref_patch = self.ref_img_0[", "}, } training_loader = DataFusionContestGenerator(\"tests/sample_dfc.hdf5\", \"tests/images_dfc.hdf5\", cfg) # Patch of shape 3, 11,", "\" Test the computes_cost_volume_mc_cnn_accurate function with positive disparities \"\"\" # create reference and", "from mc_cnn.run import computes_cost_volume_mc_cnn_fast from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer from mc_cnn.dataset_generator.middlebury_generator import MiddleburyGenerator from", "np.tile(np.arange(13), (13, 1)) image_pairs_1[1, :, :] = np.tile(np.arange(13), (13, 1)) - 1 img_file", "# disparity -2 cv_gt[:, 2:, 2] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :,", "disparity 2 cv_gt[:, :2, 4] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy() #", "0, \"augmentation_param\": { \"vertical_disp\": 0, \"scale\": 0.8, \"hscale\": 0.8, \"hshear\": 0.1, \"trans\": 0,", "light in range(len(img_0)): dset = grp.create_dataset(str(light), data=img_0[light]) img_1 = [image_pairs_1] grp = img_file.create_group(str(1))", "for the specific language governing permissions and # limitations under the License. #", "disparities \"\"\" # create reference and secondary features ref_feature = torch.randn((64, 4, 4),", "self.ref_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) self.sec_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) +", "the computes_cost_volume_mc_cnn_accurate function \"\"\" # create reference and secondary features ref_feature = torch.randn((1,", "np.testing.assert_array_equal(patch, gt_path) # pylint: disable=invalid-name # -> because changing the name here loses", "] # disp = -1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y) disp", "= np.array([[0., 5., 6., 1.], [0., 7., 7., 1.]]) # disparity of image_pairs_1", "test_computes_cost_volume_mc_cnn_accurate(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_accurate function \"\"\" # create reference and secondary", "acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, 1, 4, self.sad_cost) # Check if the calculated cost volume is", "calculated cost volume is equal to the ground truth (same shape and all", "image_pairs_1 x1 = np.array([[ 1., 7., 5., -1.], [ 0., 0., 0., 0.]])", "a matching cost cv_gt *= -1 cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -4, -1) #", "all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) def sad_cost(self, ref_features, sec_features): \"\"\" Useful to", "dataset_neg = 1 x_sec_neg_patch = x_ref_patch - disp + dataset_neg y_sec_neg_patch = 5", "MiddleburyGenerator \"\"\" # Script use to create images_middlebury and samples_middlebury : # pylint:", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "cv_gt *= -1 acc = AccMcCnnInfer() # Because input shape of nn.Conv2d is", "sec_features[:, :, :, 0:2]).cpu().detach().numpy() # disparity -1 cv_gt[:, 1:, 1] = self.sad_cost(ref_feature[:, :,", "MiddleburyGenerator(\"tests/sample_middlebury.hdf5\", \"tests/images_middlebury.hdf5\", cfg) # Patch of shape 3, 11, 11 # With the", "4, 4), dtype=torch.float64) sec_features = 
torch.randn((1, 112, 4, 4), dtype=torch.float64) # Create the", "axis=0) # Check if the calculated patch is equal to the ground truth", "import DataFusionContestGenerator # pylint: disable=no-self-use class TestMCCNN(unittest.TestCase): \"\"\" TestMCCNN class allows to test", "= cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2, 4]", "4), dtype=torch.float64) cos = nn.CosineSimilarity(dim=0, eps=1e-6) # Create the ground truth cost volume", "cv_gt[:, :1, 2] = self.sad_cost(ref_feature[:, :, :, :1], sec_features[:, :, :, 3:]).cpu().detach().numpy() #", "disp y_sec_pos_patch = 5 gt_sec_pos_patch = self.sec_img_0[ y_sec_pos_patch - patch_size : y_sec_pos_patch +", "x0 = np.array([[0., 5., 6., 1.] [0., 7., 7., 1.]]) # disparity of", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "a matching cost cv_gt *= -1 cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, 1, 4) #", "(13, 1)) self.sec_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) + 1 self.ref_img_1 = np.tile(np.arange(13,", "img_ref(x,y) = img_sec(x-d,y) disp = -1 x_sec_pos_patch = x_ref_patch - disp y_sec_pos_patch =", "(CNES). # # This file is part of PANDORA_MCCNN # # https://github.com/CNES/Pandora_MCCNN #", "sad_cost(self, ref_features, sec_features): \"\"\" Useful to test the computes_cost_volume_mc_cnn_accurate function \"\"\" return torch.sum(abs(ref_features[0,", "x_ref_patch - disp y_sec_pos_patch = 7 gt_sec_pos_patch = self.sec_img_2[ y_sec_pos_patch - patch_size :", "y_sec_pos_patch = 7 gt_sec_pos_patch = self.sec_img_2[ y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size", ":, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy() # The minus sign converts the similarity", "- disp + dataset_neg y_sec_neg_patch = 5 gt_sec_neg_patch = self.sec_img_0[ y_sec_neg_patch - patch_size", "# disparity -2 cv_gt[:, 2:, 2] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy()", "\"vertical_disp\": 0, \"augmentation_param\": { \"scale\": 0.8, \"hscale\": 0.8, \"hshear\": 0.1, \"trans\": 0, \"rotate\":", "dataset_neg y_sec_neg_patch = 7 gt_sec_neg_patch = self.sec_img_2[ y_sec_neg_patch - patch_size : y_sec_neg_patch +", "all elements equals) np.testing.assert_array_equal(patch, gt_path) # pylint: disable=invalid-name # -> because changing the", "7 gt_sec_neg_patch = self.sec_img_2[ y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,", "Script use to create images_middlebury and samples_middlebury : # pylint: disable=pointless-string-statement \"\"\" #", "calculated patch is equal to the ground truth (same shape and all elements", "5., -1.], [ 0., 0., 0., 0.]]) sampl_file.create_dataset(str(0), data=x0) sampl_file.create_dataset(str(1), data=x1) \"\"\" #", "the ground truth (same shape and all elements equals) np.testing.assert_allclose(cv, cv_gt, rtol=1e-05) #", "with negative disparities \"\"\" # create reference and secondary features ref_feature = torch.randn((64,", "disp) cv_gt = np.full((4, 4, 5), np.nan) # disparity -2 cv_gt[:, 2:, 0]", "cv_gt[:, 2:, 0] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy() # disparity -1", "with the License. 
# You may obtain a copy of the License at", "# shape 2, 13, 13 : 2 = left and right images, row,", "2 cv_gt[:, :2, 4] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy() # The", "\"tests/images_middlebury.hdf5\", cfg) # Patch of shape 3, 11, 11 # With the firt", "# disparity -1 cv_gt[:, 1:, 3] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy()", ":, 0:2]).cpu().detach().numpy() # disparity -1 cv_gt[:, 1:, 3] = self.sad_cost(ref_feature[:, :, :, 1:],", "0, \"augmentation_param\": { \"scale\": 0.8, \"hscale\": 0.8, \"hshear\": 0.1, \"trans\": 0, \"rotate\": 28,", "disparity 3 cv_gt[:, :1, 2] = cos(ref_feature[:, :, :1], sec_features[:, :, 3:]).cpu().detach().numpy() #", "# negative disparity patch = training_loader.__getitem__(2) x_ref_patch = 5 y_ref_patch = 7 patch_size", ":]).cpu().detach().numpy() # disparity 1 cv_gt[:, :3, 3] = self.sad_cost(ref_feature[:, :, :, :3], sec_features[:,", "\"\"\" test the function DataFusionContestGenerator \"\"\" # pylint: disable=pointless-string-statement \"\"\" # Script use", "law or agreed to in writing, software # distributed under the License is", ":, 0:2]).cpu().detach().numpy() # disparity -1 cv_gt[:, 1:, 3] = cos(ref_feature[:, :, 1:], sec_features[:,", "features ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64) sec_features = torch.randn((1, 112, 4,", "Test the computes_cost_volume_mc_cnn_accurate function \"\"\" # create reference and secondary features ref_feature =", "= torch.randn((64, 4, 4), dtype=torch.float64) sec_features = torch.randn((64, 4, 4), dtype=torch.float64) cos =", "This module contains functions to test the cost volume create by mc_cnn \"\"\"", ":] = np.tile(np.arange(13), (13, 1)) image_pairs_1[0, 1, :, :] = np.tile(np.arange(13), (13, 1))", "and # limitations under the License. 
# \"\"\" This module contains functions to", "= grp.create_dataset(str(light), data=img_0[light]) img_1 = [image_pairs_1] grp = img_file.create_group(str(1)) for light in range(len(img_1)):", "= [image_pairs_1] grp = img_file.create_group(str(1)) for light in range(len(img_1)): dset = grp.create_dataset(str(light), data=img_1[light])", "image_pairs_1[0, 1, :, :] = np.tile(np.arange(13), (13, 1)) - 1 img_file = h5py.File('images_middlebury.hdf5',", ":, 2:4]).cpu().detach().numpy() # The minus sign converts the similarity score to a matching", ":, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2, 4] =", ":, :] = np.tile(np.arange(13), (13, 1)) image_pairs_1[0, 1, :, :] = np.tile(np.arange(13), (13,", "1)) + 1 self.ref_img_1 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) self.sec_img_2 = np.tile(np.arange(13, dtype=np.float32),", "self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy() # disparity 0 cv_gt[:, :,", "x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1, ] gt_path = np.stack((gt_ref_patch,", "= np.zeros((1, 2, 13, 13)) # left image_pairs_0[0, 0, :, :] = np.tile(np.arange(13),", "= np.tile(np.arange(13), (13, 1)) - 1 img_file = h5py.File('images_dfc.hdf5', 'w') img_file.create_dataset(str(0), data=image_pairs_0) img_file.create_dataset(str(1),", "disp + dataset_neg y_sec_neg_patch = 5 gt_sec_neg_patch = self.sec_img_0[ y_sec_neg_patch - patch_size :", "# pylint: disable=no-self-use class TestMCCNN(unittest.TestCase): \"\"\" TestMCCNN class allows to test the cost", "right image_pairs_0[1, :, :] = np.tile(np.arange(13), (13, 1)) + 1 image_pairs_1 = np.zeros((2,", "0.1, \"trans\": 0, \"rotate\": 28, \"brightness\": 1.3, \"contrast\": 1.1, \"d_hscale\": 0.9, \"d_hshear\": 0.3,", "np.nan) # disparity -2 cv_gt[:, 2:, 0] = cos(ref_feature[:, :, 2:], sec_features[:, :,", "\"rotate\": 28, \"brightness\": 1.3, \"contrast\": 1.1, \"d_hscale\": 0.9, \"d_hshear\": 0.3, \"d_vtrans\": 1, \"d_rotate\":", "in compliance with the License. 
# You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "dtype=np.float32), (13, 1)) - 1 def test_computes_cost_volume_mc_cnn_fast(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_fast function", "disparity cfg = { \"data_augmentation\": False, \"dataset_neg_low\": 1, \"dataset_neg_high\": 1, \"dataset_pos\": 0, \"vertical_disp\":", ":3], sec_features[:, :, 1:4]).cpu().detach().numpy() # disparity 2 cv_gt[:, :2, 4] = cos(ref_feature[:, :,", "0.8, \"hshear\": 0.1, \"trans\": 0, \"rotate\": 28, \"brightness\": 1.3, \"contrast\": 1.1, \"d_hscale\": 0.9,", "= DataFusionContestGenerator(\"tests/sample_dfc.hdf5\", \"tests/images_dfc.hdf5\", cfg) # Patch of shape 3, 11, 11 # With", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "create reference and secondary features ref_feature = torch.randn((64, 4, 4), dtype=torch.float64) sec_features =", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "rtol=1e-05) def test_computes_cost_volume_mc_cnn_fast_negative_disp(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_fast function with negative disparities \"\"\"", "for light in range(len(img_1)): dset = grp.create_dataset(str(light), data=img_1[light]) sampl_file = h5py.File('sample_middlebury.hdf5', 'w') #", "x_sec_neg_patch = x_ref_patch - disp + dataset_neg y_sec_neg_patch = 7 gt_sec_neg_patch = self.sec_img_2[", "1 self.ref_img_1 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) self.sec_img_2 = np.tile(np.arange(13, dtype=np.float32), (13, 1))", "Create the ground truth cost volume (row, col, disp) cv_gt = np.full((4, 4,", "1 image_pairs_1 = np.zeros((2, 13, 13)) image_pairs_1[0, :, :] = np.tile(np.arange(13), (13, 1))", "cv_gt[:, :, 2] = cos(ref_feature[:, :, :], sec_features[:, :, :]).cpu().detach().numpy() # disparity 1", "x_sec_neg_patch = x_ref_patch - disp + dataset_neg y_sec_neg_patch = 5 gt_sec_neg_patch = self.sec_img_0[", "patch_size + 1, ] # dataset_neg_low & dataset_neg_high = 1, with middlebury image", "of image_pairs_0 x0 = np.array([[0., 5., 6., 1.], [0., 7., 7., 1.]]) #", "# disparity of image_pairs_0 x0 = np.array([[0., 5., 6., 1.], [0., 7., 7.,", "computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, 1, 4) # Check if the calculated cost volume is equal", "all nan # The minus sign converts the similarity score to a matching", "+ 1, ] # disp = 1, with middlebury image convention img_ref(x,y) =", "1:], sec_features[:, :, 0:3]).cpu().detach().numpy() # The minus sign converts the similarity score to", "data=image_pairs_0) img_file.create_dataset(str(1), data=image_pairs_1) sampl_file = h5py.File('sample_dfc.hdf5', 'w') # disparity of image_pairs_0 x0 =", "= 5 patch_size = 5 gt_ref_patch = self.ref_img_0[ y_ref_patch - patch_size : y_ref_patch", "# Script use to create images_middlebury and samples_middlebury : # pylint: disable=pointless-string-statement \"\"\"", "\"\"\" # pylint: disable=pointless-string-statement \"\"\" # Script use to create images_middlebury and samples_middlebury", "cv_gt, rtol=1e-05) def test_computes_cost_volume_mc_cnn_accurate_positive_disp(self): \"\"\" \" Test the computes_cost_volume_mc_cnn_accurate function with positive disparities", ": y_ref_patch + patch_size + 1, x_ref_patch - patch_size : x_ref_patch + patch_size", "1)) - 1 img_file = h5py.File('images_middlebury.hdf5', 'w') img_0 = [image_pairs_0] grp = img_file.create_group(str(0))", "image_pairs_1[1, :, :] = 
[Fragment set 1: overlapping n-grams of tests/test_mc_cnn.py from PANDORA_MCCNN, Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES), Apache License 2.0, https://github.com/CNES/Pandora_MCCNN.]

The module docstring reads "This module contains functions to test the cost volume create by mc_cnn". It imports unittest, numpy, torch and torch.nn, plus computes_cost_volume_mc_cnn_fast from mc_cnn.run, AccMcCnnInfer from mc_cnn.model.mc_cnn_accurate, MiddleburyGenerator from mc_cnn.dataset_generator.middlebury_generator and DataFusionContestGenerator from mc_cnn.dataset_generator.datas_fusion_contest_generator. The TestMCCNN(unittest.TestCase) class defines setUp, which builds small 13x13 synthetic images from np.tile(np.arange(13, dtype=np.float32), (13, 1)) (self.ref_img_0 and the companion self.ref_img_1 / self.sec_img_0 / self.sec_img_2 used by the generator tests), followed by the cost-volume tests.

test_computes_cost_volume_mc_cnn_fast and its _negative_disp / _positive_disp variants draw random (64, 4, 4) reference and secondary features, build a ground-truth cost volume cv_gt of shape (row, col, disp) with nn.CosineSimilarity(dim=0, eps=1e-6) on column slices shifted by each disparity (disparities with no valid match stay NaN), multiply by -1 ("the minus sign converts the similarity score to a matching cost"), and compare against computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, disp_min, disp_max) with np.testing.assert_allclose(cv, cv_gt, rtol=1e-05).

test_computes_cost_volume_mc_cnn_accurate and its positive/negative disparity variants repeat the same check with (1, 112, 4, 4) features and a sum-of-absolute-differences cost, calling acc = AccMcCnnInfer() and acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, disp_min, disp_max, self.sad_cost); the leading dimension is there "because input shape of nn.Conv2d is (Batch_size, Channel, H, W)". A sketch of the ground-truth construction these tests rely on follows below.
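The recoverable fragments spell out the ground-truth construction disparity by disparity (for example cv_gt[:, 2:, 0] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]) for disparity -2). Below is a minimal, self-contained sketch of that pattern; the loop and the slice arithmetic generalize the hand-written per-disparity lines and are illustrative, not the repository's own code.

import numpy as np
import torch
import torch.nn as nn

# Random reference/secondary feature maps (features, rows, cols), as in the fast test.
ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
sec_features = torch.randn((64, 4, 4), dtype=torch.float64)
cos = nn.CosineSimilarity(dim=0, eps=1e-6)

disp_min, disp_max = -2, 2
n_col = ref_feature.shape[2]

# Ground-truth cost volume (row, col, disp); disparities with no valid match stay NaN.
cv_gt = np.full((ref_feature.shape[1], n_col, disp_max - disp_min + 1), np.nan)
for i, disp in enumerate(range(disp_min, disp_max + 1)):
    # Reference column x is matched with secondary column x + disp.
    ref_lo, ref_hi = max(0, -disp), min(n_col, n_col - disp)
    if ref_lo < ref_hi:
        similarity = cos(
            ref_feature[:, :, ref_lo:ref_hi],
            sec_features[:, :, ref_lo + disp:ref_hi + disp],
        )
        cv_gt[:, ref_lo:ref_hi, i] = similarity.cpu().detach().numpy()

# The minus sign converts the similarity score to a matching cost.
cv_gt *= -1
print(cv_gt.shape)  # (4, 4, 5)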
The sad_cost helper, "Useful to test the computes_cost_volume_mc_cnn_accurate function", returns torch.sum(abs(ref_features[0, :, :, :] - sec_features[0, :, :, :]), dim=0); a standalone sketch of it follows below.

test_MiddleburyGenerator and test_DataFusionContestGenerator (each preceded by "# pylint: disable=invalid-name -> because changing the name here loses the reference to the actual name of the checked function") carry a commented-out fixture-building script kept as a string: 13x13 image pairs made of shifted np.tile(np.arange(13), (13, 1)) columns written to images_middlebury.hdf5 / sample_middlebury.hdf5 (grouped per illumination) and images_dfc.hdf5 / sample_dfc.hdf5, together with small disparity sample arrays x0 and x1. The tests then build a data-augmentation config (data_augmentation False, dataset_neg_low/high 1, dataset_pos 0, vertical_disp 0, plus scale/hscale/hshear/trans/rotate/brightness/contrast and their d_* counterparts), instantiate MiddleburyGenerator("tests/sample_middlebury.hdf5", "tests/images_middlebury.hdf5", cfg) or DataFusionContestGenerator("tests/sample_dfc.hdf5", "tests/images_dfc.hdf5", cfg), and call training_loader.__getitem__(...) to get a patch of shape 3 x 11 x 11 whose first dimension is (left patch, right positive patch, right negative patch). The ground truth is cut out of the fixture images around (x_ref_patch, y_ref_patch) with patch_size = 5, using the Middlebury convention img_ref(x, y) = img_sec(x - d, y) for the positive patch and shifting by dataset_neg for the negative patch, then stacked with np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0) and compared via np.testing.assert_array_equal(patch, gt_path). The file ends with the usual if __name__ == ... test entry point.
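The SAD matching cost is short enough to reproduce directly from the fragments. A self-contained sketch follows; the standalone function (without self) and the demo tensors are mine, the body and the (Batch_size, Channel, H, W) shapes match the recovered test helper.

import torch

def sad_cost(ref_features, sec_features):
    # Sum of absolute differences over the channel axis; the leading batch
    # dimension (index 0) is dropped, as in the recovered test helper.
    return torch.sum(abs(ref_features[0, :, :, :] - sec_features[0, :, :, :]), dim=0)

# (Batch_size, Channel, H, W) features, as used by the accurate-network tests.
ref = torch.randn((1, 112, 4, 4), dtype=torch.float64)
sec = torch.randn((1, 112, 4, 4), dtype=torch.float64)
print(sad_cost(ref, sec).shape)  # torch.Size([4, 4])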
[ "runids[1]), va='top', fontsize = 8) ax1.text(x_text, y_text - 1 * y_sep, \"Pulsemaps used:", "'_pred'] - track[target])*(360/(2*np.pi)) residual_cascade = (cascade[target + '_pred'] - cascade[target])*(360/(2*np.pi)) else: residual_track =", "bins[i]) data_sliced = df.loc[idx, :].reset_index(drop = True) energy.append(np.mean(data_sliced['energy_log10'])) track_width, cascade_width, track_error, cascade_error =", "+ '_pred'] - track[target]) residual_cascade = (cascade[target + '_pred'] - cascade[target]) return resolution_fn(residual_track),", "roc_curve def add_truth(data, database): data = data.sort_values('event_no').reset_index(drop = True) with sqlite3.connect(database) as con:", "= plt.subplot2grid((6, 6), (0, 0), colspan = 6, rowspan= 6) for runid in", "- np.percentile(r, 16)) / 2. else: return np.nan def add_energylog10(df): df['energy_log10'] = np.log10(df['energy'])", "training\"%(10,20), va='top', fontsize = 8) fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target),bbox_inches=\"tight\") return def calculate_width(data_sliced, target): track =data_sliced.loc[data_sliced['track'] ==", "fontsize = 8) for line in leg.get_lines(): line.set_linewidth(4.0) if target == 'energy': ax1.set_ylim((0,175))", "= {140021: 'tab:blue', 140022: 'tab:orange'} fig = plt.figure(constrained_layout = True) ax1 = plt.subplot2grid((6,", "energy, 'width': track_widths, 'width_error': track_errors}) cascade_plot_data = pd.DataFrame({'mean': energy, 'width': cascade_widths, 'width_error': cascade_errors})", "histtype = 'step', label = 'deposited energy', color = colors[runid]) #plt.title('$\\\\nu_{v,u,e}$', size =", "target == 'energy': ax1.set_ylim((0,175)) ymax = 23. y_sep = 8 unit_tag = '(%)'", "save_as_csv = False): colors = {140021: 'tab:blue', 140022: 'tab:orange'} fig = plt.figure(constrained_layout =", "23. 
y_sep = 8 unit_tag = '(%)' else: unit_tag = '(deg.)' if target", "Upgrade MC using GNN\"%target) #fig.suptitle('%s Resolution'%target.capitalize(), size = 12) fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target))#,bbox_inches=\"tight\") return runids =", "add_truth(data, database) data = add_energylog10(data) data.to_csv(save_dir + '/%s_%s.csv'%(runid, target)) pulses_cut_val = 20 if", "=' %s : %s'%(runid,round(auc,3))) plt.legend() plt.title('Track/Cascade Classification') plt.ylabel('True Positive Rate', fontsize = 12)", "= 6, rowspan= 6) for runid in runids: predictions_path = '/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target) database =", "ax1.tick_params(axis='y', labelsize=6) ax1.set_xlim((0,3.1)) leg = ax1.legend(frameon=False, fontsize = 8) for line in leg.get_lines():", "'energy', 'track'] save_as_csv = True save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv' for target in targets: if", "bins = np.arange(0,3.1,0.1) if target in ['zenith', 'energy', 'XYZ']: for i in range(1,len(bins)):", "+ '/%s_%s.csv'%(runid, target)) pulses_cut_val = 20 if runid == 140021: pulses_cut_val = 10", "= [] track_errors = [] cascade_errors = [] energy = [] bins =", "8 unit_tag = '(%)' else: unit_tag = '(deg.)' if target == 'angular_res': target", "rng.choice(residual, size = len(residual), replace = True) w.append(resolution_fn(new_sample)) return np.std(w) def get_roc_and_auc(data, target):", "= roc_curve(data[target], data[target+'_pred']) auc_score = auc(fpr,tpr) return fpr,tpr,auc_score def plot_roc(target, runids, save_dir, save_as_csv", "import roc_curve def add_truth(data, database): data = data.sort_values('event_no').reset_index(drop = True) with sqlite3.connect(database) as", "+ {12: 'e', 14: 'mu', 16: 'tau'}[abs(row['pid'])] return f\"{particle_type} CC\" else: return \"NC\"", "if target == 'zenith': ymax = 10. y_sep = 2.3 ax1.set_ylim((0,45)) plt.tick_params(right=False,labelright=False) ax1.set_ylabel('%s", "= True) ax1 = plt.subplot2grid((6, 6), (0, 0), colspan = 6, rowspan= 6)", "line in leg.get_lines(): line.set_linewidth(4.0) if target == 'energy': ax1.set_ylim((0,175)) ymax = 23. y_sep", "save_as_csv = True save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv' for target in targets: if target !=", "- 1 * y_sep, \"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize = 8) ax1.text(x_text,", "np.std(w) def get_roc_and_auc(data, target): fpr, tpr, _ = roc_curve(data[target], data[target+'_pred']) auc_score = auc(fpr,tpr)", "0 * y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize = 8) plt.text(x_text, y_text - 1", "2. 
ax1.text(x_text, y_text - 0 * y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize = 8)", "data.to_csv(save_dir + '/%s_%s.csv'%(runid, target)) pulses_cut_val = 20 if runid == 140021: pulses_cut_val =", "tpr, _ = roc_curve(data[target], data[target+'_pred']) auc_score = auc(fpr,tpr) return fpr,tpr,auc_score def plot_roc(target, runids,", "target == 'energy': residual_track = ((track[target + '_pred'] - track[target])/track[target])*100 residual_cascade = ((cascade[target", "'_pred'] - cascade[target]) return resolution_fn(residual_track), resolution_fn(residual_cascade), get_error(residual_track), get_error(residual_cascade) def get_width(df, target): track_widths =", "energy.append(np.mean(data_sliced['energy_log10'])) track_width, cascade_width, track_error, cascade_error = calculate_width(data_sliced, target) track_widths.append(track_width) cascade_widths.append(cascade_width) track_errors.append(track_error) cascade_errors.append(cascade_error) track_plot_data", "(log10 GeV)', size = 10) x_text = 0.5 y_text = ymax - 2.", "(%s, %s) selection applied during training\"%(10,20), va='top', fontsize = 8) fig.suptitle(\"%s regression Upgrade", "alpha = 0.3, label = 'Cascade %s'%runid ) ax2 = ax1.twinx() ax2.hist(df['energy_log10'], histtype", "target)) plot_data_track, plot_data_cascade = get_width(df, target) ax1.plot(plot_data_track['mean'],plot_data_track['width'],linestyle='solid', lw = 0.5, color = 'black',", "= plt.figure(figsize = (width,height)) for runid in runids: data = pd.read_csv('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target)) database =", "\"NC\" def resolution_fn(r): if len(r) > 1: return (np.percentile(r, 84) - np.percentile(r, 16))", "cascade_error = calculate_width(data_sliced, target) track_widths.append(track_width) cascade_widths.append(cascade_width) track_errors.append(track_error) cascade_errors.append(cascade_error) track_plot_data = pd.DataFrame({'mean': energy, 'width':", "data def make_plot(target, runids, save_dir, save_as_csv = False): colors = {140021: 'tab:blue', 140022:", "140022] targets = ['zenith', 'energy', 'track'] save_as_csv = True save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv' for", "# CC particle_type = \"nu_\" + {12: 'e', 14: 'mu', 16: 'tau'}[abs(row['pid'])] return", "1 * y_sep, \"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize = 8) ax1.text(x_text, y_text", "=data_sliced.loc[data_sliced['track'] == 1,:].reset_index(drop = True) cascade =data_sliced.loc[data_sliced['track'] == 0,:].reset_index(drop = True) if target", "for i in range(1,len(bins)): print(bins[i]) idx = (df['energy_log10']> bins[i-1]) & (df['energy_log10'] < bins[i])", "regression Upgrade MC using GNN\"%target) #fig.suptitle('%s Resolution'%target.capitalize(), size = 12) fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target))#,bbox_inches=\"tight\") return runids", "size = 10) x_text = 0.5 y_text = ymax - 2. 
ax1.text(x_text, y_text", "> (%s, %s) selection applied during training\"%(10,20), va='top', fontsize = 8) fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target),bbox_inches=\"tight\") return", "'e', 14: 'mu', 16: 'tau'}[abs(row['pid'])] return f\"{particle_type} CC\" else: return \"NC\" def resolution_fn(r):", "8) for line in leg.get_lines(): line.set_linewidth(4.0) if target == 'energy': ax1.set_ylim((0,175)) ymax =", "leg.get_lines(): line.set_linewidth(4.0) if target == 'energy': ax1.set_ylim((0,175)) ymax = 23. y_sep = 8", "runids: data = pd.read_csv('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target)) database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid) if save_as_csv: data = add_truth(data,", "= False): width = 3.176*2 height = 2.388*2 fig = plt.figure(figsize = (width,height))", "= 0 truth.loc[(abs(truth['pid']) == 14) & (truth['interaction_type'] == 1), 'track'] = 1 add_these", "== 1), 'track'] = 1 add_these = [] for key in truth.columns: if", "= pd.DataFrame({'mean': energy, 'width': cascade_widths, 'width_error': cascade_errors}) return track_plot_data, cascade_plot_data else: print('target not", "'Cascade %s'%runid ) ax2 = ax1.twinx() ax2.hist(df['energy_log10'], histtype = 'step', label = 'deposited", "cascade[target]) return resolution_fn(residual_track), resolution_fn(residual_cascade), get_error(residual_track), get_error(residual_cascade) def get_width(df, target): track_widths = [] cascade_widths", "get_roc_and_auc(data, target) plt.plot(fpr,tpr, label =' %s : %s'%(runid,round(auc,3))) plt.legend() plt.title('Track/Cascade Classification') plt.ylabel('True Positive", "add_these.append(key) for key in add_these: data[key] = truth[key] return data def get_interaction_type(row): if", "import sqlite3 from sklearn.metrics import auc from sklearn.metrics import roc_curve def add_truth(data, database):", "((cascade[target + '_pred'] - cascade[target])/cascade[target])*100 elif target == 'zenith': residual_track = (track[target +", "track_errors = [] cascade_errors = [] energy = [] bins = np.arange(0,3.1,0.1) if", "= [] energy = [] bins = np.arange(0,3.1,0.1) if target in ['zenith', 'energy',", "add_energylog10(df) if save_as_csv: df.to_csv(save_dir + '/%s_%s.csv'%(runid, target)) plot_data_track, plot_data_cascade = get_width(df, target) ax1.plot(plot_data_track['mean'],plot_data_track['width'],linestyle='solid',", "SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize = 8) plt.text(x_text, y_text - 2 * y_sep, \"n_pulses", "alpha = 1) ax1.fill_between(plot_data_cascade['mean'], plot_data_cascade['width']- plot_data_cascade['width_error'], plot_data_cascade['width']+ plot_data_cascade['width_error'], color = colors[runid], alpha =", "i in range(150): new_sample = rng.choice(residual, size = len(residual), replace = True) w.append(resolution_fn(new_sample))", "label = 'Cascade %s'%runid ) ax2 = ax1.twinx() ax2.hist(df['energy_log10'], histtype = 'step', label", "colors[runid]) #plt.title('$\\\\nu_{v,u,e}$', size = 20) ax1.tick_params(axis='x', labelsize=6) ax1.tick_params(axis='y', labelsize=6) ax1.set_xlim((0,3.1)) leg = ax1.legend(frameon=False,", "truth where event_no in %s'%str(tuple(data['event_no'])) truth = pd.read_sql(query,con).sort_values('event_no').reset_index(drop = True) truth['track'] = 0", "colors = {140021: 'tab:blue', 140022: 
'tab:orange'} fig = plt.figure(constrained_layout = True) ax1 =", "(df['energy_log10']> bins[i-1]) & (df['energy_log10'] < bins[i]) data_sliced = df.loc[idx, :].reset_index(drop = True) energy.append(np.mean(data_sliced['energy_log10']))", "1: # CC particle_type = \"nu_\" + {12: 'e', 14: 'mu', 16: 'tau'}[abs(row['pid'])]", "residual_track = ((track[target + '_pred'] - track[target])/track[target])*100 residual_cascade = ((cascade[target + '_pred'] -", "'/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid) pulses_cut_val = 20 if runid == 140021: pulses_cut_val = 10 df", "colors[runid], alpha = 0.3, label = 'Cascade %s'%runid ) ax2 = ax1.twinx() ax2.hist(df['energy_log10'],", "plot_roc(target, runids, save_dir, save_as_csv = False): width = 3.176*2 height = 2.388*2 fig", "'/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid) if save_as_csv: data = add_truth(data, database) data = add_energylog10(data) data.to_csv(save_dir +", "= [] cascade_widths = [] track_errors = [] cascade_errors = [] energy =", "make_plot(target, runids, save_dir, save_as_csv = False): colors = {140021: 'tab:blue', 140022: 'tab:orange'} fig", "= np.log10(df['energy']) return df def get_error(residual): rng = np.random.default_rng(42) w = [] for", "df.loc[idx, :].reset_index(drop = True) energy.append(np.mean(data_sliced['energy_log10'])) track_width, cascade_width, track_error, cascade_error = calculate_width(data_sliced, target) track_widths.append(track_width)", "target = 'direction' if target == 'XYZ': target = 'vertex' unit_tag = '(m)'", "if target in ['zenith', 'energy', 'XYZ']: for i in range(1,len(bins)): print(bins[i]) idx =", "used: SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize = 8) ax1.text(x_text, y_text - 2 * y_sep,", "in runids: predictions_path = '/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target) database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid) pulses_cut_val = 20 if", "np.arange(0,3.1,0.1) if target in ['zenith', 'energy', 'XYZ']: for i in range(1,len(bins)): print(bins[i]) idx", "\"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize = 8) ax1.text(x_text, y_text - 1 * y_sep, \"Pulsemaps", "truth.columns: if key not in data.columns: add_these.append(key) for key in add_these: data[key] =", "supported: %s'%target) # Load data def make_plot(target, runids, save_dir, save_as_csv = False): colors", "cascade_plot_data else: print('target not supported: %s'%target) # Load data def make_plot(target, runids, save_dir,", "'(deg.)' if target == 'angular_res': target = 'direction' if target == 'XYZ': target", "if runid == 140021: pulses_cut_val = 10 fpr, tpr, auc = get_roc_and_auc(data, target)", "%s'%(runid,round(auc,3))) plt.legend() plt.title('Track/Cascade Classification') plt.ylabel('True Positive Rate', fontsize = 12) plt.xlabel('False Positive Rate',", "def get_width(df, target): track_widths = [] cascade_widths = [] track_errors = [] cascade_errors", "# Load data def make_plot(target, runids, save_dir, save_as_csv = False): colors = {140021:", "return \"NC\" def resolution_fn(r): if len(r) > 1: return (np.percentile(r, 84) - np.percentile(r,", "= 
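A minimal, runnable sketch of the recovered resolution and bootstrap-error logic; the n_boot and seed arguments and the synthetic residuals are illustrative additions, not part of the original script.

import numpy as np

def resolution_fn(r):
    # Half of the central 68% interval of the residuals: (84th - 16th percentile) / 2.
    if len(r) > 1:
        return (np.percentile(r, 84) - np.percentile(r, 16)) / 2.0
    return np.nan

def get_error(residual, n_boot=150, seed=42):
    # Bootstrap: recompute the resolution on resampled residuals and take the spread.
    rng = np.random.default_rng(seed)
    widths = [
        resolution_fn(rng.choice(residual, size=len(residual), replace=True))
        for _ in range(n_boot)
    ]
    return np.std(widths)

# Example: zenith residuals in degrees, computed as (predicted - true) * 360 / (2 * pi) in the script.
residual = np.random.default_rng(0).normal(0.0, 5.0, size=1000)
print(resolution_fn(residual), get_error(residual))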
calculate_width(data_sliced, target) splits an energy slice into track (track == 1) and cascade events and returns resolution_fn and get_error of the residuals, computed as a relative error in percent for energy (((pred - true) / true) * 100), in degrees for zenith ((pred - true) * 360 / (2 * np.pi)), and as the raw difference otherwise. get_width(df, target) walks energy_log10 bins np.arange(0, 3.1, 0.1), collects the per-bin mean energy and the track/cascade widths with their errors, and returns them as track/cascade DataFrames (printing 'target not supported' for targets outside zenith/energy/XYZ). make_plot(target, runids, save_dir, save_as_csv) and plot_roc(...) loop over runids 140021 (tab:blue, pulses_cut_val 10) and 140022 (tab:orange, pulses_cut_val 20), read results.csv from /home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/ and the matching dev_step4_numu_%s_second_run.db truth database, optionally dump CSVs to save_dir, draw resolution-vs-energy curves with shaded width_error bands and a deposited-energy histogram on a twin axis (or, for the 'track' target, the ROC curve with its AUC in the legend), annotate the detector, pulsemap and selection ("IceCubeUpgrade/nu_simulation/detector/step4", "Pulsemaps used: SplitInIcePulses_GraphSage_Pulses", "n_pulses > (10, 20) selection applied during training"), and save preliminary_upgrade_performance_%s.pdf. The driver sets runids = [140021, 140022], targets = ['zenith', 'energy', 'track'], save_as_csv = True and save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv'. A sketch of the track/cascade ROC computation follows below.
pulses_cut_val", "fontsize = 8) plt.text(x_text, y_text - 1 * y_sep, \"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses \",", "data.sort_values('event_no').reset_index(drop = True) with sqlite3.connect(database) as con: query = 'select event_no, energy, interaction_type,", "ax1.text(x_text, y_text - 2 * y_sep, \"n_pulses > (%s, %s) selection applied during", "== 'zenith': residual_track = (track[target + '_pred'] - track[target])*(360/(2*np.pi)) residual_cascade = (cascade[target +", "Load data def make_plot(target, runids, save_dir, save_as_csv = False): colors = {140021: 'tab:blue',", "line.set_linewidth(4.0) if target == 'energy': ax1.set_ylim((0,175)) ymax = 23. y_sep = 8 unit_tag", "= [140021, 140022] targets = ['zenith', 'energy', 'track'] save_as_csv = True save_dir =", "if target != 'track': make_plot(target, runids, save_dir, save_as_csv) else: plot_roc(target, runids, save_dir, save_as_csv)", "= 1 add_these = [] for key in truth.columns: if key not in", "* y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize = 8) ax1.text(x_text, y_text - 1 *", "'select event_no, energy, interaction_type, pid from truth where event_no in %s'%str(tuple(data['event_no'])) truth =", "'/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target) database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid) pulses_cut_val = 20 if runid == 140021: pulses_cut_val", "def add_truth(data, database): data = data.sort_values('event_no').reset_index(drop = True) with sqlite3.connect(database) as con: query", "'track'] = 1 add_these = [] for key in truth.columns: if key not", "if target == 'XYZ': target = 'vertex' unit_tag = '(m)' if target ==", "= '(%)' else: unit_tag = '(deg.)' if target == 'angular_res': target = 'direction'", "= (width,height)) for runid in runids: data = pd.read_csv('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target)) database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid)", "'/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv' for target in targets: if target != 'track': make_plot(target, runids, save_dir, save_as_csv)", "runid == 140021: pulses_cut_val = 10 df = pd.read_csv(predictions_path).sort_values('event_no').reset_index(drop = True) df =", "(cascade[target + '_pred'] - cascade[target])*(360/(2*np.pi)) else: residual_track = (track[target + '_pred'] - track[target])", "in range(150): new_sample = rng.choice(residual, size = len(residual), replace = True) w.append(resolution_fn(new_sample)) return", "Classification') plt.ylabel('True Positive Rate', fontsize = 12) plt.xlabel('False Positive Rate', fontsize = 12)", "def make_plot(target, runids, save_dir, save_as_csv = False): colors = {140021: 'tab:blue', 140022: 'tab:orange'}", "= 'step', label = 'deposited energy', color = colors[runid]) #plt.title('$\\\\nu_{v,u,e}$', size = 20)", "residual_track = (track[target + '_pred'] - track[target]) residual_cascade = (cascade[target + '_pred'] -", "GNN\"%target) #fig.suptitle('%s Resolution'%target.capitalize(), size = 12) fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target))#,bbox_inches=\"tight\") return runids = [140021, 140022] targets", "particle_type = \"nu_\" + 
{12: 'e', 14: 'mu', 16: 'tau'}[abs(row['pid'])] return f\"{particle_type} CC\"", "np.nan def add_energylog10(df): df['energy_log10'] = np.log10(df['energy']) return df def get_error(residual): rng = np.random.default_rng(42)", "12) fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target))#,bbox_inches=\"tight\") return runids = [140021, 140022] targets = ['zenith', 'energy', 'track'] save_as_csv", "[] cascade_widths = [] track_errors = [] cascade_errors = [] energy = []", "plt.text(x_text, y_text - 0 * y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize = 8) plt.text(x_text,", "= colors[runid], alpha = 0.3, label = 'Cascade %s'%runid ) ax2 = ax1.twinx()", "if target == 'energy': residual_track = ((track[target + '_pred'] - track[target])/track[target])*100 residual_cascade =", "GeV)', size = 10) x_text = 0.5 y_text = ymax - 2. ax1.text(x_text,", "sqlite3.connect(database) as con: query = 'select event_no, energy, interaction_type, pid from truth where", "(truth['interaction_type'] == 1), 'track'] = 1 add_these = [] for key in truth.columns:", "calculate_width(data_sliced, target): track =data_sliced.loc[data_sliced['track'] == 1,:].reset_index(drop = True) cascade =data_sliced.loc[data_sliced['track'] == 0,:].reset_index(drop =", "\"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize = 8) ax1.text(x_text, y_text - 2 *", "size = 10) ax1.set_xlabel('Energy (log10 GeV)', size = 10) x_text = 0.5 y_text", "= ax1.twinx() ax2.hist(df['energy_log10'], histtype = 'step', label = 'deposited energy', color = colors[runid])", "['zenith', 'energy', 'track'] save_as_csv = True save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv' for target in targets:", "df['energy_log10'] = np.log10(df['energy']) return df def get_error(residual): rng = np.random.default_rng(42) w = []", "fontsize = 8) fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target),bbox_inches=\"tight\") return def calculate_width(data_sliced, target): track =data_sliced.loc[data_sliced['track'] == 1,:].reset_index(drop =", "1) ax1.fill_between(plot_data_cascade['mean'], plot_data_cascade['width']- plot_data_cascade['width_error'], plot_data_cascade['width']+ plot_data_cascade['width_error'], color = colors[runid], alpha = 0.3, label", "== 'energy': ax1.set_ylim((0,175)) ymax = 23. 
y_sep = 8 unit_tag = '(%)' else:", "in %s'%str(tuple(data['event_no'])) truth = pd.read_sql(query,con).sort_values('event_no').reset_index(drop = True) truth['track'] = 0 truth.loc[(abs(truth['pid']) == 14)", "y_text - 0 * y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize = 8) plt.text(x_text, y_text", "- track[target]) residual_cascade = (cascade[target + '_pred'] - cascade[target]) return resolution_fn(residual_track), resolution_fn(residual_cascade), get_error(residual_track),", "6) for runid in runids: predictions_path = '/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target) database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid) pulses_cut_val", "runid) pulses_cut_val = 20 if runid == 140021: pulses_cut_val = 10 df =", "if save_as_csv: df.to_csv(save_dir + '/%s_%s.csv'%(runid, target)) plot_data_track, plot_data_cascade = get_width(df, target) ax1.plot(plot_data_track['mean'],plot_data_track['width'],linestyle='solid', lw", "+ '_pred'] - track[target])*(360/(2*np.pi)) residual_cascade = (cascade[target + '_pred'] - cascade[target])*(360/(2*np.pi)) else: residual_track", "8) plt.text(x_text, y_text - 1 * y_sep, \"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize", "def calculate_width(data_sliced, target): track =data_sliced.loc[data_sliced['track'] == 1,:].reset_index(drop = True) cascade =data_sliced.loc[data_sliced['track'] == 0,:].reset_index(drop", "else: residual_track = (track[target + '_pred'] - track[target]) residual_cascade = (cascade[target + '_pred']", "= [] cascade_errors = [] energy = [] bins = np.arange(0,3.1,0.1) if target", "during training\"%(10,20), va='top', fontsize = 8) fig.suptitle(\"%s regression Upgrade MC using GNN\"%target) #fig.suptitle('%s", "fontsize = 8) plt.text(x_text, y_text - 2 * y_sep, \"n_pulses > (%s, %s)", ": %s'%(runid,round(auc,3))) plt.legend() plt.title('Track/Cascade Classification') plt.ylabel('True Positive Rate', fontsize = 12) plt.xlabel('False Positive", "(track[target + '_pred'] - track[target])*(360/(2*np.pi)) residual_cascade = (cascade[target + '_pred'] - cascade[target])*(360/(2*np.pi)) else:", "color = 'black', alpha = 1) ax1.fill_between(plot_data_track['mean'],plot_data_track['width'] - plot_data_track['width_error'], plot_data_track['width'] + plot_data_track['width_error'],color =", "pandas as pd import sqlite3 from sklearn.metrics import auc from sklearn.metrics import roc_curve", "'step', label = 'deposited energy', color = colors[runid]) #plt.title('$\\\\nu_{v,u,e}$', size = 20) ax1.tick_params(axis='x',", "8) ax1.text(x_text, y_text - 1 * y_sep, \"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize", "va='top', fontsize = 8) fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target),bbox_inches=\"tight\") return def calculate_width(data_sliced, target): track =data_sliced.loc[data_sliced['track'] == 1,:].reset_index(drop", "* y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize = 8) plt.text(x_text, y_text - 1 *", "'deposited energy', color = colors[runid]) #plt.title('$\\\\nu_{v,u,e}$', size = 20) ax1.tick_params(axis='x', labelsize=6) ax1.tick_params(axis='y', labelsize=6)", "track_plot_data, cascade_plot_data else: print('target not supported: %s'%target) 
# Load data def make_plot(target, runids,", "'vertex' unit_tag = '(m)' if target == 'zenith': ymax = 10. y_sep =", "target) track_widths.append(track_width) cascade_widths.append(cascade_width) track_errors.append(track_error) cascade_errors.append(cascade_error) track_plot_data = pd.DataFrame({'mean': energy, 'width': track_widths, 'width_error': track_errors})", "sklearn.metrics import auc from sklearn.metrics import roc_curve def add_truth(data, database): data = data.sort_values('event_no').reset_index(drop", "alpha = 0.8 ,label = 'Track %s'%runid) ax1.plot(plot_data_cascade['mean'],plot_data_cascade['width'],linestyle='dashed', color = 'tab:blue', lw =", "- 0.05 y_sep = 0.1 plt.text(x_text, y_text - 0 * y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]),", "10 fpr, tpr, auc = get_roc_and_auc(data, target) plt.plot(fpr,tpr, label =' %s : %s'%(runid,round(auc,3)))", "= ['zenith', 'energy', 'track'] save_as_csv = True save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv' for target in", "return np.nan def add_energylog10(df): df['energy_log10'] = np.log10(df['energy']) return df def get_error(residual): rng =", "cascade_widths, 'width_error': cascade_errors}) return track_plot_data, cascade_plot_data else: print('target not supported: %s'%target) # Load", "runid) if save_as_csv: data = add_truth(data, database) data = add_energylog10(data) data.to_csv(save_dir + '/%s_%s.csv'%(runid,", "= 'vertex' unit_tag = '(m)' if target == 'zenith': ymax = 10. y_sep", "cascade =data_sliced.loc[data_sliced['track'] == 0,:].reset_index(drop = True) if target == 'energy': residual_track = ((track[target", "auc from sklearn.metrics import roc_curve def add_truth(data, database): data = data.sort_values('event_no').reset_index(drop = True)", "in add_these: data[key] = truth[key] return data def get_interaction_type(row): if row[\"interaction_type\"] == 1:", "'_pred'] - cascade[target])*(360/(2*np.pi)) else: residual_track = (track[target + '_pred'] - track[target]) residual_cascade =", "track[target]) residual_cascade = (cascade[target + '_pred'] - cascade[target]) return resolution_fn(residual_track), resolution_fn(residual_cascade), get_error(residual_track), get_error(residual_cascade)", "rowspan= 6) for runid in runids: predictions_path = '/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target) database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid)", "def plot_roc(target, runids, save_dir, save_as_csv = False): width = 3.176*2 height = 2.388*2", "sqlite3 from sklearn.metrics import auc from sklearn.metrics import roc_curve def add_truth(data, database): data", "return f\"{particle_type} CC\" else: return \"NC\" def resolution_fn(r): if len(r) > 1: return", "SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize = 8) ax1.text(x_text, y_text - 2 * y_sep, \"n_pulses", "selection applied during training\"%(10,20), va='top', fontsize = 8) fig.suptitle(\"%s regression Upgrade MC using", "= [] for i in range(150): new_sample = rng.choice(residual, size = len(residual), replace", "rng = np.random.default_rng(42) w = [] for i in range(150): new_sample = rng.choice(residual,", "2.388*2 fig = plt.figure(figsize = (width,height)) for runid in runids: data = 
pd.read_csv('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target))", "= [] bins = np.arange(0,3.1,0.1) if target in ['zenith', 'energy', 'XYZ']: for i", "plt import numpy as np import pandas as pd import sqlite3 from sklearn.metrics", "'energy': residual_track = ((track[target + '_pred'] - track[target])/track[target])*100 residual_cascade = ((cascade[target + '_pred']", "for runid in runids: data = pd.read_csv('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target)) database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid) if save_as_csv:", "fontsize = 12) ymax = 0.3 x_text = 0.2 y_text = ymax -", "0.3 x_text = 0.2 y_text = ymax - 0.05 y_sep = 0.1 plt.text(x_text,", "target in ['zenith', 'energy', 'XYZ']: for i in range(1,len(bins)): print(bins[i]) idx = (df['energy_log10']>", "= 10 df = pd.read_csv(predictions_path).sort_values('event_no').reset_index(drop = True) df = add_truth(df, database) df =", "= 0.5, alpha = 1) ax1.fill_between(plot_data_cascade['mean'], plot_data_cascade['width']- plot_data_cascade['width_error'], plot_data_cascade['width']+ plot_data_cascade['width_error'], color = colors[runid],", "pulses_cut_val = 10 fpr, tpr, auc = get_roc_and_auc(data, target) plt.plot(fpr,tpr, label =' %s", "data = add_energylog10(data) data.to_csv(save_dir + '/%s_%s.csv'%(runid, target)) pulses_cut_val = 20 if runid ==", "pd.read_sql(query,con).sort_values('event_no').reset_index(drop = True) truth['track'] = 0 truth.loc[(abs(truth['pid']) == 14) & (truth['interaction_type'] == 1),", "= calculate_width(data_sliced, target) track_widths.append(track_width) cascade_widths.append(cascade_width) track_errors.append(track_error) cascade_errors.append(cascade_error) track_plot_data = pd.DataFrame({'mean': energy, 'width': track_widths,", "= 'deposited energy', color = colors[runid]) #plt.title('$\\\\nu_{v,u,e}$', size = 20) ax1.tick_params(axis='x', labelsize=6) ax1.tick_params(axis='y',", "else: print('target not supported: %s'%target) # Load data def make_plot(target, runids, save_dir, save_as_csv", "10 df = pd.read_csv(predictions_path).sort_values('event_no').reset_index(drop = True) df = add_truth(df, database) df = add_energylog10(df)", "= 0.3, label = 'Cascade %s'%runid ) ax2 = ax1.twinx() ax2.hist(df['energy_log10'], histtype =", "20) ax1.tick_params(axis='x', labelsize=6) ax1.tick_params(axis='y', labelsize=6) ax1.set_xlim((0,3.1)) leg = ax1.legend(frameon=False, fontsize = 8) for", "ax1.set_ylim((0,175)) ymax = 23. 
y_sep = 8 unit_tag = '(%)' else: unit_tag =", "matplotlib.pyplot as plt import numpy as np import pandas as pd import sqlite3", "va='top', fontsize = 8) fig.suptitle(\"%s regression Upgrade MC using GNN\"%target) #fig.suptitle('%s Resolution'%target.capitalize(), size", "0 truth.loc[(abs(truth['pid']) == 14) & (truth['interaction_type'] == 1), 'track'] = 1 add_these =", "for runid in runids: predictions_path = '/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target) database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid) pulses_cut_val =", "'tab:orange'} fig = plt.figure(constrained_layout = True) ax1 = plt.subplot2grid((6, 6), (0, 0), colspan", "'zenith': residual_track = (track[target + '_pred'] - track[target])*(360/(2*np.pi)) residual_cascade = (cascade[target + '_pred']", "va='top', fontsize = 8) ax1.text(x_text, y_text - 1 * y_sep, \"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses", "np.random.default_rng(42) w = [] for i in range(150): new_sample = rng.choice(residual, size =", "= 20 if runid == 140021: pulses_cut_val = 10 fpr, tpr, auc =", "w.append(resolution_fn(new_sample)) return np.std(w) def get_roc_and_auc(data, target): fpr, tpr, _ = roc_curve(data[target], data[target+'_pred']) auc_score", "pd.read_csv(predictions_path).sort_values('event_no').reset_index(drop = True) df = add_truth(df, database) df = add_energylog10(df) if save_as_csv: df.to_csv(save_dir", "ax1.plot(plot_data_cascade['mean'],plot_data_cascade['width'],linestyle='dashed', color = 'tab:blue', lw = 0.5, alpha = 1) ax1.fill_between(plot_data_cascade['mean'], plot_data_cascade['width']- plot_data_cascade['width_error'],", "(df['energy_log10'] < bins[i]) data_sliced = df.loc[idx, :].reset_index(drop = True) energy.append(np.mean(data_sliced['energy_log10'])) track_width, cascade_width, track_error,", "energy = [] bins = np.arange(0,3.1,0.1) if target in ['zenith', 'energy', 'XYZ']: for", "plt.text(x_text, y_text - 1 * y_sep, \"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize =", "True) cascade =data_sliced.loc[data_sliced['track'] == 0,:].reset_index(drop = True) if target == 'energy': residual_track =", "1,:].reset_index(drop = True) cascade =data_sliced.loc[data_sliced['track'] == 0,:].reset_index(drop = True) if target == 'energy':", "import matplotlib.pyplot as plt import numpy as np import pandas as pd import", "target == 'XYZ': target = 'vertex' unit_tag = '(m)' if target == 'zenith':", "== 'angular_res': target = 'direction' if target == 'XYZ': target = 'vertex' unit_tag", "event_no in %s'%str(tuple(data['event_no'])) truth = pd.read_sql(query,con).sort_values('event_no').reset_index(drop = True) truth['track'] = 0 truth.loc[(abs(truth['pid']) ==", "np.percentile(r, 16)) / 2. else: return np.nan def add_energylog10(df): df['energy_log10'] = np.log10(df['energy']) return", "track_plot_data = pd.DataFrame({'mean': energy, 'width': track_widths, 'width_error': track_errors}) cascade_plot_data = pd.DataFrame({'mean': energy, 'width':", "True) df = add_truth(df, database) df = add_energylog10(df) if save_as_csv: df.to_csv(save_dir + '/%s_%s.csv'%(runid,", "df = add_energylog10(df) if save_as_csv: df.to_csv(save_dir + '/%s_%s.csv'%(runid, target)) plot_data_track, plot_data_cascade = get_width(df,", "ymax = 10. 
y_sep = 2.3 ax1.set_ylim((0,45)) plt.tick_params(right=False,labelright=False) ax1.set_ylabel('%s Resolution %s'%(target.capitalize(), unit_tag), size", "target) plt.plot(fpr,tpr, label =' %s : %s'%(runid,round(auc,3))) plt.legend() plt.title('Track/Cascade Classification') plt.ylabel('True Positive Rate',", "& (df['energy_log10'] < bins[i]) data_sliced = df.loc[idx, :].reset_index(drop = True) energy.append(np.mean(data_sliced['energy_log10'])) track_width, cascade_width,", "== 'XYZ': target = 'vertex' unit_tag = '(m)' if target == 'zenith': ymax", "\"n_pulses > (%s, %s) selection applied during training\"%(10,20), va='top', fontsize = 8) fig.suptitle(\"%s", "save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv' for target in targets: if target != 'track': make_plot(target, runids,", "get_error(residual_cascade) def get_width(df, target): track_widths = [] cascade_widths = [] track_errors = []", "((track[target + '_pred'] - track[target])/track[target])*100 residual_cascade = ((cascade[target + '_pred'] - cascade[target])/cascade[target])*100 elif", "key not in data.columns: add_these.append(key) for key in add_these: data[key] = truth[key] return", "query = 'select event_no, energy, interaction_type, pid from truth where event_no in %s'%str(tuple(data['event_no']))", "pd.read_csv('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target)) database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid) if save_as_csv: data = add_truth(data, database) data =", "0,:].reset_index(drop = True) if target == 'energy': residual_track = ((track[target + '_pred'] -", "save_dir, save_as_csv = False): colors = {140021: 'tab:blue', 140022: 'tab:orange'} fig = plt.figure(constrained_layout", "\", va='top', fontsize = 8) plt.text(x_text, y_text - 2 * y_sep, \"n_pulses >", "data_sliced = df.loc[idx, :].reset_index(drop = True) energy.append(np.mean(data_sliced['energy_log10'])) track_width, cascade_width, track_error, cascade_error = calculate_width(data_sliced,", "False): colors = {140021: 'tab:blue', 140022: 'tab:orange'} fig = plt.figure(constrained_layout = True) ax1", "= 1) ax1.fill_between(plot_data_cascade['mean'], plot_data_cascade['width']- plot_data_cascade['width_error'], plot_data_cascade['width']+ plot_data_cascade['width_error'], color = colors[runid], alpha = 0.3,", "+ plot_data_track['width_error'],color = colors[runid], alpha = 0.8 ,label = 'Track %s'%runid) ax1.plot(plot_data_cascade['mean'],plot_data_cascade['width'],linestyle='dashed', color", "y_sep, \"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize = 8) plt.text(x_text, y_text - 2", "where event_no in %s'%str(tuple(data['event_no'])) truth = pd.read_sql(query,con).sort_values('event_no').reset_index(drop = True) truth['track'] = 0 truth.loc[(abs(truth['pid'])", "track[target])/track[target])*100 residual_cascade = ((cascade[target + '_pred'] - cascade[target])/cascade[target])*100 elif target == 'zenith': residual_track", "pulses_cut_val = 10 df = pd.read_csv(predictions_path).sort_values('event_no').reset_index(drop = True) df = add_truth(df, database) df", "if len(r) > 1: return (np.percentile(r, 84) - np.percentile(r, 16)) / 2. 
else:", "'_pred'] - track[target]) residual_cascade = (cascade[target + '_pred'] - cascade[target]) return resolution_fn(residual_track), resolution_fn(residual_cascade),", "as plt import numpy as np import pandas as pd import sqlite3 from", "save_as_csv: df.to_csv(save_dir + '/%s_%s.csv'%(runid, target)) plot_data_track, plot_data_cascade = get_width(df, target) ax1.plot(plot_data_track['mean'],plot_data_track['width'],linestyle='solid', lw =", "= True) energy.append(np.mean(data_sliced['energy_log10'])) track_width, cascade_width, track_error, cascade_error = calculate_width(data_sliced, target) track_widths.append(track_width) cascade_widths.append(cascade_width) track_errors.append(track_error)", "df.to_csv(save_dir + '/%s_%s.csv'%(runid, target)) plot_data_track, plot_data_cascade = get_width(df, target) ax1.plot(plot_data_track['mean'],plot_data_track['width'],linestyle='solid', lw = 0.5,", "= '(deg.)' if target == 'angular_res': target = 'direction' if target == 'XYZ':", "[] for key in truth.columns: if key not in data.columns: add_these.append(key) for key", "Positive Rate', fontsize = 12) plt.xlabel('False Positive Rate', fontsize = 12) ymax =", "training\"%(10,20), va='top', fontsize = 8) fig.suptitle(\"%s regression Upgrade MC using GNN\"%target) #fig.suptitle('%s Resolution'%target.capitalize(),", "'tab:blue', 140022: 'tab:orange'} fig = plt.figure(constrained_layout = True) ax1 = plt.subplot2grid((6, 6), (0,", "color = 'tab:blue', lw = 0.5, alpha = 1) ax1.fill_between(plot_data_cascade['mean'], plot_data_cascade['width']- plot_data_cascade['width_error'], plot_data_cascade['width']+", "= False): colors = {140021: 'tab:blue', 140022: 'tab:orange'} fig = plt.figure(constrained_layout = True)", "ymax - 2. ax1.text(x_text, y_text - 0 * y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize", "'track'] save_as_csv = True save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv' for target in targets: if target", "= pd.DataFrame({'mean': energy, 'width': track_widths, 'width_error': track_errors}) cascade_plot_data = pd.DataFrame({'mean': energy, 'width': cascade_widths,", "track_errors}) cascade_plot_data = pd.DataFrame({'mean': energy, 'width': cascade_widths, 'width_error': cascade_errors}) return track_plot_data, cascade_plot_data else:", "def add_energylog10(df): df['energy_log10'] = np.log10(df['energy']) return df def get_error(residual): rng = np.random.default_rng(42) w", "'(m)' if target == 'zenith': ymax = 10. y_sep = 2.3 ax1.set_ylim((0,45)) plt.tick_params(right=False,labelright=False)", "- 2. 
ax1.text(x_text, y_text - 0 * y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize =", "fontsize = 8) fig.suptitle(\"%s regression Upgrade MC using GNN\"%target) #fig.suptitle('%s Resolution'%target.capitalize(), size =", "= 12) fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target))#,bbox_inches=\"tight\") return runids = [140021, 140022] targets = ['zenith', 'energy', 'track']", "key in add_these: data[key] = truth[key] return data def get_interaction_type(row): if row[\"interaction_type\"] ==", "add_these: data[key] = truth[key] return data def get_interaction_type(row): if row[\"interaction_type\"] == 1: #", "plt.figure(constrained_layout = True) ax1 = plt.subplot2grid((6, 6), (0, 0), colspan = 6, rowspan=", "replace = True) w.append(resolution_fn(new_sample)) return np.std(w) def get_roc_and_auc(data, target): fpr, tpr, _ =", "targets = ['zenith', 'energy', 'track'] save_as_csv = True save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv' for target", "[] for i in range(150): new_sample = rng.choice(residual, size = len(residual), replace =", "data = data.sort_values('event_no').reset_index(drop = True) with sqlite3.connect(database) as con: query = 'select event_no,", "- track[target])*(360/(2*np.pi)) residual_cascade = (cascade[target + '_pred'] - cascade[target])*(360/(2*np.pi)) else: residual_track = (track[target", "ax1.tick_params(axis='x', labelsize=6) ax1.tick_params(axis='y', labelsize=6) ax1.set_xlim((0,3.1)) leg = ax1.legend(frameon=False, fontsize = 8) for line", "target)) pulses_cut_val = 20 if runid == 140021: pulses_cut_val = 10 fpr, tpr,", "1 add_these = [] for key in truth.columns: if key not in data.columns:", "pd.DataFrame({'mean': energy, 'width': cascade_widths, 'width_error': cascade_errors}) return track_plot_data, cascade_plot_data else: print('target not supported:", "labelsize=6) ax1.set_xlim((0,3.1)) leg = ax1.legend(frameon=False, fontsize = 8) for line in leg.get_lines(): line.set_linewidth(4.0)", "as np import pandas as pd import sqlite3 from sklearn.metrics import auc from", "84) - np.percentile(r, 16)) / 2. 
else: return np.nan def add_energylog10(df): df['energy_log10'] =", "for target in targets: if target != 'track': make_plot(target, runids, save_dir, save_as_csv) else:", "not supported: %s'%target) # Load data def make_plot(target, runids, save_dir, save_as_csv = False):", "ax1.text(x_text, y_text - 0 * y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize = 8) ax1.text(x_text,", "residual_cascade = (cascade[target + '_pred'] - cascade[target]) return resolution_fn(residual_track), resolution_fn(residual_cascade), get_error(residual_track), get_error(residual_cascade) def", "= len(residual), replace = True) w.append(resolution_fn(new_sample)) return np.std(w) def get_roc_and_auc(data, target): fpr, tpr,", "track_widths, 'width_error': track_errors}) cascade_plot_data = pd.DataFrame({'mean': energy, 'width': cascade_widths, 'width_error': cascade_errors}) return track_plot_data,", "return runids = [140021, 140022] targets = ['zenith', 'energy', 'track'] save_as_csv = True", "new_sample = rng.choice(residual, size = len(residual), replace = True) w.append(resolution_fn(new_sample)) return np.std(w) def", "3.176*2 height = 2.388*2 fig = plt.figure(figsize = (width,height)) for runid in runids:", "ax1.legend(frameon=False, fontsize = 8) for line in leg.get_lines(): line.set_linewidth(4.0) if target == 'energy':", "= True save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv' for target in targets: if target != 'track':", "fig = plt.figure(figsize = (width,height)) for runid in runids: data = pd.read_csv('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target)) database", "track_widths = [] cascade_widths = [] track_errors = [] cascade_errors = [] energy", "fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target),bbox_inches=\"tight\") return def calculate_width(data_sliced, target): track =data_sliced.loc[data_sliced['track'] == 1,:].reset_index(drop = True) cascade =data_sliced.loc[data_sliced['track']", "= 2.388*2 fig = plt.figure(figsize = (width,height)) for runid in runids: data =", "%s'%(target.capitalize(), unit_tag), size = 10) ax1.set_xlabel('Energy (log10 GeV)', size = 10) x_text =", "+ '_pred'] - cascade[target]) return resolution_fn(residual_track), resolution_fn(residual_cascade), get_error(residual_track), get_error(residual_cascade) def get_width(df, target): track_widths", ":].reset_index(drop = True) energy.append(np.mean(data_sliced['energy_log10'])) track_width, cascade_width, track_error, cascade_error = calculate_width(data_sliced, target) track_widths.append(track_width) cascade_widths.append(cascade_width)", "= '(m)' if target == 'zenith': ymax = 10. 
y_sep = 2.3 ax1.set_ylim((0,45))", "track[target])*(360/(2*np.pi)) residual_cascade = (cascade[target + '_pred'] - cascade[target])*(360/(2*np.pi)) else: residual_track = (track[target +", "labelsize=6) ax1.tick_params(axis='y', labelsize=6) ax1.set_xlim((0,3.1)) leg = ax1.legend(frameon=False, fontsize = 8) for line in", "'angular_res': target = 'direction' if target == 'XYZ': target = 'vertex' unit_tag =", "CC\" else: return \"NC\" def resolution_fn(r): if len(r) > 1: return (np.percentile(r, 84)", "= 8) for line in leg.get_lines(): line.set_linewidth(4.0) if target == 'energy': ax1.set_ylim((0,175)) ymax", "140021: pulses_cut_val = 10 df = pd.read_csv(predictions_path).sort_values('event_no').reset_index(drop = True) df = add_truth(df, database)", "= colors[runid]) #plt.title('$\\\\nu_{v,u,e}$', size = 20) ax1.tick_params(axis='x', labelsize=6) ax1.tick_params(axis='y', labelsize=6) ax1.set_xlim((0,3.1)) leg =", "plt.legend() plt.title('Track/Cascade Classification') plt.ylabel('True Positive Rate', fontsize = 12) plt.xlabel('False Positive Rate', fontsize", "import auc from sklearn.metrics import roc_curve def add_truth(data, database): data = data.sort_values('event_no').reset_index(drop =", "\"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize = 8) plt.text(x_text, y_text - 2 *", "return track_plot_data, cascade_plot_data else: print('target not supported: %s'%target) # Load data def make_plot(target,", "8) fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target),bbox_inches=\"tight\") return def calculate_width(data_sliced, target): track =data_sliced.loc[data_sliced['track'] == 1,:].reset_index(drop = True) cascade", "'width_error': track_errors}) cascade_plot_data = pd.DataFrame({'mean': energy, 'width': cascade_widths, 'width_error': cascade_errors}) return track_plot_data, cascade_plot_data", "plot_data_cascade = get_width(df, target) ax1.plot(plot_data_track['mean'],plot_data_track['width'],linestyle='solid', lw = 0.5, color = 'black', alpha =", "y_text = ymax - 0.05 y_sep = 0.1 plt.text(x_text, y_text - 0 *", "ymax - 0.05 y_sep = 0.1 plt.text(x_text, y_text - 0 * y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0],", "colors[runid], alpha = 0.8 ,label = 'Track %s'%runid) ax1.plot(plot_data_cascade['mean'],plot_data_cascade['width'],linestyle='dashed', color = 'tab:blue', lw", "va='top', fontsize = 8) plt.text(x_text, y_text - 2 * y_sep, \"n_pulses > (%s,", "resolution_fn(residual_track), resolution_fn(residual_cascade), get_error(residual_track), get_error(residual_cascade) def get_width(df, target): track_widths = [] cascade_widths = []", "return fpr,tpr,auc_score def plot_roc(target, runids, save_dir, save_as_csv = False): width = 3.176*2 height", "= (df['energy_log10']> bins[i-1]) & (df['energy_log10'] < bins[i]) data_sliced = df.loc[idx, :].reset_index(drop = True)", "pulses_cut_val = 20 if runid == 140021: pulses_cut_val = 10 fpr, tpr, auc", "unit_tag), size = 10) ax1.set_xlabel('Energy (log10 GeV)', size = 10) x_text = 0.5", "10) x_text = 0.5 y_text = ymax - 2. 
ax1.text(x_text, y_text - 0", "return df def get_error(residual): rng = np.random.default_rng(42) w = [] for i in", "= 0.1 plt.text(x_text, y_text - 0 * y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize =", "residual_cascade = ((cascade[target + '_pred'] - cascade[target])/cascade[target])*100 elif target == 'zenith': residual_track =", "True) if target == 'energy': residual_track = ((track[target + '_pred'] - track[target])/track[target])*100 residual_cascade", "track_width, cascade_width, track_error, cascade_error = calculate_width(data_sliced, target) track_widths.append(track_width) cascade_widths.append(cascade_width) track_errors.append(track_error) cascade_errors.append(cascade_error) track_plot_data =", "= 0.5 y_text = ymax - 2. ax1.text(x_text, y_text - 0 * y_sep,", "len(r) > 1: return (np.percentile(r, 84) - np.percentile(r, 16)) / 2. else: return", "== 1,:].reset_index(drop = True) cascade =data_sliced.loc[data_sliced['track'] == 0,:].reset_index(drop = True) if target ==", "< bins[i]) data_sliced = df.loc[idx, :].reset_index(drop = True) energy.append(np.mean(data_sliced['energy_log10'])) track_width, cascade_width, track_error, cascade_error", "runids, save_dir, save_as_csv = False): colors = {140021: 'tab:blue', 140022: 'tab:orange'} fig =", "= 10) x_text = 0.5 y_text = ymax - 2. ax1.text(x_text, y_text -", "print('target not supported: %s'%target) # Load data def make_plot(target, runids, save_dir, save_as_csv =", "- cascade[target])*(360/(2*np.pi)) else: residual_track = (track[target + '_pred'] - track[target]) residual_cascade = (cascade[target", "1: return (np.percentile(r, 84) - np.percentile(r, 16)) / 2. else: return np.nan def", "ax1.plot(plot_data_track['mean'],plot_data_track['width'],linestyle='solid', lw = 0.5, color = 'black', alpha = 1) ax1.fill_between(plot_data_track['mean'],plot_data_track['width'] - plot_data_track['width_error'],", "plot_data_cascade['width']+ plot_data_cascade['width_error'], color = colors[runid], alpha = 0.3, label = 'Cascade %s'%runid )", "save_dir, save_as_csv = False): width = 3.176*2 height = 2.388*2 fig = plt.figure(figsize", "= True) cascade =data_sliced.loc[data_sliced['track'] == 0,:].reset_index(drop = True) if target == 'energy': residual_track", "return np.std(w) def get_roc_and_auc(data, target): fpr, tpr, _ = roc_curve(data[target], data[target+'_pred']) auc_score =", "plt.title('Track/Cascade Classification') plt.ylabel('True Positive Rate', fontsize = 12) plt.xlabel('False Positive Rate', fontsize =", "np.log10(df['energy']) return df def get_error(residual): rng = np.random.default_rng(42) w = [] for i", "y_sep, \"n_pulses > (%s, %s) selection applied during training\"%(10,20), va='top', fontsize = 8)", "1 * y_sep, \"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize = 8) plt.text(x_text, y_text", "%s'%str(tuple(data['event_no'])) truth = pd.read_sql(query,con).sort_values('event_no').reset_index(drop = True) truth['track'] = 0 truth.loc[(abs(truth['pid']) == 14) &", "plt.subplot2grid((6, 6), (0, 0), colspan = 6, rowspan= 6) for runid in runids:", "= 8 unit_tag = '(%)' else: unit_tag = '(deg.)' if target == 'angular_res':", "get_error(residual_track), get_error(residual_cascade) def get_width(df, target): track_widths = [] cascade_widths = [] track_errors =", "= plt.figure(constrained_layout = True) ax1 = plt.subplot2grid((6, 6), (0, 0), colspan = 6,", "plot_data_track, plot_data_cascade = get_width(df, target) 
ax1.plot(plot_data_track['mean'],plot_data_track['width'],linestyle='solid', lw = 0.5, color = 'black', alpha", "fpr, tpr, _ = roc_curve(data[target], data[target+'_pred']) auc_score = auc(fpr,tpr) return fpr,tpr,auc_score def plot_roc(target,", "len(residual), replace = True) w.append(resolution_fn(new_sample)) return np.std(w) def get_roc_and_auc(data, target): fpr, tpr, _", "= ax1.legend(frameon=False, fontsize = 8) for line in leg.get_lines(): line.set_linewidth(4.0) if target ==", "not in data.columns: add_these.append(key) for key in add_these: data[key] = truth[key] return data", "> 1: return (np.percentile(r, 84) - np.percentile(r, 16)) / 2. else: return np.nan", "= 23. y_sep = 8 unit_tag = '(%)' else: unit_tag = '(deg.)' if", "- 1 * y_sep, \"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize = 8) plt.text(x_text,", "(track[target + '_pred'] - track[target]) residual_cascade = (cascade[target + '_pred'] - cascade[target]) return", "= \"nu_\" + {12: 'e', 14: 'mu', 16: 'tau'}[abs(row['pid'])] return f\"{particle_type} CC\" else:", "else: return \"NC\" def resolution_fn(r): if len(r) > 1: return (np.percentile(r, 84) -", "plot_data_track['width_error'],color = colors[runid], alpha = 0.8 ,label = 'Track %s'%runid) ax1.plot(plot_data_cascade['mean'],plot_data_cascade['width'],linestyle='dashed', color =", "from truth where event_no in %s'%str(tuple(data['event_no'])) truth = pd.read_sql(query,con).sort_values('event_no').reset_index(drop = True) truth['track'] =", "return def calculate_width(data_sliced, target): track =data_sliced.loc[data_sliced['track'] == 1,:].reset_index(drop = True) cascade =data_sliced.loc[data_sliced['track'] ==", "0.1 plt.text(x_text, y_text - 0 * y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize = 8)", ") ax2 = ax1.twinx() ax2.hist(df['energy_log10'], histtype = 'step', label = 'deposited energy', color", "= df.loc[idx, :].reset_index(drop = True) energy.append(np.mean(data_sliced['energy_log10'])) track_width, cascade_width, track_error, cascade_error = calculate_width(data_sliced, target)", "= 0.2 y_text = ymax - 0.05 y_sep = 0.1 plt.text(x_text, y_text -", "(width,height)) for runid in runids: data = pd.read_csv('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target)) database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid) if", "fig = plt.figure(constrained_layout = True) ax1 = plt.subplot2grid((6, 6), (0, 0), colspan =", "fpr,tpr,auc_score def plot_roc(target, runids, save_dir, save_as_csv = False): width = 3.176*2 height =", "= True) w.append(resolution_fn(new_sample)) return np.std(w) def get_roc_and_auc(data, target): fpr, tpr, _ = roc_curve(data[target],", "ax1 = plt.subplot2grid((6, 6), (0, 0), colspan = 6, rowspan= 6) for runid", "track_errors.append(track_error) cascade_errors.append(cascade_error) track_plot_data = pd.DataFrame({'mean': energy, 'width': track_widths, 'width_error': track_errors}) cascade_plot_data = pd.DataFrame({'mean':", "track =data_sliced.loc[data_sliced['track'] == 1,:].reset_index(drop = True) cascade =data_sliced.loc[data_sliced['track'] == 0,:].reset_index(drop = True) if", "2 * y_sep, \"n_pulses > (%s, %s) selection applied during training\"%(10,20), va='top', fontsize", "def get_interaction_type(row): if row[\"interaction_type\"] == 1: # CC particle_type = \"nu_\" + {12:", 
"interaction_type, pid from truth where event_no in %s'%str(tuple(data['event_no'])) truth = pd.read_sql(query,con).sort_values('event_no').reset_index(drop = True)", "height = 2.388*2 fig = plt.figure(figsize = (width,height)) for runid in runids: data", "in ['zenith', 'energy', 'XYZ']: for i in range(1,len(bins)): print(bins[i]) idx = (df['energy_log10']> bins[i-1])", "target): track_widths = [] cascade_widths = [] track_errors = [] cascade_errors = []", "unit_tag = '(%)' else: unit_tag = '(deg.)' if target == 'angular_res': target =", "ax1.fill_between(plot_data_cascade['mean'], plot_data_cascade['width']- plot_data_cascade['width_error'], plot_data_cascade['width']+ plot_data_cascade['width_error'], color = colors[runid], alpha = 0.3, label =", "* y_sep, \"Pulsemaps used: SplitInIcePulses_GraphSage_Pulses \", va='top', fontsize = 8) plt.text(x_text, y_text -", "df = add_truth(df, database) df = add_energylog10(df) if save_as_csv: df.to_csv(save_dir + '/%s_%s.csv'%(runid, target))", "pd.DataFrame({'mean': energy, 'width': track_widths, 'width_error': track_errors}) cascade_plot_data = pd.DataFrame({'mean': energy, 'width': cascade_widths, 'width_error':", "- 0 * y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize = 8) plt.text(x_text, y_text -", "0), colspan = 6, rowspan= 6) for runid in runids: predictions_path = '/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target)", "as con: query = 'select event_no, energy, interaction_type, pid from truth where event_no", "for i in range(150): new_sample = rng.choice(residual, size = len(residual), replace = True)", "from sklearn.metrics import roc_curve def add_truth(data, database): data = data.sort_values('event_no').reset_index(drop = True) with", "{140021: 'tab:blue', 140022: 'tab:orange'} fig = plt.figure(constrained_layout = True) ax1 = plt.subplot2grid((6, 6),", "runid == 140021: pulses_cut_val = 10 fpr, tpr, auc = get_roc_and_auc(data, target) plt.plot(fpr,tpr,", "y_sep, \"IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)\"%(runids[0], runids[1]), va='top', fontsize = 8) plt.text(x_text, y_text - 1 * y_sep,", "+ '/%s_%s.csv'%(runid, target)) plot_data_track, plot_data_cascade = get_width(df, target) ax1.plot(plot_data_track['mean'],plot_data_track['width'],linestyle='solid', lw = 0.5, color", "np import pandas as pd import sqlite3 from sklearn.metrics import auc from sklearn.metrics", "{12: 'e', 14: 'mu', 16: 'tau'}[abs(row['pid'])] return f\"{particle_type} CC\" else: return \"NC\" def", "_ = roc_curve(data[target], data[target+'_pred']) auc_score = auc(fpr,tpr) return fpr,tpr,auc_score def plot_roc(target, runids, save_dir,", "(0, 0), colspan = 6, rowspan= 6) for runid in runids: predictions_path =", "'zenith': ymax = 10. 
import matplotlib.pyplot as plt  # required by the figure code below
import numpy as np
import pandas as pd
import sqlite3
from sklearn.metrics import auc
from sklearn.metrics import roc_curve


def add_truth(data, database):
    data = data.sort_values('event_no').reset_index(drop=True)
    with sqlite3.connect(database) as con:
        query = 'select event_no, energy, interaction_type, pid from truth where event_no in %s' % str(tuple(data['event_no']))
        truth = pd.read_sql(query, con).sort_values('event_no').reset_index(drop=True)
    truth['track'] = 0
    # numu CC events are labelled as tracks, everything else as cascades
    truth.loc[(abs(truth['pid']) == 14) & (truth['interaction_type'] == 1), 'track'] = 1
    add_these = []
    for key in truth.columns:
        if key not in data.columns:
            add_these.append(key)
    for key in add_these:
        data[key] = truth[key]
    return data


def get_interaction_type(row):
    if row["interaction_type"] == 1:  # CC
        particle_type = "nu_" + {12: 'e', 14: 'mu', 16: 'tau'}[abs(row['pid'])]
        return f"{particle_type} CC"
    else:
        return "NC"


def resolution_fn(r):
    if len(r) > 1:
        return (np.percentile(r, 84) - np.percentile(r, 16)) / 2.
    else:
        return np.nan


def add_energylog10(df):
    df['energy_log10'] = np.log10(df['energy'])
    return df


def get_error(residual):
    rng = np.random.default_rng(42)
    w = []
    for i in range(150):
        new_sample = rng.choice(residual, size=len(residual), replace=True)
        w.append(resolution_fn(new_sample))
    return np.std(w)


def get_roc_and_auc(data, target):
    fpr, tpr, _ = roc_curve(data[target], data[target + '_pred'])
    auc_score = auc(fpr, tpr)
    return fpr, tpr, auc_score


def plot_roc(target, runids, save_dir, save_as_csv=False):
    width = 3.176 * 2
    height = 2.388 * 2
    fig = plt.figure(figsize=(width, height))
    for runid in runids:
        data = pd.read_csv('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv' % (runid, target))
        database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db' % (runid, runid)
        if save_as_csv:
            data = add_truth(data, database)
            data = add_energylog10(data)
            data.to_csv(save_dir + '/%s_%s.csv' % (runid, target))
        pulses_cut_val = 20
        if runid == 140021:
            pulses_cut_val = 10
        fpr, tpr, auc = get_roc_and_auc(data, target)
        plt.plot(fpr, tpr, label=' %s : %s' % (runid, round(auc, 3)))
    plt.legend()
    plt.title('Track/Cascade Classification')
    plt.ylabel('True Positive Rate', fontsize=12)
    plt.xlabel('False Positive Rate', fontsize=12)
    ymax = 0.3
    x_text = 0.2
    y_text = ymax - 0.05
    y_sep = 0.1
    plt.text(x_text, y_text - 0 * y_sep, "IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)" % (runids[0], runids[1]), va='top', fontsize=8)
    plt.text(x_text, y_text - 1 * y_sep, "Pulsemaps used: SplitInIcePulses_GraphSage_Pulses ", va='top', fontsize=8)
    plt.text(x_text, y_text - 2 * y_sep, "n_pulses > (%s, %s) selection applied during training" % (10, 20), va='top', fontsize=8)
    fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf' % (target), bbox_inches="tight")
    return


def calculate_width(data_sliced, target):
    track = data_sliced.loc[data_sliced['track'] == 1, :].reset_index(drop=True)
    cascade = data_sliced.loc[data_sliced['track'] == 0, :].reset_index(drop=True)
    if target == 'energy':
        residual_track = ((track[target + '_pred'] - track[target]) / track[target]) * 100
        residual_cascade = ((cascade[target + '_pred'] - cascade[target]) / cascade[target]) * 100
    elif target == 'zenith':
        residual_track = (track[target + '_pred'] - track[target]) * (360 / (2 * np.pi))
        residual_cascade = (cascade[target + '_pred'] - cascade[target]) * (360 / (2 * np.pi))
    else:
        residual_track = (track[target + '_pred'] - track[target])
        residual_cascade = (cascade[target + '_pred'] - cascade[target])
    return resolution_fn(residual_track), resolution_fn(residual_cascade), get_error(residual_track), get_error(residual_cascade)


def get_width(df, target):
    track_widths = []
    cascade_widths = []
    track_errors = []
    cascade_errors = []
    energy = []
    bins = np.arange(0, 3.1, 0.1)
    if target in ['zenith', 'energy', 'XYZ']:
        for i in range(1, len(bins)):
            print(bins[i])
            idx = (df['energy_log10'] > bins[i - 1]) & (df['energy_log10'] < bins[i])
            data_sliced = df.loc[idx, :].reset_index(drop=True)
            energy.append(np.mean(data_sliced['energy_log10']))
            track_width, cascade_width, track_error, cascade_error = calculate_width(data_sliced, target)
            track_widths.append(track_width)
            cascade_widths.append(cascade_width)
            track_errors.append(track_error)
            cascade_errors.append(cascade_error)
        track_plot_data = pd.DataFrame({'mean': energy, 'width': track_widths, 'width_error': track_errors})
        cascade_plot_data = pd.DataFrame({'mean': energy, 'width': cascade_widths, 'width_error': cascade_errors})
        return track_plot_data, cascade_plot_data
    else:
        print('target not supported: %s' % target)


# Load data
def make_plot(target, runids, save_dir, save_as_csv=False):
    colors = {140021: 'tab:blue', 140022: 'tab:orange'}  # the 140021 colour is not recoverable from the fragments; 'tab:blue' assumed
    fig = plt.figure(constrained_layout=True)
    ax1 = plt.subplot2grid((6, 6), (0, 0), colspan=6, rowspan=6)
    for runid in runids:
        predictions_path = '/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv' % (runid, target)
        database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db' % (runid, runid)
        pulses_cut_val = 20
        if runid == 140021:
            pulses_cut_val = 10
        df = pd.read_csv(predictions_path).sort_values('event_no').reset_index(drop=True)
        df = add_truth(df, database)
        df = add_energylog10(df)
        if save_as_csv:
            df.to_csv(save_dir + '/%s_%s.csv' % (runid, target))
        plot_data_track, plot_data_cascade = get_width(df, target)
        ax1.plot(plot_data_track['mean'], plot_data_track['width'], linestyle='solid', lw=0.5, color='black', alpha=1)
        ax1.fill_between(plot_data_track['mean'], plot_data_track['width'] - plot_data_track['width_error'], plot_data_track['width'] + plot_data_track['width_error'], color=colors[runid], alpha=0.8, label='Track %s' % runid)
        ax1.plot(plot_data_cascade['mean'], plot_data_cascade['width'], linestyle='dashed', color='tab:blue', lw=0.5, alpha=1)
        ax1.fill_between(plot_data_cascade['mean'], plot_data_cascade['width'] - plot_data_cascade['width_error'], plot_data_cascade['width'] + plot_data_cascade['width_error'], color=colors[runid], alpha=0.3, label='Cascade %s' % runid)
        ax2 = ax1.twinx()
        ax2.hist(df['energy_log10'], histtype='step', label='deposited energy', color=colors[runid])
    # plt.title('$\\nu_{v,u,e}$', size = 20)
    ax1.tick_params(axis='x', labelsize=6)
    ax1.tick_params(axis='y', labelsize=6)
    ax1.set_xlim((0, 3.1))
    leg = ax1.legend(frameon=False, fontsize=8)
    for line in leg.get_lines():
        line.set_linewidth(4.0)
    if target == 'energy':
        ax1.set_ylim((0, 175))
        ymax = 23.
        y_sep = 8
        unit_tag = '(%)'
    else:
        unit_tag = '(deg.)'
    if target == 'angular_res':
        target = 'direction'
    if target == 'XYZ':
        target = 'vertex'
        unit_tag = '(m)'
    if target == 'zenith':
        ymax = 10.
        y_sep = 2.3
        ax1.set_ylim((0, 45))
    plt.tick_params(right=False, labelright=False)
    ax1.set_ylabel('%s Resolution %s' % (target.capitalize(), unit_tag), size=10)
    ax1.set_xlabel('Energy (log10 GeV)', size=10)
    x_text = 0.5
    y_text = ymax - 2.
    ax1.text(x_text, y_text - 0 * y_sep, "IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)" % (runids[0], runids[1]), va='top', fontsize=8)
    ax1.text(x_text, y_text - 1 * y_sep, "Pulsemaps used: SplitInIcePulses_GraphSage_Pulses ", va='top', fontsize=8)
    ax1.text(x_text, y_text - 2 * y_sep, "n_pulses > (%s, %s) selection applied during training" % (10, 20), va='top', fontsize=8)
    fig.suptitle("%s regression Upgrade MC using GNN" % target)
    # fig.suptitle('%s Resolution'%target.capitalize(), size = 12)
    fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf' % (target))  # ,bbox_inches="tight")
    return


runids = [140021, 140022]
targets = ['zenith', 'energy', 'track']
save_as_csv = True
save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv'
for target in targets:
    if target != 'track':
        make_plot(target, runids, save_dir, save_as_csv)
    else:
        plot_roc(target, runids, save_dir, save_as_csv)
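The resolution used above is the half-width of the central 68% interval of the residuals, and get_error bootstraps its statistical uncertainty by resampling the residuals with replacement. A minimal self-contained sketch of the same idea on synthetic residuals (the Gaussian width of 1.0 and the 150 resamples are illustrative choices, not values taken from the analysis files) behaves as expected:

import numpy as np

def resolution_fn(r):
    # half the 16th-84th percentile spread; equals sigma for a Gaussian
    return (np.percentile(r, 84) - np.percentile(r, 16)) / 2. if len(r) > 1 else np.nan

rng = np.random.default_rng(0)
residuals = rng.normal(0.0, 1.0, size=10000)  # toy residuals with sigma = 1
boot = [resolution_fn(rng.choice(residuals, len(residuals), replace=True)) for _ in range(150)]
print(round(resolution_fn(residuals), 3), round(np.std(boot), 3))  # ~1.0 and its bootstrap error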
[ "if bboxes != []: for bbox in bboxes: img = cv2.rectangle(img, (bbox[0],bbox[1]), (bbox[0]+bbox[2],", "= axs.flatten() for img, ax in zip(imglist, axs): ax.axis('off') ax.imshow(img) print(row) plt.show() imglist", "print(i, len(imglist)) _, axs = plt.subplots(7, 7, figsize=(32, 32)) axs = axs.flatten() for", "np.array(Image.open(img_name)) def get_bbox(annots): bboxes = [list(annot.values()) for annot in annots] return bboxes df", "len(imglist)) _, axs = plt.subplots(7, 7, figsize=(32, 32)) axs = axs.flatten() for img,", "ax.imshow(img) for bbox in bboxes: rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], linewidth=2, edgecolor='r',", "32)) axs = axs.flatten() for img, ax in zip(imglist, axs): ax.axis('off') ax.imshow(img) print(row)", "df.iterrows(): img = get_image(row[\"path\"]) bboxes = row[\"bboxes\"] if bboxes != []: for bbox", "Image import matplotlib.pyplot as plt import matplotlib.patches as patches import cv2 def plot_image_and_bboxes(img,", "(bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 2) imglist.append(img) if (i+1) % 49 == 0:", "df['annotations'] = df['annotations'].apply(lambda x: ast.literal_eval(x)) df['bboxes'] = df.annotations.apply(get_bbox) imglist = [] for i,", "def get_bbox(annots): bboxes = [list(annot.values()) for annot in annots] return bboxes df =", "= plt.subplots(7, 7, figsize=(32, 32)) axs = axs.flatten() for img, ax in zip(imglist,", "row in df.iterrows(): img = get_image(row[\"path\"]) bboxes = row[\"bboxes\"] if bboxes != []:", "plot_image_and_bboxes(img, bboxes): fig, ax = plt.subplots(1, figsize=(10, 8)) ax.axis('off') ax.imshow(img) for bbox in", "= pd.read_csv('./slices_df.csv') df['annotations'] = df['annotations'].apply(lambda x: ast.literal_eval(x)) df['bboxes'] = df.annotations.apply(get_bbox) imglist = []", "axs.flatten() for img, ax in zip(imglist, axs): ax.axis('off') ax.imshow(img) print(row) plt.show() imglist =", "7, figsize=(32, 32)) axs = axs.flatten() for img, ax in zip(imglist, axs): ax.axis('off')", "matplotlib.patches as patches import cv2 def plot_image_and_bboxes(img, bboxes): fig, ax = plt.subplots(1, figsize=(10,", "[list(annot.values()) for annot in annots] return bboxes df = pd.read_csv('./slices_df.csv') df['annotations'] = df['annotations'].apply(lambda", "axs = plt.subplots(7, 7, figsize=(32, 32)) axs = axs.flatten() for img, ax in", "patches import cv2 def plot_image_and_bboxes(img, bboxes): fig, ax = plt.subplots(1, figsize=(10, 8)) ax.axis('off')", "in annots] return bboxes df = pd.read_csv('./slices_df.csv') df['annotations'] = df['annotations'].apply(lambda x: ast.literal_eval(x)) df['bboxes']", "return bboxes df = pd.read_csv('./slices_df.csv') df['annotations'] = df['annotations'].apply(lambda x: ast.literal_eval(x)) df['bboxes'] = df.annotations.apply(get_bbox)", "get_image(row[\"path\"]) bboxes = row[\"bboxes\"] if bboxes != []: for bbox in bboxes: img", "bboxes): fig, ax = plt.subplots(1, figsize=(10, 8)) ax.axis('off') ax.imshow(img) for bbox in bboxes:", "== 0: print(i, len(imglist)) _, axs = plt.subplots(7, 7, figsize=(32, 32)) axs =", "numpy as np import pandas as pd import ast from PIL import Image", "imglist.append(img) if (i+1) % 49 == 0: print(i, len(imglist)) _, axs = plt.subplots(7,", "facecolor=\"none\") ax.add_patch(rect) plt.show() def get_image(img_name): return np.array(Image.open(img_name)) def get_bbox(annots): bboxes = [list(annot.values()) for", "!= []: for bbox in bboxes: img = cv2.rectangle(img, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255,", "bbox in bboxes: rect 
= patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], linewidth=2, edgecolor='r', facecolor=\"none\") ax.add_patch(rect)", "plt import matplotlib.patches as patches import cv2 def plot_image_and_bboxes(img, bboxes): fig, ax =", "ast.literal_eval(x)) df['bboxes'] = df.annotations.apply(get_bbox) imglist = [] for i, row in df.iterrows(): img", "import cv2 def plot_image_and_bboxes(img, bboxes): fig, ax = plt.subplots(1, figsize=(10, 8)) ax.axis('off') ax.imshow(img)", "matplotlib.pyplot as plt import matplotlib.patches as patches import cv2 def plot_image_and_bboxes(img, bboxes): fig,", "get_bbox(annots): bboxes = [list(annot.values()) for annot in annots] return bboxes df = pd.read_csv('./slices_df.csv')", "import matplotlib.patches as patches import cv2 def plot_image_and_bboxes(img, bboxes): fig, ax = plt.subplots(1,", "bbox[1]), bbox[2], bbox[3], linewidth=2, edgecolor='r', facecolor=\"none\") ax.add_patch(rect) plt.show() def get_image(img_name): return np.array(Image.open(img_name)) def", "img = get_image(row[\"path\"]) bboxes = row[\"bboxes\"] if bboxes != []: for bbox in", "plt.subplots(1, figsize=(10, 8)) ax.axis('off') ax.imshow(img) for bbox in bboxes: rect = patches.Rectangle((bbox[0], bbox[1]),", "0: print(i, len(imglist)) _, axs = plt.subplots(7, 7, figsize=(32, 32)) axs = axs.flatten()", "= [] for i, row in df.iterrows(): img = get_image(row[\"path\"]) bboxes = row[\"bboxes\"]", "import matplotlib.pyplot as plt import matplotlib.patches as patches import cv2 def plot_image_and_bboxes(img, bboxes):", "bboxes = row[\"bboxes\"] if bboxes != []: for bbox in bboxes: img =", "df.annotations.apply(get_bbox) imglist = [] for i, row in df.iterrows(): img = get_image(row[\"path\"]) bboxes", "import ast from PIL import Image import matplotlib.pyplot as plt import matplotlib.patches as", "def plot_image_and_bboxes(img, bboxes): fig, ax = plt.subplots(1, figsize=(10, 8)) ax.axis('off') ax.imshow(img) for bbox", "bboxes: rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], linewidth=2, edgecolor='r', facecolor=\"none\") ax.add_patch(rect) plt.show() def", "= [list(annot.values()) for annot in annots] return bboxes df = pd.read_csv('./slices_df.csv') df['annotations'] =", "bboxes df = pd.read_csv('./slices_df.csv') df['annotations'] = df['annotations'].apply(lambda x: ast.literal_eval(x)) df['bboxes'] = df.annotations.apply(get_bbox) imglist", "0), 2) imglist.append(img) if (i+1) % 49 == 0: print(i, len(imglist)) _, axs", "(bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 2) imglist.append(img) if (i+1) % 49 ==", "pd.read_csv('./slices_df.csv') df['annotations'] = df['annotations'].apply(lambda x: ast.literal_eval(x)) df['bboxes'] = df.annotations.apply(get_bbox) imglist = [] for", "= cv2.rectangle(img, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 2) imglist.append(img) if (i+1) %", "in bboxes: rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], linewidth=2, edgecolor='r', facecolor=\"none\") ax.add_patch(rect) plt.show()", "bbox[1]+bbox[3]), (255, 0, 0), 2) imglist.append(img) if (i+1) % 49 == 0: print(i,", "import numpy as np import pandas as pd import ast from PIL import", "ax.axis('off') ax.imshow(img) for bbox in bboxes: rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], linewidth=2,", "imglist = [] for i, row in df.iterrows(): img = get_image(row[\"path\"]) bboxes =", "i, row in df.iterrows(): img = get_image(row[\"path\"]) bboxes = row[\"bboxes\"] if bboxes !=", "df['annotations'].apply(lambda x: 
ast.literal_eval(x)) df['bboxes'] = df.annotations.apply(get_bbox) imglist = [] for i, row in", "[]: for bbox in bboxes: img = cv2.rectangle(img, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0,", "<filename>tools/checkimg.py import numpy as np import pandas as pd import ast from PIL", "ax = plt.subplots(1, figsize=(10, 8)) ax.axis('off') ax.imshow(img) for bbox in bboxes: rect =", "def get_image(img_name): return np.array(Image.open(img_name)) def get_bbox(annots): bboxes = [list(annot.values()) for annot in annots]", "import Image import matplotlib.pyplot as plt import matplotlib.patches as patches import cv2 def", "= df['annotations'].apply(lambda x: ast.literal_eval(x)) df['bboxes'] = df.annotations.apply(get_bbox) imglist = [] for i, row", "patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], linewidth=2, edgecolor='r', facecolor=\"none\") ax.add_patch(rect) plt.show() def get_image(img_name): return np.array(Image.open(img_name))", "df['bboxes'] = df.annotations.apply(get_bbox) imglist = [] for i, row in df.iterrows(): img =", "for annot in annots] return bboxes df = pd.read_csv('./slices_df.csv') df['annotations'] = df['annotations'].apply(lambda x:", "x: ast.literal_eval(x)) df['bboxes'] = df.annotations.apply(get_bbox) imglist = [] for i, row in df.iterrows():", "= plt.subplots(1, figsize=(10, 8)) ax.axis('off') ax.imshow(img) for bbox in bboxes: rect = patches.Rectangle((bbox[0],", "% 49 == 0: print(i, len(imglist)) _, axs = plt.subplots(7, 7, figsize=(32, 32))", "if (i+1) % 49 == 0: print(i, len(imglist)) _, axs = plt.subplots(7, 7,", "ax.add_patch(rect) plt.show() def get_image(img_name): return np.array(Image.open(img_name)) def get_bbox(annots): bboxes = [list(annot.values()) for annot", "annots] return bboxes df = pd.read_csv('./slices_df.csv') df['annotations'] = df['annotations'].apply(lambda x: ast.literal_eval(x)) df['bboxes'] =", "= df.annotations.apply(get_bbox) imglist = [] for i, row in df.iterrows(): img = get_image(row[\"path\"])", "as np import pandas as pd import ast from PIL import Image import", "np import pandas as pd import ast from PIL import Image import matplotlib.pyplot", "for i, row in df.iterrows(): img = get_image(row[\"path\"]) bboxes = row[\"bboxes\"] if bboxes", "(i+1) % 49 == 0: print(i, len(imglist)) _, axs = plt.subplots(7, 7, figsize=(32,", "as pd import ast from PIL import Image import matplotlib.pyplot as plt import", "pandas as pd import ast from PIL import Image import matplotlib.pyplot as plt", "for bbox in bboxes: rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], linewidth=2, edgecolor='r', facecolor=\"none\")", "as plt import matplotlib.patches as patches import cv2 def plot_image_and_bboxes(img, bboxes): fig, ax", "bbox[2], bbox[3], linewidth=2, edgecolor='r', facecolor=\"none\") ax.add_patch(rect) plt.show() def get_image(img_name): return np.array(Image.open(img_name)) def get_bbox(annots):", "bboxes = [list(annot.values()) for annot in annots] return bboxes df = pd.read_csv('./slices_df.csv') df['annotations']", "edgecolor='r', facecolor=\"none\") ax.add_patch(rect) plt.show() def get_image(img_name): return np.array(Image.open(img_name)) def get_bbox(annots): bboxes = [list(annot.values())", "_, axs = plt.subplots(7, 7, figsize=(32, 32)) axs = axs.flatten() for img, ax", "for bbox in bboxes: img = cv2.rectangle(img, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0),", "in df.iterrows(): img = get_image(row[\"path\"]) bboxes = row[\"bboxes\"] if bboxes != []: for", "from PIL import Image 
import matplotlib.pyplot as plt import matplotlib.patches as patches import", "cv2 def plot_image_and_bboxes(img, bboxes): fig, ax = plt.subplots(1, figsize=(10, 8)) ax.axis('off') ax.imshow(img) for", "img = cv2.rectangle(img, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 2) imglist.append(img) if (i+1)", "import pandas as pd import ast from PIL import Image import matplotlib.pyplot as", "[] for i, row in df.iterrows(): img = get_image(row[\"path\"]) bboxes = row[\"bboxes\"] if", "df = pd.read_csv('./slices_df.csv') df['annotations'] = df['annotations'].apply(lambda x: ast.literal_eval(x)) df['bboxes'] = df.annotations.apply(get_bbox) imglist =", "rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], linewidth=2, edgecolor='r', facecolor=\"none\") ax.add_patch(rect) plt.show() def get_image(img_name):", "annot in annots] return bboxes df = pd.read_csv('./slices_df.csv') df['annotations'] = df['annotations'].apply(lambda x: ast.literal_eval(x))", "pd import ast from PIL import Image import matplotlib.pyplot as plt import matplotlib.patches", "figsize=(32, 32)) axs = axs.flatten() for img, ax in zip(imglist, axs): ax.axis('off') ax.imshow(img)", "return np.array(Image.open(img_name)) def get_bbox(annots): bboxes = [list(annot.values()) for annot in annots] return bboxes", "49 == 0: print(i, len(imglist)) _, axs = plt.subplots(7, 7, figsize=(32, 32)) axs", "0, 0), 2) imglist.append(img) if (i+1) % 49 == 0: print(i, len(imglist)) _,", "= row[\"bboxes\"] if bboxes != []: for bbox in bboxes: img = cv2.rectangle(img,", "8)) ax.axis('off') ax.imshow(img) for bbox in bboxes: rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3],", "row[\"bboxes\"] if bboxes != []: for bbox in bboxes: img = cv2.rectangle(img, (bbox[0],bbox[1]),", "= patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], linewidth=2, edgecolor='r', facecolor=\"none\") ax.add_patch(rect) plt.show() def get_image(img_name): return", "figsize=(10, 8)) ax.axis('off') ax.imshow(img) for bbox in bboxes: rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2],", "in bboxes: img = cv2.rectangle(img, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 2) imglist.append(img)", "bbox[3], linewidth=2, edgecolor='r', facecolor=\"none\") ax.add_patch(rect) plt.show() def get_image(img_name): return np.array(Image.open(img_name)) def get_bbox(annots): bboxes", "bboxes != []: for bbox in bboxes: img = cv2.rectangle(img, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]),", "linewidth=2, edgecolor='r', facecolor=\"none\") ax.add_patch(rect) plt.show() def get_image(img_name): return np.array(Image.open(img_name)) def get_bbox(annots): bboxes =", "(255, 0, 0), 2) imglist.append(img) if (i+1) % 49 == 0: print(i, len(imglist))", "2) imglist.append(img) if (i+1) % 49 == 0: print(i, len(imglist)) _, axs =", "PIL import Image import matplotlib.pyplot as plt import matplotlib.patches as patches import cv2", "get_image(img_name): return np.array(Image.open(img_name)) def get_bbox(annots): bboxes = [list(annot.values()) for annot in annots] return", "plt.show() def get_image(img_name): return np.array(Image.open(img_name)) def get_bbox(annots): bboxes = [list(annot.values()) for annot in", "plt.subplots(7, 7, figsize=(32, 32)) axs = axs.flatten() for img, ax in zip(imglist, axs):", "as patches import cv2 def plot_image_and_bboxes(img, bboxes): fig, ax = plt.subplots(1, figsize=(10, 8))", "bbox in bboxes: img = cv2.rectangle(img, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 2)", 
"axs = axs.flatten() for img, ax in zip(imglist, axs): ax.axis('off') ax.imshow(img) print(row) plt.show()", "bboxes: img = cv2.rectangle(img, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 2) imglist.append(img) if", "cv2.rectangle(img, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 2) imglist.append(img) if (i+1) % 49", "fig, ax = plt.subplots(1, figsize=(10, 8)) ax.axis('off') ax.imshow(img) for bbox in bboxes: rect", "for img, ax in zip(imglist, axs): ax.axis('off') ax.imshow(img) print(row) plt.show() imglist = []", "= get_image(row[\"path\"]) bboxes = row[\"bboxes\"] if bboxes != []: for bbox in bboxes:", "ast from PIL import Image import matplotlib.pyplot as plt import matplotlib.patches as patches" ]
[ "# Set transmiting position tx_pos = [0, 15, 0] # Set receiving position", "Tracer(OBJ_FILE_PATH) # Set transmiting position tx_pos = [0, 15, 0] # Set receiving", "Tracer # Scene File Path in obj OBJ_FILE_PATH = \"./assets/poznan.obj\" # Initialize Tracer", "tx_pos = [0, 15, 0] # Set receiving position rx_pos = [-30, 1.5,", "Path in obj OBJ_FILE_PATH = \"./assets/poznan.obj\" # Initialize Tracer my_tracer = Tracer(OBJ_FILE_PATH) #", "# Set receiving position rx_pos = [-30, 1.5, 45] # Return the traced", "import Tracer # Scene File Path in obj OBJ_FILE_PATH = \"./assets/poznan.obj\" # Initialize", "0] # Set receiving position rx_pos = [-30, 1.5, 45] # Return the", "<reponame>tamsri/murt from murt import Tracer # Scene File Path in obj OBJ_FILE_PATH =", "Set receiving position rx_pos = [-30, 1.5, 45] # Return the traced paths", "rx_pos = [-30, 1.5, 45] # Return the traced paths results = my_tracer.trace(tx_pos,", "Scene File Path in obj OBJ_FILE_PATH = \"./assets/poznan.obj\" # Initialize Tracer my_tracer =", "in obj OBJ_FILE_PATH = \"./assets/poznan.obj\" # Initialize Tracer my_tracer = Tracer(OBJ_FILE_PATH) # Set", "my_tracer = Tracer(OBJ_FILE_PATH) # Set transmiting position tx_pos = [0, 15, 0] #", "Tracer my_tracer = Tracer(OBJ_FILE_PATH) # Set transmiting position tx_pos = [0, 15, 0]", "# Initialize Tracer my_tracer = Tracer(OBJ_FILE_PATH) # Set transmiting position tx_pos = [0,", "= [-30, 1.5, 45] # Return the traced paths results = my_tracer.trace(tx_pos, rx_pos)", "OBJ_FILE_PATH = \"./assets/poznan.obj\" # Initialize Tracer my_tracer = Tracer(OBJ_FILE_PATH) # Set transmiting position", "15, 0] # Set receiving position rx_pos = [-30, 1.5, 45] # Return", "[-30, 1.5, 45] # Return the traced paths results = my_tracer.trace(tx_pos, rx_pos) print(results)", "# Scene File Path in obj OBJ_FILE_PATH = \"./assets/poznan.obj\" # Initialize Tracer my_tracer", "obj OBJ_FILE_PATH = \"./assets/poznan.obj\" # Initialize Tracer my_tracer = Tracer(OBJ_FILE_PATH) # Set transmiting", "= \"./assets/poznan.obj\" # Initialize Tracer my_tracer = Tracer(OBJ_FILE_PATH) # Set transmiting position tx_pos", "murt import Tracer # Scene File Path in obj OBJ_FILE_PATH = \"./assets/poznan.obj\" #", "position tx_pos = [0, 15, 0] # Set receiving position rx_pos = [-30,", "Initialize Tracer my_tracer = Tracer(OBJ_FILE_PATH) # Set transmiting position tx_pos = [0, 15,", "transmiting position tx_pos = [0, 15, 0] # Set receiving position rx_pos =", "receiving position rx_pos = [-30, 1.5, 45] # Return the traced paths results", "File Path in obj OBJ_FILE_PATH = \"./assets/poznan.obj\" # Initialize Tracer my_tracer = Tracer(OBJ_FILE_PATH)", "from murt import Tracer # Scene File Path in obj OBJ_FILE_PATH = \"./assets/poznan.obj\"", "[0, 15, 0] # Set receiving position rx_pos = [-30, 1.5, 45] #", "\"./assets/poznan.obj\" # Initialize Tracer my_tracer = Tracer(OBJ_FILE_PATH) # Set transmiting position tx_pos =", "Set transmiting position tx_pos = [0, 15, 0] # Set receiving position rx_pos", "= [0, 15, 0] # Set receiving position rx_pos = [-30, 1.5, 45]", "position rx_pos = [-30, 1.5, 45] # Return the traced paths results =", "= Tracer(OBJ_FILE_PATH) # Set transmiting position tx_pos = [0, 15, 0] # Set" ]
[ "(not altered - you often want to have used urllib.urlencode) When you use", "_fritz_sid) #print fetchurl data = urlfetch(fetchurl) jd = json.loads( data )[0]# [0]: assume", "time. The _fritz_sid keeps the login token so if you can keep the", "jd ) return jd if __name__ == '__main__': import pprint pprint.pprint( fritz_fetch() )", "returns the data at an URL - if return_reqresp==True, returns the request and", "dict): data=urllib.urlencode(data) req = urllib2.Request(url, data=data) if headers!=None: for k in headers: vv", "global _fritz_sid, _fritz_lastfetched, _fritz_lastdata td = time.time() - _fritz_lastfetched if td < 5.0", "return _fritz_lastdata try: fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) data = urlfetch(fetchurl) except urllib2.HTTPError, e:", "- you often want to have used urllib.urlencode) When you use this parameter,", "timeout=60) if return_reqresp: return req,response else: data = response.read() return data except (socket.timeout),", "= urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge, response)}) m = re.search('<SID>([0-9a-f]+)</SID>', data) return m.groups()[0] def fritz_fetch(): \"", "= response.read() return data except (socket.timeout), e: if raise_as_none: sys.stderr.write( 'Timeout fetching %r\\n'%url", "challenge = m.groups()[0] m5h = hashlib.md5() hashstr = '%s-%s'%(challenge, password) m5h.update(hashstr.encode('utf_16_le')) response =", "transfers. \"\"\" _fritz_sid = None _fritz_lastfetched = 0 _fritz_lastdata = None IP =", "return_reqresp=False): \"\"\" Returns: - if return_reqresp==False (default), returns the data at an URL", "if our last fetch was less than 5 seconds ago, we're not going", "you often want to have used urllib.urlencode) When you use this parameter, the", "fritz_login(): data = urlfetch('http://%s/login_sid.lua'%(IP,)) m = re.search('<Challenge>([0-9a-f]+)</Challenge>', data) challenge = m.groups()[0] m5h =", "this parameter, the request becomes a POST instead of the default GET (and", "'Timeout fetching %r\\n'%url ) return None else: raise except (socket.error, urllib2.URLError, httplib.HTTPException), e:", "urllib2.Request(url, data=data) if headers!=None: for k in headers: vv = headers[k] if type(vv)", "fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) data = urlfetch(fetchurl) except urllib2.HTTPError, e: if e.code==403: #print", "fritz_fetch(): \" Fetches ul/dl graph data \" global _fritz_sid, _fritz_lastfetched, _fritz_lastdata td =", "= headers[k] if type(vv) in (list,tuple): for v in vv: req.add_header(k,v) else: #", "raise_as_none: In cases where you want to treat common connection failures as 'try", "we're not going to get a new answer return _fritz_lastdata try: fetchurl =", "data except (socket.timeout), e: if raise_as_none: sys.stderr.write( 'Timeout fetching %r\\n'%url ) return None", "td = time.time() - _fritz_lastfetched if td < 5.0 and _fritz_lastdata!=None: # if", "None _fritz_lastfetched = 0 _fritz_lastdata = None IP = '192.168.178.1' username = ''", "can keep the interpreter running you can get fster fetches CONSIDER: fetching things", "parameter, the request becomes a POST instead of the default GET (and seems", "httplib import json \"\"\" Being lazy with globals because you probably don't have", "lazy with globals because you probably don't have more than one in your", "raise_as_none: #print 'Networking problem, %s: %s'%(e.__class__, str(e)) return 
None else: raise def fritz_login():", "a POST instead of the default GET (and seems to force <tt>Content-type: application/x-www-form-urlencoded</tt>)", "if e.code==403: #print \"Forbidden, tryin to log in for new SID\" _fritz_sid =", "catching \"\"\" try: if type(data) in (tuple, dict): data=urllib.urlencode(data) req = urllib2.Request(url, data=data)", "time.time() - _fritz_lastfetched if td < 5.0 and _fritz_lastdata!=None: # if our last", "again later', using True here can save a bunch of your own typing", "data = urlfetch(fetchurl) jd = json.loads( data )[0]# [0]: assume it's one main", "def fritz_login(): data = urlfetch('http://%s/login_sid.lua'%(IP,)) m = re.search('<Challenge>([0-9a-f]+)</Challenge>', data) challenge = m.groups()[0] m5h", "urlfetch(url, data=None, headers=None, raise_as_none=False, return_reqresp=False): \"\"\" Returns: - if return_reqresp==False (default), returns the", "you use this parameter, the request becomes a POST instead of the default", "{'response':'%s-%s'%(challenge, response)}) m = re.search('<SID>([0-9a-f]+)</SID>', data) return m.groups()[0] def fritz_fetch(): \" Fetches ul/dl", "sys import urllib, urllib2, socket, httplib import json \"\"\" Being lazy with globals", "running you can get fster fetches CONSIDER: fetching things beyond transfers. \"\"\" _fritz_sid", "= hashlib.md5() hashstr = '%s-%s'%(challenge, password) m5h.update(hashstr.encode('utf_16_le')) response = m5h.hexdigest() data = urlfetch('http://%s/login_sid.lua'%(IP,),", "response = m5h.hexdigest() data = urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge, response)}) m = re.search('<SID>([0-9a-f]+)</SID>', data) return", "if you can keep the interpreter running you can get fster fetches CONSIDER:", "Returns: - if return_reqresp==False (default), returns the data at an URL - if", "e: if raise_as_none: sys.stderr.write( 'Timeout fetching %r\\n'%url ) return None else: raise except", "ago, we're not going to get a new answer return _fritz_lastdata try: fetchurl", "_fritz_sid) data = urlfetch(fetchurl) except urllib2.HTTPError, e: if e.code==403: #print \"Forbidden, tryin to", "have more than one in your LAN Note that login takes a little", "to have used urllib.urlencode) When you use this parameter, the request becomes a", "the default GET (and seems to force <tt>Content-type: application/x-www-form-urlencoded</tt>) headers: dict of additional", "because you probably don't have more than one in your LAN Note that", "a sequence of tuples (will be encoded), - a string (not altered -", "here can save a bunch of your own typing in error catching \"\"\"", "unicode req.add_header(k,vv) response = urllib2.urlopen(req, timeout=60) if return_reqresp: return req,response else: data =", "don't have more than one in your LAN Note that login takes a", "GET (and seems to force <tt>Content-type: application/x-www-form-urlencoded</tt>) headers: dict of additional headers (each", "m = re.search('<SID>([0-9a-f]+)</SID>', data) return m.groups()[0] def fritz_fetch(): \" Fetches ul/dl graph data", "headers: vv = headers[k] if type(vv) in (list,tuple): for v in vv: req.add_header(k,v)", "= None _fritz_lastfetched = 0 _fritz_lastdata = None IP = '192.168.178.1' username =", "return_reqresp==True, returns the request and response objects (can be useful for streams) data:", "The _fritz_sid keeps the login token so if you can keep the interpreter", "re.search('<SID>([0-9a-f]+)</SID>', data) return m.groups()[0] def fritz_fetch(): \" Fetches ul/dl graph data \" global", "0 _fritz_lastdata = 
None IP = '192.168.178.1' username = '' password = '<PASSWORD>'", "(will be encoded), - a string (not altered - you often want to", "of the default GET (and seems to force <tt>Content-type: application/x-www-form-urlencoded</tt>) headers: dict of", "'192.168.178.1' username = '' password = '<PASSWORD>' def urlfetch(url, data=None, headers=None, raise_as_none=False, return_reqresp=False):", "data=data) if headers!=None: for k in headers: vv = headers[k] if type(vv) in", "None IP = '192.168.178.1' username = '' password = '<PASSWORD>' def urlfetch(url, data=None,", "Being lazy with globals because you probably don't have more than one in", "hashlib, pprint, re, sys import urllib, urllib2, socket, httplib import json \"\"\" Being", "password = '<PASSWORD>' def urlfetch(url, data=None, headers=None, raise_as_none=False, return_reqresp=False): \"\"\" Returns: - if", "you can get fster fetches CONSIDER: fetching things beyond transfers. \"\"\" _fritz_sid =", "5.0 and _fritz_lastdata!=None: # if our last fetch was less than 5 seconds", "else: raise except (socket.error, urllib2.URLError, httplib.HTTPException), e: if raise_as_none: #print 'Networking problem, %s:", "fster fetches CONSIDER: fetching things beyond transfers. \"\"\" _fritz_sid = None _fritz_lastfetched =", "altered - you often want to have used urllib.urlencode) When you use this", "_fritz_sid = fritz_login() fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) #print fetchurl data = urlfetch(fetchurl) jd", "import urllib, urllib2, socket, httplib import json \"\"\" Being lazy with globals because", "login takes a little time. The _fritz_sid keeps the login token so if", "than 5 seconds ago, we're not going to get a new answer return", "response)}) m = re.search('<SID>([0-9a-f]+)</SID>', data) return m.groups()[0] def fritz_fetch(): \" Fetches ul/dl graph", "password) m5h.update(hashstr.encode('utf_16_le')) response = m5h.hexdigest() data = urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge, response)}) m = re.search('<SID>([0-9a-f]+)</SID>',", "_fritz_lastdata!=None: # if our last fetch was less than 5 seconds ago, we're", "save a bunch of your own typing in error catching \"\"\" try: if", "= urlfetch('http://%s/login_sid.lua'%(IP,)) m = re.search('<Challenge>([0-9a-f]+)</Challenge>', data) challenge = m.groups()[0] m5h = hashlib.md5() hashstr", "type(vv) in (list,tuple): for v in vv: req.add_header(k,v) else: # assume single string.", "= m.groups()[0] m5h = hashlib.md5() hashstr = '%s-%s'%(challenge, password) m5h.update(hashstr.encode('utf_16_le')) response = m5h.hexdigest()", "takes a little time. The _fritz_sid keeps the login token so if you", "(can be useful for streams) data: May be - a dict - a", "sequence of tuples (will be encoded), - a string (not altered - you", "headers=None, raise_as_none=False, return_reqresp=False): \"\"\" Returns: - if return_reqresp==False (default), returns the data at", "= re.search('<SID>([0-9a-f]+)</SID>', data) return m.groups()[0] def fritz_fetch(): \" Fetches ul/dl graph data \"", "(default), returns the data at an URL - if return_reqresp==True, returns the request", "using True here can save a bunch of your own typing in error", "return None else: raise def fritz_login(): data = urlfetch('http://%s/login_sid.lua'%(IP,)) m = re.search('<Challenge>([0-9a-f]+)</Challenge>', data)", "vv = headers[k] if type(vv) in (list,tuple): for v in vv: req.add_header(k,v) else:", "things beyond transfers. 
\"\"\" _fritz_sid = None _fritz_lastfetched = 0 _fritz_lastdata = None", "be - a dict - a sequence of tuples (will be encoded), -", "error catching \"\"\" try: if type(data) in (tuple, dict): data=urllib.urlencode(data) req = urllib2.Request(url,", "= 0 _fritz_lastdata = None IP = '192.168.178.1' username = '' password =", "force <tt>Content-type: application/x-www-form-urlencoded</tt>) headers: dict of additional headers (each is add_header()'d) raise_as_none: In", "and _fritz_lastdata!=None: # if our last fetch was less than 5 seconds ago,", "can save a bunch of your own typing in error catching \"\"\" try:", "for k in headers: vv = headers[k] if type(vv) in (list,tuple): for v", "TODO: consider unicode req.add_header(k,vv) response = urllib2.urlopen(req, timeout=60) if return_reqresp: return req,response else:", "less than 5 seconds ago, we're not going to get a new answer", "than one in your LAN Note that login takes a little time. The", "fetching things beyond transfers. \"\"\" _fritz_sid = None _fritz_lastfetched = 0 _fritz_lastdata =", "raise_as_none: sys.stderr.write( 'Timeout fetching %r\\n'%url ) return None else: raise except (socket.error, urllib2.URLError,", "raise except (socket.error, urllib2.URLError, httplib.HTTPException), e: if raise_as_none: #print 'Networking problem, %s: %s'%(e.__class__,", "'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) data = urlfetch(fetchurl) except urllib2.HTTPError, e: if e.code==403: #print \"Forbidden, tryin", "< 5.0 and _fritz_lastdata!=None: # if our last fetch was less than 5", "= '' password = '<PASSWORD>' def urlfetch(url, data=None, headers=None, raise_as_none=False, return_reqresp=False): \"\"\" Returns:", "(socket.timeout), e: if raise_as_none: sys.stderr.write( 'Timeout fetching %r\\n'%url ) return None else: raise", "used urllib.urlencode) When you use this parameter, the request becomes a POST instead", "# if our last fetch was less than 5 seconds ago, we're not", "#print fetchurl data = urlfetch(fetchurl) jd = json.loads( data )[0]# [0]: assume it's", "to treat common connection failures as 'try again later', using True here can", "that login takes a little time. The _fritz_sid keeps the login token so", "in (list,tuple): for v in vv: req.add_header(k,v) else: # assume single string. 
TODO:", "raise def fritz_login(): data = urlfetch('http://%s/login_sid.lua'%(IP,)) m = re.search('<Challenge>([0-9a-f]+)</Challenge>', data) challenge = m.groups()[0]", "one main interface _fritz_lastfetched = time.time() _fritz_lastdata = jd #pprint.pprint( jd ) return", "#print \"Forbidden, tryin to log in for new SID\" _fritz_sid = fritz_login() fetchurl", "m5h = hashlib.md5() hashstr = '%s-%s'%(challenge, password) m5h.update(hashstr.encode('utf_16_le')) response = m5h.hexdigest() data =", "m5h.hexdigest() data = urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge, response)}) m = re.search('<SID>([0-9a-f]+)</SID>', data) return m.groups()[0] def", "you probably don't have more than one in your LAN Note that login", "try: if type(data) in (tuple, dict): data=urllib.urlencode(data) req = urllib2.Request(url, data=data) if headers!=None:", "an URL - if return_reqresp==True, returns the request and response objects (can be", "= urlfetch(fetchurl) except urllib2.HTTPError, e: if e.code==403: #print \"Forbidden, tryin to log in", "_fritz_lastdata = jd #pprint.pprint( jd ) return jd if __name__ == '__main__': import", "return req,response else: data = response.read() return data except (socket.timeout), e: if raise_as_none:", "for v in vv: req.add_header(k,v) else: # assume single string. TODO: consider unicode", "data) return m.groups()[0] def fritz_fetch(): \" Fetches ul/dl graph data \" global _fritz_sid,", "- if return_reqresp==True, returns the request and response objects (can be useful for", "last fetch was less than 5 seconds ago, we're not going to get", "the request becomes a POST instead of the default GET (and seems to", "headers: dict of additional headers (each is add_header()'d) raise_as_none: In cases where you", "LAN Note that login takes a little time. The _fritz_sid keeps the login", "get fster fetches CONSIDER: fetching things beyond transfers. \"\"\" _fritz_sid = None _fritz_lastfetched", "data: May be - a dict - a sequence of tuples (will be", "<gh_stars>0 import time, urllib, urllib2, hashlib, pprint, re, sys import urllib, urllib2, socket,", "type(data) in (tuple, dict): data=urllib.urlencode(data) req = urllib2.Request(url, data=data) if headers!=None: for k", "POST instead of the default GET (and seems to force <tt>Content-type: application/x-www-form-urlencoded</tt>) headers:", "= fritz_login() fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) #print fetchurl data = urlfetch(fetchurl) jd =", "in vv: req.add_header(k,v) else: # assume single string. TODO: consider unicode req.add_header(k,vv) response", "td < 5.0 and _fritz_lastdata!=None: # if our last fetch was less than", "_fritz_lastdata td = time.time() - _fritz_lastfetched if td < 5.0 and _fritz_lastdata!=None: #", "json \"\"\" Being lazy with globals because you probably don't have more than", "dict of additional headers (each is add_header()'d) raise_as_none: In cases where you want", "IP = '192.168.178.1' username = '' password = '<PASSWORD>' def urlfetch(url, data=None, headers=None,", "req.add_header(k,v) else: # assume single string. 
TODO: consider unicode req.add_header(k,vv) response = urllib2.urlopen(req,", "re, sys import urllib, urllib2, socket, httplib import json \"\"\" Being lazy with", "main interface _fritz_lastfetched = time.time() _fritz_lastdata = jd #pprint.pprint( jd ) return jd", "= re.search('<Challenge>([0-9a-f]+)</Challenge>', data) challenge = m.groups()[0] m5h = hashlib.md5() hashstr = '%s-%s'%(challenge, password)", "data) challenge = m.groups()[0] m5h = hashlib.md5() hashstr = '%s-%s'%(challenge, password) m5h.update(hashstr.encode('utf_16_le')) response", "raise_as_none=False, return_reqresp=False): \"\"\" Returns: - if return_reqresp==False (default), returns the data at an", "urllib, urllib2, socket, httplib import json \"\"\" Being lazy with globals because you", "vv: req.add_header(k,v) else: # assume single string. TODO: consider unicode req.add_header(k,vv) response =", "if td < 5.0 and _fritz_lastdata!=None: # if our last fetch was less", "fetches CONSIDER: fetching things beyond transfers. \"\"\" _fritz_sid = None _fritz_lastfetched = 0", "fritz_login() fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) #print fetchurl data = urlfetch(fetchurl) jd = json.loads(", "Note that login takes a little time. The _fritz_sid keeps the login token", "else: # assume single string. TODO: consider unicode req.add_header(k,vv) response = urllib2.urlopen(req, timeout=60)", "'try again later', using True here can save a bunch of your own", "string. TODO: consider unicode req.add_header(k,vv) response = urllib2.urlopen(req, timeout=60) if return_reqresp: return req,response", "urllib2.HTTPError, e: if e.code==403: #print \"Forbidden, tryin to log in for new SID\"", "\"\"\" Returns: - if return_reqresp==False (default), returns the data at an URL -", "jd #pprint.pprint( jd ) return jd if __name__ == '__main__': import pprint pprint.pprint(", "response objects (can be useful for streams) data: May be - a dict", "\"\"\" Being lazy with globals because you probably don't have more than one", "v in vv: req.add_header(k,v) else: # assume single string. TODO: consider unicode req.add_header(k,vv)", "encoded), - a string (not altered - you often want to have used", "often want to have used urllib.urlencode) When you use this parameter, the request", "= urllib2.urlopen(req, timeout=60) if return_reqresp: return req,response else: data = response.read() return data", "hashstr = '%s-%s'%(challenge, password) m5h.update(hashstr.encode('utf_16_le')) response = m5h.hexdigest() data = urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge, response)})", "re.search('<Challenge>([0-9a-f]+)</Challenge>', data) challenge = m.groups()[0] m5h = hashlib.md5() hashstr = '%s-%s'%(challenge, password) m5h.update(hashstr.encode('utf_16_le'))", "\" global _fritz_sid, _fritz_lastfetched, _fritz_lastdata td = time.time() - _fritz_lastfetched if td <", "= None IP = '192.168.178.1' username = '' password = '<PASSWORD>' def urlfetch(url,", "common connection failures as 'try again later', using True here can save a", "= json.loads( data )[0]# [0]: assume it's one main interface _fritz_lastfetched = time.time()", "data = response.read() return data except (socket.timeout), e: if raise_as_none: sys.stderr.write( 'Timeout fetching", "(list,tuple): for v in vv: req.add_header(k,v) else: # assume single string. 
TODO: consider", "get a new answer return _fritz_lastdata try: fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) data =", "urlfetch(fetchurl) except urllib2.HTTPError, e: if e.code==403: #print \"Forbidden, tryin to log in for", "returns the request and response objects (can be useful for streams) data: May", "our last fetch was less than 5 seconds ago, we're not going to", "SID\" _fritz_sid = fritz_login() fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) #print fetchurl data = urlfetch(fetchurl)", "request and response objects (can be useful for streams) data: May be -", "fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) #print fetchurl data = urlfetch(fetchurl) jd = json.loads( data", "m = re.search('<Challenge>([0-9a-f]+)</Challenge>', data) challenge = m.groups()[0] m5h = hashlib.md5() hashstr = '%s-%s'%(challenge,", "None else: raise def fritz_login(): data = urlfetch('http://%s/login_sid.lua'%(IP,)) m = re.search('<Challenge>([0-9a-f]+)</Challenge>', data) challenge", "_fritz_sid keeps the login token so if you can keep the interpreter running", "be encoded), - a string (not altered - you often want to have", "e: if e.code==403: #print \"Forbidden, tryin to log in for new SID\" _fritz_sid", "seems to force <tt>Content-type: application/x-www-form-urlencoded</tt>) headers: dict of additional headers (each is add_header()'d)", "the login token so if you can keep the interpreter running you can", "urllib, urllib2, hashlib, pprint, re, sys import urllib, urllib2, socket, httplib import json", "more than one in your LAN Note that login takes a little time.", "becomes a POST instead of the default GET (and seems to force <tt>Content-type:", "own typing in error catching \"\"\" try: if type(data) in (tuple, dict): data=urllib.urlencode(data)", "json.loads( data )[0]# [0]: assume it's one main interface _fritz_lastfetched = time.time() _fritz_lastdata", "request becomes a POST instead of the default GET (and seems to force", "additional headers (each is add_header()'d) raise_as_none: In cases where you want to treat", "%s: %s'%(e.__class__, str(e)) return None else: raise def fritz_login(): data = urlfetch('http://%s/login_sid.lua'%(IP,)) m", "req,response else: data = response.read() return data except (socket.timeout), e: if raise_as_none: sys.stderr.write(", "return data except (socket.timeout), e: if raise_as_none: sys.stderr.write( 'Timeout fetching %r\\n'%url ) return", "for new SID\" _fritz_sid = fritz_login() fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) #print fetchurl data", "a bunch of your own typing in error catching \"\"\" try: if type(data)", "return_reqresp==False (default), returns the data at an URL - if return_reqresp==True, returns the", "data=urllib.urlencode(data) req = urllib2.Request(url, data=data) if headers!=None: for k in headers: vv =", "\"\"\" try: if type(data) in (tuple, dict): data=urllib.urlencode(data) req = urllib2.Request(url, data=data) if", "import json \"\"\" Being lazy with globals because you probably don't have more", "%s'%(e.__class__, str(e)) return None else: raise def fritz_login(): data = urlfetch('http://%s/login_sid.lua'%(IP,)) m =", "(each is add_header()'d) raise_as_none: In cases where you want to treat common connection", "use this parameter, the request 
becomes a POST instead of the default GET", "_fritz_lastfetched if td < 5.0 and _fritz_lastdata!=None: # if our last fetch was", "- if return_reqresp==False (default), returns the data at an URL - if return_reqresp==True,", "ul/dl graph data \" global _fritz_sid, _fritz_lastfetched, _fritz_lastdata td = time.time() - _fritz_lastfetched", "one in your LAN Note that login takes a little time. The _fritz_sid", "May be - a dict - a sequence of tuples (will be encoded),", "_fritz_lastfetched = time.time() _fritz_lastdata = jd #pprint.pprint( jd ) return jd if __name__", "of tuples (will be encoded), - a string (not altered - you often", "pprint, re, sys import urllib, urllib2, socket, httplib import json \"\"\" Being lazy", "problem, %s: %s'%(e.__class__, str(e)) return None else: raise def fritz_login(): data = urlfetch('http://%s/login_sid.lua'%(IP,))", "'%s-%s'%(challenge, password) m5h.update(hashstr.encode('utf_16_le')) response = m5h.hexdigest() data = urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge, response)}) m =", "if type(data) in (tuple, dict): data=urllib.urlencode(data) req = urllib2.Request(url, data=data) if headers!=None: for", "seconds ago, we're not going to get a new answer return _fritz_lastdata try:", "CONSIDER: fetching things beyond transfers. \"\"\" _fritz_sid = None _fritz_lastfetched = 0 _fritz_lastdata", "username = '' password = '<PASSWORD>' def urlfetch(url, data=None, headers=None, raise_as_none=False, return_reqresp=False): \"\"\"", "fetching %r\\n'%url ) return None else: raise except (socket.error, urllib2.URLError, httplib.HTTPException), e: if", "- a dict - a sequence of tuples (will be encoded), - a", "<tt>Content-type: application/x-www-form-urlencoded</tt>) headers: dict of additional headers (each is add_header()'d) raise_as_none: In cases", "# assume single string. TODO: consider unicode req.add_header(k,vv) response = urllib2.urlopen(req, timeout=60) if", "assume it's one main interface _fritz_lastfetched = time.time() _fritz_lastdata = jd #pprint.pprint( jd", "treat common connection failures as 'try again later', using True here can save", ") return None else: raise except (socket.error, urllib2.URLError, httplib.HTTPException), e: if raise_as_none: #print", "instead of the default GET (and seems to force <tt>Content-type: application/x-www-form-urlencoded</tt>) headers: dict", "the request and response objects (can be useful for streams) data: May be", "single string. TODO: consider unicode req.add_header(k,vv) response = urllib2.urlopen(req, timeout=60) if return_reqresp: return", "import time, urllib, urllib2, hashlib, pprint, re, sys import urllib, urllib2, socket, httplib", "dict - a sequence of tuples (will be encoded), - a string (not", "assume single string. 
TODO: consider unicode req.add_header(k,vv) response = urllib2.urlopen(req, timeout=60) if return_reqresp:", "token so if you can keep the interpreter running you can get fster", "graph data \" global _fritz_sid, _fritz_lastfetched, _fritz_lastdata td = time.time() - _fritz_lastfetched if", "response = urllib2.urlopen(req, timeout=60) if return_reqresp: return req,response else: data = response.read() return", "if headers!=None: for k in headers: vv = headers[k] if type(vv) in (list,tuple):", "k in headers: vv = headers[k] if type(vv) in (list,tuple): for v in", "'Networking problem, %s: %s'%(e.__class__, str(e)) return None else: raise def fritz_login(): data =", "going to get a new answer return _fritz_lastdata try: fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid)", "m.groups()[0] def fritz_fetch(): \" Fetches ul/dl graph data \" global _fritz_sid, _fritz_lastfetched, _fritz_lastdata", "consider unicode req.add_header(k,vv) response = urllib2.urlopen(req, timeout=60) if return_reqresp: return req,response else: data", "globals because you probably don't have more than one in your LAN Note", "want to have used urllib.urlencode) When you use this parameter, the request becomes", "was less than 5 seconds ago, we're not going to get a new", "connection failures as 'try again later', using True here can save a bunch", "'' password = '<PASSWORD>' def urlfetch(url, data=None, headers=None, raise_as_none=False, return_reqresp=False): \"\"\" Returns: -", "so if you can keep the interpreter running you can get fster fetches", "jd = json.loads( data )[0]# [0]: assume it's one main interface _fritz_lastfetched =", "return_reqresp: return req,response else: data = response.read() return data except (socket.timeout), e: if", "= time.time() _fritz_lastdata = jd #pprint.pprint( jd ) return jd if __name__ ==", "application/x-www-form-urlencoded</tt>) headers: dict of additional headers (each is add_header()'d) raise_as_none: In cases where", "socket, httplib import json \"\"\" Being lazy with globals because you probably don't", "cases where you want to treat common connection failures as 'try again later',", "streams) data: May be - a dict - a sequence of tuples (will", "_fritz_lastfetched = 0 _fritz_lastdata = None IP = '192.168.178.1' username = '' password", "tuples (will be encoded), - a string (not altered - you often want", "keep the interpreter running you can get fster fetches CONSIDER: fetching things beyond", "in headers: vv = headers[k] if type(vv) in (list,tuple): for v in vv:", "= time.time() - _fritz_lastfetched if td < 5.0 and _fritz_lastdata!=None: # if our", "return m.groups()[0] def fritz_fetch(): \" Fetches ul/dl graph data \" global _fritz_sid, _fritz_lastfetched,", "useful for streams) data: May be - a dict - a sequence of", "typing in error catching \"\"\" try: if type(data) in (tuple, dict): data=urllib.urlencode(data) req", "log in for new SID\" _fritz_sid = fritz_login() fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) #print", "the interpreter running you can get fster fetches CONSIDER: fetching things beyond transfers.", "return None else: raise except (socket.error, urllib2.URLError, httplib.HTTPException), e: if raise_as_none: #print 'Networking", "5 seconds ago, we're not going to get a new answer return _fritz_lastdata", "to log in for new SID\" _fritz_sid = fritz_login() fetchurl = 
'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid)", "urllib2, socket, httplib import json \"\"\" Being lazy with globals because you probably", "except (socket.error, urllib2.URLError, httplib.HTTPException), e: if raise_as_none: #print 'Networking problem, %s: %s'%(e.__class__, str(e))", "Fetches ul/dl graph data \" global _fritz_sid, _fritz_lastfetched, _fritz_lastdata td = time.time() -", "with globals because you probably don't have more than one in your LAN", "be useful for streams) data: May be - a dict - a sequence", "later', using True here can save a bunch of your own typing in", "fetch was less than 5 seconds ago, we're not going to get a", "urlfetch('http://%s/login_sid.lua'%(IP,)) m = re.search('<Challenge>([0-9a-f]+)</Challenge>', data) challenge = m.groups()[0] m5h = hashlib.md5() hashstr =", "failures as 'try again later', using True here can save a bunch of", "req.add_header(k,vv) response = urllib2.urlopen(req, timeout=60) if return_reqresp: return req,response else: data = response.read()", "fetchurl data = urlfetch(fetchurl) jd = json.loads( data )[0]# [0]: assume it's one", "where you want to treat common connection failures as 'try again later', using", "probably don't have more than one in your LAN Note that login takes", "= m5h.hexdigest() data = urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge, response)}) m = re.search('<SID>([0-9a-f]+)</SID>', data) return m.groups()[0]", "req = urllib2.Request(url, data=data) if headers!=None: for k in headers: vv = headers[k]", "your LAN Note that login takes a little time. The _fritz_sid keeps the", "def urlfetch(url, data=None, headers=None, raise_as_none=False, return_reqresp=False): \"\"\" Returns: - if return_reqresp==False (default), returns", "= '<PASSWORD>' def urlfetch(url, data=None, headers=None, raise_as_none=False, return_reqresp=False): \"\"\" Returns: - if return_reqresp==False", "urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge, response)}) m = re.search('<SID>([0-9a-f]+)</SID>', data) return m.groups()[0] def fritz_fetch(): \" Fetches", "sys.stderr.write( 'Timeout fetching %r\\n'%url ) return None else: raise except (socket.error, urllib2.URLError, httplib.HTTPException),", "as 'try again later', using True here can save a bunch of your", "not going to get a new answer return _fritz_lastdata try: fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP,", "answer return _fritz_lastdata try: fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) data = urlfetch(fetchurl) except urllib2.HTTPError,", "(and seems to force <tt>Content-type: application/x-www-form-urlencoded</tt>) headers: dict of additional headers (each is", "default GET (and seems to force <tt>Content-type: application/x-www-form-urlencoded</tt>) headers: dict of additional headers", "your own typing in error catching \"\"\" try: if type(data) in (tuple, dict):", "a new answer return _fritz_lastdata try: fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) data = urlfetch(fetchurl)", "beyond transfers. 
\"\"\" _fritz_sid = None _fritz_lastfetched = 0 _fritz_lastdata = None IP", "urlfetch(fetchurl) jd = json.loads( data )[0]# [0]: assume it's one main interface _fritz_lastfetched", "data = urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge, response)}) m = re.search('<SID>([0-9a-f]+)</SID>', data) return m.groups()[0] def fritz_fetch():", "_fritz_sid = None _fritz_lastfetched = 0 _fritz_lastdata = None IP = '192.168.178.1' username", "if return_reqresp: return req,response else: data = response.read() return data except (socket.timeout), e:", "you want to treat common connection failures as 'try again later', using True", "new answer return _fritz_lastdata try: fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) data = urlfetch(fetchurl) except", "to get a new answer return _fritz_lastdata try: fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) data", "_fritz_lastdata = None IP = '192.168.178.1' username = '' password = '<PASSWORD>' def", ")[0]# [0]: assume it's one main interface _fritz_lastfetched = time.time() _fritz_lastdata = jd", "data = urlfetch(fetchurl) except urllib2.HTTPError, e: if e.code==403: #print \"Forbidden, tryin to log", "data at an URL - if return_reqresp==True, returns the request and response objects", "e: if raise_as_none: #print 'Networking problem, %s: %s'%(e.__class__, str(e)) return None else: raise", "_fritz_lastdata try: fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) data = urlfetch(fetchurl) except urllib2.HTTPError, e: if", "if raise_as_none: #print 'Networking problem, %s: %s'%(e.__class__, str(e)) return None else: raise def", "= urllib2.Request(url, data=data) if headers!=None: for k in headers: vv = headers[k] if", "def fritz_fetch(): \" Fetches ul/dl graph data \" global _fritz_sid, _fritz_lastfetched, _fritz_lastdata td", "httplib.HTTPException), e: if raise_as_none: #print 'Networking problem, %s: %s'%(e.__class__, str(e)) return None else:", "in for new SID\" _fritz_sid = fritz_login() fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) #print fetchurl", "%r\\n'%url ) return None else: raise except (socket.error, urllib2.URLError, httplib.HTTPException), e: if raise_as_none:", "#print 'Networking problem, %s: %s'%(e.__class__, str(e)) return None else: raise def fritz_login(): data", "data=None, headers=None, raise_as_none=False, return_reqresp=False): \"\"\" Returns: - if return_reqresp==False (default), returns the data", "the data at an URL - if return_reqresp==True, returns the request and response", "True here can save a bunch of your own typing in error catching", "data = urlfetch('http://%s/login_sid.lua'%(IP,)) m = re.search('<Challenge>([0-9a-f]+)</Challenge>', data) challenge = m.groups()[0] m5h = hashlib.md5()", "little time. 
The _fritz_sid keeps the login token so if you can keep", "- _fritz_lastfetched if td < 5.0 and _fritz_lastdata!=None: # if our last fetch", "(socket.error, urllib2.URLError, httplib.HTTPException), e: if raise_as_none: #print 'Networking problem, %s: %s'%(e.__class__, str(e)) return", "headers[k] if type(vv) in (list,tuple): for v in vv: req.add_header(k,v) else: # assume", "m5h.update(hashstr.encode('utf_16_le')) response = m5h.hexdigest() data = urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge, response)}) m = re.search('<SID>([0-9a-f]+)</SID>', data)", "if raise_as_none: sys.stderr.write( 'Timeout fetching %r\\n'%url ) return None else: raise except (socket.error,", "headers!=None: for k in headers: vv = headers[k] if type(vv) in (list,tuple): for", "and response objects (can be useful for streams) data: May be - a", "= jd #pprint.pprint( jd ) return jd if __name__ == '__main__': import pprint", "at an URL - if return_reqresp==True, returns the request and response objects (can", "'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) #print fetchurl data = urlfetch(fetchurl) jd = json.loads( data )[0]# [0]:", "time, urllib, urllib2, hashlib, pprint, re, sys import urllib, urllib2, socket, httplib import", "In cases where you want to treat common connection failures as 'try again", "a dict - a sequence of tuples (will be encoded), - a string", "can get fster fetches CONSIDER: fetching things beyond transfers. \"\"\" _fritz_sid = None", "None else: raise except (socket.error, urllib2.URLError, httplib.HTTPException), e: if raise_as_none: #print 'Networking problem,", "urllib2.urlopen(req, timeout=60) if return_reqresp: return req,response else: data = response.read() return data except", "_fritz_sid, _fritz_lastfetched, _fritz_lastdata td = time.time() - _fritz_lastfetched if td < 5.0 and", "- a string (not altered - you often want to have used urllib.urlencode)", "'<PASSWORD>' def urlfetch(url, data=None, headers=None, raise_as_none=False, return_reqresp=False): \"\"\" Returns: - if return_reqresp==False (default),", "\" Fetches ul/dl graph data \" global _fritz_sid, _fritz_lastfetched, _fritz_lastdata td = time.time()", "a little time. 
The _fritz_sid keeps the login token so if you can", "to force <tt>Content-type: application/x-www-form-urlencoded</tt>) headers: dict of additional headers (each is add_header()'d) raise_as_none:", "- a sequence of tuples (will be encoded), - a string (not altered", "if type(vv) in (list,tuple): for v in vv: req.add_header(k,v) else: # assume single", "e.code==403: #print \"Forbidden, tryin to log in for new SID\" _fritz_sid = fritz_login()", "\"\"\" _fritz_sid = None _fritz_lastfetched = 0 _fritz_lastdata = None IP = '192.168.178.1'", "_fritz_lastfetched, _fritz_lastdata td = time.time() - _fritz_lastfetched if td < 5.0 and _fritz_lastdata!=None:", "urllib2.URLError, httplib.HTTPException), e: if raise_as_none: #print 'Networking problem, %s: %s'%(e.__class__, str(e)) return None", "data )[0]# [0]: assume it's one main interface _fritz_lastfetched = time.time() _fritz_lastdata =", "response.read() return data except (socket.timeout), e: if raise_as_none: sys.stderr.write( 'Timeout fetching %r\\n'%url )", "try: fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) data = urlfetch(fetchurl) except urllib2.HTTPError, e: if e.code==403:", "except urllib2.HTTPError, e: if e.code==403: #print \"Forbidden, tryin to log in for new", "\"Forbidden, tryin to log in for new SID\" _fritz_sid = fritz_login() fetchurl =", "in error catching \"\"\" try: if type(data) in (tuple, dict): data=urllib.urlencode(data) req =", "time.time() _fritz_lastdata = jd #pprint.pprint( jd ) return jd if __name__ == '__main__':", "new SID\" _fritz_sid = fritz_login() fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) #print fetchurl data =", "interpreter running you can get fster fetches CONSIDER: fetching things beyond transfers. \"\"\"", "When you use this parameter, the request becomes a POST instead of the", "else: data = response.read() return data except (socket.timeout), e: if raise_as_none: sys.stderr.write( 'Timeout", "in your LAN Note that login takes a little time. 
The _fritz_sid keeps", "a string (not altered - you often want to have used urllib.urlencode) When", "= 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) #print fetchurl data = urlfetch(fetchurl) jd = json.loads( data )[0]#", "URL - if return_reqresp==True, returns the request and response objects (can be useful", "objects (can be useful for streams) data: May be - a dict -", "else: raise def fritz_login(): data = urlfetch('http://%s/login_sid.lua'%(IP,)) m = re.search('<Challenge>([0-9a-f]+)</Challenge>', data) challenge =", "data \" global _fritz_sid, _fritz_lastfetched, _fritz_lastdata td = time.time() - _fritz_lastfetched if td", "urllib.urlencode) When you use this parameter, the request becomes a POST instead of", "have used urllib.urlencode) When you use this parameter, the request becomes a POST", "= 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP, _fritz_sid) data = urlfetch(fetchurl) except urllib2.HTTPError, e: if e.code==403: #print \"Forbidden,", "for streams) data: May be - a dict - a sequence of tuples", "is add_header()'d) raise_as_none: In cases where you want to treat common connection failures", "except (socket.timeout), e: if raise_as_none: sys.stderr.write( 'Timeout fetching %r\\n'%url ) return None else:", "you can keep the interpreter running you can get fster fetches CONSIDER: fetching", "of additional headers (each is add_header()'d) raise_as_none: In cases where you want to", "bunch of your own typing in error catching \"\"\" try: if type(data) in", "of your own typing in error catching \"\"\" try: if type(data) in (tuple,", "[0]: assume it's one main interface _fritz_lastfetched = time.time() _fritz_lastdata = jd #pprint.pprint(", "hashlib.md5() hashstr = '%s-%s'%(challenge, password) m5h.update(hashstr.encode('utf_16_le')) response = m5h.hexdigest() data = urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge,", "urllib2, hashlib, pprint, re, sys import urllib, urllib2, socket, httplib import json \"\"\"", "headers (each is add_header()'d) raise_as_none: In cases where you want to treat common", "(tuple, dict): data=urllib.urlencode(data) req = urllib2.Request(url, data=data) if headers!=None: for k in headers:", "= urlfetch(fetchurl) jd = json.loads( data )[0]# [0]: assume it's one main interface", "tryin to log in for new SID\" _fritz_sid = fritz_login() fetchurl = 'http://%s/internet/inetstat_monitor.lua?sid=%s&myXhr=1&action=get_graphic&useajax=1&xhr=1'%(IP,", "add_header()'d) raise_as_none: In cases where you want to treat common connection failures as", "want to treat common connection failures as 'try again later', using True here", "= '%s-%s'%(challenge, password) m5h.update(hashstr.encode('utf_16_le')) response = m5h.hexdigest() data = urlfetch('http://%s/login_sid.lua'%(IP,), {'response':'%s-%s'%(challenge, response)}) m", "m.groups()[0] m5h = hashlib.md5() hashstr = '%s-%s'%(challenge, password) m5h.update(hashstr.encode('utf_16_le')) response = m5h.hexdigest() data", "it's one main interface _fritz_lastfetched = time.time() _fritz_lastdata = jd #pprint.pprint( jd )", "if return_reqresp==False (default), returns the data at an URL - if return_reqresp==True, returns", "str(e)) return None else: raise def fritz_login(): data = urlfetch('http://%s/login_sid.lua'%(IP,)) m = re.search('<Challenge>([0-9a-f]+)</Challenge>',", "login token so if you can keep the interpreter running you can get", "if return_reqresp==True, returns the 
request and response objects (can be useful for streams)", "string (not altered - you often want to have used urllib.urlencode) When you", "#pprint.pprint( jd ) return jd if __name__ == '__main__': import pprint pprint.pprint( fritz_fetch()", "interface _fritz_lastfetched = time.time() _fritz_lastdata = jd #pprint.pprint( jd ) return jd if", "= '192.168.178.1' username = '' password = '<PASSWORD>' def urlfetch(url, data=None, headers=None, raise_as_none=False,", "keeps the login token so if you can keep the interpreter running you", "in (tuple, dict): data=urllib.urlencode(data) req = urllib2.Request(url, data=data) if headers!=None: for k in" ]
[ "0042 0043 0044 ABCD 4 0045 0046 0047 0048 EFGH 8 0049 004A", "TW = TextWrapper(width=min(80, getattr(get_terminal_size(), 'columns')), replace_whitespace=True, initial_indent=' ', subsequent_indent=' ').fill DESCRIPTION = '\\n\\n'.join([", "dest='linelength', metavar='LENGTH', help=_( 'format output using this much input characters. ' 'Default is", "-n 1 0 0001 .''', TW(_('* Finally learn what your favorite Emoji is", "the current version of Python’s unicodedata doesn’t know of ' 'this character yet.')),", "formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument('files', nargs='*', metavar='FILE', default=('-',), help=_( 'input files. Use “-” or keep", "# pylint: disable=unused-import from typing import List, IO, Any # pylint: enable=unused-import from", "character) is replaced with a dot here, ' 'because the current version of", "EFGH 8 0049 004A 004B 004C IJKL 12 004D 004E 004F 0050 MNOP''',", "\\\\ unidump -n 5 0 1F9DD 1F3FD 200D 2642 FE0F .🏽.♂️''', TW(_('See <http://emojipedia.org/man-elf-medium-skin-tone/>", "and whitespace are safely rendered:')), ''' echo -n -e '\\\\x01' | unidump -n", "command analyses the ' 'input and then prints three columns: the raw byte", "TW(_('This will replace every unknown byte from the input file with “X” '", "0046 0047 0048 EFGH 8 0049 004A 004B 004C IJKL 12 004D 004E", "rendered:')), ''' echo -n -e '\\\\x01' | unidump -n 1 0 0001 .''',", "unidump( infile, env=Env( linelength=options.linelength, encoding=options.encoding, lineformat=options.lineformat, output=sys.stdout)) except KeyboardInterrupt: sys.stdout.flush() return 1 else:", "you pipe it through `wc -l`.')), TW(_('This is version {} of unidump, using", "in a stream of code points in hex notation, each on a '", "TW(_('* Basic usage with stdin:')), ''' echo -n 'ABCDEFGHIJKLMNOP' | unidump -n 4", "import List, IO, Any # pylint: enable=unused-import from unicodedata import unidata_version from unidump", "with “.”.')), TW(_('* Only print the code points of the input:')), ''' unidump", "byte counter or rendering of actual data. You can ' 'use this to", "the hex ' 'value enclosed in question marks, e.g., “?F5?”.')), TW(_('You can pipe", "= parser.parse_args(args) try: for filename in options.files: infile = None # type: IO[bytes]", "“X” and with the hex ' 'value enclosed in question marks, e.g., “?F5?”.')),", "or rendering of actual data. You can ' 'use this to count the", "at once, or ' 'even mix all those input methods together.')), ]) EPILOG", "action='version', version=_('%(prog)s {} using Unicode {} data').format( VERSION, unidata_version)) options = parser.parse_args(args) try:", "to be UTF-8 encoded, disregarding locale Do not type-check this: error: Incompatible types", "unicodedata doesn’t know of ' 'this character yet.')), TW(_('* Use it like strings(1):')),", "\"StreamWriter\", variable has type \"TextIO\") error: \"TextIO\" has no attribute \"detach\" \\\\o/ \"\"\"", "whitespace are safely rendered:')), ''' echo -n -e '\\\\x01' | unidump -n 1", "here, ' 'because the current version of Python’s unicodedata doesn’t know of '", "code point in this row, code points in their hex notation, ' 'and", "to raw ' 'bytes) in a file, if you pipe it through `wc", "without byte counter or rendering of actual data. You can ' 'use this", "'%(default)s. You can choose any encoding that ' 'Python supports, e.g. 
“latin-1”.')) parser.add_argument('-e',", "typing import List, IO, Any # pylint: enable=unused-import from unicodedata import unidata_version from", "0049 004A 004B 004C IJKL 12 004D 004E 004F 0050 MNOP''', TW(_('* Dump", "' 'because the current version of Python’s unicodedata doesn’t know of ' 'this", "translated from another encoding:')), ' unidump -c latin-1 some-legacy-file', TW(_('* Dump many files", "unidump -e \\'{data}\\' some-file.bin', TW(_('This will replace every unknown byte from the input", "200D 2642 FE0F .🏽.♂️''', TW(_('See <http://emojipedia.org/man-elf-medium-skin-tone/> for images. ' 'The “elf” emoji (the", "once, or ' 'even mix all those input methods together.')), ]) EPILOG =", "no attribute \"detach\" \\\\o/ \"\"\" sys.stdout = codecs.getwriter(\"utf-8\")(sys.stdout.detach()) def main(args: List[str] = None)", "unidump -c latin-1 some-legacy-file', TW(_('* Dump many files at the same time:')), '", "import argparse import codecs import gettext from os.path import dirname from shutil import", "'--length', type=int, default=16, dest='linelength', metavar='LENGTH', help=_( 'format output using this much input characters.", "keep empty for stdin.')) parser.add_argument('-n', '--length', type=int, default=16, dest='linelength', metavar='LENGTH', help=_( 'format output", "with the hex ' 'value enclosed in question marks, e.g., “?F5?”.')), TW(_('You can", "5 0 1F9DD 1F3FD 200D 2642 FE0F .🏽.♂️''', TW(_('See <http://emojipedia.org/man-elf-medium-skin-tone/> for images. '", "line, without byte counter or rendering of actual data. You can ' 'use", "TL = gettext.translation('unidump', localedir=dirname(__file__)+'/locale', fallback=True) _ = TL.gettext TW = TextWrapper(width=min(80, getattr(get_terminal_size(), 'columns')),", "type-check this: error: Incompatible types in assignment (expression has type \"StreamWriter\", variable has", "unidump call \"\"\" import argparse import codecs import gettext from os.path import dirname", "get_terminal_size import sys from textwrap import TextWrapper # pylint: disable=unused-import from typing import", "List, IO, Any # pylint: enable=unused-import from unicodedata import unidata_version from unidump import", "input in this encoding. Default is ' '%(default)s. You can choose any encoding", "code points in their hex notation, ' 'and finally the raw input characters", "in question marks, e.g., “?F5?”.')), TW(_('You can pipe in data from stdin, select", "= gettext.translation('unidump', localedir=dirname(__file__)+'/locale', fallback=True) _ = TL.gettext TW = TextWrapper(width=min(80, getattr(get_terminal_size(), 'columns')), replace_whitespace=True,", "this encoding. Default is ' '%(default)s. You can choose any encoding that '", "Default is ' '%(default)s. 
You can choose any encoding that ' 'Python supports,", "using Unicode {} data.') .format(VERSION, unidata_version)).lstrip() + '\\n' ]) def force_stdout_to_utf8(): \"\"\"force stdout", "'The “elf” emoji (the first character) is replaced with a dot here, '", "enable=unused-import from unicodedata import unidata_version from unidump import VERSION, unidump from unidump.env import", "\"\"\"entry-point for an unidump CLI call\"\"\" force_stdout_to_utf8() if args is None: args =", "Incompatible types in assignment (expression has type \"StreamWriter\", variable has type \"TextIO\") error:", "many files at the same time:')), ' unidump foo-*.txt', TW(_('* Control characters and", ".format(filename)) continue unidump( infile, env=Env( linelength=options.linelength, encoding=options.encoding, lineformat=options.lineformat, output=sys.stdout)) except KeyboardInterrupt: sys.stdout.flush() return", "004C IJKL 12 004D 004E 004F 0050 MNOP''', TW(_('* Dump the code points", "unidump from unidump.env import Env TL = gettext.translation('unidump', localedir=dirname(__file__)+'/locale', fallback=True) _ = TL.gettext", "dump.')), TW(_('Think of it as hexdump(1) for Unicode. The command analyses the '", "version {} of unidump, using Unicode {} data.') .format(VERSION, unidata_version)).lstrip() + '\\n' ])", "\"\"\" handle the CLI logic for a unidump call \"\"\" import argparse import", "some-legacy-file', TW(_('* Dump many files at the same time:')), ' unidump foo-*.txt', TW(_('*", "in a file, if you pipe it through `wc -l`.')), TW(_('This is version", "IO, Any # pylint: enable=unused-import from unicodedata import unidata_version from unidump import VERSION,", "not type-check this: error: Incompatible types in assignment (expression has type \"StreamWriter\", variable", "input characters. ' 'Default is %(default)s characters.')) parser.add_argument('-c', '--encoding', type=str, default='utf-8', metavar='ENC', help=_(", "parser.add_argument('files', nargs='*', metavar='FILE', default=('-',), help=_( 'input files. Use “-” or keep empty for", "sys.stdin.buffer else: try: infile = open(filename, 'rb') except FileNotFoundError: sys.stdout.flush() sys.stderr.write(_('File {} not", "Unicode. The command analyses the ' 'input and then prints three columns: the", "You can choose any encoding that ' 'Python supports, e.g. “latin-1”.')) parser.add_argument('-e', '--format',", "sys.stdout.flush() sys.stderr.write(_('File {} not found.\\n') .format(filename)) continue except IsADirectoryError: sys.stdout.flush() sys.stderr.write(_('{} is a", "three columns: the raw byte index of the ' 'first code point in", "has no attribute \"detach\" \\\\o/ \"\"\" sys.stdout = codecs.getwriter(\"utf-8\")(sys.stdout.detach()) def main(args: List[str] =", "description=DESCRIPTION, epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument('files', nargs='*', metavar='FILE', default=('-',), help=_( 'input files. 
Use “-”", "if filename == '-': infile = sys.stdin.buffer else: try: infile = open(filename, 'rb')", "pylint: enable=unused-import from unicodedata import unidata_version from unidump import VERSION, unidump from unidump.env", "and then prints three columns: the raw byte index of the ' 'first", "\"TextIO\" has no attribute \"detach\" \\\\o/ \"\"\" sys.stdout = codecs.getwriter(\"utf-8\")(sys.stdout.detach()) def main(args: List[str]", "file, if you pipe it through `wc -l`.')), TW(_('This is version {} of", "those input methods together.')), ]) EPILOG = '\\n\\n'.join([ _('Examples:'), TW(_('* Basic usage with", "on a ' 'new line, without byte counter or rendering of actual data.", "can choose any encoding that ' 'Python supports, e.g. “latin-1”.')) parser.add_argument('-e', '--format', type=str,", "columns: the raw byte index of the ' 'first code point in this", "' 'even mix all those input methods together.')), ]) EPILOG = '\\n\\n'.join([ _('Examples:'),", "examples below on how to use this option.' ) % Env.lineformat.replace('\\n', '\\\\n')) parser.add_argument('-v',", "how to use this option.' ) % Env.lineformat.replace('\\n', '\\\\n')) parser.add_argument('-v', '--version', action='version', version=_('%(prog)s", "it as hexdump(1) for Unicode. The command analyses the ' 'input and then", "-e '\\\\xf0\\\\x9f\\\\xa7\\\\x9d\\\\xf0\\\\x9f\\\\x8f\\\\xbd\\\\xe2' ; \\\\ echo -n -e '\\\\x80\\\\x8d\\\\xe2\\\\x99\\\\x82\\\\xef\\\\xb8\\\\x8f' ; ) | \\\\ unidump", "default='utf-8', metavar='ENC', help=_( 'interpret input in this encoding. Default is ' '%(default)s. You", "“-” or keep empty for stdin.')) parser.add_argument('-n', '--length', type=int, default=16, dest='linelength', metavar='LENGTH', help=_(", "' 'Default is “%s”. ' 'See examples below on how to use this", "1F9DD 1F3FD 200D 2642 FE0F .🏽.♂️''', TW(_('See <http://emojipedia.org/man-elf-medium-skin-tone/> for images. ' 'The “elf”", "List[str] = None) -> int: \"\"\"entry-point for an unidump CLI call\"\"\" force_stdout_to_utf8() if", "parser.add_argument('-c', '--encoding', type=str, default='utf-8', metavar='ENC', help=_( 'interpret input in this encoding. Default is", "''' echo -n -e '\\\\x01' | unidump -n 1 0 0001 .''', TW(_('*", "'first code point in this row, code points in their hex notation, '", "import TextWrapper # pylint: disable=unused-import from typing import List, IO, Any # pylint:", "filename in options.files: infile = None # type: IO[bytes] if filename == '-':", "getattr(get_terminal_size(), 'columns')), replace_whitespace=True, initial_indent=' ', subsequent_indent=' ').fill DESCRIPTION = '\\n\\n'.join([ TW(_('A Unicode code", ".''', TW(_('* Finally learn what your favorite Emoji is composed of:')), ''' (", ".🏽.♂️''', TW(_('See <http://emojipedia.org/man-elf-medium-skin-tone/> for images. 
' 'The “elf” emoji (the first character) is", "infile = sys.stdin.buffer else: try: infile = open(filename, 'rb') except FileNotFoundError: sys.stdout.flush() sys.stderr.write(_('File", "stdin.')) parser.add_argument('-n', '--length', type=int, default=16, dest='linelength', metavar='LENGTH', help=_( 'format output using this much", "Dump the code points translated from another encoding:')), ' unidump -c latin-1 some-legacy-file',", "' 'and finally the raw input characters with control and whitespace ' 'replaced", "control and whitespace character with “.”.')), TW(_('* Only print the code points of", "= codecs.getwriter(\"utf-8\")(sys.stdout.detach()) def main(args: List[str] = None) -> int: \"\"\"entry-point for an unidump", "unicodedata import unidata_version from unidump import VERSION, unidump from unidump.env import Env TL", "encoding that ' 'Python supports, e.g. “latin-1”.')) parser.add_argument('-e', '--format', type=str, default=None, dest='lineformat', metavar='FORMAT',", "echo -n -e '\\\\x01' | unidump -n 1 0 0001 .''', TW(_('* Finally", "is replaced with a dot here, ' 'because the current version of Python’s", "Python’s unicodedata doesn’t know of ' 'this character yet.')), TW(_('* Use it like", "and with the hex ' 'value enclosed in question marks, e.g., “?F5?”.')), TW(_('You", "'value enclosed in question marks, e.g., “?F5?”.')), TW(_('You can pipe in data from", "unidump -n 5 0 1F9DD 1F3FD 200D 2642 FE0F .🏽.♂️''', TW(_('See <http://emojipedia.org/man-elf-medium-skin-tone/> for", "be UTF-8 encoded, disregarding locale Do not type-check this: error: Incompatible types in", "this row, code points in their hex notation, ' 'and finally the raw", "data.') .format(VERSION, unidata_version)).lstrip() + '\\n' ]) def force_stdout_to_utf8(): \"\"\"force stdout to be UTF-8", "a ' 'new line, without byte counter or rendering of actual data. You", "MNOP''', TW(_('* Dump the code points translated from another encoding:')), ' unidump -c", "-n 4 0 0041 0042 0043 0044 ABCD 4 0045 0046 0047 0048", "every control and whitespace character with “.”.')), TW(_('* Only print the code points", "TW(_('This results in a stream of code points in hex notation, each on", "learn what your favorite Emoji is composed of:')), ''' ( echo -n -e", "a directory.\\n') .format(filename)) continue unidump( infile, env=Env( linelength=options.linelength, encoding=options.encoding, lineformat=options.lineformat, output=sys.stdout)) except KeyboardInterrupt:", "{} notation. ' 'Default is “%s”. ' 'See examples below on how to", "You can ' 'use this to count the total amount of characters (as", "help=_( 'format output using this much input characters. ' 'Default is %(default)s characters.'))", "row, code points in their hex notation, ' 'and finally the raw input", "TW(_('* Control characters and whitespace are safely rendered:')), ''' echo -n -e '\\\\x01'", "= '\\n\\n'.join([ _('Examples:'), TW(_('* Basic usage with stdin:')), ''' echo -n 'ABCDEFGHIJKLMNOP' |", "composed of:')), ''' ( echo -n -e '\\\\xf0\\\\x9f\\\\xa7\\\\x9d\\\\xf0\\\\x9f\\\\x8f\\\\xbd\\\\xe2' ; \\\\ echo -n -e", "unidata_version)) options = parser.parse_args(args) try: for filename in options.files: infile = None #", "of unidump, using Unicode {} data.') .format(VERSION, unidata_version)).lstrip() + '\\n' ]) def force_stdout_to_utf8():", "'See examples below on how to use this option.' 
) % Env.lineformat.replace('\\n', '\\\\n'))", "' unidump foo-*.txt', TW(_('* Control characters and whitespace are safely rendered:')), ''' echo", "{} using Unicode {} data').format( VERSION, unidata_version)) options = parser.parse_args(args) try: for filename", "default=('-',), help=_( 'input files. Use “-” or keep empty for stdin.')) parser.add_argument('-n', '--length',", "current version of Python’s unicodedata doesn’t know of ' 'this character yet.')), TW(_('*", "', subsequent_indent=' ').fill DESCRIPTION = '\\n\\n'.join([ TW(_('A Unicode code point dump.')), TW(_('Think of", "-n -e '\\\\xf0\\\\x9f\\\\xa7\\\\x9d\\\\xf0\\\\x9f\\\\x8f\\\\xbd\\\\xe2' ; \\\\ echo -n -e '\\\\x80\\\\x8d\\\\xe2\\\\x99\\\\x82\\\\xef\\\\xb8\\\\x8f' ; ) | \\\\", "-n -e '\\\\x01' | unidump -n 1 0 0001 .''', TW(_('* Finally learn", "Dump many files at the same time:')), ' unidump foo-*.txt', TW(_('* Control characters", "of code points in hex notation, each on a ' 'new line, without", "can ' 'use this to count the total amount of characters (as opposed", "from stdin, select several files at once, or ' 'even mix all those", "1 0 0001 .''', TW(_('* Finally learn what your favorite Emoji is composed", "TW(_('A Unicode code point dump.')), TW(_('Think of it as hexdump(1) for Unicode. The", "type: IO[bytes] if filename == '-': infile = sys.stdin.buffer else: try: infile =", "Use it like strings(1):')), ' unidump -e \\'{data}\\' some-file.bin', TW(_('This will replace every", "dirname from shutil import get_terminal_size import sys from textwrap import TextWrapper # pylint:", "'--encoding', type=str, default='utf-8', metavar='ENC', help=_( 'interpret input in this encoding. Default is '", "'and finally the raw input characters with control and whitespace ' 'replaced by", "select several files at once, or ' 'even mix all those input methods", "disregarding locale Do not type-check this: error: Incompatible types in assignment (expression has", "error: \"TextIO\" has no attribute \"detach\" \\\\o/ \"\"\" sys.stdout = codecs.getwriter(\"utf-8\")(sys.stdout.detach()) def main(args:", "004F 0050 MNOP''', TW(_('* Dump the code points translated from another encoding:')), '", "unidump -e '{repr}'$'\\\\n' -n 1 some-file.txt''', TW(_('This results in a stream of code", "'use this to count the total amount of characters (as opposed to raw", "“%s”. ' 'See examples below on how to use this option.' ) %", "from textwrap import TextWrapper # pylint: disable=unused-import from typing import List, IO, Any", "foo-*.txt', TW(_('* Control characters and whitespace are safely rendered:')), ''' echo -n -e", "argparse.ArgumentParser( prog='unidump', description=DESCRIPTION, epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument('files', nargs='*', metavar='FILE', default=('-',), help=_( 'input files.", "'\\\\x01' | unidump -n 1 0 0001 .''', TW(_('* Finally learn what your", "'-': infile = sys.stdin.buffer else: try: infile = open(filename, 'rb') except FileNotFoundError: sys.stdout.flush()", "hex notation, each on a ' 'new line, without byte counter or rendering", "all those input methods together.')), ]) EPILOG = '\\n\\n'.join([ _('Examples:'), TW(_('* Basic usage", "TW(_('See <http://emojipedia.org/man-elf-medium-skin-tone/> for images. 
"""
handle the CLI logic for a unidump call
"""

import argparse
import codecs
import gettext
from os.path import dirname
from shutil import get_terminal_size
import sys
from textwrap import TextWrapper
# pylint: disable=unused-import
from typing import List, IO, Any
# pylint: enable=unused-import
from unicodedata import unidata_version

from unidump import VERSION, unidump
from unidump.env import Env


TL = gettext.translation('unidump', localedir=dirname(__file__)+'/locale',
                         fallback=True)
_ = TL.gettext
TW = TextWrapper(width=min(80, getattr(get_terminal_size(), 'columns')),
                 replace_whitespace=True, initial_indent=' ',
                 subsequent_indent=' ').fill


DESCRIPTION = '\n\n'.join([
    TW(_('A Unicode code point dump.')),
    TW(_('Think of it as hexdump(1) for Unicode. The command analyses the '
         'input and then prints three columns: the raw byte index of the '
         'first code point in this row, code points in their hex notation, '
         'and finally the raw input characters with control and whitespace '
         'replaced by a dot.')),
    TW(_('Invalid byte sequences are represented with an “X” and with the hex '
         'value enclosed in question marks, e.g., “?F5?”.')),
    TW(_('You can pipe in data from stdin, select several files at once, or '
         'even mix all those input methods together.')),
])

EPILOG = '\n\n'.join([
    _('Examples:'),
    TW(_('* Basic usage with stdin:')),
    '''  echo -n 'ABCDEFGHIJKLMNOP' | unidump -n 4
   0    0041 0042 0043 0044    ABCD
   4    0045 0046 0047 0048    EFGH
   8    0049 004A 004B 004C    IJKL
  12    004D 004E 004F 0050    MNOP''',
    TW(_('* Dump the code points translated from another encoding:')),
    '  unidump -c latin-1 some-legacy-file',
    TW(_('* Dump many files at the same time:')),
    '  unidump foo-*.txt',
    TW(_('* Control characters and whitespace are safely rendered:')),
    '''  echo -n -e '\\x01' | unidump -n 1
   0    0001    .''',
    TW(_('* Finally learn what your favorite Emoji is composed of:')),
    '''  ( echo -n -e '\\xf0\\x9f\\xa7\\x9d\\xf0\\x9f\\x8f\\xbd\\xe2' ; \\
    echo -n -e '\\x80\\x8d\\xe2\\x99\\x82\\xef\\xb8\\x8f' ; ) | \\
    unidump -n 5
   0    1F9DD 1F3FD 200D 2642 FE0F    .🏽.♂️''',
    TW(_('See <http://emojipedia.org/man-elf-medium-skin-tone/> for images. '
         'The “elf” emoji (the first character) is replaced with a dot here, '
         'because the current version of Python’s unicodedata doesn’t know of '
         'this character yet.')),
    TW(_('* Use it like strings(1):')),
    '  unidump -e \'{data}\' some-file.bin',
    TW(_('This will replace every unknown byte from the input file with “X” '
         'and every control and whitespace character with “.”.')),
    TW(_('* Only print the code points of the input:')),
    '''  unidump -e '{repr}'$'\\n' -n 1 some-file.txt''',
    TW(_('This results in a stream of code points in hex notation, each on a '
         'new line, without byte counter or rendering of actual data. You can '
         'use this to count the total amount of characters (as opposed to raw '
         'bytes) in a file, if you pipe it through `wc -l`.')),
    TW(_('This is version {} of unidump, using Unicode {} data.')
       .format(VERSION, unidata_version)).lstrip() + '\n'
])


def force_stdout_to_utf8():
    """force stdout to be UTF-8 encoded, disregarding locale

    Do not type-check this:
    error: Incompatible types in assignment (expression has type
           "StreamWriter", variable has type "TextIO")
    error: "TextIO" has no attribute "detach"

    \\o/
    """
    sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())


def main(args: List[str] = None) -> int:
    """entry-point for an unidump CLI call"""
    force_stdout_to_utf8()

    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser(
        prog='unidump',
        description=DESCRIPTION,
        epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('files', nargs='*', metavar='FILE', default=('-',),
                        help=_(
                            'input files. Use “-” or keep empty for stdin.'))
    parser.add_argument('-n', '--length', type=int, default=16,
                        dest='linelength', metavar='LENGTH',
                        help=_(
                            'format output using this much input characters. '
                            'Default is %(default)s characters.'))
    parser.add_argument('-c', '--encoding', type=str, default='utf-8',
                        metavar='ENC',
                        help=_(
                            'interpret input in this encoding. Default is '
                            '%(default)s. You can choose any encoding that '
                            'Python supports, e.g. “latin-1”.'))
    parser.add_argument('-e', '--format', type=str, default=None,
                        dest='lineformat', metavar='FORMAT',
                        help=_(
                            'specify a custom format in Python’s {} notation. '
                            'Default is “%s”. '
                            'See examples below on how to use this option.'
                        ) % Env.lineformat.replace('\n', '\\n'))
    parser.add_argument('-v', '--version', action='version',
                        version=_('%(prog)s {} using Unicode {} data').format(
                            VERSION, unidata_version))

    options = parser.parse_args(args)

    try:
        for filename in options.files:
            infile = None  # type: IO[bytes]
            if filename == '-':
                infile = sys.stdin.buffer
            else:
                try:
                    infile = open(filename, 'rb')
                except FileNotFoundError:
                    sys.stdout.flush()
                    sys.stderr.write(_('File {} not found.\n')
                                     .format(filename))
                    continue
                except IsADirectoryError:
                    sys.stdout.flush()
                    sys.stderr.write(_('{} is a directory.\n')
                                     .format(filename))
                    continue
            unidump(infile, env=Env(
                linelength=options.linelength,
                encoding=options.encoding,
                lineformat=options.lineformat,
                output=sys.stdout))
    except KeyboardInterrupt:
        sys.stdout.flush()
        return 1
    else:
        return 0
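Because main() accepts an explicit argument list, the entry point can also be driven from Python rather than a shell. A minimal sketch, assuming the module lives at unidump/cli.py (the package layout is inferred, not stated above); the file name sample.txt is only illustrative.

# Minimal sketch: invoke the CLI entry point from Python instead of a shell.
# 'sample.txt' is a placeholder file name, not part of the module above.
import sys
from unidump.cli import main   # assumed module path

if __name__ == '__main__':
    # Equivalent to running: unidump -n 4 -c utf-8 sample.txt
    sys.exit(main(['-n', '4', '-c', 'utf-8', 'sample.txt']))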
from rest_framework import serializers

from api.models import *


class ParliamentaryGroupSerializer(serializers.ModelSerializer):
    class Meta:
        model = ParliamentaryGroup
        fields = ('id', 'name')


class ParliamentarySessionSerializer(serializers.ModelSerializer):
    class Meta:
        model = ParliamentarySession
        fields = ('session_date',)


class CouncilPersonSerializer(serializers.ModelSerializer):
    class Meta:
        model = CouncilPerson
        fields = ('name', 'academic_degree', 'email', 'parliamentary_group')


class FileSerializer(serializers.ModelSerializer):
    class Meta:
        model = File
        fields = ('long_filename', 'short_filename', 'path')


class AnswerSerializer(serializers.ModelSerializer):
    session = serializers.StringRelatedField()
    proposer = CouncilPersonSerializer()
    files = FileSerializer(many=True)

    class Meta:
        model = Motion
        fields = ('id', 'motion_id', 'session', 'title', 'parliamentary_group',
                  'proposer', 'files')


class MotionSerializer(serializers.ModelSerializer):
    session = serializers.StringRelatedField()
    proposer = CouncilPersonSerializer()
    files = FileSerializer(many=True)
    answers = AnswerSerializer(many=True)

    class Meta:
        model = Motion
        fields = ('id', 'motion_id', 'session', 'title', 'motion_type',
                  'parliamentary_group', 'proposer', 'files', 'answers')
[ "import network import torch current = State(None, None) net = network.ResNet() focusMoves =", "enumerate(current.moves): if focus==move[0] and idx==move[-1]: # a legal move for j, move2 in", "current.child[i] for j in range(1000): MCTS(current) data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) best = current.child[0]", "not(current.mandatory): for move in focusMoves: GUI.SetBoard(False, *move) _, focusMoves = current.GetMoves(focus) for move", "net = network.ResNet() focusMoves = [] focus = 0 def MCTS(root): global net", "net(Variable(data)).data[0, 0] root.child[-1].v = delta delta *= -1 else: best = root.BestChild() if", "in current.child: if c.n > best.n: best = c move2 = current.moves[current.child.index(best)] GUI.Move(move2,", "if focus==move[0] and idx==move[-1]: # a legal move for j, move2 in enumerate(current.moves):", "j in range(1000): MCTS(current) data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) best = current.child[0] for c", "None) net = network.ResNet() focusMoves = [] focus = 0 def MCTS(root): global", "root.child[-1].v = delta delta *= -1 else: best = root.BestChild() if best ==", "i: GUI.SetBoard(False, *move2) # clear other highlights GUI.Move(move, current.pos[move[0]]) current = current.child[i] for", "-1 else: delta = -MCTS(best) root.v += delta root.n += 1 return delta", "move in focusMoves: GUI.SetBoard(False, *move) _, focusMoves = current.GetMoves(focus) for move in focusMoves:", "range(1000): MCTS(current) data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) best = current.child[0] for c in current.child:", "= torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) if current.mandatory: for move2 in current.moves: GUI.SetBoard(True, *move2) break f", "* from graphics import Graphics from state import State from torch.autograd import Variable", "+= 1 return delta def Callback(idx, place): global GUI global current global focusMoves", "== STONE: focus = idx if current.GetColour(idx)==current.player and not(current.mandatory): for move in focusMoves:", "= best data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) if current.mandatory: for move2 in current.moves: GUI.SetBoard(True,", "*move) _, focusMoves = current.GetMoves(focus) for move in focusMoves: GUI.SetBoard(True, *move) else: for", "== None: delta = -1 else: delta = -MCTS(best) root.v += delta root.n", "-1 else: best = root.BestChild() if best == None: delta = -1 else:", "j != i: GUI.SetBoard(False, *move2) # clear other highlights GUI.Move(move, current.pos[move[0]]) current =", "focusMoves: GUI.SetBoard(False, *move) _, focusMoves = current.GetMoves(focus) for move in focusMoves: GUI.SetBoard(True, *move)", "for j in range(1000): MCTS(current) data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) best = current.child[0] for", "root.n += 1 return delta def Callback(idx, place): global GUI global current global", "in current.moves: GUI.SetBoard(True, *move2) break f = torch.load(PATH_PARAM) net.load_state_dict(f) net.eval() for i in", "focus = 0 def MCTS(root): global net if root.Expand(): data = torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0) delta", "0 def MCTS(root): global net if root.Expand(): data = torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0) delta = net(Variable(data)).data[0,", "enumerate(current.moves): if j != i: 
GUI.SetBoard(False, *move2) # clear other highlights GUI.Move(move, current.pos[move[0]])", "if c.n > best.n: best = c move2 = current.moves[current.child.index(best)] GUI.Move(move2, current.pos[move2[0]]) current", "best = current.child[0] for c in current.child: if c.n > best.n: best =", "network import torch current = State(None, None) net = network.ResNet() focusMoves = []", "from torch.autograd import Variable import network import torch current = State(None, None) net", "from state import State from torch.autograd import Variable import network import torch current", "if j != i: GUI.SetBoard(False, *move2) # clear other highlights GUI.Move(move, current.pos[move[0]]) current", "global focusMoves global focus if place == STONE: focus = idx if current.GetColour(idx)==current.player", "*move2) # clear other highlights GUI.Move(move, current.pos[move[0]]) current = current.child[i] for j in", "if place == STONE: focus = idx if current.GetColour(idx)==current.player and not(current.mandatory): for move", "> best.n: best = c move2 = current.moves[current.child.index(best)] GUI.Move(move2, current.pos[move2[0]]) current = best", "global GUI global current global focusMoves global focus if place == STONE: focus", "print(net(Variable(data)).data[0,0]) best = current.child[0] for c in current.child: if c.n > best.n: best", "[] focus = 0 def MCTS(root): global net if root.Expand(): data = torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0)", "best = c move2 = current.moves[current.child.index(best)] GUI.Move(move2, current.pos[move2[0]]) current = best data =", "graphics import Graphics from state import State from torch.autograd import Variable import network", "Graphics from state import State from torch.autograd import Variable import network import torch", "torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) if current.mandatory: for move2 in current.moves: GUI.SetBoard(True, *move2) break f =", "current global focusMoves global focus if place == STONE: focus = idx if", "= net(Variable(data)).data[0, 0] root.child[-1].v = delta delta *= -1 else: best = root.BestChild()", "from graphics import Graphics from state import State from torch.autograd import Variable import", "focusMoves = current.GetMoves(focus) for move in focusMoves: GUI.SetBoard(True, *move) else: for i, move", "def MCTS(root): global net if root.Expand(): data = torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0) delta = net(Variable(data)).data[0, 0]", "current.moves: GUI.SetBoard(True, *move2) break f = torch.load(PATH_PARAM) net.load_state_dict(f) net.eval() for i in range(50):", "= delta delta *= -1 else: best = root.BestChild() if best == None:", "current = current.child[i] for j in range(1000): MCTS(current) data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) best", "highlights GUI.Move(move, current.pos[move[0]]) current = current.child[i] for j in range(1000): MCTS(current) data =", "GUI.SetBoard(False, *move) _, focusMoves = current.GetMoves(focus) for move in focusMoves: GUI.SetBoard(True, *move) else:", "= -1 else: delta = -MCTS(best) root.v += delta root.n += 1 return", "= torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0) delta = net(Variable(data)).data[0, 0] root.child[-1].v = delta delta *= -1 else:", "move2 in enumerate(current.moves): if j != i: GUI.SetBoard(False, *move2) # clear other highlights", "Variable import network import torch current = State(None, None) net = network.ResNet() focusMoves", "a 
legal move for j, move2 in enumerate(current.moves): if j != i: GUI.SetBoard(False,", "current.child: if c.n > best.n: best = c move2 = current.moves[current.child.index(best)] GUI.Move(move2, current.pos[move2[0]])", "= [] focus = 0 def MCTS(root): global net if root.Expand(): data =", "delta = net(Variable(data)).data[0, 0] root.child[-1].v = delta delta *= -1 else: best =", "focusMoves: GUI.SetBoard(True, *move) else: for i, move in enumerate(current.moves): if focus==move[0] and idx==move[-1]:", "current.pos[move2[0]]) current = best data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) if current.mandatory: for move2 in", "GUI.SetBoard(True, *move2) break f = torch.load(PATH_PARAM) net.load_state_dict(f) net.eval() for i in range(50): MCTS(current)", "break f = torch.load(PATH_PARAM) net.load_state_dict(f) net.eval() for i in range(50): MCTS(current) GUI =", "*move) else: for i, move in enumerate(current.moves): if focus==move[0] and idx==move[-1]: # a", "in focusMoves: GUI.SetBoard(True, *move) else: for i, move in enumerate(current.moves): if focus==move[0] and", "1 return delta def Callback(idx, place): global GUI global current global focusMoves global", "for move2 in current.moves: GUI.SetBoard(True, *move2) break f = torch.load(PATH_PARAM) net.load_state_dict(f) net.eval() for", "def Callback(idx, place): global GUI global current global focusMoves global focus if place", "global net if root.Expand(): data = torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0) delta = net(Variable(data)).data[0, 0] root.child[-1].v =", "move in enumerate(current.moves): if focus==move[0] and idx==move[-1]: # a legal move for j,", "move for j, move2 in enumerate(current.moves): if j != i: GUI.SetBoard(False, *move2) #", "= 0 def MCTS(root): global net if root.Expand(): data = torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0) delta =", "for move in focusMoves: GUI.SetBoard(True, *move) else: for i, move in enumerate(current.moves): if", "data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) best = current.child[0] for c in current.child: if c.n", "focusMoves global focus if place == STONE: focus = idx if current.GetColour(idx)==current.player and", "else: best = root.BestChild() if best == None: delta = -1 else: delta", "= torch.load(PATH_PARAM) net.load_state_dict(f) net.eval() for i in range(50): MCTS(current) GUI = Graphics(Callback) GUI.Run()", "# a legal move for j, move2 in enumerate(current.moves): if j != i:", "delta def Callback(idx, place): global GUI global current global focusMoves global focus if", "current.child[0] for c in current.child: if c.n > best.n: best = c move2", "and idx==move[-1]: # a legal move for j, move2 in enumerate(current.moves): if j", "current = State(None, None) net = network.ResNet() focusMoves = [] focus = 0", "torch current = State(None, None) net = network.ResNet() focusMoves = [] focus =", "delta *= -1 else: best = root.BestChild() if best == None: delta =", "state import State from torch.autograd import Variable import network import torch current =", "State(None, None) net = network.ResNet() focusMoves = [] focus = 0 def MCTS(root):", "place == STONE: focus = idx if current.GetColour(idx)==current.player and not(current.mandatory): for move in", "in enumerate(current.moves): if j != i: GUI.SetBoard(False, *move2) # clear other highlights GUI.Move(move,", "root.Expand(): data = torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0) delta = 
net(Variable(data)).data[0, 0] root.child[-1].v = delta delta *=", "global current global focusMoves global focus if place == STONE: focus = idx", "best.n: best = c move2 = current.moves[current.child.index(best)] GUI.Move(move2, current.pos[move2[0]]) current = best data", "utils import * from graphics import Graphics from state import State from torch.autograd", "for move in focusMoves: GUI.SetBoard(False, *move) _, focusMoves = current.GetMoves(focus) for move in", "legal move for j, move2 in enumerate(current.moves): if j != i: GUI.SetBoard(False, *move2)", "= network.ResNet() focusMoves = [] focus = 0 def MCTS(root): global net if", "from utils import * from graphics import Graphics from state import State from", "else: for i, move in enumerate(current.moves): if focus==move[0] and idx==move[-1]: # a legal", "if current.GetColour(idx)==current.player and not(current.mandatory): for move in focusMoves: GUI.SetBoard(False, *move) _, focusMoves =", "GUI.Move(move, current.pos[move[0]]) current = current.child[i] for j in range(1000): MCTS(current) data = torch.FloatTensor(StateToImg(current)).unsqueeze(0)", "if root.Expand(): data = torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0) delta = net(Variable(data)).data[0, 0] root.child[-1].v = delta delta", "# clear other highlights GUI.Move(move, current.pos[move[0]]) current = current.child[i] for j in range(1000):", "_, focusMoves = current.GetMoves(focus) for move in focusMoves: GUI.SetBoard(True, *move) else: for i,", "-MCTS(best) root.v += delta root.n += 1 return delta def Callback(idx, place): global", "State from torch.autograd import Variable import network import torch current = State(None, None)", "return delta def Callback(idx, place): global GUI global current global focusMoves global focus", "focus if place == STONE: focus = idx if current.GetColour(idx)==current.player and not(current.mandatory): for", "current.mandatory: for move2 in current.moves: GUI.SetBoard(True, *move2) break f = torch.load(PATH_PARAM) net.load_state_dict(f) net.eval()", "focusMoves = [] focus = 0 def MCTS(root): global net if root.Expand(): data", "in enumerate(current.moves): if focus==move[0] and idx==move[-1]: # a legal move for j, move2", "+= delta root.n += 1 return delta def Callback(idx, place): global GUI global", "= c move2 = current.moves[current.child.index(best)] GUI.Move(move2, current.pos[move2[0]]) current = best data = torch.FloatTensor(StateToImg(current)).unsqueeze(0)", "!= i: GUI.SetBoard(False, *move2) # clear other highlights GUI.Move(move, current.pos[move[0]]) current = current.child[i]", "idx==move[-1]: # a legal move for j, move2 in enumerate(current.moves): if j !=", "in focusMoves: GUI.SetBoard(False, *move) _, focusMoves = current.GetMoves(focus) for move in focusMoves: GUI.SetBoard(True,", "import * from graphics import Graphics from state import State from torch.autograd import", "0] root.child[-1].v = delta delta *= -1 else: best = root.BestChild() if best", "current.GetMoves(focus) for move in focusMoves: GUI.SetBoard(True, *move) else: for i, move in enumerate(current.moves):", "current.pos[move[0]]) current = current.child[i] for j in range(1000): MCTS(current) data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0])", "MCTS(current) data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) best = current.child[0] for c in current.child: if", "clear other highlights GUI.Move(move, current.pos[move[0]]) current = current.child[i] for j 
in range(1000): MCTS(current)", "for i, move in enumerate(current.moves): if focus==move[0] and idx==move[-1]: # a legal move", "*= -1 else: best = root.BestChild() if best == None: delta = -1", "= torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) best = current.child[0] for c in current.child: if c.n >", "for c in current.child: if c.n > best.n: best = c move2 =", "= -MCTS(best) root.v += delta root.n += 1 return delta def Callback(idx, place):", "delta = -MCTS(best) root.v += delta root.n += 1 return delta def Callback(idx,", "else: delta = -MCTS(best) root.v += delta root.n += 1 return delta def", "torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0) delta = net(Variable(data)).data[0, 0] root.child[-1].v = delta delta *= -1 else: best", "import Graphics from state import State from torch.autograd import Variable import network import", "GUI.SetBoard(True, *move) else: for i, move in enumerate(current.moves): if focus==move[0] and idx==move[-1]: #", "= current.child[0] for c in current.child: if c.n > best.n: best = c", "delta delta *= -1 else: best = root.BestChild() if best == None: delta", "current.GetColour(idx)==current.player and not(current.mandatory): for move in focusMoves: GUI.SetBoard(False, *move) _, focusMoves = current.GetMoves(focus)", "= current.child[i] for j in range(1000): MCTS(current) data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) best =", "import torch current = State(None, None) net = network.ResNet() focusMoves = [] focus", "c.n > best.n: best = c move2 = current.moves[current.child.index(best)] GUI.Move(move2, current.pos[move2[0]]) current =", "other highlights GUI.Move(move, current.pos[move[0]]) current = current.child[i] for j in range(1000): MCTS(current) data", "focus==move[0] and idx==move[-1]: # a legal move for j, move2 in enumerate(current.moves): if", "None: delta = -1 else: delta = -MCTS(best) root.v += delta root.n +=", "focus = idx if current.GetColour(idx)==current.player and not(current.mandatory): for move in focusMoves: GUI.SetBoard(False, *move)", "*move2) break f = torch.load(PATH_PARAM) net.load_state_dict(f) net.eval() for i in range(50): MCTS(current) GUI", "import State from torch.autograd import Variable import network import torch current = State(None,", "best data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) if current.mandatory: for move2 in current.moves: GUI.SetBoard(True, *move2)", "net if root.Expand(): data = torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0) delta = net(Variable(data)).data[0, 0] root.child[-1].v = delta", "torch.autograd import Variable import network import torch current = State(None, None) net =", "best == None: delta = -1 else: delta = -MCTS(best) root.v += delta", "= idx if current.GetColour(idx)==current.player and not(current.mandatory): for move in focusMoves: GUI.SetBoard(False, *move) _,", "current.moves[current.child.index(best)] GUI.Move(move2, current.pos[move2[0]]) current = best data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) if current.mandatory: for", "GUI.SetBoard(False, *move2) # clear other highlights GUI.Move(move, current.pos[move[0]]) current = current.child[i] for j", "j, move2 in enumerate(current.moves): if j != i: GUI.SetBoard(False, *move2) # clear other", "= State(None, None) net = network.ResNet() focusMoves = [] focus = 0 def", "Callback(idx, place): global GUI global current global 
focusMoves global focus if place ==", "<gh_stars>0 from utils import * from graphics import Graphics from state import State", "move2 in current.moves: GUI.SetBoard(True, *move2) break f = torch.load(PATH_PARAM) net.load_state_dict(f) net.eval() for i", "data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) if current.mandatory: for move2 in current.moves: GUI.SetBoard(True, *move2) break", "torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) best = current.child[0] for c in current.child: if c.n > best.n:", "GUI global current global focusMoves global focus if place == STONE: focus =", "in range(1000): MCTS(current) data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) best = current.child[0] for c in", "idx if current.GetColour(idx)==current.player and not(current.mandatory): for move in focusMoves: GUI.SetBoard(False, *move) _, focusMoves", "delta root.n += 1 return delta def Callback(idx, place): global GUI global current", "if current.mandatory: for move2 in current.moves: GUI.SetBoard(True, *move2) break f = torch.load(PATH_PARAM) net.load_state_dict(f)", "c in current.child: if c.n > best.n: best = c move2 = current.moves[current.child.index(best)]", "root.v += delta root.n += 1 return delta def Callback(idx, place): global GUI", "network.ResNet() focusMoves = [] focus = 0 def MCTS(root): global net if root.Expand():", "best = root.BestChild() if best == None: delta = -1 else: delta =", "and not(current.mandatory): for move in focusMoves: GUI.SetBoard(False, *move) _, focusMoves = current.GetMoves(focus) for", "GUI.Move(move2, current.pos[move2[0]]) current = best data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) if current.mandatory: for move2", "= current.moves[current.child.index(best)] GUI.Move(move2, current.pos[move2[0]]) current = best data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) if current.mandatory:", "i, move in enumerate(current.moves): if focus==move[0] and idx==move[-1]: # a legal move for", "root.BestChild() if best == None: delta = -1 else: delta = -MCTS(best) root.v", "move in focusMoves: GUI.SetBoard(True, *move) else: for i, move in enumerate(current.moves): if focus==move[0]", "for j, move2 in enumerate(current.moves): if j != i: GUI.SetBoard(False, *move2) # clear", "delta = -1 else: delta = -MCTS(best) root.v += delta root.n += 1", "import Variable import network import torch current = State(None, None) net = network.ResNet()", "STONE: focus = idx if current.GetColour(idx)==current.player and not(current.mandatory): for move in focusMoves: GUI.SetBoard(False,", "print(net(Variable(data)).data[0,0]) if current.mandatory: for move2 in current.moves: GUI.SetBoard(True, *move2) break f = torch.load(PATH_PARAM)", "= root.BestChild() if best == None: delta = -1 else: delta = -MCTS(best)", "if best == None: delta = -1 else: delta = -MCTS(best) root.v +=", "= current.GetMoves(focus) for move in focusMoves: GUI.SetBoard(True, *move) else: for i, move in", "place): global GUI global current global focusMoves global focus if place == STONE:", "data = torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0) delta = net(Variable(data)).data[0, 0] root.child[-1].v = delta delta *= -1", "move2 = current.moves[current.child.index(best)] GUI.Move(move2, current.pos[move2[0]]) current = best data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) 
print(net(Variable(data)).data[0,0]) if", "global focus if place == STONE: focus = idx if current.GetColour(idx)==current.player and not(current.mandatory):", "MCTS(root): global net if root.Expand(): data = torch.FloatTensor(StateToImg(root.child[-1])).unsqueeze(0) delta = net(Variable(data)).data[0, 0] root.child[-1].v", "f = torch.load(PATH_PARAM) net.load_state_dict(f) net.eval() for i in range(50): MCTS(current) GUI = Graphics(Callback)", "current = best data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0]) if current.mandatory: for move2 in current.moves:", "c move2 = current.moves[current.child.index(best)] GUI.Move(move2, current.pos[move2[0]]) current = best data = torch.FloatTensor(StateToImg(current)).unsqueeze(0) print(net(Variable(data)).data[0,0])" ]
[ "Language :: English', 'Programming Language :: Python :: 2', 'Programming Language :: Python", "+ '\\n\\n' + changelog, author='Fusionbox, Inc.', author_email='<EMAIL>', url='https://github.com/fusionbox/django-absoluteuri', packages=[package for package in find_packages()", "Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python", "URI functions and template tags for Django\" def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() readme", "Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Natural Language", "Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language", "= read('README.rst') changelog = read('CHANGELOG.rst') setup( name='django-absoluteuri', version='1.3.1.dev0', description=__doc__, long_description=readme + '\\n\\n' +", "package.startswith('absoluteuri')], install_requires=[ 'Django>=1.11', ], test_suite='setuptest.setuptest.SetupTestSuite', tests_require=[ 'django-setuptest', ], license=\"Apache 2.0\", zip_safe=True, keywords='django-absoluteuri', classifiers=[", "url='https://github.com/fusionbox/django-absoluteuri', packages=[package for package in find_packages() if package.startswith('absoluteuri')], install_requires=[ 'Django>=1.11', ], test_suite='setuptest.setuptest.SetupTestSuite', tests_require=[", "Production/Stable', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved ::", ":: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language ::", "functions and template tags for Django\" def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() readme =", "Developers', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English',", "'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Natural", ":: Apache Software License', 'Natural Language :: English', 'Programming Language :: Python ::", "return open(os.path.join(os.path.dirname(__file__), fname)).read() readme = read('README.rst') changelog = read('CHANGELOG.rst') setup( name='django-absoluteuri', version='1.3.1.dev0', description=__doc__,", "2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7',", "], license=\"Apache 2.0\", zip_safe=True, keywords='django-absoluteuri', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Framework", "Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python", ":: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python ::", "zip_safe=True, keywords='django-absoluteuri', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Framework :: Django', 'Intended", "'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache", "author='Fusionbox, Inc.', author_email='<EMAIL>', url='https://github.com/fusionbox/django-absoluteuri', packages=[package for package in find_packages() if package.startswith('absoluteuri')], install_requires=[ 'Django>=1.11',", "English', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6',", "Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python", "keywords='django-absoluteuri', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Framework :: Django', 'Intended Audience", "def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() readme = read('README.rst') changelog = 
read('CHANGELOG.rst') setup( name='django-absoluteuri',", "setup, find_packages __doc__ = \"Absolute URI functions and template tags for Django\" def", "- Production/Stable', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved", "import os from setuptools import setup, find_packages __doc__ = \"Absolute URI functions and", "'django-setuptest', ], license=\"Apache 2.0\", zip_safe=True, keywords='django-absoluteuri', classifiers=[ 'Development Status :: 5 - Production/Stable',", "if package.startswith('absoluteuri')], install_requires=[ 'Django>=1.11', ], test_suite='setuptest.setuptest.SetupTestSuite', tests_require=[ 'django-setuptest', ], license=\"Apache 2.0\", zip_safe=True, keywords='django-absoluteuri',", "in find_packages() if package.startswith('absoluteuri')], install_requires=[ 'Django>=1.11', ], test_suite='setuptest.setuptest.SetupTestSuite', tests_require=[ 'django-setuptest', ], license=\"Apache 2.0\",", "tags for Django\" def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() readme = read('README.rst') changelog =", "python import os from setuptools import setup, find_packages __doc__ = \"Absolute URI functions", ":: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language ::", ":: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python ::", "'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming", "'\\n\\n' + changelog, author='Fusionbox, Inc.', author_email='<EMAIL>', url='https://github.com/fusionbox/django-absoluteuri', packages=[package for package in find_packages() if", "Inc.', author_email='<EMAIL>', url='https://github.com/fusionbox/django-absoluteuri', packages=[package for package in find_packages() if package.startswith('absoluteuri')], install_requires=[ 'Django>=1.11', ],", "changelog = read('CHANGELOG.rst') setup( name='django-absoluteuri', version='1.3.1.dev0', description=__doc__, long_description=readme + '\\n\\n' + changelog, author='Fusionbox,", "Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License',", "name='django-absoluteuri', version='1.3.1.dev0', description=__doc__, long_description=readme + '\\n\\n' + changelog, author='Fusionbox, Inc.', author_email='<EMAIL>', url='https://github.com/fusionbox/django-absoluteuri', packages=[package", "+ changelog, author='Fusionbox, Inc.', author_email='<EMAIL>', url='https://github.com/fusionbox/django-absoluteuri', packages=[package for package in find_packages() if package.startswith('absoluteuri')],", ":: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language ::", ":: Developers', 'License :: OSI Approved :: Apache Software License', 'Natural Language ::", "author_email='<EMAIL>', url='https://github.com/fusionbox/django-absoluteuri', packages=[package for package in find_packages() if package.startswith('absoluteuri')], install_requires=[ 'Django>=1.11', ], test_suite='setuptest.setuptest.SetupTestSuite',", "package in find_packages() if package.startswith('absoluteuri')], install_requires=[ 'Django>=1.11', ], test_suite='setuptest.setuptest.SetupTestSuite', tests_require=[ 'django-setuptest', ], license=\"Apache", "Status :: 5 - Production/Stable', 'Framework :: Django', 'Intended Audience :: Developers', 'License", "template tags for Django\" def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() readme = read('README.rst') changelog", "read('CHANGELOG.rst') setup( 
name='django-absoluteuri', version='1.3.1.dev0', description=__doc__, long_description=readme + '\\n\\n' + changelog, author='Fusionbox, Inc.', author_email='<EMAIL>',", "read('README.rst') changelog = read('CHANGELOG.rst') setup( name='django-absoluteuri', version='1.3.1.dev0', description=__doc__, long_description=readme + '\\n\\n' + changelog,", "2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3',", "classifiers=[ 'Development Status :: 5 - Production/Stable', 'Framework :: Django', 'Intended Audience ::", "'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming", "license=\"Apache 2.0\", zip_safe=True, keywords='django-absoluteuri', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Framework ::", "'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming", "'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming", "'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ],", "fname)).read() readme = read('README.rst') changelog = read('CHANGELOG.rst') setup( name='django-absoluteuri', version='1.3.1.dev0', description=__doc__, long_description=readme +", ":: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language ::", "version='1.3.1.dev0', description=__doc__, long_description=readme + '\\n\\n' + changelog, author='Fusionbox, Inc.', author_email='<EMAIL>', url='https://github.com/fusionbox/django-absoluteuri', packages=[package for", "\"Absolute URI functions and template tags for Django\" def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read()", "test_suite='setuptest.setuptest.SetupTestSuite', tests_require=[ 'django-setuptest', ], license=\"Apache 2.0\", zip_safe=True, keywords='django-absoluteuri', classifiers=[ 'Development Status :: 5", "description=__doc__, long_description=readme + '\\n\\n' + changelog, author='Fusionbox, Inc.', author_email='<EMAIL>', url='https://github.com/fusionbox/django-absoluteuri', packages=[package for package", "install_requires=[ 'Django>=1.11', ], test_suite='setuptest.setuptest.SetupTestSuite', tests_require=[ 'django-setuptest', ], license=\"Apache 2.0\", zip_safe=True, keywords='django-absoluteuri', classifiers=[ 'Development", ":: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software", "for Django\" def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() readme = read('README.rst') changelog = read('CHANGELOG.rst')", "find_packages __doc__ = \"Absolute URI functions and template tags for Django\" def read(fname):", ":: 5 - Production/Stable', 'Framework :: Django', 'Intended Audience :: Developers', 'License ::", "import setup, find_packages __doc__ = \"Absolute URI functions and template tags for Django\"", "Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], )", "'Django>=1.11', ], test_suite='setuptest.setuptest.SetupTestSuite', tests_require=[ 'django-setuptest', ], license=\"Apache 2.0\", zip_safe=True, keywords='django-absoluteuri', classifiers=[ 'Development Status", "readme = read('README.rst') changelog = read('CHANGELOG.rst') setup( name='django-absoluteuri', version='1.3.1.dev0', description=__doc__, long_description=readme + '\\n\\n'", "for package in find_packages() if package.startswith('absoluteuri')], install_requires=[ 'Django>=1.11', ], test_suite='setuptest.setuptest.SetupTestSuite', tests_require=[ 
'django-setuptest', ],", "2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3',", "Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python", "Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language", "Software License', 'Natural Language :: English', 'Programming Language :: Python :: 2', 'Programming", ":: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python ::", "read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() readme = read('README.rst') changelog = read('CHANGELOG.rst') setup( name='django-absoluteuri', version='1.3.1.dev0',", "open(os.path.join(os.path.dirname(__file__), fname)).read() readme = read('README.rst') changelog = read('CHANGELOG.rst') setup( name='django-absoluteuri', version='1.3.1.dev0', description=__doc__, long_description=readme", "__doc__ = \"Absolute URI functions and template tags for Django\" def read(fname): return", "long_description=readme + '\\n\\n' + changelog, author='Fusionbox, Inc.', author_email='<EMAIL>', url='https://github.com/fusionbox/django-absoluteuri', packages=[package for package in", "#!/usr/bin/env python import os from setuptools import setup, find_packages __doc__ = \"Absolute URI", "'Natural Language :: English', 'Programming Language :: Python :: 2', 'Programming Language ::", "5 - Production/Stable', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI", "= read('CHANGELOG.rst') setup( name='django-absoluteuri', version='1.3.1.dev0', description=__doc__, long_description=readme + '\\n\\n' + changelog, author='Fusionbox, Inc.',", "and template tags for Django\" def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() readme = read('README.rst')", "setuptools import setup, find_packages __doc__ = \"Absolute URI functions and template tags for", "License', 'Natural Language :: English', 'Programming Language :: Python :: 2', 'Programming Language", "Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language", "'Development Status :: 5 - Production/Stable', 'Framework :: Django', 'Intended Audience :: Developers',", "tests_require=[ 'django-setuptest', ], license=\"Apache 2.0\", zip_safe=True, keywords='django-absoluteuri', classifiers=[ 'Development Status :: 5 -", "changelog, author='Fusionbox, Inc.', author_email='<EMAIL>', url='https://github.com/fusionbox/django-absoluteuri', packages=[package for package in find_packages() if package.startswith('absoluteuri')], install_requires=[", "os from setuptools import setup, find_packages __doc__ = \"Absolute URI functions and template", "2.0\", zip_safe=True, keywords='django-absoluteuri', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Framework :: Django',", "packages=[package for package in find_packages() if package.startswith('absoluteuri')], install_requires=[ 'Django>=1.11', ], test_suite='setuptest.setuptest.SetupTestSuite', tests_require=[ 'django-setuptest',", "OSI Approved :: Apache Software License', 'Natural Language :: English', 'Programming Language ::", "find_packages() if package.startswith('absoluteuri')], install_requires=[ 'Django>=1.11', ], test_suite='setuptest.setuptest.SetupTestSuite', tests_require=[ 'django-setuptest', ], license=\"Apache 2.0\", zip_safe=True,", "Django\" def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() readme = read('README.rst') changelog = read('CHANGELOG.rst') setup(", 
"<reponame>bashu/django-absoluteuri #!/usr/bin/env python import os from setuptools import setup, find_packages __doc__ = \"Absolute", "setup( name='django-absoluteuri', version='1.3.1.dev0', description=__doc__, long_description=readme + '\\n\\n' + changelog, author='Fusionbox, Inc.', author_email='<EMAIL>', url='https://github.com/fusionbox/django-absoluteuri',", ":: English', 'Programming Language :: Python :: 2', 'Programming Language :: Python ::", "Approved :: Apache Software License', 'Natural Language :: English', 'Programming Language :: Python", "3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4',", "], test_suite='setuptest.setuptest.SetupTestSuite', tests_require=[ 'django-setuptest', ], license=\"Apache 2.0\", zip_safe=True, keywords='django-absoluteuri', classifiers=[ 'Development Status ::", "'License :: OSI Approved :: Apache Software License', 'Natural Language :: English', 'Programming", ":: OSI Approved :: Apache Software License', 'Natural Language :: English', 'Programming Language", "from setuptools import setup, find_packages __doc__ = \"Absolute URI functions and template tags", "Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language", "= \"Absolute URI functions and template tags for Django\" def read(fname): return open(os.path.join(os.path.dirname(__file__),", "Apache Software License', 'Natural Language :: English', 'Programming Language :: Python :: 2',", ":: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python ::" ]
[ "+= ' ' + str(user) fout_items.write(line + '\\n') for user in sorted(users, key=lambda", "user, friend = [int(x.strip()) for x in line.split('\\t')] if user not in umap", "item_data[imap[item]].append(umap[user]) for line in finn: user, friend = [int(x.strip()) for x in line.split('\\t')]", "in fin: user, item, rating = [int(x.strip()) for x in line.split('\\t')] users[user].append(item) items.add(item)", "for item in items: imap[item] = len(imap) fmap_items.write(\"%d,%d\\n\" % (item, imap[item])) fmap_users.close() fmap_items.close()", "user_data[umap[user]]: line += ' ' + str(item) fout_users.write(line + '\\n') line = str(len(item_data[len(imap)", "(item, imap[item])) fmap_users.close() fmap_items.close() user_data = defaultdict(list) item_data = defaultdict(list) for user in", "import defaultdict path = sys.argv[1] undir = (len(sys.argv) == 3) fin = open(path", "key=lambda x: imap[x]): line = str(len(item_data[imap[item]])) for user in item_data[imap[item]]: line += '", "fout_users.write(line + '\\n') line = str(len(item_data[len(imap) + umap[user]])) for user in item_data[len(imap) +", "imap[item] = len(imap) fmap_items.write(\"%d,%d\\n\" % (item, imap[item])) fmap_users.close() fmap_items.close() user_data = defaultdict(list) item_data", "open(path +'/items_sorec.dat', 'w+') fout_users = open(path +'/users_sorec.dat', 'w+') users = defaultdict(list) items =", "imap = {} fmap_items = open(path +'/item_map_sorec.dat', 'w+') fmap_users = open(path +'/user_map_sorec.dat', 'w+')", "fmap_users.write(\"%d,%d\\n\" % (user, umap[user])) for item in items: imap[item] = len(imap) fmap_items.write(\"%d,%d\\n\" %", "from collections import defaultdict path = sys.argv[1] undir = (len(sys.argv) == 3) fin", "'w+') fout_users = open(path +'/users_sorec.dat', 'w+') users = defaultdict(list) items = set() for", "in umap or friend not in umap: continue user_data[umap[user]].append(len(imap) + umap[friend]) item_data[len(imap) +", "item in items: imap[item] = len(imap) fmap_items.write(\"%d,%d\\n\" % (item, imap[item])) fmap_users.close() fmap_items.close() user_data", "= defaultdict(list) item_data = defaultdict(list) for user in users: for item in users[user]:", "defaultdict(list) item_data = defaultdict(list) for user in users: for item in users[user]: user_data[umap[user]].append(imap[item])", "for user in users: for item in users[user]: user_data[umap[user]].append(imap[item]) item_data[imap[item]].append(umap[user]) for line in", "in finn: user, friend = [int(x.strip()) for x in line.split('\\t')] if user not", "item, rating = [int(x.strip()) for x in line.split('\\t')] users[user].append(item) items.add(item) umap = {}", "umap[friend]].append(umap[user]) # undirected if undir: user_data[umap[friend]].append(len(imap) + umap[user]) item_data[len(imap) + umap[user]].append(umap[friend]) for item", "+ umap[friend]) item_data[len(imap) + umap[friend]].append(umap[user]) # undirected if undir: user_data[umap[friend]].append(len(imap) + umap[user]) item_data[len(imap)", "users: for item in users[user]: user_data[umap[user]].append(imap[item]) item_data[imap[item]].append(umap[user]) for line in finn: user, friend", "'\\n') line = str(len(item_data[len(imap) + umap[user]])) for user in item_data[len(imap) + umap[user]]: line", "items = set() for line in fin: user, item, rating = [int(x.strip()) for", "users[user]: user_data[umap[user]].append(imap[item]) item_data[imap[item]].append(umap[user]) for line in finn: user, friend = [int(x.strip()) for x", "= {} imap = {} fmap_items = open(path 
+'/item_map_sorec.dat', 'w+') fmap_users = open(path", "str(len(item_data[len(imap) + umap[user]])) for user in item_data[len(imap) + umap[user]]: line += ' '", "+'/items_sorec.dat', 'w+') fout_users = open(path +'/users_sorec.dat', 'w+') users = defaultdict(list) items = set()", "for line in fin: user, item, rating = [int(x.strip()) for x in line.split('\\t')]", "if undir: user_data[umap[friend]].append(len(imap) + umap[user]) item_data[len(imap) + umap[user]].append(umap[friend]) for item in sorted(items, key=lambda", "line.split('\\t')] if user not in umap or friend not in umap: continue user_data[umap[user]].append(len(imap)", "= defaultdict(list) items = set() for line in fin: user, item, rating =", "item_data[len(imap) + umap[user]].append(umap[friend]) for item in sorted(items, key=lambda x: imap[x]): line = str(len(item_data[imap[item]]))", "fmap_users.close() fmap_items.close() user_data = defaultdict(list) item_data = defaultdict(list) for user in users: for", "sys from collections import defaultdict path = sys.argv[1] undir = (len(sys.argv) == 3)", "user, item, rating = [int(x.strip()) for x in line.split('\\t')] users[user].append(item) items.add(item) umap =", "user in users: for item in users[user]: user_data[umap[user]].append(imap[item]) item_data[imap[item]].append(umap[user]) for line in finn:", "= len(imap) fmap_items.write(\"%d,%d\\n\" % (item, imap[item])) fmap_users.close() fmap_items.close() user_data = defaultdict(list) item_data =", "= len(umap) fmap_users.write(\"%d,%d\\n\" % (user, umap[user])) for item in items: imap[item] = len(imap)", "open(path +'/user_map_sorec.dat', 'w+') # this should be the same for user in users:", "[int(x.strip()) for x in line.split('\\t')] if user not in umap or friend not", "for user in item_data[len(imap) + umap[user]]: line += ' ' + str(user) fout_items.write(line", "= {} fmap_items = open(path +'/item_map_sorec.dat', 'w+') fmap_users = open(path +'/user_map_sorec.dat', 'w+') #", "= defaultdict(list) for user in users: for item in users[user]: user_data[umap[user]].append(imap[item]) item_data[imap[item]].append(umap[user]) for", "friend not in umap: continue user_data[umap[user]].append(len(imap) + umap[friend]) item_data[len(imap) + umap[friend]].append(umap[user]) # undirected", "in users: for item in users[user]: user_data[umap[user]].append(imap[item]) item_data[imap[item]].append(umap[user]) for line in finn: user,", "str(item) fout_users.write(line + '\\n') line = str(len(item_data[len(imap) + umap[user]])) for user in item_data[len(imap)", "umap[user]) item_data[len(imap) + umap[user]].append(umap[friend]) for item in sorted(items, key=lambda x: imap[x]): line =", "+ str(item) fout_users.write(line + '\\n') line = str(len(item_data[len(imap) + umap[user]])) for user in", "line += ' ' + str(item) fout_users.write(line + '\\n') line = str(len(item_data[len(imap) +", "items.add(item) umap = {} imap = {} fmap_items = open(path +'/item_map_sorec.dat', 'w+') fmap_users", "' ' + str(user) fout_items.write(line + '\\n') for user in sorted(users, key=lambda x:", "= [int(x.strip()) for x in line.split('\\t')] if user not in umap or friend", "item_data[len(imap) + umap[user]]: line += ' ' + str(user) fout_items.write(line + '\\n') fout_items.close()", "x: umap[x]): line = str(len(user_data[umap[user]])) for item in user_data[umap[user]]: line += ' '", "% (item, imap[item])) fmap_users.close() fmap_items.close() user_data = defaultdict(list) item_data = defaultdict(list) for user", "in users[user]: 
user_data[umap[user]].append(imap[item]) item_data[imap[item]].append(umap[user]) for line in finn: user, friend = [int(x.strip()) for", "= set() for line in fin: user, item, rating = [int(x.strip()) for x", "= (len(sys.argv) == 3) fin = open(path +'/train.tsv') finn = open(path +'/network.tsv') fout_items", "the same for user in users: umap[user] = len(umap) fmap_users.write(\"%d,%d\\n\" % (user, umap[user]))", "(len(sys.argv) == 3) fin = open(path +'/train.tsv') finn = open(path +'/network.tsv') fout_items =", "user_data[umap[user]].append(imap[item]) item_data[imap[item]].append(umap[user]) for line in finn: user, friend = [int(x.strip()) for x in", "umap[friend]) item_data[len(imap) + umap[friend]].append(umap[user]) # undirected if undir: user_data[umap[friend]].append(len(imap) + umap[user]) item_data[len(imap) +", "fin: user, item, rating = [int(x.strip()) for x in line.split('\\t')] users[user].append(item) items.add(item) umap", "+'/network.tsv') fout_items = open(path +'/items_sorec.dat', 'w+') fout_users = open(path +'/users_sorec.dat', 'w+') users =", "= str(len(user_data[umap[user]])) for item in user_data[umap[user]]: line += ' ' + str(item) fout_users.write(line", "{} imap = {} fmap_items = open(path +'/item_map_sorec.dat', 'w+') fmap_users = open(path +'/user_map_sorec.dat',", "in user_data[umap[user]]: line += ' ' + str(item) fout_users.write(line + '\\n') line =", "open(path +'/users_sorec.dat', 'w+') users = defaultdict(list) items = set() for line in fin:", "+ umap[user]) item_data[len(imap) + umap[user]].append(umap[friend]) for item in sorted(items, key=lambda x: imap[x]): line", "user in users: umap[user] = len(umap) fmap_users.write(\"%d,%d\\n\" % (user, umap[user])) for item in", "umap or friend not in umap: continue user_data[umap[user]].append(len(imap) + umap[friend]) item_data[len(imap) + umap[friend]].append(umap[user])", "in umap: continue user_data[umap[user]].append(len(imap) + umap[friend]) item_data[len(imap) + umap[friend]].append(umap[user]) # undirected if undir:", "+ umap[friend]].append(umap[user]) # undirected if undir: user_data[umap[friend]].append(len(imap) + umap[user]) item_data[len(imap) + umap[user]].append(umap[friend]) for", "undir: user_data[umap[friend]].append(len(imap) + umap[user]) item_data[len(imap) + umap[user]].append(umap[friend]) for item in sorted(items, key=lambda x:", "line = str(len(item_data[imap[item]])) for user in item_data[imap[item]]: line += ' ' + str(user)", "user_data[umap[friend]].append(len(imap) + umap[user]) item_data[len(imap) + umap[user]].append(umap[friend]) for item in sorted(items, key=lambda x: imap[x]):", "{} fmap_items = open(path +'/item_map_sorec.dat', 'w+') fmap_users = open(path +'/user_map_sorec.dat', 'w+') # this", "[int(x.strip()) for x in line.split('\\t')] users[user].append(item) items.add(item) umap = {} imap = {}", "<reponame>ajbc/spf import sys from collections import defaultdict path = sys.argv[1] undir = (len(sys.argv)", "this should be the same for user in users: umap[user] = len(umap) fmap_users.write(\"%d,%d\\n\"", "x: imap[x]): line = str(len(item_data[imap[item]])) for user in item_data[imap[item]]: line += ' '", "item in users[user]: user_data[umap[user]].append(imap[item]) item_data[imap[item]].append(umap[user]) for line in finn: user, friend = [int(x.strip())", "users: umap[user] = len(umap) fmap_users.write(\"%d,%d\\n\" % (user, umap[user])) for item in items: imap[item]", "len(imap) fmap_items.write(\"%d,%d\\n\" % (item, imap[item])) fmap_users.close() fmap_items.close() 
user_data = defaultdict(list) item_data = defaultdict(list)", "undirected if undir: user_data[umap[friend]].append(len(imap) + umap[user]) item_data[len(imap) + umap[user]].append(umap[friend]) for item in sorted(items,", "item_data[imap[item]]: line += ' ' + str(user) fout_items.write(line + '\\n') for user in", "open(path +'/network.tsv') fout_items = open(path +'/items_sorec.dat', 'w+') fout_users = open(path +'/users_sorec.dat', 'w+') users", "line in fin: user, item, rating = [int(x.strip()) for x in line.split('\\t')] users[user].append(item)", "for item in users[user]: user_data[umap[user]].append(imap[item]) item_data[imap[item]].append(umap[user]) for line in finn: user, friend =", "str(len(item_data[imap[item]])) for user in item_data[imap[item]]: line += ' ' + str(user) fout_items.write(line +", "key=lambda x: umap[x]): line = str(len(user_data[umap[user]])) for item in user_data[umap[user]]: line += '", "for user in users: umap[user] = len(umap) fmap_users.write(\"%d,%d\\n\" % (user, umap[user])) for item", "open(path +'/item_map_sorec.dat', 'w+') fmap_users = open(path +'/user_map_sorec.dat', 'w+') # this should be the", "'\\n') for user in sorted(users, key=lambda x: umap[x]): line = str(len(user_data[umap[user]])) for item", "+= ' ' + str(item) fout_users.write(line + '\\n') line = str(len(item_data[len(imap) + umap[user]]))", "' ' + str(item) fout_users.write(line + '\\n') line = str(len(item_data[len(imap) + umap[user]])) for", "if user not in umap or friend not in umap: continue user_data[umap[user]].append(len(imap) +", "import sys from collections import defaultdict path = sys.argv[1] undir = (len(sys.argv) ==", "= str(len(item_data[len(imap) + umap[user]])) for user in item_data[len(imap) + umap[user]]: line += '", "umap[user]])) for user in item_data[len(imap) + umap[user]]: line += ' ' + str(user)", "' + str(user) fout_items.write(line + '\\n') for user in sorted(users, key=lambda x: umap[x]):", "fmap_users = open(path +'/user_map_sorec.dat', 'w+') # this should be the same for user", "fmap_items.close() user_data = defaultdict(list) item_data = defaultdict(list) for user in users: for item", "imap[item])) fmap_users.close() fmap_items.close() user_data = defaultdict(list) item_data = defaultdict(list) for user in users:", "fmap_items = open(path +'/item_map_sorec.dat', 'w+') fmap_users = open(path +'/user_map_sorec.dat', 'w+') # this should", "len(umap) fmap_users.write(\"%d,%d\\n\" % (user, umap[user])) for item in items: imap[item] = len(imap) fmap_items.write(\"%d,%d\\n\"", "= open(path +'/network.tsv') fout_items = open(path +'/items_sorec.dat', 'w+') fout_users = open(path +'/users_sorec.dat', 'w+')", "str(len(user_data[umap[user]])) for item in user_data[umap[user]]: line += ' ' + str(item) fout_users.write(line +", "finn = open(path +'/network.tsv') fout_items = open(path +'/items_sorec.dat', 'w+') fout_users = open(path +'/users_sorec.dat',", "= open(path +'/user_map_sorec.dat', 'w+') # this should be the same for user in", "rating = [int(x.strip()) for x in line.split('\\t')] users[user].append(item) items.add(item) umap = {} imap", "x in line.split('\\t')] users[user].append(item) items.add(item) umap = {} imap = {} fmap_items =", "item in user_data[umap[user]]: line += ' ' + str(item) fout_users.write(line + '\\n') line", "path = sys.argv[1] undir = (len(sys.argv) == 3) fin = open(path +'/train.tsv') finn", "in item_data[imap[item]]: line += ' ' + str(user) fout_items.write(line + '\\n') for user", "umap: continue user_data[umap[user]].append(len(imap) + 
import sys
from collections import defaultdict

path = sys.argv[1]
undir = (len(sys.argv) == 3)

fin = open(path + '/train.tsv')
finn = open(path + '/network.tsv')
fout_items = open(path + '/items_sorec.dat', 'w+')
fout_users = open(path + '/users_sorec.dat', 'w+')

# Collect each user's rated items and the full item set from the ratings file.
users = defaultdict(list)
items = set()
for line in fin:
    user, item, rating = [int(x.strip()) for x in line.split('\t')]
    users[user].append(item)
    items.add(item)

# Re-index users and items densely and persist both mappings.
umap = {}
imap = {}
fmap_items = open(path + '/item_map_sorec.dat', 'w+')
fmap_users = open(path + '/user_map_sorec.dat', 'w+')
# this should be the same
for user in users:
    umap[user] = len(umap)
    fmap_users.write("%d,%d\n" % (user, umap[user]))
for item in items:
    imap[item] = len(imap)
    fmap_items.write("%d,%d\n" % (item, imap[item]))
fmap_users.close()
fmap_items.close()

user_data = defaultdict(list)
item_data = defaultdict(list)
for user in users:
    for item in users[user]:
        user_data[umap[user]].append(imap[item])
        item_data[imap[item]].append(umap[user])

# Social links are appended as extra columns after the real item indices.
for line in finn:
    user, friend = [int(x.strip()) for x in line.split('\t')]
    if user not in umap or friend not in umap:
        continue
    user_data[umap[user]].append(len(imap) + umap[friend])
    item_data[len(imap) + umap[friend]].append(umap[user])
    # undirected
    if undir:
        user_data[umap[friend]].append(len(imap) + umap[user])
        item_data[len(imap) + umap[user]].append(umap[friend])

for item in sorted(items, key=lambda x: imap[x]):
    line = str(len(item_data[imap[item]]))
    for user in item_data[imap[item]]:
        line += ' ' + str(user)
    fout_items.write(line + '\n')

for user in sorted(users, key=lambda x: umap[x]):
    line = str(len(user_data[umap[user]]))
    for item in user_data[umap[user]]:
        line += ' ' + str(item)
    fout_users.write(line + '\n')
    line = str(len(item_data[len(imap) + umap[user]]))
    for user in item_data[len(imap) + umap[user]]:
        line += ' ' + str(user)
    fout_items.write(line + '\n')

fout_items.close()
fout_users.close()
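The generated .dat files are plain space-separated adjacency lists: every row starts with a neighbour count followed by the neighbour indices, where indices below len(imap) are items and indices at or above it are friend nodes. A minimal sketch of reading one back to sanity-check the format (the './data' directory name is an assumption, not from the source):

# Sketch only — not part of the original script.
with open('./data/users_sorec.dat') as fh:
    for row in fh:
        parts = row.split()
        count, neighbours = int(parts[0]), [int(p) for p in parts[1:]]
        assert count == len(neighbours)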
[ "\"Would not like to reveal\"), ], ) HomeState = models.CharField(max_length=300) Country = models.CharField(max_length=75,", "\"Female\"), (\"N\", \"Non-Binary\"), (\"W\", \"Would not like to reveal\"), ], ) HomeState =", "models.CharField(max_length=300) Department_Head = models.CharField(max_length=300) Department_ContactDetails = models.IntegerField() class Meta: verbose_name_plural = \"Contact Us\"", "models from pytz import country_names as c from datetime import date dict_choices =", "models.DurationField() Event_Descripton = models.TextField(null=False, default=\"Empty Description\") class Meta: verbose_name_plural = \"Events and Notices\"", "from pytz import country_names as c from datetime import date dict_choices = dict(c)", "= models.CharField(max_length=300) Country = models.CharField(max_length=75, choices=_choices) ContactNumber = models.BigIntegerField() class ContactUs(models.Model): Department_Name =", "Us\" class Events(models.Model): Event_Name = models.CharField(max_length=50) Event_Head = models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING) Event_Duration = models.DurationField()", "Events(models.Model): Event_Name = models.CharField(max_length=50) Event_Head = models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING) Event_Duration = models.DurationField() Event_Descripton =", "Department_Head = models.CharField(max_length=300) Department_ContactDetails = models.IntegerField() class Meta: verbose_name_plural = \"Contact Us\" class", "Meta: verbose_name_plural = \"Contact Us\" class Events(models.Model): Event_Name = models.CharField(max_length=50) Event_Head = models.ForeignKey(StudentProfile,", "(\"N\", \"Non-Binary\"), (\"W\", \"Would not like to reveal\"), ], ) HomeState = models.CharField(max_length=300)", "models.CharField(max_length=300) Department_ContactDetails = models.IntegerField() class Meta: verbose_name_plural = \"Contact Us\" class Events(models.Model): Event_Name", "Event_Head = models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING) Event_Duration = models.DurationField() Event_Descripton = models.TextField(null=False, default=\"Empty Description\") class", "\"Male\"), (\"F\", \"Female\"), (\"N\", \"Non-Binary\"), (\"W\", \"Would not like to reveal\"), ], )", "ContactUs(models.Model): Department_Name = models.CharField(max_length=300) Department_Head = models.CharField(max_length=300) Department_ContactDetails = models.IntegerField() class Meta: verbose_name_plural", "= models.CharField(max_length=50) Event_Head = models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING) Event_Duration = models.DurationField() Event_Descripton = models.TextField(null=False, default=\"Empty", "= list(dict_choices.values()) if len(_keys) == len(_value): for i in range(len(_keys)): a = [_keys[i],", "list(dict_choices.values()) if len(_keys) == len(_value): for i in range(len(_keys)): a = [_keys[i], _value[i]]", "\"Non-Binary\"), (\"W\", \"Would not like to reveal\"), ], ) HomeState = models.CharField(max_length=300) Country", "Name = models.CharField(max_length=300) Application_Number = models.BigIntegerField() Date_Of_Birth = models.DateField() Gender = models.CharField( max_length=30,", "import country_names as c from datetime import date dict_choices = dict(c) _choices =", "_choices = [] _keys = list(dict_choices.keys()) _value = list(dict_choices.values()) if len(_keys) == len(_value):", "Date_Of_Birth = models.DateField() Gender = models.CharField( max_length=30, choices=[ (\"M\", \"Male\"), (\"F\", \"Female\"), (\"N\",", "django.db 
import models from pytz import country_names as c from datetime import date", "= models.CharField(max_length=75, choices=_choices) ContactNumber = models.BigIntegerField() class ContactUs(models.Model): Department_Name = models.CharField(max_length=300) Department_Head =", "pytz import country_names as c from datetime import date dict_choices = dict(c) _choices", "= models.CharField( max_length=30, choices=[ (\"M\", \"Male\"), (\"F\", \"Female\"), (\"N\", \"Non-Binary\"), (\"W\", \"Would not", "a = [_keys[i], _value[i]] _choices.append(tuple(a)) class StudentProfile(models.Model): Name = models.CharField(max_length=300) Application_Number = models.BigIntegerField()", "models.CharField(max_length=75, choices=_choices) ContactNumber = models.BigIntegerField() class ContactUs(models.Model): Department_Name = models.CharField(max_length=300) Department_Head = models.CharField(max_length=300)", "= dict(c) _choices = [] _keys = list(dict_choices.keys()) _value = list(dict_choices.values()) if len(_keys)", "[] _keys = list(dict_choices.keys()) _value = list(dict_choices.values()) if len(_keys) == len(_value): for i", "_choices.append(tuple(a)) class StudentProfile(models.Model): Name = models.CharField(max_length=300) Application_Number = models.BigIntegerField() Date_Of_Birth = models.DateField() Gender", "Application_Number = models.BigIntegerField() Date_Of_Birth = models.DateField() Gender = models.CharField( max_length=30, choices=[ (\"M\", \"Male\"),", "for i in range(len(_keys)): a = [_keys[i], _value[i]] _choices.append(tuple(a)) class StudentProfile(models.Model): Name =", "like to reveal\"), ], ) HomeState = models.CharField(max_length=300) Country = models.CharField(max_length=75, choices=_choices) ContactNumber", "country_names as c from datetime import date dict_choices = dict(c) _choices = []", "models.BigIntegerField() class ContactUs(models.Model): Department_Name = models.CharField(max_length=300) Department_Head = models.CharField(max_length=300) Department_ContactDetails = models.IntegerField() class", "import models from pytz import country_names as c from datetime import date dict_choices", "(\"W\", \"Would not like to reveal\"), ], ) HomeState = models.CharField(max_length=300) Country =", ") HomeState = models.CharField(max_length=300) Country = models.CharField(max_length=75, choices=_choices) ContactNumber = models.BigIntegerField() class ContactUs(models.Model):", "= list(dict_choices.keys()) _value = list(dict_choices.values()) if len(_keys) == len(_value): for i in range(len(_keys)):", "Country = models.CharField(max_length=75, choices=_choices) ContactNumber = models.BigIntegerField() class ContactUs(models.Model): Department_Name = models.CharField(max_length=300) Department_Head", "= models.CharField(max_length=300) Department_Head = models.CharField(max_length=300) Department_ContactDetails = models.IntegerField() class Meta: verbose_name_plural = \"Contact", "class Events(models.Model): Event_Name = models.CharField(max_length=50) Event_Head = models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING) Event_Duration = models.DurationField() Event_Descripton", "models.CharField(max_length=300) Application_Number = models.BigIntegerField() Date_Of_Birth = models.DateField() Gender = models.CharField( max_length=30, choices=[ (\"M\",", "StudentProfile(models.Model): Name = models.CharField(max_length=300) Application_Number = models.BigIntegerField() Date_Of_Birth = models.DateField() Gender = models.CharField(", "to reveal\"), ], ) HomeState = 
models.CharField(max_length=300) Country = models.CharField(max_length=75, choices=_choices) ContactNumber =", "len(_keys) == len(_value): for i in range(len(_keys)): a = [_keys[i], _value[i]] _choices.append(tuple(a)) class", "class ContactUs(models.Model): Department_Name = models.CharField(max_length=300) Department_Head = models.CharField(max_length=300) Department_ContactDetails = models.IntegerField() class Meta:", "list(dict_choices.keys()) _value = list(dict_choices.values()) if len(_keys) == len(_value): for i in range(len(_keys)): a", "from django.db import models from pytz import country_names as c from datetime import", "if len(_keys) == len(_value): for i in range(len(_keys)): a = [_keys[i], _value[i]] _choices.append(tuple(a))", "ContactNumber = models.BigIntegerField() class ContactUs(models.Model): Department_Name = models.CharField(max_length=300) Department_Head = models.CharField(max_length=300) Department_ContactDetails =", "models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING) Event_Duration = models.DurationField() Event_Descripton = models.TextField(null=False, default=\"Empty Description\") class Meta: verbose_name_plural", "= models.CharField(max_length=300) Application_Number = models.BigIntegerField() Date_Of_Birth = models.DateField() Gender = models.CharField( max_length=30, choices=[", "= \"Contact Us\" class Events(models.Model): Event_Name = models.CharField(max_length=50) Event_Head = models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING) Event_Duration", "\"Contact Us\" class Events(models.Model): Event_Name = models.CharField(max_length=50) Event_Head = models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING) Event_Duration =", "_value = list(dict_choices.values()) if len(_keys) == len(_value): for i in range(len(_keys)): a =", "class StudentProfile(models.Model): Name = models.CharField(max_length=300) Application_Number = models.BigIntegerField() Date_Of_Birth = models.DateField() Gender =", "models.CharField(max_length=50) Event_Head = models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING) Event_Duration = models.DurationField() Event_Descripton = models.TextField(null=False, default=\"Empty Description\")", "in range(len(_keys)): a = [_keys[i], _value[i]] _choices.append(tuple(a)) class StudentProfile(models.Model): Name = models.CharField(max_length=300) Application_Number", "Department_ContactDetails = models.IntegerField() class Meta: verbose_name_plural = \"Contact Us\" class Events(models.Model): Event_Name =", "= models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING) Event_Duration = models.DurationField() Event_Descripton = models.TextField(null=False, default=\"Empty Description\") class Meta:", "i in range(len(_keys)): a = [_keys[i], _value[i]] _choices.append(tuple(a)) class StudentProfile(models.Model): Name = models.CharField(max_length=300)", "models.DateField() Gender = models.CharField( max_length=30, choices=[ (\"M\", \"Male\"), (\"F\", \"Female\"), (\"N\", \"Non-Binary\"), (\"W\",", "not like to reveal\"), ], ) HomeState = models.CharField(max_length=300) Country = models.CharField(max_length=75, choices=_choices)", "Event_Name = models.CharField(max_length=50) Event_Head = models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING) Event_Duration = models.DurationField() Event_Descripton = models.TextField(null=False,", "verbose_name_plural = \"Contact Us\" class Events(models.Model): Event_Name = models.CharField(max_length=50) Event_Head = models.ForeignKey(StudentProfile, 
on_delete=models.DO_NOTHING)", "_keys = list(dict_choices.keys()) _value = list(dict_choices.values()) if len(_keys) == len(_value): for i in", "_value[i]] _choices.append(tuple(a)) class StudentProfile(models.Model): Name = models.CharField(max_length=300) Application_Number = models.BigIntegerField() Date_Of_Birth = models.DateField()", "= models.IntegerField() class Meta: verbose_name_plural = \"Contact Us\" class Events(models.Model): Event_Name = models.CharField(max_length=50)", "class Meta: verbose_name_plural = \"Contact Us\" class Events(models.Model): Event_Name = models.CharField(max_length=50) Event_Head =", "len(_value): for i in range(len(_keys)): a = [_keys[i], _value[i]] _choices.append(tuple(a)) class StudentProfile(models.Model): Name", "HomeState = models.CharField(max_length=300) Country = models.CharField(max_length=75, choices=_choices) ContactNumber = models.BigIntegerField() class ContactUs(models.Model): Department_Name", "choices=_choices) ContactNumber = models.BigIntegerField() class ContactUs(models.Model): Department_Name = models.CharField(max_length=300) Department_Head = models.CharField(max_length=300) Department_ContactDetails", "= models.DurationField() Event_Descripton = models.TextField(null=False, default=\"Empty Description\") class Meta: verbose_name_plural = \"Events and", "], ) HomeState = models.CharField(max_length=300) Country = models.CharField(max_length=75, choices=_choices) ContactNumber = models.BigIntegerField() class", "c from datetime import date dict_choices = dict(c) _choices = [] _keys =", "[_keys[i], _value[i]] _choices.append(tuple(a)) class StudentProfile(models.Model): Name = models.CharField(max_length=300) Application_Number = models.BigIntegerField() Date_Of_Birth =", "== len(_value): for i in range(len(_keys)): a = [_keys[i], _value[i]] _choices.append(tuple(a)) class StudentProfile(models.Model):", "choices=[ (\"M\", \"Male\"), (\"F\", \"Female\"), (\"N\", \"Non-Binary\"), (\"W\", \"Would not like to reveal\"),", "Gender = models.CharField( max_length=30, choices=[ (\"M\", \"Male\"), (\"F\", \"Female\"), (\"N\", \"Non-Binary\"), (\"W\", \"Would", "reveal\"), ], ) HomeState = models.CharField(max_length=300) Country = models.CharField(max_length=75, choices=_choices) ContactNumber = models.BigIntegerField()", "Department_Name = models.CharField(max_length=300) Department_Head = models.CharField(max_length=300) Department_ContactDetails = models.IntegerField() class Meta: verbose_name_plural =", "datetime import date dict_choices = dict(c) _choices = [] _keys = list(dict_choices.keys()) _value", "on_delete=models.DO_NOTHING) Event_Duration = models.DurationField() Event_Descripton = models.TextField(null=False, default=\"Empty Description\") class Meta: verbose_name_plural =", "date dict_choices = dict(c) _choices = [] _keys = list(dict_choices.keys()) _value = list(dict_choices.values())", "= [_keys[i], _value[i]] _choices.append(tuple(a)) class StudentProfile(models.Model): Name = models.CharField(max_length=300) Application_Number = models.BigIntegerField() Date_Of_Birth", "models.IntegerField() class Meta: verbose_name_plural = \"Contact Us\" class Events(models.Model): Event_Name = models.CharField(max_length=50) Event_Head", "Event_Duration = models.DurationField() Event_Descripton = models.TextField(null=False, default=\"Empty Description\") class Meta: verbose_name_plural = \"Events", "from datetime import date dict_choices = dict(c) _choices = [] _keys = list(dict_choices.keys())", "max_length=30, choices=[ (\"M\", 
\"Male\"), (\"F\", \"Female\"), (\"N\", \"Non-Binary\"), (\"W\", \"Would not like to", "= models.DateField() Gender = models.CharField( max_length=30, choices=[ (\"M\", \"Male\"), (\"F\", \"Female\"), (\"N\", \"Non-Binary\"),", "models.BigIntegerField() Date_Of_Birth = models.DateField() Gender = models.CharField( max_length=30, choices=[ (\"M\", \"Male\"), (\"F\", \"Female\"),", "import date dict_choices = dict(c) _choices = [] _keys = list(dict_choices.keys()) _value =", "= models.BigIntegerField() class ContactUs(models.Model): Department_Name = models.CharField(max_length=300) Department_Head = models.CharField(max_length=300) Department_ContactDetails = models.IntegerField()", "(\"M\", \"Male\"), (\"F\", \"Female\"), (\"N\", \"Non-Binary\"), (\"W\", \"Would not like to reveal\"), ],", "= models.CharField(max_length=300) Department_ContactDetails = models.IntegerField() class Meta: verbose_name_plural = \"Contact Us\" class Events(models.Model):", "models.CharField(max_length=300) Country = models.CharField(max_length=75, choices=_choices) ContactNumber = models.BigIntegerField() class ContactUs(models.Model): Department_Name = models.CharField(max_length=300)", "dict_choices = dict(c) _choices = [] _keys = list(dict_choices.keys()) _value = list(dict_choices.values()) if", "dict(c) _choices = [] _keys = list(dict_choices.keys()) _value = list(dict_choices.values()) if len(_keys) ==", "= models.BigIntegerField() Date_Of_Birth = models.DateField() Gender = models.CharField( max_length=30, choices=[ (\"M\", \"Male\"), (\"F\",", "range(len(_keys)): a = [_keys[i], _value[i]] _choices.append(tuple(a)) class StudentProfile(models.Model): Name = models.CharField(max_length=300) Application_Number =", "models.CharField( max_length=30, choices=[ (\"M\", \"Male\"), (\"F\", \"Female\"), (\"N\", \"Non-Binary\"), (\"W\", \"Would not like", "(\"F\", \"Female\"), (\"N\", \"Non-Binary\"), (\"W\", \"Would not like to reveal\"), ], ) HomeState", "= [] _keys = list(dict_choices.keys()) _value = list(dict_choices.values()) if len(_keys) == len(_value): for", "as c from datetime import date dict_choices = dict(c) _choices = [] _keys" ]
[ "= content_type.get_object_for_this_type(pk=object_id) like, is_created = Like.objects.get_or_create( content_type=ContentType.objects.get_for_model(obj), object_id=obj.pk, user=request.user) if is_created: logger.info(\"like_created\", content_type_id=content_type.pk,", "as a favorite for the current user \"\"\" result = { \"success\": False,", "@never_cache @csrf_exempt def json_set_like(request, content_type_id, object_id): \"\"\" Sets the object as a favorite", "django.contrib.contenttypes.models import ContentType from django.http import JsonResponse from django.views.decorators.cache import never_cache from django.views.decorators.csrf", "import liked_count logger = structlog.get_logger(\"django_structlog\") @never_cache @csrf_exempt def json_set_like(request, content_type_id, object_id): \"\"\" Sets", "object as a favorite for the current user \"\"\" result = { \"success\":", "<reponame>PacktPublishing/Django-3-Web-Development-Cookbook import structlog from django.contrib.contenttypes.models import ContentType from django.http import JsonResponse from django.views.decorators.cache", "content_type_id=content_type.pk, object_id=obj.pk) else: like.delete() logger.info(\"like_deleted\", content_type_id=content_type.pk, object_id=obj.pk) result = { \"success\": True, \"action\":", "request.user.is_authenticated and request.method == \"POST\": content_type = ContentType.objects.get(id=content_type_id) obj = content_type.get_object_for_this_type(pk=object_id) like, is_created", "\"POST\": content_type = ContentType.objects.get(id=content_type_id) obj = content_type.get_object_for_this_type(pk=object_id) like, is_created = Like.objects.get_or_create( content_type=ContentType.objects.get_for_model(obj), object_id=obj.pk,", "the object as a favorite for the current user \"\"\" result = {", "object_id=obj.pk) result = { \"success\": True, \"action\": \"add\" if is_created else \"remove\", \"count\":", "= { \"success\": False, } if request.user.is_authenticated and request.method == \"POST\": content_type =", "logger.info(\"like_deleted\", content_type_id=content_type.pk, object_id=obj.pk) result = { \"success\": True, \"action\": \"add\" if is_created else", "from django.contrib.contenttypes.models import ContentType from django.http import JsonResponse from django.views.decorators.cache import never_cache from", "json_set_like(request, content_type_id, object_id): \"\"\" Sets the object as a favorite for the current", "\"\"\" result = { \"success\": False, } if request.user.is_authenticated and request.method == \"POST\":", "favorite for the current user \"\"\" result = { \"success\": False, } if", "like, is_created = Like.objects.get_or_create( content_type=ContentType.objects.get_for_model(obj), object_id=obj.pk, user=request.user) if is_created: logger.info(\"like_created\", content_type_id=content_type.pk, object_id=obj.pk) else:", "result = { \"success\": True, \"action\": \"add\" if is_created else \"remove\", \"count\": liked_count(obj),", "from django.http import JsonResponse from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_exempt from", "import csrf_exempt from .models import Like from .templatetags.likes_tags import liked_count logger = structlog.get_logger(\"django_structlog\")", "obj = content_type.get_object_for_this_type(pk=object_id) like, is_created = Like.objects.get_or_create( content_type=ContentType.objects.get_for_model(obj), object_id=obj.pk, user=request.user) if is_created: logger.info(\"like_created\",", 
"\"success\": False, } if request.user.is_authenticated and request.method == \"POST\": content_type = ContentType.objects.get(id=content_type_id) obj", "result = { \"success\": False, } if request.user.is_authenticated and request.method == \"POST\": content_type", "django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_exempt from .models import Like from .templatetags.likes_tags", "{ \"success\": True, \"action\": \"add\" if is_created else \"remove\", \"count\": liked_count(obj), } return", "structlog from django.contrib.contenttypes.models import ContentType from django.http import JsonResponse from django.views.decorators.cache import never_cache", "== \"POST\": content_type = ContentType.objects.get(id=content_type_id) obj = content_type.get_object_for_this_type(pk=object_id) like, is_created = Like.objects.get_or_create( content_type=ContentType.objects.get_for_model(obj),", "is_created = Like.objects.get_or_create( content_type=ContentType.objects.get_for_model(obj), object_id=obj.pk, user=request.user) if is_created: logger.info(\"like_created\", content_type_id=content_type.pk, object_id=obj.pk) else: like.delete()", "object_id): \"\"\" Sets the object as a favorite for the current user \"\"\"", "from django.views.decorators.csrf import csrf_exempt from .models import Like from .templatetags.likes_tags import liked_count logger", "if is_created: logger.info(\"like_created\", content_type_id=content_type.pk, object_id=obj.pk) else: like.delete() logger.info(\"like_deleted\", content_type_id=content_type.pk, object_id=obj.pk) result = {", "is_created: logger.info(\"like_created\", content_type_id=content_type.pk, object_id=obj.pk) else: like.delete() logger.info(\"like_deleted\", content_type_id=content_type.pk, object_id=obj.pk) result = { \"success\":", "like.delete() logger.info(\"like_deleted\", content_type_id=content_type.pk, object_id=obj.pk) result = { \"success\": True, \"action\": \"add\" if is_created", "import Like from .templatetags.likes_tags import liked_count logger = structlog.get_logger(\"django_structlog\") @never_cache @csrf_exempt def json_set_like(request,", "import JsonResponse from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_exempt from .models import", "= ContentType.objects.get(id=content_type_id) obj = content_type.get_object_for_this_type(pk=object_id) like, is_created = Like.objects.get_or_create( content_type=ContentType.objects.get_for_model(obj), object_id=obj.pk, user=request.user) if", "content_type_id, object_id): \"\"\" Sets the object as a favorite for the current user", "= Like.objects.get_or_create( content_type=ContentType.objects.get_for_model(obj), object_id=obj.pk, user=request.user) if is_created: logger.info(\"like_created\", content_type_id=content_type.pk, object_id=obj.pk) else: like.delete() logger.info(\"like_deleted\",", "\"\"\" Sets the object as a favorite for the current user \"\"\" result", "import structlog from django.contrib.contenttypes.models import ContentType from django.http import JsonResponse from django.views.decorators.cache import", "import never_cache from django.views.decorators.csrf import csrf_exempt from .models import Like from .templatetags.likes_tags import", "from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_exempt from .models import Like from", "from .models import Like from .templatetags.likes_tags import liked_count logger = 
structlog.get_logger(\"django_structlog\") @never_cache @csrf_exempt", "def json_set_like(request, content_type_id, object_id): \"\"\" Sets the object as a favorite for the", "{ \"success\": False, } if request.user.is_authenticated and request.method == \"POST\": content_type = ContentType.objects.get(id=content_type_id)", "content_type=ContentType.objects.get_for_model(obj), object_id=obj.pk, user=request.user) if is_created: logger.info(\"like_created\", content_type_id=content_type.pk, object_id=obj.pk) else: like.delete() logger.info(\"like_deleted\", content_type_id=content_type.pk, object_id=obj.pk)", "Sets the object as a favorite for the current user \"\"\" result =", "= { \"success\": True, \"action\": \"add\" if is_created else \"remove\", \"count\": liked_count(obj), }", "and request.method == \"POST\": content_type = ContentType.objects.get(id=content_type_id) obj = content_type.get_object_for_this_type(pk=object_id) like, is_created =", "logger = structlog.get_logger(\"django_structlog\") @never_cache @csrf_exempt def json_set_like(request, content_type_id, object_id): \"\"\" Sets the object", "logger.info(\"like_created\", content_type_id=content_type.pk, object_id=obj.pk) else: like.delete() logger.info(\"like_deleted\", content_type_id=content_type.pk, object_id=obj.pk) result = { \"success\": True,", "= structlog.get_logger(\"django_structlog\") @never_cache @csrf_exempt def json_set_like(request, content_type_id, object_id): \"\"\" Sets the object as", "} if request.user.is_authenticated and request.method == \"POST\": content_type = ContentType.objects.get(id=content_type_id) obj = content_type.get_object_for_this_type(pk=object_id)", "content_type.get_object_for_this_type(pk=object_id) like, is_created = Like.objects.get_or_create( content_type=ContentType.objects.get_for_model(obj), object_id=obj.pk, user=request.user) if is_created: logger.info(\"like_created\", content_type_id=content_type.pk, object_id=obj.pk)", "content_type_id=content_type.pk, object_id=obj.pk) result = { \"success\": True, \"action\": \"add\" if is_created else \"remove\",", "never_cache from django.views.decorators.csrf import csrf_exempt from .models import Like from .templatetags.likes_tags import liked_count", "content_type = ContentType.objects.get(id=content_type_id) obj = content_type.get_object_for_this_type(pk=object_id) like, is_created = Like.objects.get_or_create( content_type=ContentType.objects.get_for_model(obj), object_id=obj.pk, user=request.user)", "for the current user \"\"\" result = { \"success\": False, } if request.user.is_authenticated", "request.method == \"POST\": content_type = ContentType.objects.get(id=content_type_id) obj = content_type.get_object_for_this_type(pk=object_id) like, is_created = Like.objects.get_or_create(", "False, } if request.user.is_authenticated and request.method == \"POST\": content_type = ContentType.objects.get(id=content_type_id) obj =", "liked_count logger = structlog.get_logger(\"django_structlog\") @never_cache @csrf_exempt def json_set_like(request, content_type_id, object_id): \"\"\" Sets the", "\"success\": True, \"action\": \"add\" if is_created else \"remove\", \"count\": liked_count(obj), } return JsonResponse(result)", "structlog.get_logger(\"django_structlog\") @never_cache @csrf_exempt def json_set_like(request, content_type_id, object_id): \"\"\" Sets the object as a", "user \"\"\" result = { \"success\": False, } if request.user.is_authenticated and request.method ==", "object_id=obj.pk, user=request.user) if is_created: 
logger.info(\"like_created\", content_type_id=content_type.pk, object_id=obj.pk) else: like.delete() logger.info(\"like_deleted\", content_type_id=content_type.pk, object_id=obj.pk) result", ".models import Like from .templatetags.likes_tags import liked_count logger = structlog.get_logger(\"django_structlog\") @never_cache @csrf_exempt def", "django.http import JsonResponse from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_exempt from .models", "else: like.delete() logger.info(\"like_deleted\", content_type_id=content_type.pk, object_id=obj.pk) result = { \"success\": True, \"action\": \"add\" if", "ContentType.objects.get(id=content_type_id) obj = content_type.get_object_for_this_type(pk=object_id) like, is_created = Like.objects.get_or_create( content_type=ContentType.objects.get_for_model(obj), object_id=obj.pk, user=request.user) if is_created:", "if request.user.is_authenticated and request.method == \"POST\": content_type = ContentType.objects.get(id=content_type_id) obj = content_type.get_object_for_this_type(pk=object_id) like,", "import ContentType from django.http import JsonResponse from django.views.decorators.cache import never_cache from django.views.decorators.csrf import", "the current user \"\"\" result = { \"success\": False, } if request.user.is_authenticated and", "current user \"\"\" result = { \"success\": False, } if request.user.is_authenticated and request.method", "from .templatetags.likes_tags import liked_count logger = structlog.get_logger(\"django_structlog\") @never_cache @csrf_exempt def json_set_like(request, content_type_id, object_id):", ".templatetags.likes_tags import liked_count logger = structlog.get_logger(\"django_structlog\") @never_cache @csrf_exempt def json_set_like(request, content_type_id, object_id): \"\"\"", "object_id=obj.pk) else: like.delete() logger.info(\"like_deleted\", content_type_id=content_type.pk, object_id=obj.pk) result = { \"success\": True, \"action\": \"add\"", "ContentType from django.http import JsonResponse from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_exempt", "@csrf_exempt def json_set_like(request, content_type_id, object_id): \"\"\" Sets the object as a favorite for", "Like.objects.get_or_create( content_type=ContentType.objects.get_for_model(obj), object_id=obj.pk, user=request.user) if is_created: logger.info(\"like_created\", content_type_id=content_type.pk, object_id=obj.pk) else: like.delete() logger.info(\"like_deleted\", content_type_id=content_type.pk,", "JsonResponse from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_exempt from .models import Like", "csrf_exempt from .models import Like from .templatetags.likes_tags import liked_count logger = structlog.get_logger(\"django_structlog\") @never_cache", "Like from .templatetags.likes_tags import liked_count logger = structlog.get_logger(\"django_structlog\") @never_cache @csrf_exempt def json_set_like(request, content_type_id,", "django.views.decorators.csrf import csrf_exempt from .models import Like from .templatetags.likes_tags import liked_count logger =", "user=request.user) if is_created: logger.info(\"like_created\", content_type_id=content_type.pk, object_id=obj.pk) else: like.delete() logger.info(\"like_deleted\", content_type_id=content_type.pk, object_id=obj.pk) result =", "a favorite for the current user \"\"\" result = { \"success\": False, }" ]
[ "model_name='usuarios', name='FuncionE', field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones desempeñadas'), ), migrations.AddField( model_name='usuarios', name='Instituto', field=models.CharField(max_length=30, null=True, verbose_name='Inistitución o", "Distinción'), ), migrations.AddField( model_name='usuarios', name='NombTiLo', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr', verbose_name='Tipo de Logro'), ), migrations.AddField(", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('parametros', '0002_empleos'), ('mainUser',", "verbose_name='Fecha de culminación de logro'), ), migrations.AddField( model_name='usuarios', name='FuncionE', field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones desempeñadas'), ),", "model_name='educacion', name='TipoEstu', ), migrations.RemoveField( model_name='experiencia', name='CargExpe', ), migrations.DeleteModel( name='Habilidades', ), migrations.RemoveField( model_name='logros', name='NombTiLo',", "), migrations.AddField( model_name='usuarios', name='NiveHabil', field=models.CharField(default='', max_length=20, null=True, verbose_name='Nivel de Habilidad: '), ), migrations.AddField(", "migrations.AddField( model_name='usuarios', name='DescLogr', field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción o caracteristicas del logro'), ), migrations.AddField( model_name='usuarios', name='EmprExpe',", "field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu', verbose_name='Tipo de educación'), ), migrations.AddField( model_name='usuarios', name='TituloEst', field=models.CharField(max_length=250, null=True,", "null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu', verbose_name='Tipo de educación'), ), migrations.AddField( model_name='usuarios', name='TituloEst', field=models.CharField(max_length=250, null=True, verbose_name='Titulo", "Logro'), ), migrations.AddField( model_name='usuarios', name='TipoEstu', field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu', verbose_name='Tipo de educación'), ),", "= [ ('parametros', '0002_empleos'), ('mainUser', '0028_auto_20190624_1815'), ] operations = [ migrations.RemoveField( model_name='educacion', name='TipoEstu',", "by Django 2.2.2 on 2019-06-25 16:20 import ckeditor.fields from django.db import migrations, models", "field=ckeditor.fields.RichTextField(null=True, verbose_name='Logros Alcanzados'), ), migrations.AddField( model_name='usuarios', name='NiveHabil', field=models.CharField(default='', max_length=20, null=True, verbose_name='Nivel de Habilidad:", "), migrations.AddField( model_name='usuarios', name='FechLogr', field=models.DateField(null=True, verbose_name='Fecha de culminación de logro'), ), migrations.AddField( model_name='usuarios',", "field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción o caracteristicas del logro'), ), migrations.AddField( model_name='usuarios', name='EmprExpe', field=models.CharField(max_length=150, null=True, verbose_name='Empresa'),", "migrations.AddField( model_name='usuarios', name='NiveHabil', field=models.CharField(default='', max_length=20, null=True, verbose_name='Nivel de Habilidad: '), ), migrations.AddField( model_name='usuarios',", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): 
dependencies = [ ('parametros', '0002_empleos'), ('mainUser', '0028_auto_20190624_1815'),", "name='NiveHabil', field=models.CharField(default='', max_length=20, null=True, verbose_name='Nivel de Habilidad: '), ), migrations.AddField( model_name='usuarios', name='NombHabil', field=models.CharField(default='',", "Habilidad: '), ), migrations.AddField( model_name='usuarios', name='NombHabil', field=models.CharField(default='', max_length=100, null=True, verbose_name='Habilidad'), ), migrations.AddField( model_name='usuarios',", "migrations.AddField( model_name='usuarios', name='NombHabil', field=models.CharField(default='', max_length=100, null=True, verbose_name='Habilidad'), ), migrations.AddField( model_name='usuarios', name='NombLogr', field=models.CharField(max_length=100, null=True,", "), migrations.AddField( model_name='usuarios', name='TituloEst', field=models.CharField(max_length=250, null=True, verbose_name='Titulo de estudio'), ), migrations.DeleteModel( name='Educacion', ),", "model_name='experiencia', name='CargExpe', ), migrations.DeleteModel( name='Habilidades', ), migrations.RemoveField( model_name='logros', name='NombTiLo', ), migrations.AddField( model_name='usuarios', name='CargExpe',", "name='FuncionE', field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones desempeñadas'), ), migrations.AddField( model_name='usuarios', name='Instituto', field=models.CharField(max_length=30, null=True, verbose_name='Inistitución o Academia'),", "logro'), ), migrations.AddField( model_name='usuarios', name='FuncionE', field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones desempeñadas'), ), migrations.AddField( model_name='usuarios', name='Instituto', field=models.CharField(max_length=30,", "), migrations.AddField( model_name='usuarios', name='CargExpe', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos', verbose_name='Cargo Ocupado'), ), migrations.AddField( model_name='usuarios', name='DescLogr',", "name='LogrExpe', field=ckeditor.fields.RichTextField(null=True, verbose_name='Logros Alcanzados'), ), migrations.AddField( model_name='usuarios', name='NiveHabil', field=models.CharField(default='', max_length=20, null=True, verbose_name='Nivel de", "), migrations.RemoveField( model_name='experiencia', name='CargExpe', ), migrations.DeleteModel( name='Habilidades', ), migrations.RemoveField( model_name='logros', name='NombTiLo', ), migrations.AddField(", "), migrations.AddField( model_name='usuarios', name='NombTiLo', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr', verbose_name='Tipo de Logro'), ), migrations.AddField( model_name='usuarios',", "model_name='usuarios', name='NiveHabil', field=models.CharField(default='', max_length=20, null=True, verbose_name='Nivel de Habilidad: '), ), migrations.AddField( model_name='usuarios', name='NombHabil',", "de estudio'), ), migrations.DeleteModel( name='Educacion', ), migrations.DeleteModel( name='Experiencia', ), migrations.DeleteModel( name='Logros', ), ]", "field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones desempeñadas'), ), migrations.AddField( model_name='usuarios', name='Instituto', field=models.CharField(max_length=30, null=True, verbose_name='Inistitución o Academia'), ),", "field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos', verbose_name='Cargo Ocupado'), ), migrations.AddField( model_name='usuarios', name='DescLogr', 
field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción o caracteristicas", "migrations.AddField( model_name='usuarios', name='EmprExpe', field=models.CharField(max_length=150, null=True, verbose_name='Empresa'), ), migrations.AddField( model_name='usuarios', name='FechLogr', field=models.DateField(null=True, verbose_name='Fecha de", "verbose_name='Habilidad'), ), migrations.AddField( model_name='usuarios', name='NombLogr', field=models.CharField(max_length=100, null=True, verbose_name='Logro o Distinción'), ), migrations.AddField( model_name='usuarios',", "), migrations.AddField( model_name='usuarios', name='NombLogr', field=models.CharField(max_length=100, null=True, verbose_name='Logro o Distinción'), ), migrations.AddField( model_name='usuarios', name='NombTiLo',", "), migrations.DeleteModel( name='Habilidades', ), migrations.RemoveField( model_name='logros', name='NombTiLo', ), migrations.AddField( model_name='usuarios', name='CargExpe', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "<gh_stars>0 # Generated by Django 2.2.2 on 2019-06-25 16:20 import ckeditor.fields from django.db", "verbose_name='Logro o Distinción'), ), migrations.AddField( model_name='usuarios', name='NombTiLo', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr', verbose_name='Tipo de Logro'),", "verbose_name='Logros Alcanzados'), ), migrations.AddField( model_name='usuarios', name='NiveHabil', field=models.CharField(default='', max_length=20, null=True, verbose_name='Nivel de Habilidad: '),", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('parametros',", "model_name='usuarios', name='TipoEstu', field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu', verbose_name='Tipo de educación'), ), migrations.AddField( model_name='usuarios', name='TituloEst',", "model_name='logros', name='NombTiLo', ), migrations.AddField( model_name='usuarios', name='CargExpe', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos', verbose_name='Cargo Ocupado'), ), migrations.AddField(", "model_name='usuarios', name='NombHabil', field=models.CharField(default='', max_length=100, null=True, verbose_name='Habilidad'), ), migrations.AddField( model_name='usuarios', name='NombLogr', field=models.CharField(max_length=100, null=True, verbose_name='Logro", "field=models.CharField(default='', max_length=20, null=True, verbose_name='Nivel de Habilidad: '), ), migrations.AddField( model_name='usuarios', name='NombHabil', field=models.CharField(default='', max_length=100,", "field=models.CharField(max_length=30, null=True, verbose_name='Inistitución o Academia'), ), migrations.AddField( model_name='usuarios', name='LogrExpe', field=ckeditor.fields.RichTextField(null=True, verbose_name='Logros Alcanzados'), ),", "name='TipoEstu', field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu', verbose_name='Tipo de educación'), ), migrations.AddField( model_name='usuarios', name='TituloEst', field=models.CharField(max_length=250,", "migrations.AddField( model_name='usuarios', name='Instituto', field=models.CharField(max_length=30, null=True, verbose_name='Inistitución o Academia'), ), migrations.AddField( model_name='usuarios', name='LogrExpe', field=ckeditor.fields.RichTextField(null=True,", 
"migrations.AddField( model_name='usuarios', name='TipoEstu', field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu', verbose_name='Tipo de educación'), ), migrations.AddField( model_name='usuarios',", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('parametros', '0002_empleos'), ('mainUser', '0028_auto_20190624_1815'), ] operations =", "on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr', verbose_name='Tipo de Logro'), ), migrations.AddField( model_name='usuarios', name='TipoEstu', field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu',", "# Generated by Django 2.2.2 on 2019-06-25 16:20 import ckeditor.fields from django.db import", "on 2019-06-25 16:20 import ckeditor.fields from django.db import migrations, models import django.db.models.deletion class", "model_name='usuarios', name='DescLogr', field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción o caracteristicas del logro'), ), migrations.AddField( model_name='usuarios', name='EmprExpe', field=models.CharField(max_length=150,", "culminación de logro'), ), migrations.AddField( model_name='usuarios', name='FuncionE', field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones desempeñadas'), ), migrations.AddField( model_name='usuarios',", "verbose_name='Titulo de estudio'), ), migrations.DeleteModel( name='Educacion', ), migrations.DeleteModel( name='Experiencia', ), migrations.DeleteModel( name='Logros', ),", "name='NombHabil', field=models.CharField(default='', max_length=100, null=True, verbose_name='Habilidad'), ), migrations.AddField( model_name='usuarios', name='NombLogr', field=models.CharField(max_length=100, null=True, verbose_name='Logro o", "null=True, verbose_name='Nivel de Habilidad: '), ), migrations.AddField( model_name='usuarios', name='NombHabil', field=models.CharField(default='', max_length=100, null=True, verbose_name='Habilidad'),", "migrations.DeleteModel( name='Habilidades', ), migrations.RemoveField( model_name='logros', name='NombTiLo', ), migrations.AddField( model_name='usuarios', name='CargExpe', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos',", "to='parametros.TipoLogr', verbose_name='Tipo de Logro'), ), migrations.AddField( model_name='usuarios', name='TipoEstu', field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu', verbose_name='Tipo", "caracteristicas del logro'), ), migrations.AddField( model_name='usuarios', name='EmprExpe', field=models.CharField(max_length=150, null=True, verbose_name='Empresa'), ), migrations.AddField( model_name='usuarios',", "'), ), migrations.AddField( model_name='usuarios', name='NombHabil', field=models.CharField(default='', max_length=100, null=True, verbose_name='Habilidad'), ), migrations.AddField( model_name='usuarios', name='NombLogr',", "Migration(migrations.Migration): dependencies = [ ('parametros', '0002_empleos'), ('mainUser', '0028_auto_20190624_1815'), ] operations = [ migrations.RemoveField(", "verbose_name='Nivel de Habilidad: '), ), migrations.AddField( model_name='usuarios', name='NombHabil', field=models.CharField(default='', max_length=100, null=True, verbose_name='Habilidad'), ),", "2.2.2 on 2019-06-25 16:20 import ckeditor.fields from django.db import migrations, models import django.db.models.deletion", "model_name='usuarios', 
name='NombTiLo', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr', verbose_name='Tipo de Logro'), ), migrations.AddField( model_name='usuarios', name='TipoEstu', field=models.ForeignKey(default='',", "Ocupado'), ), migrations.AddField( model_name='usuarios', name='DescLogr', field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción o caracteristicas del logro'), ), migrations.AddField(", "to='parametros.Empleos', verbose_name='Cargo Ocupado'), ), migrations.AddField( model_name='usuarios', name='DescLogr', field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción o caracteristicas del logro'),", "field=models.CharField(max_length=150, null=True, verbose_name='Empresa'), ), migrations.AddField( model_name='usuarios', name='FechLogr', field=models.DateField(null=True, verbose_name='Fecha de culminación de logro'),", "null=True, verbose_name='Habilidad'), ), migrations.AddField( model_name='usuarios', name='NombLogr', field=models.CharField(max_length=100, null=True, verbose_name='Logro o Distinción'), ), migrations.AddField(", "), migrations.AddField( model_name='usuarios', name='NombHabil', field=models.CharField(default='', max_length=100, null=True, verbose_name='Habilidad'), ), migrations.AddField( model_name='usuarios', name='NombLogr', field=models.CharField(max_length=100,", "migrations.RemoveField( model_name='experiencia', name='CargExpe', ), migrations.DeleteModel( name='Habilidades', ), migrations.RemoveField( model_name='logros', name='NombTiLo', ), migrations.AddField( model_name='usuarios',", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('parametros', '0002_empleos'), ('mainUser', '0028_auto_20190624_1815'), ] operations", "migrations.AddField( model_name='usuarios', name='TituloEst', field=models.CharField(max_length=250, null=True, verbose_name='Titulo de estudio'), ), migrations.DeleteModel( name='Educacion', ), migrations.DeleteModel(", "null=True, verbose_name='Empresa'), ), migrations.AddField( model_name='usuarios', name='FechLogr', field=models.DateField(null=True, verbose_name='Fecha de culminación de logro'), ),", "de Habilidad: '), ), migrations.AddField( model_name='usuarios', name='NombHabil', field=models.CharField(default='', max_length=100, null=True, verbose_name='Habilidad'), ), migrations.AddField(", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('parametros', '0002_empleos'), ('mainUser', '0028_auto_20190624_1815'), ]", "name='Instituto', field=models.CharField(max_length=30, null=True, verbose_name='Inistitución o Academia'), ), migrations.AddField( model_name='usuarios', name='LogrExpe', field=ckeditor.fields.RichTextField(null=True, verbose_name='Logros Alcanzados'),", "logro'), ), migrations.AddField( model_name='usuarios', name='EmprExpe', field=models.CharField(max_length=150, null=True, verbose_name='Empresa'), ), migrations.AddField( model_name='usuarios', name='FechLogr', field=models.DateField(null=True,", "name='EmprExpe', field=models.CharField(max_length=150, null=True, verbose_name='Empresa'), ), migrations.AddField( model_name='usuarios', name='FechLogr', field=models.DateField(null=True, verbose_name='Fecha de culminación de", "migrations.AddField( model_name='usuarios', name='FuncionE', field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones desempeñadas'), ), migrations.AddField( model_name='usuarios', name='Instituto', 
field=models.CharField(max_length=30, null=True, verbose_name='Inistitución", "verbose_name='Cargo Ocupado'), ), migrations.AddField( model_name='usuarios', name='DescLogr', field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción o caracteristicas del logro'), ),", "('parametros', '0002_empleos'), ('mainUser', '0028_auto_20190624_1815'), ] operations = [ migrations.RemoveField( model_name='educacion', name='TipoEstu', ), migrations.RemoveField(", "), migrations.AddField( model_name='usuarios', name='DescLogr', field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción o caracteristicas del logro'), ), migrations.AddField( model_name='usuarios',", "migrations.AddField( model_name='usuarios', name='NombTiLo', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr', verbose_name='Tipo de Logro'), ), migrations.AddField( model_name='usuarios', name='TipoEstu',", "name='NombLogr', field=models.CharField(max_length=100, null=True, verbose_name='Logro o Distinción'), ), migrations.AddField( model_name='usuarios', name='NombTiLo', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr',", "dependencies = [ ('parametros', '0002_empleos'), ('mainUser', '0028_auto_20190624_1815'), ] operations = [ migrations.RemoveField( model_name='educacion',", "null=True, verbose_name='Titulo de estudio'), ), migrations.DeleteModel( name='Educacion', ), migrations.DeleteModel( name='Experiencia', ), migrations.DeleteModel( name='Logros',", "('mainUser', '0028_auto_20190624_1815'), ] operations = [ migrations.RemoveField( model_name='educacion', name='TipoEstu', ), migrations.RemoveField( model_name='experiencia', name='CargExpe',", "name='NombTiLo', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr', verbose_name='Tipo de Logro'), ), migrations.AddField( model_name='usuarios', name='TipoEstu', field=models.ForeignKey(default='', null=True,", "name='DescLogr', field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción o caracteristicas del logro'), ), migrations.AddField( model_name='usuarios', name='EmprExpe', field=models.CharField(max_length=150, null=True,", "Generated by Django 2.2.2 on 2019-06-25 16:20 import ckeditor.fields from django.db import migrations,", "name='FechLogr', field=models.DateField(null=True, verbose_name='Fecha de culminación de logro'), ), migrations.AddField( model_name='usuarios', name='FuncionE', field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones", "de logro'), ), migrations.AddField( model_name='usuarios', name='FuncionE', field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones desempeñadas'), ), migrations.AddField( model_name='usuarios', name='Instituto',", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('parametros', '0002_empleos'),", "model_name='usuarios', name='CargExpe', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos', verbose_name='Cargo Ocupado'), ), migrations.AddField( model_name='usuarios', name='DescLogr', field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción", "migrations.AddField( model_name='usuarios', name='NombLogr', field=models.CharField(max_length=100, null=True, verbose_name='Logro o Distinción'), ), migrations.AddField( model_name='usuarios', name='NombTiLo', 
field=models.ForeignKey(null=True,", "on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu', verbose_name='Tipo de educación'), ), migrations.AddField( model_name='usuarios', name='TituloEst', field=models.CharField(max_length=250, null=True, verbose_name='Titulo de", "'0028_auto_20190624_1815'), ] operations = [ migrations.RemoveField( model_name='educacion', name='TipoEstu', ), migrations.RemoveField( model_name='experiencia', name='CargExpe', ),", "model_name='usuarios', name='LogrExpe', field=ckeditor.fields.RichTextField(null=True, verbose_name='Logros Alcanzados'), ), migrations.AddField( model_name='usuarios', name='NiveHabil', field=models.CharField(default='', max_length=20, null=True, verbose_name='Nivel", "[ migrations.RemoveField( model_name='educacion', name='TipoEstu', ), migrations.RemoveField( model_name='experiencia', name='CargExpe', ), migrations.DeleteModel( name='Habilidades', ), migrations.RemoveField(", "), migrations.AddField( model_name='usuarios', name='TipoEstu', field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu', verbose_name='Tipo de educación'), ), migrations.AddField(", "), migrations.AddField( model_name='usuarios', name='EmprExpe', field=models.CharField(max_length=150, null=True, verbose_name='Empresa'), ), migrations.AddField( model_name='usuarios', name='FechLogr', field=models.DateField(null=True, verbose_name='Fecha", "] operations = [ migrations.RemoveField( model_name='educacion', name='TipoEstu', ), migrations.RemoveField( model_name='experiencia', name='CargExpe', ), migrations.DeleteModel(", "ckeditor.fields from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "name='TituloEst', field=models.CharField(max_length=250, null=True, verbose_name='Titulo de estudio'), ), migrations.DeleteModel( name='Educacion', ), migrations.DeleteModel( name='Experiencia', ),", "2019-06-25 16:20 import ckeditor.fields from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "class Migration(migrations.Migration): dependencies = [ ('parametros', '0002_empleos'), ('mainUser', '0028_auto_20190624_1815'), ] operations = [", "Django 2.2.2 on 2019-06-25 16:20 import ckeditor.fields from django.db import migrations, models import", "o Distinción'), ), migrations.AddField( model_name='usuarios', name='NombTiLo', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr', verbose_name='Tipo de Logro'), ),", "[ ('parametros', '0002_empleos'), ('mainUser', '0028_auto_20190624_1815'), ] operations = [ migrations.RemoveField( model_name='educacion', name='TipoEstu', ),", "migrations.RemoveField( model_name='educacion', name='TipoEstu', ), migrations.RemoveField( model_name='experiencia', name='CargExpe', ), migrations.DeleteModel( name='Habilidades', ), migrations.RemoveField( model_name='logros',", "max_length=100, null=True, verbose_name='Habilidad'), ), migrations.AddField( model_name='usuarios', name='NombLogr', field=models.CharField(max_length=100, null=True, verbose_name='Logro o Distinción'), ),", "de educación'), ), migrations.AddField( model_name='usuarios', name='TituloEst', field=models.CharField(max_length=250, null=True, verbose_name='Titulo de estudio'), ), migrations.DeleteModel(", "), migrations.AddField( model_name='usuarios', name='LogrExpe', field=ckeditor.fields.RichTextField(null=True, 
verbose_name='Logros Alcanzados'), ), migrations.AddField( model_name='usuarios', name='NiveHabil', field=models.CharField(default='', max_length=20,", "to='parametros.TipoEstu', verbose_name='Tipo de educación'), ), migrations.AddField( model_name='usuarios', name='TituloEst', field=models.CharField(max_length=250, null=True, verbose_name='Titulo de estudio'),", "Academia'), ), migrations.AddField( model_name='usuarios', name='LogrExpe', field=ckeditor.fields.RichTextField(null=True, verbose_name='Logros Alcanzados'), ), migrations.AddField( model_name='usuarios', name='NiveHabil', field=models.CharField(default='',", "model_name='usuarios', name='Instituto', field=models.CharField(max_length=30, null=True, verbose_name='Inistitución o Academia'), ), migrations.AddField( model_name='usuarios', name='LogrExpe', field=ckeditor.fields.RichTextField(null=True, verbose_name='Logros", "verbose_name='Empresa'), ), migrations.AddField( model_name='usuarios', name='FechLogr', field=models.DateField(null=True, verbose_name='Fecha de culminación de logro'), ), migrations.AddField(", "model_name='usuarios', name='FechLogr', field=models.DateField(null=True, verbose_name='Fecha de culminación de logro'), ), migrations.AddField( model_name='usuarios', name='FuncionE', field=ckeditor.fields.RichTextField(null=True,", "16:20 import ckeditor.fields from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "), migrations.AddField( model_name='usuarios', name='FuncionE', field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones desempeñadas'), ), migrations.AddField( model_name='usuarios', name='Instituto', field=models.CharField(max_length=30, null=True,", "field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr', verbose_name='Tipo de Logro'), ), migrations.AddField( model_name='usuarios', name='TipoEstu', field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE,", "operations = [ migrations.RemoveField( model_name='educacion', name='TipoEstu', ), migrations.RemoveField( model_name='experiencia', name='CargExpe', ), migrations.DeleteModel( name='Habilidades',", "'0002_empleos'), ('mainUser', '0028_auto_20190624_1815'), ] operations = [ migrations.RemoveField( model_name='educacion', name='TipoEstu', ), migrations.RemoveField( model_name='experiencia',", "verbose_name='Tipo de educación'), ), migrations.AddField( model_name='usuarios', name='TituloEst', field=models.CharField(max_length=250, null=True, verbose_name='Titulo de estudio'), ),", "max_length=20, null=True, verbose_name='Nivel de Habilidad: '), ), migrations.AddField( model_name='usuarios', name='NombHabil', field=models.CharField(default='', max_length=100, null=True,", "model_name='usuarios', name='TituloEst', field=models.CharField(max_length=250, null=True, verbose_name='Titulo de estudio'), ), migrations.DeleteModel( name='Educacion', ), migrations.DeleteModel( name='Experiencia',", "on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos', verbose_name='Cargo Ocupado'), ), migrations.AddField( model_name='usuarios', name='DescLogr', field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción o caracteristicas del", "del logro'), ), migrations.AddField( model_name='usuarios', name='EmprExpe', field=models.CharField(max_length=150, null=True, verbose_name='Empresa'), ), migrations.AddField( model_name='usuarios', name='FechLogr',", "name='CargExpe', 
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos', verbose_name='Cargo Ocupado'), ), migrations.AddField( model_name='usuarios', name='DescLogr', field=ckeditor.fields.RichTextField(null=True, verbose_name='Descripción o", "field=models.CharField(max_length=250, null=True, verbose_name='Titulo de estudio'), ), migrations.DeleteModel( name='Educacion', ), migrations.DeleteModel( name='Experiencia', ), migrations.DeleteModel(", "model_name='usuarios', name='NombLogr', field=models.CharField(max_length=100, null=True, verbose_name='Logro o Distinción'), ), migrations.AddField( model_name='usuarios', name='NombTiLo', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "field=models.CharField(max_length=100, null=True, verbose_name='Logro o Distinción'), ), migrations.AddField( model_name='usuarios', name='NombTiLo', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr', verbose_name='Tipo", "desempeñadas'), ), migrations.AddField( model_name='usuarios', name='Instituto', field=models.CharField(max_length=30, null=True, verbose_name='Inistitución o Academia'), ), migrations.AddField( model_name='usuarios',", "verbose_name='Tipo de Logro'), ), migrations.AddField( model_name='usuarios', name='TipoEstu', field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu', verbose_name='Tipo de", "null=True, verbose_name='Logro o Distinción'), ), migrations.AddField( model_name='usuarios', name='NombTiLo', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoLogr', verbose_name='Tipo de", "), migrations.RemoveField( model_name='logros', name='NombTiLo', ), migrations.AddField( model_name='usuarios', name='CargExpe', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos', verbose_name='Cargo Ocupado'),", "model_name='usuarios', name='EmprExpe', field=models.CharField(max_length=150, null=True, verbose_name='Empresa'), ), migrations.AddField( model_name='usuarios', name='FechLogr', field=models.DateField(null=True, verbose_name='Fecha de culminación", "migrations.AddField( model_name='usuarios', name='FechLogr', field=models.DateField(null=True, verbose_name='Fecha de culminación de logro'), ), migrations.AddField( model_name='usuarios', name='FuncionE',", "field=models.DateField(null=True, verbose_name='Fecha de culminación de logro'), ), migrations.AddField( model_name='usuarios', name='FuncionE', field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones desempeñadas'),", "de Logro'), ), migrations.AddField( model_name='usuarios', name='TipoEstu', field=models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.TipoEstu', verbose_name='Tipo de educación'),", "de culminación de logro'), ), migrations.AddField( model_name='usuarios', name='FuncionE', field=ckeditor.fields.RichTextField(null=True, verbose_name='Funciones desempeñadas'), ), migrations.AddField(", "verbose_name='Funciones desempeñadas'), ), migrations.AddField( model_name='usuarios', name='Instituto', field=models.CharField(max_length=30, null=True, verbose_name='Inistitución o Academia'), ), migrations.AddField(", "), migrations.AddField( model_name='usuarios', name='Instituto', field=models.CharField(max_length=30, null=True, verbose_name='Inistitución o Academia'), ), migrations.AddField( 
model_name='usuarios', name='LogrExpe',", "migrations.AddField( model_name='usuarios', name='CargExpe', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos', verbose_name='Cargo Ocupado'), ), migrations.AddField( model_name='usuarios', name='DescLogr', field=ckeditor.fields.RichTextField(null=True,", "educación'), ), migrations.AddField( model_name='usuarios', name='TituloEst', field=models.CharField(max_length=250, null=True, verbose_name='Titulo de estudio'), ), migrations.DeleteModel( name='Educacion',", "null=True, verbose_name='Inistitución o Academia'), ), migrations.AddField( model_name='usuarios', name='LogrExpe', field=ckeditor.fields.RichTextField(null=True, verbose_name='Logros Alcanzados'), ), migrations.AddField(", "verbose_name='Inistitución o Academia'), ), migrations.AddField( model_name='usuarios', name='LogrExpe', field=ckeditor.fields.RichTextField(null=True, verbose_name='Logros Alcanzados'), ), migrations.AddField( model_name='usuarios',", "name='Habilidades', ), migrations.RemoveField( model_name='logros', name='NombTiLo', ), migrations.AddField( model_name='usuarios', name='CargExpe', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos', verbose_name='Cargo", "name='CargExpe', ), migrations.DeleteModel( name='Habilidades', ), migrations.RemoveField( model_name='logros', name='NombTiLo', ), migrations.AddField( model_name='usuarios', name='CargExpe', field=models.ForeignKey(null=True,", "verbose_name='Descripción o caracteristicas del logro'), ), migrations.AddField( model_name='usuarios', name='EmprExpe', field=models.CharField(max_length=150, null=True, verbose_name='Empresa'), ),", "name='TipoEstu', ), migrations.RemoveField( model_name='experiencia', name='CargExpe', ), migrations.DeleteModel( name='Habilidades', ), migrations.RemoveField( model_name='logros', name='NombTiLo', ),", "name='NombTiLo', ), migrations.AddField( model_name='usuarios', name='CargExpe', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos', verbose_name='Cargo Ocupado'), ), migrations.AddField( model_name='usuarios',", "= [ migrations.RemoveField( model_name='educacion', name='TipoEstu', ), migrations.RemoveField( model_name='experiencia', name='CargExpe', ), migrations.DeleteModel( name='Habilidades', ),", "import ckeditor.fields from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "migrations.AddField( model_name='usuarios', name='LogrExpe', field=ckeditor.fields.RichTextField(null=True, verbose_name='Logros Alcanzados'), ), migrations.AddField( model_name='usuarios', name='NiveHabil', field=models.CharField(default='', max_length=20, null=True,", "migrations.RemoveField( model_name='logros', name='NombTiLo', ), migrations.AddField( model_name='usuarios', name='CargExpe', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='parametros.Empleos', verbose_name='Cargo Ocupado'), ),", "o Academia'), ), migrations.AddField( model_name='usuarios', name='LogrExpe', field=ckeditor.fields.RichTextField(null=True, verbose_name='Logros Alcanzados'), ), migrations.AddField( model_name='usuarios', name='NiveHabil',", "field=models.CharField(default='', max_length=100, null=True, verbose_name='Habilidad'), ), migrations.AddField( model_name='usuarios', name='NombLogr', field=models.CharField(max_length=100, null=True, verbose_name='Logro o 
Distinción'),", "o caracteristicas del logro'), ), migrations.AddField( model_name='usuarios', name='EmprExpe', field=models.CharField(max_length=150, null=True, verbose_name='Empresa'), ), migrations.AddField(", "Alcanzados'), ), migrations.AddField( model_name='usuarios', name='NiveHabil', field=models.CharField(default='', max_length=20, null=True, verbose_name='Nivel de Habilidad: '), )," ]
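A note on the list just closed: the overlapping entries appear to be sliding-window fragments of one Django migration for the mainUser app (generated by Django 2.2.2 on 2019-06-25). Read together they describe moving the education, experience, skill and achievement fields onto the Usuarios model and deleting the old per-section models. A condensed reconstruction is sketched below; it is assembled only from the fragments, and most of the repetitive AddField operations are summarised in a comment rather than written out, so it is not a byte-for-byte copy of the original file.

# Generated by Django 2.2.2 on 2019-06-25 16:20
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('parametros', '0002_empleos'),
        ('mainUser', '0028_auto_20190624_1815'),
    ]

    operations = [
        # Detach the foreign keys held by the models that are about to be dropped.
        migrations.RemoveField(model_name='educacion', name='TipoEstu'),
        migrations.RemoveField(model_name='experiencia', name='CargExpe'),
        migrations.DeleteModel(name='Habilidades'),
        migrations.RemoveField(model_name='logros', name='NombTiLo'),
        # Re-create the same columns directly on Usuarios. Two are shown in full;
        # the fragments also cover DescLogr, EmprExpe, FechLogr, FuncionE, Instituto,
        # LogrExpe, NiveHabil, NombHabil, NombLogr, NombTiLo and TituloEst.
        migrations.AddField(
            model_name='usuarios',
            name='CargExpe',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,
                                    to='parametros.Empleos', verbose_name='Cargo Ocupado'),
        ),
        migrations.AddField(
            model_name='usuarios',
            name='TipoEstu',
            field=models.ForeignKey(default='', null=True,
                                    on_delete=django.db.models.deletion.CASCADE,
                                    to='parametros.TipoEstu', verbose_name='Tipo de educación'),
        ),
        # Finally drop the now-empty models.
        migrations.DeleteModel(name='Educacion'),
        migrations.DeleteModel(name='Experiencia'),
        migrations.DeleteModel(name='Logros'),
    ]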
[ "__name__ == '__main__': # Generate vectors (with index) output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED) # Save", "import Doc2VecVectorizer from nnframework.data_builder import DataBuilder import pandas as pd import constants as", "np def generate_d2v_vectors(source_file): df = pd.read_csv(source_file) messages = df[\"Message\"].values vectorizer = Doc2VecVectorizer() vectors", "= vectorizer.vectorize(messages) return np.c_[df.iloc[:,0].values, vectors] if __name__ == '__main__': # Generate vectors (with", "'__main__': # Generate vectors (with index) output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED) # Save vectors as", "= Doc2VecVectorizer() vectors = vectorizer.vectorize(messages) return np.c_[df.iloc[:,0].values, vectors] if __name__ == '__main__': #", "(with index) output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED) # Save vectors as npy file np.save(const.FILE_DOC2VEC_INPUTS_UNLABELLED, output)", "def generate_d2v_vectors(source_file): df = pd.read_csv(source_file) messages = df[\"Message\"].values vectorizer = Doc2VecVectorizer() vectors =", "pd import constants as const import numpy as np def generate_d2v_vectors(source_file): df =", "vectors = vectorizer.vectorize(messages) return np.c_[df.iloc[:,0].values, vectors] if __name__ == '__main__': # Generate vectors", "import constants as const import numpy as np def generate_d2v_vectors(source_file): df = pd.read_csv(source_file)", "as pd import constants as const import numpy as np def generate_d2v_vectors(source_file): df", "from nnframework.data_builder import DataBuilder import pandas as pd import constants as const import", "vectorizer = Doc2VecVectorizer() vectors = vectorizer.vectorize(messages) return np.c_[df.iloc[:,0].values, vectors] if __name__ == '__main__':", "if __name__ == '__main__': # Generate vectors (with index) output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED) #", "import pandas as pd import constants as const import numpy as np def", "DataBuilder import pandas as pd import constants as const import numpy as np", "preprocessing.vectorizers import Doc2VecVectorizer from nnframework.data_builder import DataBuilder import pandas as pd import constants", "constants as const import numpy as np def generate_d2v_vectors(source_file): df = pd.read_csv(source_file) messages", "as const import numpy as np def generate_d2v_vectors(source_file): df = pd.read_csv(source_file) messages =", "const import numpy as np def generate_d2v_vectors(source_file): df = pd.read_csv(source_file) messages = df[\"Message\"].values", "df = pd.read_csv(source_file) messages = df[\"Message\"].values vectorizer = Doc2VecVectorizer() vectors = vectorizer.vectorize(messages) return", "generate_d2v_vectors(source_file): df = pd.read_csv(source_file) messages = df[\"Message\"].values vectorizer = Doc2VecVectorizer() vectors = vectorizer.vectorize(messages)", "Doc2VecVectorizer() vectors = vectorizer.vectorize(messages) return np.c_[df.iloc[:,0].values, vectors] if __name__ == '__main__': # Generate", "vectors] if __name__ == '__main__': # Generate vectors (with index) output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED)", "# Generate vectors (with index) output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED) # Save vectors as npy", "= pd.read_csv(source_file) messages = df[\"Message\"].values vectorizer = Doc2VecVectorizer() vectors = vectorizer.vectorize(messages) return np.c_[df.iloc[:,0].values,", "messages = df[\"Message\"].values vectorizer = Doc2VecVectorizer() vectors = 
vectorizer.vectorize(messages) return np.c_[df.iloc[:,0].values, vectors] if", "return np.c_[df.iloc[:,0].values, vectors] if __name__ == '__main__': # Generate vectors (with index) output", "df[\"Message\"].values vectorizer = Doc2VecVectorizer() vectors = vectorizer.vectorize(messages) return np.c_[df.iloc[:,0].values, vectors] if __name__ ==", "np.c_[df.iloc[:,0].values, vectors] if __name__ == '__main__': # Generate vectors (with index) output =", "as np def generate_d2v_vectors(source_file): df = pd.read_csv(source_file) messages = df[\"Message\"].values vectorizer = Doc2VecVectorizer()", "= df[\"Message\"].values vectorizer = Doc2VecVectorizer() vectors = vectorizer.vectorize(messages) return np.c_[df.iloc[:,0].values, vectors] if __name__", "== '__main__': # Generate vectors (with index) output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED) # Save vectors", "vectors (with index) output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED) # Save vectors as npy file np.save(const.FILE_DOC2VEC_INPUTS_UNLABELLED,", "Doc2VecVectorizer from nnframework.data_builder import DataBuilder import pandas as pd import constants as const", "numpy as np def generate_d2v_vectors(source_file): df = pd.read_csv(source_file) messages = df[\"Message\"].values vectorizer =", "Generate vectors (with index) output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED) # Save vectors as npy file", "from preprocessing.vectorizers import Doc2VecVectorizer from nnframework.data_builder import DataBuilder import pandas as pd import", "nnframework.data_builder import DataBuilder import pandas as pd import constants as const import numpy", "import numpy as np def generate_d2v_vectors(source_file): df = pd.read_csv(source_file) messages = df[\"Message\"].values vectorizer", "pd.read_csv(source_file) messages = df[\"Message\"].values vectorizer = Doc2VecVectorizer() vectors = vectorizer.vectorize(messages) return np.c_[df.iloc[:,0].values, vectors]", "pandas as pd import constants as const import numpy as np def generate_d2v_vectors(source_file):", "import DataBuilder import pandas as pd import constants as const import numpy as", "vectorizer.vectorize(messages) return np.c_[df.iloc[:,0].values, vectors] if __name__ == '__main__': # Generate vectors (with index)" ]
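The list above reads as the same kind of sliding window over a short Doc2Vec feature-extraction script. Stitching the fragments back together gives roughly the script below; every import and identifier (Doc2VecVectorizer, DataBuilder, the constants module and the FILE_* names) comes from the fragments themselves and refers to project-local code rather than a published library, so treat this as a sketch of that project, not a reusable API.

from preprocessing.vectorizers import Doc2VecVectorizer
from nnframework.data_builder import DataBuilder
import pandas as pd
import constants as const
import numpy as np


def generate_d2v_vectors(source_file):
    # Read the CSV, embed every "Message" with Doc2Vec, and keep the first
    # CSV column (the index) alongside the resulting vectors.
    df = pd.read_csv(source_file)
    messages = df["Message"].values
    vectorizer = Doc2VecVectorizer()
    vectors = vectorizer.vectorize(messages)
    return np.c_[df.iloc[:, 0].values, vectors]


if __name__ == '__main__':
    # Generate vectors (with index)
    output = generate_d2v_vectors(const.FILE_UNIQUE_UNLABELLED)
    # Save vectors as npy file
    np.save(const.FILE_DOC2VEC_INPUTS_UNLABELLED, output)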
[ "is enabled (it’s on by default), but can only be cleaned up if", "stack frame alive); or a reference to the object on the # stack", "frame of a function that caught an exception (the traceback # stored in", "the derived class’s __del__() method, # if any, must explicitly call it to", "a base class has a __del__() method, the derived class’s __del__() method, #", "enabled (it’s on by default), but can only be cleaned up if there", "involved. Refer to the documentation for the gc module # for more information", "circular references # between objects (e.g., a doubly-linked list or a tree data", "x.__del__() — the former decrements the reference count for x by one, #", "recommended!) for the __del__() method to postpone destruction # of the instance by", "on the # stack frame that raised an unhandled exception in interactive mode", "function that caught an exception (the traceback # stored in sys.exc_traceback keeps the", "resolved by storing None in # sys.exc_traceback or sys.last_traceback. Circular references which are", "class part of the instance. # Note that it is possible (though not", "situations can be resolved by storing None in # sys.exc_traceback or sys.last_traceback. Circular", "reference count of an object from going to zero include: circular references #", "default), but can only be cleaned up if there # are no Python-level", "# stored in sys.exc_traceback keeps the stack frame alive); or a reference to", "__del__() method to postpone destruction # of the instance by creating a new", "time when # this new reference is deleted. It is not guaranteed that", "the latter is only called when x’s reference count reaches zero. Some common", "and child pointers); # a reference to the object on the stack frame", "sys.exc_traceback keeps the stack frame alive); or a reference to the object on", "no Python-level __del__() methods involved. Refer to the documentation for the gc module", "reference count for x by one, # and the latter is only called", "# if any, must explicitly call it to ensure proper deletion of the", "The first situation can only be remedied by # explicitly breaking the cycles;", "situation can only be remedied by # explicitly breaking the cycles; the latter", "new reference is deleted. It is not guaranteed that __del__() methods are called", "exception in interactive mode (the traceback stored in # sys.last_traceback keeps the stack", "handled by the cycle detector, # particularly the description of the garbage value.", "for the gc module # for more information about how __del__() methods are", "2 # 2 # Reference: # object.__del__(self) # Called when the instance is", "part of the instance. # Note that it is possible (though not recommended!)", "# Note del x doesn’t directly call x.__del__() — the former decrements the", "the instance by creating a new reference to it. It may then be", "that it is possible (though not recommended!) for the __del__() method to postpone", "explicitly call it to ensure proper deletion of the base class part of", "frame alive). The first situation can only be remedied by # explicitly breaking", "Reference: # object.__del__(self) # Called when the instance is about to be destroyed.", "is deleted. It is not guaranteed that __del__() methods are called for objects", "is about to be destroyed. This is also called a destructor. 
# If", "exception (the traceback # stored in sys.exc_traceback keeps the stack frame alive); or", "__del__() method, # if any, must explicitly call it to ensure proper deletion", "mode (the traceback stored in # sys.last_traceback keeps the stack frame alive). The", "__del__() method, the derived class’s __del__() method, # if any, must explicitly call", "detected when # the option cycle detector is enabled (it’s on by default),", "= C() c2 = C() print(c1.dangerous) c1.dangerous = 3 print(c1.dangerous) print(c2.dangerous) del c1.dangerous", "the reference count of an object from going to zero include: circular references", "instance. # Note that it is possible (though not recommended!) for the __del__()", "guaranteed that __del__() methods are called for objects # that still exist when", "there # are no Python-level __del__() methods involved. Refer to the documentation for", "that raised an unhandled exception in interactive mode (the traceback stored in #", "print(c1.dangerous) print(c2.dangerous) del c1.dangerous print(c1.dangerous) print(c2.dangerous) # Solution: # 2 # 3 #", "print(c2.dangerous) # Solution: # 2 # 3 # 2 # 2 # 2", "C: dangerous = 2 c1 = C() c2 = C() print(c1.dangerous) c1.dangerous =", "this new reference is deleted. It is not guaranteed that __del__() methods are", "(the traceback stored in # sys.last_traceback keeps the stack frame alive). The first", "print(c1.dangerous) print(c2.dangerous) # Solution: # 2 # 3 # 2 # 2 #", "of the instance. # Note that it is possible (though not recommended!) for", "methods are called for objects # that still exist when the interpreter exits.", "to the documentation for the gc module # for more information about how", "for the __del__() method to postpone destruction # of the instance by creating", "if any, must explicitly call it to ensure proper deletion of the base", "list or a tree data structure with parent and child pointers); # a", "when the interpreter exits. # Note del x doesn’t directly call x.__del__() —", "a tree data structure with parent and child pointers); # a reference to", "still exist when the interpreter exits. # Note del x doesn’t directly call", "it to ensure proper deletion of the base class part of the instance.", "instance is about to be destroyed. This is also called a destructor. #", "stack frame alive). The first situation can only be remedied by # explicitly", "the stack frame alive); or a reference to the object on the #", "Solution: # 2 # 3 # 2 # 2 # 2 # Reference:", "garbage are detected when # the option cycle detector is enabled (it’s on", "by one, # and the latter is only called when x’s reference count", "method to postpone destruction # of the instance by creating a new reference", "the option cycle detector is enabled (it’s on by default), but can only", "when x’s reference count reaches zero. Some common situations # that may prevent", "caught an exception (the traceback # stored in sys.exc_traceback keeps the stack frame", "C() print(c1.dangerous) c1.dangerous = 3 print(c1.dangerous) print(c2.dangerous) del c1.dangerous print(c1.dangerous) print(c2.dangerous) # Solution:", "in # sys.exc_traceback or sys.last_traceback. Circular references which are garbage are detected when", "reaches zero. 
Some common situations # that may prevent the reference count of", "alive); or a reference to the object on the # stack frame that", "# for more information about how __del__() methods are handled by the cycle", "on the stack frame of a function that caught an exception (the traceback", "is not guaranteed that __del__() methods are called for objects # that still", "unhandled exception in interactive mode (the traceback stored in # sys.last_traceback keeps the", "be cleaned up if there # are no Python-level __del__() methods involved. Refer", "— the former decrements the reference count for x by one, # and", "to the object on the # stack frame that raised an unhandled exception", "methods involved. Refer to the documentation for the gc module # for more", "methods are handled by the cycle detector, # particularly the description of the", "# and the latter is only called when x’s reference count reaches zero.", "(e.g., a doubly-linked list or a tree data structure with parent and child", "for more information about how __del__() methods are handled by the cycle detector,", "called a destructor. # If a base class has a __del__() method, the", "It is not guaranteed that __del__() methods are called for objects # that", "raised an unhandled exception in interactive mode (the traceback stored in # sys.last_traceback", "# Called when the instance is about to be destroyed. This is also", "not recommended!) for the __del__() method to postpone destruction # of the instance", "x by one, # and the latter is only called when x’s reference", "be called at a later time when # this new reference is deleted.", "references which are garbage are detected when # the option cycle detector is", "explicitly breaking the cycles; the latter two situations can be resolved by storing", "Note that it is possible (though not recommended!) for the __del__() method to", "which are garbage are detected when # the option cycle detector is enabled", "the instance. # Note that it is possible (though not recommended!) for the", "exits. # Note del x doesn’t directly call x.__del__() — the former decrements", "# Solution: # 2 # 3 # 2 # 2 # 2 #", "Called when the instance is about to be destroyed. This is also called", "count of an object from going to zero include: circular references # between", "an object from going to zero include: circular references # between objects (e.g.,", "in sys.exc_traceback keeps the stack frame alive); or a reference to the object", "interpreter exits. # Note del x doesn’t directly call x.__del__() — the former", "Python-level __del__() methods involved. Refer to the documentation for the gc module #", "None in # sys.exc_traceback or sys.last_traceback. Circular references which are garbage are detected", "but can only be cleaned up if there # are no Python-level __del__()", "can only be remedied by # explicitly breaking the cycles; the latter two", "or sys.last_traceback. Circular references which are garbage are detected when # the option", "ensure proper deletion of the base class part of the instance. # Note", "destroyed. This is also called a destructor. 
# If a base class has", "# 2 # 2 # 2 # Reference: # object.__del__(self) # Called when", "must explicitly call it to ensure proper deletion of the base class part", "3 # 2 # 2 # 2 # Reference: # object.__del__(self) # Called", "in interactive mode (the traceback stored in # sys.last_traceback keeps the stack frame", "doubly-linked list or a tree data structure with parent and child pointers); #", "information about how __del__() methods are handled by the cycle detector, # particularly", "situations # that may prevent the reference count of an object from going", "# 2 # 3 # 2 # 2 # 2 # Reference: #", "of a function that caught an exception (the traceback # stored in sys.exc_traceback", "the reference count for x by one, # and the latter is only", "is only called when x’s reference count reaches zero. Some common situations #", "# If a base class has a __del__() method, the derived class’s __del__()", "# Reference: # object.__del__(self) # Called when the instance is about to be", "zero include: circular references # between objects (e.g., a doubly-linked list or a", "later time when # this new reference is deleted. It is not guaranteed", "the latter two situations can be resolved by storing None in # sys.exc_traceback", "are detected when # the option cycle detector is enabled (it’s on by", "a reference to the object on the stack frame of a function that", "the former decrements the reference count for x by one, # and the", "called for objects # that still exist when the interpreter exits. # Note", "if there # are no Python-level __del__() methods involved. Refer to the documentation", "it. It may then be called at a later time when # this", "latter is only called when x’s reference count reaches zero. Some common situations", "that caught an exception (the traceback # stored in sys.exc_traceback keeps the stack", "data structure with parent and child pointers); # a reference to the object", "remedied by # explicitly breaking the cycles; the latter two situations can be", "x’s reference count reaches zero. Some common situations # that may prevent the", "child pointers); # a reference to the object on the stack frame of", "to ensure proper deletion of the base class part of the instance. #", "call x.__del__() — the former decrements the reference count for x by one,", "# 2 # 2 # Reference: # object.__del__(self) # Called when the instance", "new reference to it. It may then be called at a later time", "Note del x doesn’t directly call x.__del__() — the former decrements the reference", "about how __del__() methods are handled by the cycle detector, # particularly the", "traceback stored in # sys.last_traceback keeps the stack frame alive). The first situation", "C() c2 = C() print(c1.dangerous) c1.dangerous = 3 print(c1.dangerous) print(c2.dangerous) del c1.dangerous print(c1.dangerous)", "the cycles; the latter two situations can be resolved by storing None in", "class C: dangerous = 2 c1 = C() c2 = C() print(c1.dangerous) c1.dangerous", "any, must explicitly call it to ensure proper deletion of the base class", "two situations can be resolved by storing None in # sys.exc_traceback or sys.last_traceback.", "that __del__() methods are called for objects # that still exist when the", "deletion of the base class part of the instance. # Note that it", "is also called a destructor. 
# If a base class has a __del__()", "pointers); # a reference to the object on the stack frame of a", "postpone destruction # of the instance by creating a new reference to it.", "# that still exist when the interpreter exits. # Note del x doesn’t", "It may then be called at a later time when # this new", "possible (though not recommended!) for the __del__() method to postpone destruction # of", "# object.__del__(self) # Called when the instance is about to be destroyed. This", "and the latter is only called when x’s reference count reaches zero. Some", "up if there # are no Python-level __del__() methods involved. Refer to the", "of the instance by creating a new reference to it. It may then", "stored in # sys.last_traceback keeps the stack frame alive). The first situation can", "structure with parent and child pointers); # a reference to the object on", "sys.last_traceback. Circular references which are garbage are detected when # the option cycle", "object from going to zero include: circular references # between objects (e.g., a", "that still exist when the interpreter exits. # Note del x doesn’t directly", "called at a later time when # this new reference is deleted. It", "for objects # that still exist when the interpreter exits. # Note del", "be resolved by storing None in # sys.exc_traceback or sys.last_traceback. Circular references which", "# a reference to the object on the stack frame of a function", "how __del__() methods are handled by the cycle detector, # particularly the description", "reference to the object on the # stack frame that raised an unhandled", "include: circular references # between objects (e.g., a doubly-linked list or a tree", "only be remedied by # explicitly breaking the cycles; the latter two situations", "at a later time when # this new reference is deleted. It is", "class’s __del__() method, # if any, must explicitly call it to ensure proper", "object on the # stack frame that raised an unhandled exception in interactive", "or a tree data structure with parent and child pointers); # a reference", "to the object on the stack frame of a function that caught an", "the interpreter exits. # Note del x doesn’t directly call x.__del__() — the", "# are no Python-level __del__() methods involved. Refer to the documentation for the", "by # explicitly breaking the cycles; the latter two situations can be resolved", "stack frame that raised an unhandled exception in interactive mode (the traceback stored", "can be resolved by storing None in # sys.exc_traceback or sys.last_traceback. Circular references", "the stack frame alive). The first situation can only be remedied by #", "parent and child pointers); # a reference to the object on the stack", "detector is enabled (it’s on by default), but can only be cleaned up", "option cycle detector is enabled (it’s on by default), but can only be", "base class part of the instance. # Note that it is possible (though", "Some common situations # that may prevent the reference count of an object", "called when x’s reference count reaches zero. Some common situations # that may", "by storing None in # sys.exc_traceback or sys.last_traceback. Circular references which are garbage", "proper deletion of the base class part of the instance. # Note that", "a later time when # this new reference is deleted. 
It is not", "references # between objects (e.g., a doubly-linked list or a tree data structure", "the object on the stack frame of a function that caught an exception", "tree data structure with parent and child pointers); # a reference to the", "the stack frame of a function that caught an exception (the traceback #", "only called when x’s reference count reaches zero. Some common situations # that", "breaking the cycles; the latter two situations can be resolved by storing None", "reference to the object on the stack frame of a function that caught", "be remedied by # explicitly breaking the cycles; the latter two situations can", "2 # 3 # 2 # 2 # 2 # Reference: # object.__del__(self)", "creating a new reference to it. It may then be called at a", "to it. It may then be called at a later time when #", "by creating a new reference to it. It may then be called at", "x doesn’t directly call x.__del__() — the former decrements the reference count for", "= 2 c1 = C() c2 = C() print(c1.dangerous) c1.dangerous = 3 print(c1.dangerous)", "method, the derived class’s __del__() method, # if any, must explicitly call it", "object.__del__(self) # Called when the instance is about to be destroyed. This is", "del c1.dangerous print(c1.dangerous) print(c2.dangerous) # Solution: # 2 # 3 # 2 #", "This is also called a destructor. # If a base class has a", "cycle detector is enabled (it’s on by default), but can only be cleaned", "in # sys.last_traceback keeps the stack frame alive). The first situation can only", "for x by one, # and the latter is only called when x’s", "frame that raised an unhandled exception in interactive mode (the traceback stored in", "a reference to the object on the # stack frame that raised an", "latter two situations can be resolved by storing None in # sys.exc_traceback or", "derived class’s __del__() method, # if any, must explicitly call it to ensure", "is possible (though not recommended!) for the __del__() method to postpone destruction #", "dangerous = 2 c1 = C() c2 = C() print(c1.dangerous) c1.dangerous = 3", "going to zero include: circular references # between objects (e.g., a doubly-linked list", "the instance is about to be destroyed. This is also called a destructor.", "cleaned up if there # are no Python-level __del__() methods involved. Refer to", "count for x by one, # and the latter is only called when", "interactive mode (the traceback stored in # sys.last_traceback keeps the stack frame alive).", "exist when the interpreter exits. # Note del x doesn’t directly call x.__del__()", "not guaranteed that __del__() methods are called for objects # that still exist", "or a reference to the object on the # stack frame that raised", "to zero include: circular references # between objects (e.g., a doubly-linked list or", "decrements the reference count for x by one, # and the latter is", "frame alive); or a reference to the object on the # stack frame", "reference is deleted. It is not guaranteed that __del__() methods are called for", "are called for objects # that still exist when the interpreter exits. #", "that may prevent the reference count of an object from going to zero", "the # stack frame that raised an unhandled exception in interactive mode (the", "are garbage are detected when # the option cycle detector is enabled (it’s", "2 # Reference: # object.__del__(self) # Called when the instance is about to", "__del__() methods involved. 
Refer to the documentation for the gc module # for", "keeps the stack frame alive); or a reference to the object on the", "3 print(c1.dangerous) print(c2.dangerous) del c1.dangerous print(c1.dangerous) print(c2.dangerous) # Solution: # 2 # 3", "= C() print(c1.dangerous) c1.dangerous = 3 print(c1.dangerous) print(c2.dangerous) del c1.dangerous print(c1.dangerous) print(c2.dangerous) #", "with parent and child pointers); # a reference to the object on the", "cycles; the latter two situations can be resolved by storing None in #", "also called a destructor. # If a base class has a __del__() method,", "2 # 2 # 2 # Reference: # object.__del__(self) # Called when the", "to be destroyed. This is also called a destructor. # If a base", "first situation can only be remedied by # explicitly breaking the cycles; the", "the documentation for the gc module # for more information about how __del__()", "del x doesn’t directly call x.__del__() — the former decrements the reference count", "class has a __del__() method, the derived class’s __del__() method, # if any,", "the __del__() method to postpone destruction # of the instance by creating a", "instance by creating a new reference to it. It may then be called", "objects # that still exist when the interpreter exits. # Note del x", "an exception (the traceback # stored in sys.exc_traceback keeps the stack frame alive);", "are handled by the cycle detector, # particularly the description of the garbage", "(it’s on by default), but can only be cleaned up if there #", "one, # and the latter is only called when x’s reference count reaches", "If a base class has a __del__() method, the derived class’s __del__() method,", "it is possible (though not recommended!) for the __del__() method to postpone destruction", "may prevent the reference count of an object from going to zero include:", "when # the option cycle detector is enabled (it’s on by default), but", "c1.dangerous print(c1.dangerous) print(c2.dangerous) # Solution: # 2 # 3 # 2 # 2", "# of the instance by creating a new reference to it. It may", "count reaches zero. Some common situations # that may prevent the reference count", "objects (e.g., a doubly-linked list or a tree data structure with parent and", "gc module # for more information about how __del__() methods are handled by", "# this new reference is deleted. It is not guaranteed that __del__() methods", "may then be called at a later time when # this new reference", "from going to zero include: circular references # between objects (e.g., a doubly-linked", "# between objects (e.g., a doubly-linked list or a tree data structure with", "on by default), but can only be cleaned up if there # are", "# 2 # Reference: # object.__del__(self) # Called when the instance is about", "= 3 print(c1.dangerous) print(c2.dangerous) del c1.dangerous print(c1.dangerous) print(c2.dangerous) # Solution: # 2 #", "prevent the reference count of an object from going to zero include: circular", "# 3 # 2 # 2 # 2 # Reference: # object.__del__(self) #", "more information about how __del__() methods are handled by the cycle detector, #", "of the base class part of the instance. 
# Note that it is", "only be cleaned up if there # are no Python-level __del__() methods involved.", "base class has a __del__() method, the derived class’s __del__() method, # if", "Circular references which are garbage are detected when # the option cycle detector", "of an object from going to zero include: circular references # between objects", "a __del__() method, the derived class’s __del__() method, # if any, must explicitly", "a destructor. # If a base class has a __del__() method, the derived", "# the option cycle detector is enabled (it’s on by default), but can", "alive). The first situation can only be remedied by # explicitly breaking the", "module # for more information about how __del__() methods are handled by the", "can only be cleaned up if there # are no Python-level __del__() methods", "former decrements the reference count for x by one, # and the latter", "__del__() methods are called for objects # that still exist when the interpreter", "about to be destroyed. This is also called a destructor. # If a", "print(c1.dangerous) c1.dangerous = 3 print(c1.dangerous) print(c2.dangerous) del c1.dangerous print(c1.dangerous) print(c2.dangerous) # Solution: #", "traceback # stored in sys.exc_traceback keeps the stack frame alive); or a reference", "# Note that it is possible (though not recommended!) for the __del__() method", "common situations # that may prevent the reference count of an object from", "to postpone destruction # of the instance by creating a new reference to", "c1 = C() c2 = C() print(c1.dangerous) c1.dangerous = 3 print(c1.dangerous) print(c2.dangerous) del", "doesn’t directly call x.__del__() — the former decrements the reference count for x", "stored in sys.exc_traceback keeps the stack frame alive); or a reference to the", "an unhandled exception in interactive mode (the traceback stored in # sys.last_traceback keeps", "has a __del__() method, the derived class’s __del__() method, # if any, must", "object on the stack frame of a function that caught an exception (the", "destruction # of the instance by creating a new reference to it. It", "call it to ensure proper deletion of the base class part of the", "sys.exc_traceback or sys.last_traceback. Circular references which are garbage are detected when # the", "by default), but can only be cleaned up if there # are no", "(though not recommended!) for the __del__() method to postpone destruction # of the", "a doubly-linked list or a tree data structure with parent and child pointers);", "a new reference to it. It may then be called at a later", "a function that caught an exception (the traceback # stored in sys.exc_traceback keeps", "reference count reaches zero. Some common situations # that may prevent the reference", "storing None in # sys.exc_traceback or sys.last_traceback. Circular references which are garbage are", "# explicitly breaking the cycles; the latter two situations can be resolved by", "# stack frame that raised an unhandled exception in interactive mode (the traceback", "Refer to the documentation for the gc module # for more information about", "# that may prevent the reference count of an object from going to", "__del__() methods are handled by the cycle detector, # particularly the description of", "2 c1 = C() c2 = C() print(c1.dangerous) c1.dangerous = 3 print(c1.dangerous) print(c2.dangerous)", "when the instance is about to be destroyed. 
This is also called a", "c1.dangerous = 3 print(c1.dangerous) print(c2.dangerous) del c1.dangerous print(c1.dangerous) print(c2.dangerous) # Solution: # 2", "directly call x.__del__() — the former decrements the reference count for x by", "c2 = C() print(c1.dangerous) c1.dangerous = 3 print(c1.dangerous) print(c2.dangerous) del c1.dangerous print(c1.dangerous) print(c2.dangerous)", "the gc module # for more information about how __del__() methods are handled", "stack frame of a function that caught an exception (the traceback # stored", "the base class part of the instance. # Note that it is possible", "deleted. It is not guaranteed that __del__() methods are called for objects #", "print(c2.dangerous) del c1.dangerous print(c1.dangerous) print(c2.dangerous) # Solution: # 2 # 3 # 2", "documentation for the gc module # for more information about how __del__() methods", "keeps the stack frame alive). The first situation can only be remedied by", "zero. Some common situations # that may prevent the reference count of an", "between objects (e.g., a doubly-linked list or a tree data structure with parent", "sys.last_traceback keeps the stack frame alive). The first situation can only be remedied", "# sys.last_traceback keeps the stack frame alive). The first situation can only be", "when # this new reference is deleted. It is not guaranteed that __del__()", "method, # if any, must explicitly call it to ensure proper deletion of", "are no Python-level __del__() methods involved. Refer to the documentation for the gc", "be destroyed. This is also called a destructor. # If a base class", "the object on the # stack frame that raised an unhandled exception in", "(the traceback # stored in sys.exc_traceback keeps the stack frame alive); or a", "destructor. # If a base class has a __del__() method, the derived class’s", "then be called at a later time when # this new reference is", "# sys.exc_traceback or sys.last_traceback. Circular references which are garbage are detected when #", "reference to it. It may then be called at a later time when" ]
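The fragments above combine a small quiz about del on attributes with an excerpt of the reference text for object.__del__(). Reassembled, the quiz code and its stated output (2, 3, 2, 2, 2) are shown below; the inline comments are an added gloss: del c1.dangerous removes only the instance attribute, after which lookup falls back to the class attribute, and it never invokes __del__, which is the destructor the quoted reference text describes.

class C:
    dangerous = 2


c1 = C()
c2 = C()
print(c1.dangerous)   # 2 -> no instance attribute yet, reads the class attribute
c1.dangerous = 3      # creates an instance attribute that shadows the class one
print(c1.dangerous)   # 3
print(c2.dangerous)   # 2 -> c2 still sees the shared class attribute
del c1.dangerous      # deletes only c1's instance attribute, not the object
print(c1.dangerous)   # 2 -> lookup falls back to the class attribute again
print(c2.dangerous)   # 2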
[ "dateLinkDict: downloadPDF(dateLinkDict[stdDate]) return True else: return False def isNewBulletin(date, updateJSONfile=True, verbose=True): \"\"\" Returns", "attrs={'class': 'entry-title'}) # Clean html tags to extract date and corresponding link to", "be consistently formatted on the website. # Eg. dd-mm-yyy and dd/mm/yyyy are both", "dateLinkDict = __getDateLinkDict(verbose) if stdDate in dateLinkDict: downloadPDF(dateLinkDict[stdDate]) return True else: return False", "corresponding link to pdfs bulletins dateLinkDict = dict() for tag in tags: #", "date.today().strftime('%d.%m.%Y') isNew = isNewBulletin(today) if isNew: print('NEW BULLETIN AVAILABLE') downloadPDF(isNew) print('Downloaded to '", "__getPDFlink(bulletinPageLink): \"\"\" Return links to pdf bulletin uploads in page. This link can", "bulletin is not available. \"\"\" stdDate = cleanDate(date) dateLinkDict = __getDateLinkDict(verbose) if stdDate", "in directory dateLinkDictOld = readJSON(jsonDefaultFile) # If date does not exist in local", "tag.a.get('href') dateLinkDict[date] = __getPDFlink(bulletinPageLink) if verbose: print(date) return dateLinkDict def downloadPDF(PDFlink): \"\"\" Downloads", "dict data type containing all dates and their corresponding links to bulletin pages.", "If date does not exist in local JSON file if not stdDate in", "= 'http://dhs.kerala.gov.in' jsonDefaultFile = 'bulletinLinks.json' bulletinDefaultFile = 'bulletin.pdf' def __getPDFlink(bulletinPageLink): \"\"\" Return links", "Clean html tags to extract date and corresponding link to pdfs bulletins dateLinkDict", "\"\"\" # Parse bulletin page to get pdf link req = urllib3.PoolManager() bulletinPage", "= req.request('GET', bulletinPageLink) soup = BeautifulSoup(bulletinPage.data, 'html.parser') try: divTag = soup.find('div', attrs={'class': 'entry-content'})", "in a standard format \"\"\" # Sanity checks if not isinstance(date,str): raise TypeError", "if isNew: print('NEW BULLETIN AVAILABLE') downloadPDF(isNew) print('Downloaded to ' + bulletinDefaultFile) else: print('NO", "req.request('GET', DHSLink) soup = BeautifulSoup(DHSPage.data, 'html.parser') tags = soup.findAll('h3', attrs={'class': 'entry-title'}) # Clean", "are available. If running for first time, the JSON file is created and", "If local JSON file does not exist if updateJSONfile: writeJSON(dateLinkDictNew) return True if", "dateLinkDict def downloadPDF(PDFlink): \"\"\" Downloads pdf bulletin from the provided link \"\"\" try:", "link to pdfs bulletins dateLinkDict = dict() for tag in tags: # The", "DHSLink = linkPre + '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/' req = urllib3.PoolManager() DHSPage = req.request('GET', DHSLink) soup", "Downloads latest bulletin for the given date and returns True. Returns False if", "and downloads the latest one. \"\"\" from datetime import date today = date.today().strftime('%d.%m.%Y')", "# Get link to pdf bulletin for tag in pTags: if 'English' in", "not available. \"\"\" stdDate = cleanDate(date) dateLinkDict = __getDateLinkDict(verbose) if stdDate in dateLinkDict:", "bulletin is available on provided date. Returns False if no new bulletins are", "False finally: bulletinFile.close() def writeJSON(dateLinkDict, filename=jsonDefaultFile): \"\"\" Writes dateLinkDict as a json file.", "bulletins are available. If running for first time, the JSON file is created", "Reads all dateLinkDict from a json file. 
This JSON file can be used", "check for updates. \"\"\" jsonFile = open(filename, 'r') dateLinkDict = json.load(jsonFile) jsonFile.close() return", "# If date does not exist in local JSON file if not stdDate", "AttributeError: print('Error: Broken Connection. Rerun') raise ConnectionError # Get link to pdf bulletin", "for updated bulletins. \"\"\" # Parse bulletin page to get pdf link req", "'.' + date[6:10] def __getDateLinkDict(verbose=True): \"\"\" Returns a dict data type containing all", "(dateLinkDictNew[stdDate] == dateLinkDictOld[stdDate]): return False else: # If both bulletins are different if", "formatted on the website. # Eg. dd-mm-yyy and dd/mm/yyyy are both found date", "updates. \"\"\" jsonFile = open(filename, 'r') dateLinkDict = json.load(jsonFile) jsonFile.close() return dateLinkDict def", "divTag = soup.find('div', attrs={'class': 'entry-content'}) pTags = divTag.findAll('p') except AttributeError: print('Error: Broken Connection.", "= soup.findAll('h3', attrs={'class': 'entry-title'}) # Clean html tags to extract date and corresponding", "of dates:') DHSLink = linkPre + '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/' req = urllib3.PoolManager() DHSPage = req.request('GET',", "page. This link can be checked for updated bulletins. \"\"\" # Parse bulletin", "# Clean html tags to extract date and corresponding link to pdfs bulletins", "the website. # Eg. dd-mm-yyy and dd/mm/yyyy are both found date = cleanDate(tag.a.text)", "print('NEW BULLETIN AVAILABLE') downloadPDF(isNew) print('Downloaded to ' + bulletinDefaultFile) else: print('NO NEW BULLETINS", "return date[0:2] + '.' + date[3:5] + '.' + date[6:10] def __getDateLinkDict(verbose=True): \"\"\"", "file. This JSON file can be used to check for updates. \"\"\" jsonFile", "link \"\"\" try: req = urllib3.PoolManager() response = req.request('GET', PDFlink) bulletinFile = open(bulletinDefaultFile,", "page to get pdf link req = urllib3.PoolManager() bulletinPage = req.request('GET', bulletinPageLink) soup", "consistently formatted on the website. # Eg. dd-mm-yyy and dd/mm/yyyy are both found", "10: raise ValueError return date[0:2] + '.' + date[3:5] + '.' + date[6:10]", "date[0:2] + '.' + date[3:5] + '.' + date[6:10] def __getDateLinkDict(verbose=True): \"\"\" Returns", "JSON file is created and returns True. 
\"\"\" stdDate = cleanDate(date) dateLinkDictNew =", "dates:') DHSLink = linkPre + '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/' req = urllib3.PoolManager() DHSPage = req.request('GET', DHSLink)", "PDF file not found') return False finally: bulletinFile.close() def writeJSON(dateLinkDict, filename=jsonDefaultFile): \"\"\" Writes", "= __getDateLinkDict(verbose) if stdDate in dateLinkDict: downloadPDF(dateLinkDict[stdDate]) return True else: return False def", "This can be used to write to the JSON file in a standard", "does not exist on server if not stdDate in dateLinkDictNew: return False try:", "BeautifulSoup(DHSPage.data, 'html.parser') tags = soup.findAll('h3', attrs={'class': 'entry-title'}) # Clean html tags to extract", "does not exist in local JSON file if not stdDate in dateLinkDictOld: if", "file not found') return False finally: bulletinFile.close() def writeJSON(dateLinkDict, filename=jsonDefaultFile): \"\"\" Writes dateLinkDict", "Returns a dict data type containing all dates and their corresponding links to", "checks if not isinstance(date,str): raise TypeError if not len(date) == 10: raise ValueError", "PDFlink) bulletinFile = open(bulletinDefaultFile, 'wb') bulletinFile.write(response.data) except HTTPError: print('Error: PDF file not found')", "not len(date) == 10: raise ValueError return date[0:2] + '.' + date[3:5] +", "json import sys linkPre = 'http://dhs.kerala.gov.in' jsonDefaultFile = 'bulletinLinks.json' bulletinDefaultFile = 'bulletin.pdf' def", "not exist in local JSON file if not stdDate in dateLinkDictOld: if updateJSONfile:", "in tags: # The returned dates may not be consistently formatted on the", "jsonFile = open(filename, 'w') json.dump(dateLinkDict, jsonFile) jsonFile.close() def readJSON(filename=jsonDefaultFile): \"\"\" Reads all dateLinkDict", "\"\"\" If the module is invoked as a python program, it checks for", "cleanDate(date) dateLinkDict = __getDateLinkDict(verbose) if stdDate in dateLinkDict: downloadPDF(dateLinkDict[stdDate]) return True else: return", "soup = BeautifulSoup(DHSPage.data, 'html.parser') tags = soup.findAll('h3', attrs={'class': 'entry-title'}) # Clean html tags", "bulletin pages. \"\"\" # Ensure python version 3+ if sys.version_info.major < 3: print('ERROR:", "soup.find('div', attrs={'class': 'entry-content'}) pTags = divTag.findAll('p') except AttributeError: print('Error: Broken Connection. Rerun') raise", "'entry-title'}) # Clean html tags to extract date and corresponding link to pdfs", "dd/mm/yyyy are both found date = cleanDate(tag.a.text) bulletinPageLink = linkPre + tag.a.get('href') dateLinkDict[date]", "# If both bulletins are different if updateJSONfile: writeJSON(dateLinkDictNew) return True except FileNotFoundError:", "no new bulletins are available. If running for first time, the JSON file", "the format dd.mm.yyyy This can be used to write to the JSON file", "not stdDate in dateLinkDictNew: return False try: # If local JSON file exists", "'http://dhs.kerala.gov.in' jsonDefaultFile = 'bulletinLinks.json' bulletinDefaultFile = 'bulletin.pdf' def __getPDFlink(bulletinPageLink): \"\"\" Return links to", "as a json file. This JSON file can be used to check for", "html tags to extract date and corresponding link to pdfs bulletins dateLinkDict =", "version 3+ if sys.version_info.major < 3: print('ERROR: Use python version 3+') raise SyntaxError", "and returns True. 
\"\"\" stdDate = cleanDate(date) dateLinkDictNew = __getDateLinkDict(verbose) # If date", "updateJSONfile: writeJSON(dateLinkDictNew) return True if __name__ == \"__main__\": \"\"\" If the module is", "pdf bulletin for tag in pTags: if 'English' in tag.text: return linkPre +", "from a json file. This JSON file can be used to check for", "return False def isNewBulletin(date, updateJSONfile=True, verbose=True): \"\"\" Returns bulletin link if an updated", "tags if verbose: print('Parsing Kerala DHS webpage ...') print('Obtaining links of dates:') DHSLink", "3+ if sys.version_info.major < 3: print('ERROR: Use python version 3+') raise SyntaxError #", "cleanDate(date): \"\"\" Returns the date in the format dd.mm.yyyy This can be used", "jsonFile.close() return dateLinkDict def getBulletin(date, verbose=True): \"\"\" Downloads latest bulletin for the given", "sys linkPre = 'http://dhs.kerala.gov.in' jsonDefaultFile = 'bulletinLinks.json' bulletinDefaultFile = 'bulletin.pdf' def __getPDFlink(bulletinPageLink): \"\"\"", "their corresponding links to bulletin pages. \"\"\" # Ensure python version 3+ if", "website. # Eg. dd-mm-yyy and dd/mm/yyyy are both found date = cleanDate(tag.a.text) bulletinPageLink", "jsonFile = open(filename, 'r') dateLinkDict = json.load(jsonFile) jsonFile.close() return dateLinkDict def getBulletin(date, verbose=True):", "available on provided date. Returns False if no new bulletins are available. If", "the JSON file in a standard format \"\"\" # Sanity checks if not", "= dict() for tag in tags: # The returned dates may not be", "< 3: print('ERROR: Use python version 3+') raise SyntaxError # Parse DHS Kerala", "else: # If both bulletins are different if updateJSONfile: writeJSON(dateLinkDictNew) return True except", "updated bulletins. \"\"\" # Parse bulletin page to get pdf link req =", "local JSON file exists in directory dateLinkDictOld = readJSON(jsonDefaultFile) # If date does", "dict() for tag in tags: # The returned dates may not be consistently", "to bulletin pages. \"\"\" # Ensure python version 3+ if sys.version_info.major < 3:", "json.dump(dateLinkDict, jsonFile) jsonFile.close() def readJSON(filename=jsonDefaultFile): \"\"\" Reads all dateLinkDict from a json file.", "python program, it checks for new bulletins and downloads the latest one. \"\"\"", "= 'bulletin.pdf' def __getPDFlink(bulletinPageLink): \"\"\" Return links to pdf bulletin uploads in page.", "__getPDFlink(bulletinPageLink) if verbose: print(date) return dateLinkDict def downloadPDF(PDFlink): \"\"\" Downloads pdf bulletin from", "if not stdDate in dateLinkDictOld: if updateJSONfile: writeJSON(dateLinkDictNew) return True # If both", "for new bulletins and downloads the latest one. \"\"\" from datetime import date", "\"\"\" # Ensure python version 3+ if sys.version_info.major < 3: print('ERROR: Use python", "\"\"\" Reads all dateLinkDict from a json file. This JSON file can be", "link req = urllib3.PoolManager() bulletinPage = req.request('GET', bulletinPageLink) soup = BeautifulSoup(bulletinPage.data, 'html.parser') try:", "cleanDate(date) dateLinkDictNew = __getDateLinkDict(verbose) # If date does not exist on server if", "+ '.' + date[6:10] def __getDateLinkDict(verbose=True): \"\"\" Returns a dict data type containing", "Downloads pdf bulletin from the provided link \"\"\" try: req = urllib3.PoolManager() response", "pdf bulletin from the provided link \"\"\" try: req = urllib3.PoolManager() response =", "not be consistently formatted on the website. # Eg. 
dd-mm-yyy and dd/mm/yyyy are", "= open(filename, 'r') dateLinkDict = json.load(jsonFile) jsonFile.close() return dateLinkDict def getBulletin(date, verbose=True): \"\"\"", "Parse DHS Kerala webpage to get html tags if verbose: print('Parsing Kerala DHS", "downloads the latest one. \"\"\" from datetime import date today = date.today().strftime('%d.%m.%Y') isNew", "in dateLinkDictOld: if updateJSONfile: writeJSON(dateLinkDictNew) return True # If both bulletins are same", "\"\"\" jsonFile = open(filename, 'w') json.dump(dateLinkDict, jsonFile) jsonFile.close() def readJSON(filename=jsonDefaultFile): \"\"\" Reads all", "== \"__main__\": \"\"\" If the module is invoked as a python program, it", "server if not stdDate in dateLinkDictNew: return False try: # If local JSON", "BeautifulSoup import json import sys linkPre = 'http://dhs.kerala.gov.in' jsonDefaultFile = 'bulletinLinks.json' bulletinDefaultFile =", "if verbose: print(date) return dateLinkDict def downloadPDF(PDFlink): \"\"\" Downloads pdf bulletin from the", "if (dateLinkDictNew[stdDate] == dateLinkDictOld[stdDate]): return False else: # If both bulletins are different", "return False try: # If local JSON file exists in directory dateLinkDictOld =", "tag.a.get('href') else: return None def cleanDate(date): \"\"\" Returns the date in the format", "for updates. \"\"\" jsonFile = open(filename, 'w') json.dump(dateLinkDict, jsonFile) jsonFile.close() def readJSON(filename=jsonDefaultFile): \"\"\"", "\"\"\" # Sanity checks if not isinstance(date,str): raise TypeError if not len(date) ==", "attrs={'class': 'entry-content'}) pTags = divTag.findAll('p') except AttributeError: print('Error: Broken Connection. Rerun') raise ConnectionError", "except HTTPError: print('Error: PDF file not found') return False finally: bulletinFile.close() def writeJSON(dateLinkDict,", "date. Returns False if no new bulletins are available. If running for first", "Connection. Rerun') raise ConnectionError # Get link to pdf bulletin for tag in", "\"\"\" Writes dateLinkDict as a json file. This JSON file can be used", "None def cleanDate(date): \"\"\" Returns the date in the format dd.mm.yyyy This can", "except FileNotFoundError: # If local JSON file does not exist if updateJSONfile: writeJSON(dateLinkDictNew)", "try: req = urllib3.PoolManager() response = req.request('GET', PDFlink) bulletinFile = open(bulletinDefaultFile, 'wb') bulletinFile.write(response.data)", "+ date[6:10] def __getDateLinkDict(verbose=True): \"\"\" Returns a dict data type containing all dates", "if bulletin is not available. \"\"\" stdDate = cleanDate(date) dateLinkDict = __getDateLinkDict(verbose) if", "on server if not stdDate in dateLinkDictNew: return False try: # If local", "checked for updated bulletins. \"\"\" # Parse bulletin page to get pdf link", "and dd/mm/yyyy are both found date = cleanDate(tag.a.text) bulletinPageLink = linkPre + tag.a.get('href')", "verbose=True): \"\"\" Downloads latest bulletin for the given date and returns True. 
Returns", "Ensure python version 3+ if sys.version_info.major < 3: print('ERROR: Use python version 3+')", "return dateLinkDict def getBulletin(date, verbose=True): \"\"\" Downloads latest bulletin for the given date", "DHSLink) soup = BeautifulSoup(DHSPage.data, 'html.parser') tags = soup.findAll('h3', attrs={'class': 'entry-title'}) # Clean html", "if sys.version_info.major < 3: print('ERROR: Use python version 3+') raise SyntaxError # Parse", "\"\"\" stdDate = cleanDate(date) dateLinkDictNew = __getDateLinkDict(verbose) # If date does not exist", "local JSON file does not exist if updateJSONfile: writeJSON(dateLinkDictNew) return True if __name__", "provided by DHS Kerala \"\"\" import urllib3 from bs4 import BeautifulSoup import json", "raise TypeError if not len(date) == 10: raise ValueError return date[0:2] + '.'", "# The returned dates may not be consistently formatted on the website. #", "return True if __name__ == \"__main__\": \"\"\" If the module is invoked as", "be used to write to the JSON file in a standard format \"\"\"", "Writes dateLinkDict as a json file. This JSON file can be used to", "If both bulletins are same if (dateLinkDictNew[stdDate] == dateLinkDictOld[stdDate]): return False else: #", "= open(filename, 'w') json.dump(dateLinkDict, jsonFile) jsonFile.close() def readJSON(filename=jsonDefaultFile): \"\"\" Reads all dateLinkDict from", "updates. \"\"\" jsonFile = open(filename, 'w') json.dump(dateLinkDict, jsonFile) jsonFile.close() def readJSON(filename=jsonDefaultFile): \"\"\" Reads", "dateLinkDict = dict() for tag in tags: # The returned dates may not", "directory dateLinkDictOld = readJSON(jsonDefaultFile) # If date does not exist in local JSON", "the latest one. \"\"\" from datetime import date today = date.today().strftime('%d.%m.%Y') isNew =", "format dd.mm.yyyy This can be used to write to the JSON file in", "print('Parsing Kerala DHS webpage ...') print('Obtaining links of dates:') DHSLink = linkPre +", "def isNewBulletin(date, updateJSONfile=True, verbose=True): \"\"\" Returns bulletin link if an updated bulletin is", "if stdDate in dateLinkDict: downloadPDF(dateLinkDict[stdDate]) return True else: return False def isNewBulletin(date, updateJSONfile=True,", "all dateLinkDict from a json file. This JSON file can be used to", "finally: bulletinFile.close() def writeJSON(dateLinkDict, filename=jsonDefaultFile): \"\"\" Writes dateLinkDict as a json file. This", "for first time, the JSON file is created and returns True. \"\"\" stdDate", "tag in pTags: if 'English' in tag.text: return linkPre + tag.a.get('href') else: return", "on the website. # Eg. dd-mm-yyy and dd/mm/yyyy are both found date =", "# Sanity checks if not isinstance(date,str): raise TypeError if not len(date) == 10:", "new bulletins and downloads the latest one. \"\"\" from datetime import date today", "bulletinFile.close() def writeJSON(dateLinkDict, filename=jsonDefaultFile): \"\"\" Writes dateLinkDict as a json file. This JSON", "raise SyntaxError # Parse DHS Kerala webpage to get html tags if verbose:", "False def isNewBulletin(date, updateJSONfile=True, verbose=True): \"\"\" Returns bulletin link if an updated bulletin", "bulletin uploads in page. This link can be checked for updated bulletins. 
\"\"\"", "file if not stdDate in dateLinkDictOld: if updateJSONfile: writeJSON(dateLinkDictNew) return True # If", "linkPre = 'http://dhs.kerala.gov.in' jsonDefaultFile = 'bulletinLinks.json' bulletinDefaultFile = 'bulletin.pdf' def __getPDFlink(bulletinPageLink): \"\"\" Return", "divTag.findAll('p') except AttributeError: print('Error: Broken Connection. Rerun') raise ConnectionError # Get link to", "to the JSON file in a standard format \"\"\" # Sanity checks if", "is available on provided date. Returns False if no new bulletins are available.", "time, the JSON file is created and returns True. \"\"\" stdDate = cleanDate(date)", "stdDate in dateLinkDict: downloadPDF(dateLinkDict[stdDate]) return True else: return False def isNewBulletin(date, updateJSONfile=True, verbose=True):", "+ '.' + date[3:5] + '.' + date[6:10] def __getDateLinkDict(verbose=True): \"\"\" Returns a", "ConnectionError # Get link to pdf bulletin for tag in pTags: if 'English'", "to pdfs bulletins dateLinkDict = dict() for tag in tags: # The returned", "verbose: print('Parsing Kerala DHS webpage ...') print('Obtaining links of dates:') DHSLink = linkPre", "req = urllib3.PoolManager() DHSPage = req.request('GET', DHSLink) soup = BeautifulSoup(DHSPage.data, 'html.parser') tags =", "\"\"\" jsonFile = open(filename, 'r') dateLinkDict = json.load(jsonFile) jsonFile.close() return dateLinkDict def getBulletin(date,", "dd.mm.yyyy This can be used to write to the JSON file in a", "'wb') bulletinFile.write(response.data) except HTTPError: print('Error: PDF file not found') return False finally: bulletinFile.close()", "one. \"\"\" from datetime import date today = date.today().strftime('%d.%m.%Y') isNew = isNewBulletin(today) if", "Returns False if no new bulletins are available. If running for first time,", "is invoked as a python program, it checks for new bulletins and downloads", "= cleanDate(tag.a.text) bulletinPageLink = linkPre + tag.a.get('href') dateLinkDict[date] = __getPDFlink(bulletinPageLink) if verbose: print(date)", "module is invoked as a python program, it checks for new bulletins and", "a python program, it checks for new bulletins and downloads the latest one.", "\"\"\" A module to parse the COVID bulletins provided by DHS Kerala \"\"\"", "ValueError return date[0:2] + '.' + date[3:5] + '.' + date[6:10] def __getDateLinkDict(verbose=True):", "= req.request('GET', PDFlink) bulletinFile = open(bulletinDefaultFile, 'wb') bulletinFile.write(response.data) except HTTPError: print('Error: PDF file", "bulletin for tag in pTags: if 'English' in tag.text: return linkPre + tag.a.get('href')", "bulletins provided by DHS Kerala \"\"\" import urllib3 from bs4 import BeautifulSoup import", "by DHS Kerala \"\"\" import urllib3 from bs4 import BeautifulSoup import json import", "== dateLinkDictOld[stdDate]): return False else: # If both bulletins are different if updateJSONfile:", "program, it checks for new bulletins and downloads the latest one. \"\"\" from", "in pTags: if 'English' in tag.text: return linkPre + tag.a.get('href') else: return None", "module to parse the COVID bulletins provided by DHS Kerala \"\"\" import urllib3", "'.' + date[3:5] + '.' + date[6:10] def __getDateLinkDict(verbose=True): \"\"\" Returns a dict", "link if an updated bulletin is available on provided date. 
Returns False if", "bulletin from the provided link \"\"\" try: req = urllib3.PoolManager() response = req.request('GET',", "bulletinFile.write(response.data) except HTTPError: print('Error: PDF file not found') return False finally: bulletinFile.close() def", "to pdf bulletin for tag in pTags: if 'English' in tag.text: return linkPre", "bulletinPage = req.request('GET', bulletinPageLink) soup = BeautifulSoup(bulletinPage.data, 'html.parser') try: divTag = soup.find('div', attrs={'class':", "writeJSON(dateLinkDictNew) return True except FileNotFoundError: # If local JSON file does not exist", "req.request('GET', PDFlink) bulletinFile = open(bulletinDefaultFile, 'wb') bulletinFile.write(response.data) except HTTPError: print('Error: PDF file not", "bs4 import BeautifulSoup import json import sys linkPre = 'http://dhs.kerala.gov.in' jsonDefaultFile = 'bulletinLinks.json'", "bulletins dateLinkDict = dict() for tag in tags: # The returned dates may", "different if updateJSONfile: writeJSON(dateLinkDictNew) return True except FileNotFoundError: # If local JSON file", "urllib3 from bs4 import BeautifulSoup import json import sys linkPre = 'http://dhs.kerala.gov.in' jsonDefaultFile", "urllib3.PoolManager() response = req.request('GET', PDFlink) bulletinFile = open(bulletinDefaultFile, 'wb') bulletinFile.write(response.data) except HTTPError: print('Error:", "= BeautifulSoup(DHSPage.data, 'html.parser') tags = soup.findAll('h3', attrs={'class': 'entry-title'}) # Clean html tags to", "This JSON file can be used to check for updates. \"\"\" jsonFile =", "may not be consistently formatted on the website. # Eg. dd-mm-yyy and dd/mm/yyyy", "soup = BeautifulSoup(bulletinPage.data, 'html.parser') try: divTag = soup.find('div', attrs={'class': 'entry-content'}) pTags = divTag.findAll('p')", "def writeJSON(dateLinkDict, filename=jsonDefaultFile): \"\"\" Writes dateLinkDict as a json file. This JSON file", "to parse the COVID bulletins provided by DHS Kerala \"\"\" import urllib3 from", "verbose=True): \"\"\" Returns bulletin link if an updated bulletin is available on provided", "python version 3+ if sys.version_info.major < 3: print('ERROR: Use python version 3+') raise", "datetime import date today = date.today().strftime('%d.%m.%Y') isNew = isNewBulletin(today) if isNew: print('NEW BULLETIN", "for tag in pTags: if 'English' in tag.text: return linkPre + tag.a.get('href') else:", "Kerala \"\"\" import urllib3 from bs4 import BeautifulSoup import json import sys linkPre", "= urllib3.PoolManager() response = req.request('GET', PDFlink) bulletinFile = open(bulletinDefaultFile, 'wb') bulletinFile.write(response.data) except HTTPError:", "get html tags if verbose: print('Parsing Kerala DHS webpage ...') print('Obtaining links of", "If both bulletins are different if updateJSONfile: writeJSON(dateLinkDictNew) return True except FileNotFoundError: #", "\"\"\" Downloads latest bulletin for the given date and returns True. Returns False", "uploads in page. This link can be checked for updated bulletins. \"\"\" #", "verbose: print(date) return dateLinkDict def downloadPDF(PDFlink): \"\"\" Downloads pdf bulletin from the provided", "response = req.request('GET', PDFlink) bulletinFile = open(bulletinDefaultFile, 'wb') bulletinFile.write(response.data) except HTTPError: print('Error: PDF", "# If date does not exist on server if not stdDate in dateLinkDictNew:", "to get html tags if verbose: print('Parsing Kerala DHS webpage ...') print('Obtaining links", "to pdf bulletin uploads in page. 
This link can be checked for updated", "else: return None def cleanDate(date): \"\"\" Returns the date in the format dd.mm.yyyy", "If running for first time, the JSON file is created and returns True.", "urllib3.PoolManager() bulletinPage = req.request('GET', bulletinPageLink) soup = BeautifulSoup(bulletinPage.data, 'html.parser') try: divTag = soup.find('div',", "= cleanDate(date) dateLinkDictNew = __getDateLinkDict(verbose) # If date does not exist on server", "# If local JSON file exists in directory dateLinkDictOld = readJSON(jsonDefaultFile) # If", "Eg. dd-mm-yyy and dd/mm/yyyy are both found date = cleanDate(tag.a.text) bulletinPageLink = linkPre", "raise ValueError return date[0:2] + '.' + date[3:5] + '.' + date[6:10] def", "= __getDateLinkDict(verbose) # If date does not exist on server if not stdDate", "format \"\"\" # Sanity checks if not isinstance(date,str): raise TypeError if not len(date)", "standard format \"\"\" # Sanity checks if not isinstance(date,str): raise TypeError if not", "invoked as a python program, it checks for new bulletins and downloads the", "not stdDate in dateLinkDictOld: if updateJSONfile: writeJSON(dateLinkDictNew) return True # If both bulletins", "if 'English' in tag.text: return linkPre + tag.a.get('href') else: return None def cleanDate(date):", "all dates and their corresponding links to bulletin pages. \"\"\" # Ensure python", "date in the format dd.mm.yyyy This can be used to write to the", "isNew = isNewBulletin(today) if isNew: print('NEW BULLETIN AVAILABLE') downloadPDF(isNew) print('Downloaded to ' +", "exist on server if not stdDate in dateLinkDictNew: return False try: # If", "bulletinPageLink = linkPre + tag.a.get('href') dateLinkDict[date] = __getPDFlink(bulletinPageLink) if verbose: print(date) return dateLinkDict", "containing all dates and their corresponding links to bulletin pages. \"\"\" # Ensure", "if verbose: print('Parsing Kerala DHS webpage ...') print('Obtaining links of dates:') DHSLink =", "pdf link req = urllib3.PoolManager() bulletinPage = req.request('GET', bulletinPageLink) soup = BeautifulSoup(bulletinPage.data, 'html.parser')", "new bulletins are available. If running for first time, the JSON file is", "req = urllib3.PoolManager() bulletinPage = req.request('GET', bulletinPageLink) soup = BeautifulSoup(bulletinPage.data, 'html.parser') try: divTag", "__getDateLinkDict(verbose) # If date does not exist on server if not stdDate in", "# If local JSON file does not exist if updateJSONfile: writeJSON(dateLinkDictNew) return True", "# If both bulletins are same if (dateLinkDictNew[stdDate] == dateLinkDictOld[stdDate]): return False else:", "except AttributeError: print('Error: Broken Connection. 
Rerun') raise ConnectionError # Get link to pdf", "= linkPre + '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/' req = urllib3.PoolManager() DHSPage = req.request('GET', DHSLink) soup =", "not exist if updateJSONfile: writeJSON(dateLinkDictNew) return True if __name__ == \"__main__\": \"\"\" If", "'English' in tag.text: return linkPre + tag.a.get('href') else: return None def cleanDate(date): \"\"\"", "True if __name__ == \"__main__\": \"\"\" If the module is invoked as a", "python version 3+') raise SyntaxError # Parse DHS Kerala webpage to get html", "Sanity checks if not isinstance(date,str): raise TypeError if not len(date) == 10: raise", "DHS Kerala \"\"\" import urllib3 from bs4 import BeautifulSoup import json import sys", "file in a standard format \"\"\" # Sanity checks if not isinstance(date,str): raise", "False if bulletin is not available. \"\"\" stdDate = cleanDate(date) dateLinkDict = __getDateLinkDict(verbose)", "html tags if verbose: print('Parsing Kerala DHS webpage ...') print('Obtaining links of dates:')", "Returns the date in the format dd.mm.yyyy This can be used to write", "are different if updateJSONfile: writeJSON(dateLinkDictNew) return True except FileNotFoundError: # If local JSON", "JSON file exists in directory dateLinkDictOld = readJSON(jsonDefaultFile) # If date does not", "# Eg. dd-mm-yyy and dd/mm/yyyy are both found date = cleanDate(tag.a.text) bulletinPageLink =", "local JSON file if not stdDate in dateLinkDictOld: if updateJSONfile: writeJSON(dateLinkDictNew) return True", "data type containing all dates and their corresponding links to bulletin pages. \"\"\"", "if updateJSONfile: writeJSON(dateLinkDictNew) return True if __name__ == \"__main__\": \"\"\" If the module", "be used to check for updates. \"\"\" jsonFile = open(filename, 'r') dateLinkDict =", "available. \"\"\" stdDate = cleanDate(date) dateLinkDict = __getDateLinkDict(verbose) if stdDate in dateLinkDict: downloadPDF(dateLinkDict[stdDate])", "dateLinkDictOld = readJSON(jsonDefaultFile) # If date does not exist in local JSON file", "JSON file does not exist if updateJSONfile: writeJSON(dateLinkDictNew) return True if __name__ ==", "req.request('GET', bulletinPageLink) soup = BeautifulSoup(bulletinPage.data, 'html.parser') try: divTag = soup.find('div', attrs={'class': 'entry-content'}) pTags", "__getDateLinkDict(verbose) if stdDate in dateLinkDict: downloadPDF(dateLinkDict[stdDate]) return True else: return False def isNewBulletin(date,", "return True except FileNotFoundError: # If local JSON file does not exist if", "req = urllib3.PoolManager() response = req.request('GET', PDFlink) bulletinFile = open(bulletinDefaultFile, 'wb') bulletinFile.write(response.data) except", "date[6:10] def __getDateLinkDict(verbose=True): \"\"\" Returns a dict data type containing all dates and", "\"\"\" Returns bulletin link if an updated bulletin is available on provided date.", "print(date) return dateLinkDict def downloadPDF(PDFlink): \"\"\" Downloads pdf bulletin from the provided link", "for tag in tags: # The returned dates may not be consistently formatted", "json.load(jsonFile) jsonFile.close() return dateLinkDict def getBulletin(date, verbose=True): \"\"\" Downloads latest bulletin for the", "for the given date and returns True. Returns False if bulletin is not", "json file. This JSON file can be used to check for updates. 
\"\"\"", "else: return False def isNewBulletin(date, updateJSONfile=True, verbose=True): \"\"\" Returns bulletin link if an", "if no new bulletins are available. If running for first time, the JSON", "DHS Kerala webpage to get html tags if verbose: print('Parsing Kerala DHS webpage", "return True # If both bulletins are same if (dateLinkDictNew[stdDate] == dateLinkDictOld[stdDate]): return", "Get link to pdf bulletin for tag in pTags: if 'English' in tag.text:", "bulletin for the given date and returns True. Returns False if bulletin is", "both found date = cleanDate(tag.a.text) bulletinPageLink = linkPre + tag.a.get('href') dateLinkDict[date] = __getPDFlink(bulletinPageLink)", "stdDate = cleanDate(date) dateLinkDictNew = __getDateLinkDict(verbose) # If date does not exist on", "HTTPError: print('Error: PDF file not found') return False finally: bulletinFile.close() def writeJSON(dateLinkDict, filename=jsonDefaultFile):", "can be used to write to the JSON file in a standard format", "def __getPDFlink(bulletinPageLink): \"\"\" Return links to pdf bulletin uploads in page. This link", "import json import sys linkPre = 'http://dhs.kerala.gov.in' jsonDefaultFile = 'bulletinLinks.json' bulletinDefaultFile = 'bulletin.pdf'", "raise ConnectionError # Get link to pdf bulletin for tag in pTags: if", "+ tag.a.get('href') dateLinkDict[date] = __getPDFlink(bulletinPageLink) if verbose: print(date) return dateLinkDict def downloadPDF(PDFlink): \"\"\"", "True. Returns False if bulletin is not available. \"\"\" stdDate = cleanDate(date) dateLinkDict", "try: # If local JSON file exists in directory dateLinkDictOld = readJSON(jsonDefaultFile) #", "and their corresponding links to bulletin pages. \"\"\" # Ensure python version 3+", "webpage ...') print('Obtaining links of dates:') DHSLink = linkPre + '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/' req =", "return linkPre + tag.a.get('href') else: return None def cleanDate(date): \"\"\" Returns the date", "Kerala webpage to get html tags if verbose: print('Parsing Kerala DHS webpage ...')", "return None def cleanDate(date): \"\"\" Returns the date in the format dd.mm.yyyy This", "False if no new bulletins are available. If running for first time, the", "for updates. \"\"\" jsonFile = open(filename, 'r') dateLinkDict = json.load(jsonFile) jsonFile.close() return dateLinkDict", "filename=jsonDefaultFile): \"\"\" Writes dateLinkDict as a json file. This JSON file can be", "DHSPage = req.request('GET', DHSLink) soup = BeautifulSoup(DHSPage.data, 'html.parser') tags = soup.findAll('h3', attrs={'class': 'entry-title'})", "used to write to the JSON file in a standard format \"\"\" #", "Kerala DHS webpage ...') print('Obtaining links of dates:') DHSLink = linkPre + '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/'", "= cleanDate(date) dateLinkDict = __getDateLinkDict(verbose) if stdDate in dateLinkDict: downloadPDF(dateLinkDict[stdDate]) return True else:", "A module to parse the COVID bulletins provided by DHS Kerala \"\"\" import", "def cleanDate(date): \"\"\" Returns the date in the format dd.mm.yyyy This can be", "dates and their corresponding links to bulletin pages. 
\"\"\" # Ensure python version", "date = cleanDate(tag.a.text) bulletinPageLink = linkPre + tag.a.get('href') dateLinkDict[date] = __getPDFlink(bulletinPageLink) if verbose:", "bulletin page to get pdf link req = urllib3.PoolManager() bulletinPage = req.request('GET', bulletinPageLink)", "in local JSON file if not stdDate in dateLinkDictOld: if updateJSONfile: writeJSON(dateLinkDictNew) return", "returns True. Returns False if bulletin is not available. \"\"\" stdDate = cleanDate(date)", "extract date and corresponding link to pdfs bulletins dateLinkDict = dict() for tag", "cleanDate(tag.a.text) bulletinPageLink = linkPre + tag.a.get('href') dateLinkDict[date] = __getPDFlink(bulletinPageLink) if verbose: print(date) return", "\"\"\" try: req = urllib3.PoolManager() response = req.request('GET', PDFlink) bulletinFile = open(bulletinDefaultFile, 'wb')", "dateLinkDict def getBulletin(date, verbose=True): \"\"\" Downloads latest bulletin for the given date and", "This link can be checked for updated bulletins. \"\"\" # Parse bulletin page", "TypeError if not len(date) == 10: raise ValueError return date[0:2] + '.' +", "today = date.today().strftime('%d.%m.%Y') isNew = isNewBulletin(today) if isNew: print('NEW BULLETIN AVAILABLE') downloadPDF(isNew) print('Downloaded", "dateLinkDictOld: if updateJSONfile: writeJSON(dateLinkDictNew) return True # If both bulletins are same if", "return dateLinkDict def downloadPDF(PDFlink): \"\"\" Downloads pdf bulletin from the provided link \"\"\"", "bulletins. \"\"\" # Parse bulletin page to get pdf link req = urllib3.PoolManager()", "file does not exist if updateJSONfile: writeJSON(dateLinkDictNew) return True if __name__ == \"__main__\":", "def __getDateLinkDict(verbose=True): \"\"\" Returns a dict data type containing all dates and their", "...') print('Obtaining links of dates:') DHSLink = linkPre + '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/' req = urllib3.PoolManager()", "not isinstance(date,str): raise TypeError if not len(date) == 10: raise ValueError return date[0:2]", "\"\"\" Downloads pdf bulletin from the provided link \"\"\" try: req = urllib3.PoolManager()", "True else: return False def isNewBulletin(date, updateJSONfile=True, verbose=True): \"\"\" Returns bulletin link if", "'bulletin.pdf' def __getPDFlink(bulletinPageLink): \"\"\" Return links to pdf bulletin uploads in page. This", "= BeautifulSoup(bulletinPage.data, 'html.parser') try: divTag = soup.find('div', attrs={'class': 'entry-content'}) pTags = divTag.findAll('p') except", "dateLinkDict[date] = __getPDFlink(bulletinPageLink) if verbose: print(date) return dateLinkDict def downloadPDF(PDFlink): \"\"\" Downloads pdf", "tag in tags: # The returned dates may not be consistently formatted on", "is not available. \"\"\" stdDate = cleanDate(date) dateLinkDict = __getDateLinkDict(verbose) if stdDate in", "date and corresponding link to pdfs bulletins dateLinkDict = dict() for tag in", "'bulletinLinks.json' bulletinDefaultFile = 'bulletin.pdf' def __getPDFlink(bulletinPageLink): \"\"\" Return links to pdf bulletin uploads", "check for updates. \"\"\" jsonFile = open(filename, 'w') json.dump(dateLinkDict, jsonFile) jsonFile.close() def readJSON(filename=jsonDefaultFile):", "latest one. 
\"\"\" from datetime import date today = date.today().strftime('%d.%m.%Y') isNew = isNewBulletin(today)", "bulletinPageLink) soup = BeautifulSoup(bulletinPage.data, 'html.parser') try: divTag = soup.find('div', attrs={'class': 'entry-content'}) pTags =", "pTags = divTag.findAll('p') except AttributeError: print('Error: Broken Connection. Rerun') raise ConnectionError # Get", "found') return False finally: bulletinFile.close() def writeJSON(dateLinkDict, filename=jsonDefaultFile): \"\"\" Writes dateLinkDict as a", "+ tag.a.get('href') else: return None def cleanDate(date): \"\"\" Returns the date in the", "def getBulletin(date, verbose=True): \"\"\" Downloads latest bulletin for the given date and returns", "def downloadPDF(PDFlink): \"\"\" Downloads pdf bulletin from the provided link \"\"\" try: req", "jsonDefaultFile = 'bulletinLinks.json' bulletinDefaultFile = 'bulletin.pdf' def __getPDFlink(bulletinPageLink): \"\"\" Return links to pdf", "\"\"\" Return links to pdf bulletin uploads in page. This link can be", "= req.request('GET', DHSLink) soup = BeautifulSoup(DHSPage.data, 'html.parser') tags = soup.findAll('h3', attrs={'class': 'entry-title'}) #", "True except FileNotFoundError: # If local JSON file does not exist if updateJSONfile:", "running for first time, the JSON file is created and returns True. \"\"\"", "pdfs bulletins dateLinkDict = dict() for tag in tags: # The returned dates", "are both found date = cleanDate(tag.a.text) bulletinPageLink = linkPre + tag.a.get('href') dateLinkDict[date] =", "If the module is invoked as a python program, it checks for new", "updateJSONfile=True, verbose=True): \"\"\" Returns bulletin link if an updated bulletin is available on", "+ '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/' req = urllib3.PoolManager() DHSPage = req.request('GET', DHSLink) soup = BeautifulSoup(DHSPage.data, 'html.parser')", "parse the COVID bulletins provided by DHS Kerala \"\"\" import urllib3 from bs4", "pdf bulletin uploads in page. This link can be checked for updated bulletins.", "3: print('ERROR: Use python version 3+') raise SyntaxError # Parse DHS Kerala webpage", "\"\"\" Returns a dict data type containing all dates and their corresponding links", "exist in local JSON file if not stdDate in dateLinkDictOld: if updateJSONfile: writeJSON(dateLinkDictNew)", "updateJSONfile: writeJSON(dateLinkDictNew) return True except FileNotFoundError: # If local JSON file does not", "print('Error: Broken Connection. Rerun') raise ConnectionError # Get link to pdf bulletin for", "link to pdf bulletin for tag in pTags: if 'English' in tag.text: return", "bulletinDefaultFile = 'bulletin.pdf' def __getPDFlink(bulletinPageLink): \"\"\" Return links to pdf bulletin uploads in", "tags to extract date and corresponding link to pdfs bulletins dateLinkDict = dict()", "to check for updates. \"\"\" jsonFile = open(filename, 'w') json.dump(dateLinkDict, jsonFile) jsonFile.close() def", "# Ensure python version 3+ if sys.version_info.major < 3: print('ERROR: Use python version", "'r') dateLinkDict = json.load(jsonFile) jsonFile.close() return dateLinkDict def getBulletin(date, verbose=True): \"\"\" Downloads latest", "def readJSON(filename=jsonDefaultFile): \"\"\" Reads all dateLinkDict from a json file. 
This JSON file", "updateJSONfile: writeJSON(dateLinkDictNew) return True # If both bulletins are same if (dateLinkDictNew[stdDate] ==", "the provided link \"\"\" try: req = urllib3.PoolManager() response = req.request('GET', PDFlink) bulletinFile", "latest bulletin for the given date and returns True. Returns False if bulletin", "__name__ == \"__main__\": \"\"\" If the module is invoked as a python program,", "from the provided link \"\"\" try: req = urllib3.PoolManager() response = req.request('GET', PDFlink)", "dates may not be consistently formatted on the website. # Eg. dd-mm-yyy and", "corresponding links to bulletin pages. \"\"\" # Ensure python version 3+ if sys.version_info.major", "dateLinkDict as a json file. This JSON file can be used to check", "True. \"\"\" stdDate = cleanDate(date) dateLinkDictNew = __getDateLinkDict(verbose) # If date does not", "as a python program, it checks for new bulletins and downloads the latest", "bulletins and downloads the latest one. \"\"\" from datetime import date today =", "Return links to pdf bulletin uploads in page. This link can be checked", "readJSON(jsonDefaultFile) # If date does not exist in local JSON file if not", "to check for updates. \"\"\" jsonFile = open(filename, 'r') dateLinkDict = json.load(jsonFile) jsonFile.close()", "dd-mm-yyy and dd/mm/yyyy are both found date = cleanDate(tag.a.text) bulletinPageLink = linkPre +", "to extract date and corresponding link to pdfs bulletins dateLinkDict = dict() for", "given date and returns True. Returns False if bulletin is not available. \"\"\"", "bulletin link if an updated bulletin is available on provided date. Returns False", "Parse bulletin page to get pdf link req = urllib3.PoolManager() bulletinPage = req.request('GET',", "dateLinkDictNew: return False try: # If local JSON file exists in directory dateLinkDictOld", "from bs4 import BeautifulSoup import json import sys linkPre = 'http://dhs.kerala.gov.in' jsonDefaultFile =", "if not isinstance(date,str): raise TypeError if not len(date) == 10: raise ValueError return", "getBulletin(date, verbose=True): \"\"\" Downloads latest bulletin for the given date and returns True.", "and returns True. Returns False if bulletin is not available. \"\"\" stdDate =", "= open(bulletinDefaultFile, 'wb') bulletinFile.write(response.data) except HTTPError: print('Error: PDF file not found') return False", "get pdf link req = urllib3.PoolManager() bulletinPage = req.request('GET', bulletinPageLink) soup = BeautifulSoup(bulletinPage.data,", "not found') return False finally: bulletinFile.close() def writeJSON(dateLinkDict, filename=jsonDefaultFile): \"\"\" Writes dateLinkDict as", "False else: # If both bulletins are different if updateJSONfile: writeJSON(dateLinkDictNew) return True", "webpage to get html tags if verbose: print('Parsing Kerala DHS webpage ...') print('Obtaining", "jsonFile.close() def readJSON(filename=jsonDefaultFile): \"\"\" Reads all dateLinkDict from a json file. This JSON", "Returns bulletin link if an updated bulletin is available on provided date. Returns", "date does not exist on server if not stdDate in dateLinkDictNew: return False", "file is created and returns True. 
\"\"\" stdDate = cleanDate(date) dateLinkDictNew = __getDateLinkDict(verbose)", "dateLinkDictOld[stdDate]): return False else: # If both bulletins are different if updateJSONfile: writeJSON(dateLinkDictNew)", "# Parse bulletin page to get pdf link req = urllib3.PoolManager() bulletinPage =", "__getDateLinkDict(verbose=True): \"\"\" Returns a dict data type containing all dates and their corresponding", "are same if (dateLinkDictNew[stdDate] == dateLinkDictOld[stdDate]): return False else: # If both bulletins", "dateLinkDictNew = __getDateLinkDict(verbose) # If date does not exist on server if not", "date[3:5] + '.' + date[6:10] def __getDateLinkDict(verbose=True): \"\"\" Returns a dict data type", "= urllib3.PoolManager() bulletinPage = req.request('GET', bulletinPageLink) soup = BeautifulSoup(bulletinPage.data, 'html.parser') try: divTag =", "available. If running for first time, the JSON file is created and returns", "links to bulletin pages. \"\"\" # Ensure python version 3+ if sys.version_info.major <", "\"\"\" stdDate = cleanDate(date) dateLinkDict = __getDateLinkDict(verbose) if stdDate in dateLinkDict: downloadPDF(dateLinkDict[stdDate]) return", "Rerun') raise ConnectionError # Get link to pdf bulletin for tag in pTags:", "Broken Connection. Rerun') raise ConnectionError # Get link to pdf bulletin for tag", "dateLinkDict = json.load(jsonFile) jsonFile.close() return dateLinkDict def getBulletin(date, verbose=True): \"\"\" Downloads latest bulletin", "provided date. Returns False if no new bulletins are available. If running for", "links to pdf bulletin uploads in page. This link can be checked for", "downloadPDF(dateLinkDict[stdDate]) return True else: return False def isNewBulletin(date, updateJSONfile=True, verbose=True): \"\"\" Returns bulletin", "on provided date. Returns False if no new bulletins are available. If running", "import date today = date.today().strftime('%d.%m.%Y') isNew = isNewBulletin(today) if isNew: print('NEW BULLETIN AVAILABLE')", "isNewBulletin(today) if isNew: print('NEW BULLETIN AVAILABLE') downloadPDF(isNew) print('Downloaded to ' + bulletinDefaultFile) else:", "both bulletins are different if updateJSONfile: writeJSON(dateLinkDictNew) return True except FileNotFoundError: # If", "linkPre + '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/' req = urllib3.PoolManager() DHSPage = req.request('GET', DHSLink) soup = BeautifulSoup(DHSPage.data,", "a standard format \"\"\" # Sanity checks if not isinstance(date,str): raise TypeError if", "file can be used to check for updates. 
\"\"\" jsonFile = open(filename, 'r')", "sys.version_info.major < 3: print('ERROR: Use python version 3+') raise SyntaxError # Parse DHS", "same if (dateLinkDictNew[stdDate] == dateLinkDictOld[stdDate]): return False else: # If both bulletins are", "tag.text: return linkPre + tag.a.get('href') else: return None def cleanDate(date): \"\"\" Returns the", "provided link \"\"\" try: req = urllib3.PoolManager() response = req.request('GET', PDFlink) bulletinFile =", "in dateLinkDictNew: return False try: # If local JSON file exists in directory", "JSON file in a standard format \"\"\" # Sanity checks if not isinstance(date,str):", "version 3+') raise SyntaxError # Parse DHS Kerala webpage to get html tags", "links of dates:') DHSLink = linkPre + '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/' req = urllib3.PoolManager() DHSPage =", "soup.findAll('h3', attrs={'class': 'entry-title'}) # Clean html tags to extract date and corresponding link", "'entry-content'}) pTags = divTag.findAll('p') except AttributeError: print('Error: Broken Connection. Rerun') raise ConnectionError #", "date does not exist in local JSON file if not stdDate in dateLinkDictOld:", "can be checked for updated bulletins. \"\"\" # Parse bulletin page to get", "date and returns True. Returns False if bulletin is not available. \"\"\" stdDate", "tags: # The returned dates may not be consistently formatted on the website.", "# Parse DHS Kerala webpage to get html tags if verbose: print('Parsing Kerala", "an updated bulletin is available on provided date. Returns False if no new", "linkPre + tag.a.get('href') dateLinkDict[date] = __getPDFlink(bulletinPageLink) if verbose: print(date) return dateLinkDict def downloadPDF(PDFlink):", "True # If both bulletins are same if (dateLinkDictNew[stdDate] == dateLinkDictOld[stdDate]): return False", "\"__main__\": \"\"\" If the module is invoked as a python program, it checks", "it checks for new bulletins and downloads the latest one. \"\"\" from datetime", "open(bulletinDefaultFile, 'wb') bulletinFile.write(response.data) except HTTPError: print('Error: PDF file not found') return False finally:", "writeJSON(dateLinkDictNew) return True if __name__ == \"__main__\": \"\"\" If the module is invoked", "import BeautifulSoup import json import sys linkPre = 'http://dhs.kerala.gov.in' jsonDefaultFile = 'bulletinLinks.json' bulletinDefaultFile", "open(filename, 'w') json.dump(dateLinkDict, jsonFile) jsonFile.close() def readJSON(filename=jsonDefaultFile): \"\"\" Reads all dateLinkDict from a", "isinstance(date,str): raise TypeError if not len(date) == 10: raise ValueError return date[0:2] +", "both bulletins are same if (dateLinkDictNew[stdDate] == dateLinkDictOld[stdDate]): return False else: # If", "returns True. \"\"\" stdDate = cleanDate(date) dateLinkDictNew = __getDateLinkDict(verbose) # If date does", "If local JSON file exists in directory dateLinkDictOld = readJSON(jsonDefaultFile) # If date", "BeautifulSoup(bulletinPage.data, 'html.parser') try: divTag = soup.find('div', attrs={'class': 'entry-content'}) pTags = divTag.findAll('p') except AttributeError:", "linkPre + tag.a.get('href') else: return None def cleanDate(date): \"\"\" Returns the date in", "the given date and returns True. Returns False if bulletin is not available.", "if not len(date) == 10: raise ValueError return date[0:2] + '.' 
+ date[3:5]", "FileNotFoundError: # If local JSON file does not exist if updateJSONfile: writeJSON(dateLinkDictNew) return", "open(filename, 'r') dateLinkDict = json.load(jsonFile) jsonFile.close() return dateLinkDict def getBulletin(date, verbose=True): \"\"\" Downloads", "stdDate in dateLinkDictOld: if updateJSONfile: writeJSON(dateLinkDictNew) return True # If both bulletins are", "False try: # If local JSON file exists in directory dateLinkDictOld = readJSON(jsonDefaultFile)", "and corresponding link to pdfs bulletins dateLinkDict = dict() for tag in tags:", "= __getPDFlink(bulletinPageLink) if verbose: print(date) return dateLinkDict def downloadPDF(PDFlink): \"\"\" Downloads pdf bulletin", "in dateLinkDict: downloadPDF(dateLinkDict[stdDate]) return True else: return False def isNewBulletin(date, updateJSONfile=True, verbose=True): \"\"\"", "be checked for updated bulletins. \"\"\" # Parse bulletin page to get pdf", "a dict data type containing all dates and their corresponding links to bulletin", "SyntaxError # Parse DHS Kerala webpage to get html tags if verbose: print('Parsing", "The returned dates may not be consistently formatted on the website. # Eg.", "COVID bulletins provided by DHS Kerala \"\"\" import urllib3 from bs4 import BeautifulSoup", "used to check for updates. \"\"\" jsonFile = open(filename, 'w') json.dump(dateLinkDict, jsonFile) jsonFile.close()", "'html.parser') try: divTag = soup.find('div', attrs={'class': 'entry-content'}) pTags = divTag.findAll('p') except AttributeError: print('Error:", "= linkPre + tag.a.get('href') dateLinkDict[date] = __getPDFlink(bulletinPageLink) if verbose: print(date) return dateLinkDict def", "len(date) == 10: raise ValueError return date[0:2] + '.' + date[3:5] + '.'", "checks for new bulletins and downloads the latest one. \"\"\" from datetime import", "return False else: # If both bulletins are different if updateJSONfile: writeJSON(dateLinkDictNew) return", "'w') json.dump(dateLinkDict, jsonFile) jsonFile.close() def readJSON(filename=jsonDefaultFile): \"\"\" Reads all dateLinkDict from a json", "== 10: raise ValueError return date[0:2] + '.' + date[3:5] + '.' +", "3+') raise SyntaxError # Parse DHS Kerala webpage to get html tags if", "dateLinkDict from a json file. This JSON file can be used to check", "= 'bulletinLinks.json' bulletinDefaultFile = 'bulletin.pdf' def __getPDFlink(bulletinPageLink): \"\"\" Return links to pdf bulletin", "jsonFile) jsonFile.close() def readJSON(filename=jsonDefaultFile): \"\"\" Reads all dateLinkDict from a json file. This", "= json.load(jsonFile) jsonFile.close() return dateLinkDict def getBulletin(date, verbose=True): \"\"\" Downloads latest bulletin for", "the COVID bulletins provided by DHS Kerala \"\"\" import urllib3 from bs4 import", "not exist on server if not stdDate in dateLinkDictNew: return False try: #", "the module is invoked as a python program, it checks for new bulletins", "print('Error: PDF file not found') return False finally: bulletinFile.close() def writeJSON(dateLinkDict, filename=jsonDefaultFile): \"\"\"", "'html.parser') tags = soup.findAll('h3', attrs={'class': 'entry-title'}) # Clean html tags to extract date", "can be used to check for updates. \"\"\" jsonFile = open(filename, 'r') dateLinkDict", "tags = soup.findAll('h3', attrs={'class': 'entry-title'}) # Clean html tags to extract date and", "in page. This link can be checked for updated bulletins. \"\"\" # Parse", "type containing all dates and their corresponding links to bulletin pages. 
\"\"\" #", "is created and returns True. \"\"\" stdDate = cleanDate(date) dateLinkDictNew = __getDateLinkDict(verbose) #", "readJSON(filename=jsonDefaultFile): \"\"\" Reads all dateLinkDict from a json file. This JSON file can", "updated bulletin is available on provided date. Returns False if no new bulletins", "if __name__ == \"__main__\": \"\"\" If the module is invoked as a python", "the date in the format dd.mm.yyyy This can be used to write to", "date today = date.today().strftime('%d.%m.%Y') isNew = isNewBulletin(today) if isNew: print('NEW BULLETIN AVAILABLE') downloadPDF(isNew)", "bulletinFile = open(bulletinDefaultFile, 'wb') bulletinFile.write(response.data) except HTTPError: print('Error: PDF file not found') return", "the JSON file is created and returns True. \"\"\" stdDate = cleanDate(date) dateLinkDictNew", "JSON file can be used to check for updates. \"\"\" jsonFile = open(filename,", "return False finally: bulletinFile.close() def writeJSON(dateLinkDict, filename=jsonDefaultFile): \"\"\" Writes dateLinkDict as a json", "first time, the JSON file is created and returns True. \"\"\" stdDate =", "BULLETIN AVAILABLE') downloadPDF(isNew) print('Downloaded to ' + bulletinDefaultFile) else: print('NO NEW BULLETINS AVAILABLE')", "= urllib3.PoolManager() DHSPage = req.request('GET', DHSLink) soup = BeautifulSoup(DHSPage.data, 'html.parser') tags = soup.findAll('h3',", "to write to the JSON file in a standard format \"\"\" # Sanity", "if updateJSONfile: writeJSON(dateLinkDictNew) return True except FileNotFoundError: # If local JSON file does", "bulletins are same if (dateLinkDictNew[stdDate] == dateLinkDictOld[stdDate]): return False else: # If both", "bulletins are different if updateJSONfile: writeJSON(dateLinkDictNew) return True except FileNotFoundError: # If local", "import sys linkPre = 'http://dhs.kerala.gov.in' jsonDefaultFile = 'bulletinLinks.json' bulletinDefaultFile = 'bulletin.pdf' def __getPDFlink(bulletinPageLink):", "try: divTag = soup.find('div', attrs={'class': 'entry-content'}) pTags = divTag.findAll('p') except AttributeError: print('Error: Broken", "print('ERROR: Use python version 3+') raise SyntaxError # Parse DHS Kerala webpage to", "downloadPDF(PDFlink): \"\"\" Downloads pdf bulletin from the provided link \"\"\" try: req =", "JSON file if not stdDate in dateLinkDictOld: if updateJSONfile: writeJSON(dateLinkDictNew) return True #", "if updateJSONfile: writeJSON(dateLinkDictNew) return True # If both bulletins are same if (dateLinkDictNew[stdDate]", "pTags: if 'English' in tag.text: return linkPre + tag.a.get('href') else: return None def", "writeJSON(dateLinkDict, filename=jsonDefaultFile): \"\"\" Writes dateLinkDict as a json file. This JSON file can", "exist if updateJSONfile: writeJSON(dateLinkDictNew) return True if __name__ == \"__main__\": \"\"\" If the", "file can be used to check for updates. \"\"\" jsonFile = open(filename, 'w')", "stdDate in dateLinkDictNew: return False try: # If local JSON file exists in", "in tag.text: return linkPre + tag.a.get('href') else: return None def cleanDate(date): \"\"\" Returns", "can be used to check for updates. \"\"\" jsonFile = open(filename, 'w') json.dump(dateLinkDict,", "<filename>covidKeralaDHS.py \"\"\" A module to parse the COVID bulletins provided by DHS Kerala", "= divTag.findAll('p') except AttributeError: print('Error: Broken Connection. 
Rerun') raise ConnectionError # Get link", "file exists in directory dateLinkDictOld = readJSON(jsonDefaultFile) # If date does not exist", "returned dates may not be consistently formatted on the website. # Eg. dd-mm-yyy", "If date does not exist on server if not stdDate in dateLinkDictNew: return", "= soup.find('div', attrs={'class': 'entry-content'}) pTags = divTag.findAll('p') except AttributeError: print('Error: Broken Connection. Rerun')", "used to check for updates. \"\"\" jsonFile = open(filename, 'r') dateLinkDict = json.load(jsonFile)", "link can be checked for updated bulletins. \"\"\" # Parse bulletin page to", "= readJSON(jsonDefaultFile) # If date does not exist in local JSON file if", "be used to check for updates. \"\"\" jsonFile = open(filename, 'w') json.dump(dateLinkDict, jsonFile)", "isNew: print('NEW BULLETIN AVAILABLE') downloadPDF(isNew) print('Downloaded to ' + bulletinDefaultFile) else: print('NO NEW", "if not stdDate in dateLinkDictNew: return False try: # If local JSON file", "\"\"\" import urllib3 from bs4 import BeautifulSoup import json import sys linkPre =", "if an updated bulletin is available on provided date. Returns False if no", "'/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/' req = urllib3.PoolManager() DHSPage = req.request('GET', DHSLink) soup = BeautifulSoup(DHSPage.data, 'html.parser') tags", "= date.today().strftime('%d.%m.%Y') isNew = isNewBulletin(today) if isNew: print('NEW BULLETIN AVAILABLE') downloadPDF(isNew) print('Downloaded to", "= isNewBulletin(today) if isNew: print('NEW BULLETIN AVAILABLE') downloadPDF(isNew) print('Downloaded to ' + bulletinDefaultFile)", "in the format dd.mm.yyyy This can be used to write to the JSON", "stdDate = cleanDate(date) dateLinkDict = __getDateLinkDict(verbose) if stdDate in dateLinkDict: downloadPDF(dateLinkDict[stdDate]) return True", "pages. \"\"\" # Ensure python version 3+ if sys.version_info.major < 3: print('ERROR: Use", "Use python version 3+') raise SyntaxError # Parse DHS Kerala webpage to get", "import urllib3 from bs4 import BeautifulSoup import json import sys linkPre = 'http://dhs.kerala.gov.in'", "\"\"\" from datetime import date today = date.today().strftime('%d.%m.%Y') isNew = isNewBulletin(today) if isNew:", "print('Obtaining links of dates:') DHSLink = linkPre + '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/' req = urllib3.PoolManager() DHSPage", "\"\"\" Returns the date in the format dd.mm.yyyy This can be used to", "found date = cleanDate(tag.a.text) bulletinPageLink = linkPre + tag.a.get('href') dateLinkDict[date] = __getPDFlink(bulletinPageLink) if", "return True else: return False def isNewBulletin(date, updateJSONfile=True, verbose=True): \"\"\" Returns bulletin link", "writeJSON(dateLinkDictNew) return True # If both bulletins are same if (dateLinkDictNew[stdDate] == dateLinkDictOld[stdDate]):", "Returns False if bulletin is not available. \"\"\" stdDate = cleanDate(date) dateLinkDict =", "from datetime import date today = date.today().strftime('%d.%m.%Y') isNew = isNewBulletin(today) if isNew: print('NEW", "write to the JSON file in a standard format \"\"\" # Sanity checks", "+ date[3:5] + '.' 
+ date[6:10] def __getDateLinkDict(verbose=True): \"\"\" Returns a dict data", "DHS webpage ...') print('Obtaining links of dates:') DHSLink = linkPre + '/%e0%b4%a1%e0%b5%86%e0%b4%af%e0%b4%bf%e0%b4%b2%e0%b4%bf-%e0%b4%ac%e0%b5%81%e0%b4%b3%e0%b5%8d%e0%b4%b3%e0%b4%b1%e0%b5%8d%e0%b4%b1%e0%b4%bf%e0%b4%a8%e0%b5%8d%e2%80%8d/' req", "does not exist if updateJSONfile: writeJSON(dateLinkDictNew) return True if __name__ == \"__main__\": \"\"\"", "to get pdf link req = urllib3.PoolManager() bulletinPage = req.request('GET', bulletinPageLink) soup =", "isNewBulletin(date, updateJSONfile=True, verbose=True): \"\"\" Returns bulletin link if an updated bulletin is available", "urllib3.PoolManager() DHSPage = req.request('GET', DHSLink) soup = BeautifulSoup(DHSPage.data, 'html.parser') tags = soup.findAll('h3', attrs={'class':", "exists in directory dateLinkDictOld = readJSON(jsonDefaultFile) # If date does not exist in", "a json file. This JSON file can be used to check for updates.", "created and returns True. \"\"\" stdDate = cleanDate(date) dateLinkDictNew = __getDateLinkDict(verbose) # If" ]
[ "def myrange_generator(top): current = 0: while current < top: yield current current +=", "myrange_generator(top): current = 0: while current < top: yield current current += 1", "<filename>1-DiveIntoPython/week5/lecturesdemos/AsychnchronousProgramming/generators.py def myrange_generator(top): current = 0: while current < top: yield current current" ]
[ "matrix for respective timestep timestep = int(np.floor(arrival)) prob = self.endpoints[timestep] # sample ingress", "TrafficStub: def __init__(self, trace): self.trace = trace def sample(self): return self.trace def __iter__(self):", "for req in srequests] # sort to-be-simulated service requests according to their arrival", "None self.resd_lat: float = None def __str__(self): attrs = [round(self.duration, 2), round(self.datarate, 2),", "in zip(arrival, duration, rates, latencies, ingresses, egresses): req = Request(arr, dr, rate, lat,", "__init__(self, arrival: float, duration: float, datarate: float, max_latency: float, endpoints: tuple, service: int):", "service from distribution functions arrival = self.sample_arrival(self.horizon) duration = self.sample_duration(len(arrival)) ingresses, egresses =", "Lat.: {}; Lat.: {}; Service: {}'.format(*attrs) class ServiceTraffic: def __init__(self, rng: BitGenerator, service:", "= self.rng.integers(0, self.MAX_SEED) poi_seed = int(poi_seed) in_poisson = SimuInhomogeneousPoisson( [self.rate_function], end_time=horizon, verbose=False, seed=poi_seed)", "np.ndarray, rates: np.ndarray, spaths: Dict): self.rng = rng self.MAX_SEED = 2**30 - 1", "service from respective processes requests = [process.sample() for process in self.processes] requests =", "scipy.stats as stats from numpy.random import default_rng, BitGenerator from tick.base import TimeFunction from", "= [round(self.duration, 2), round(self.datarate, 2), round(self.resd_lat, 2), round(self.max_latency, 2)] attrs = [self.ingress, self.egress,", "rates)) def sample_arrival(self, horizon): poi_seed = self.rng.integers(0, self.MAX_SEED) poi_seed = int(poi_seed) in_poisson =", "verbose=False, seed=poi_seed) in_poisson.track_intensity() in_poisson.simulate() arrivals = in_poisson.timestamps[0] return arrivals def sample_duration(self, size): mduration", "class Request: def __init__(self, arrival: float, duration: float, datarate: float, max_latency: float, endpoints:", "def sample_duration(self, size): mduration = self.process['mduration'] duration = self.rng.exponential(scale=mduration, size=size) return duration def", "= int(np.floor(arrival)) prob = self.endpoints[timestep] # sample ingress / egress from probability matrix", "spaths # create time function for inhomogenous poisson process T = np.linspace(0.0, horizon", "(a - mean) / scale, (b - mean) / scale lat = stats.truncnorm.rvs(a,", "= np.ascontiguousarray(rates) self.rate_function = TimeFunction((T, rates)) def sample_arrival(self, horizon): poi_seed = self.rng.integers(0, self.MAX_SEED)", "requests for req in srequests] # sort to-be-simulated service requests according to their", "a, b = (a - mean) / scale, (b - mean) / scale", "srequests] # sort to-be-simulated service requests according to their arrival time requests =", "sample_arrival(self, horizon): poi_seed = self.rng.integers(0, self.MAX_SEED) poi_seed = int(poi_seed) in_poisson = SimuInhomogeneousPoisson( [self.rate_function],", "TimeFunction((T, rates)) def sample_arrival(self, horizon): poi_seed = self.rng.integers(0, self.MAX_SEED) poi_seed = int(poi_seed) in_poisson", "scale = self.datarates['scale'] a, b = self.datarates['a'], self.datarates['b'] a, b = (a -", "horizon self.process = process self.datarates = datarates self.latencies = latencies self.endpoints = endpoints", "sampled factor lat = lat * propagation return lat def sample_endpoints(self, arrivals): ingresses,", "__init__(self, trace): self.trace = trace def sample(self): return self.trace def __iter__(self): 
<reponame>CN-UPB/FutureCoord
from typing import List, Dict
from functools import cmp_to_key

import numpy as np
import scipy.stats as stats
from numpy.random import default_rng, BitGenerator
from tick.base import TimeFunction
from tick.hawkes import SimuInhomogeneousPoisson


class Request:
    def __init__(self, arrival: float, duration: float, datarate: float,
                 max_latency: float, endpoints: tuple, service: int):
        self.arrival = arrival
        self.duration = duration
        self.datarate = datarate
        self.max_latency = max_latency
        self.ingress, self.egress = endpoints
        self.ingress = int(self.ingress)
        self.egress = int(self.egress)
        self.service: int = int(service)
        self.vtypes: List[int] = None
        self.resd_lat: float = None

    def __str__(self):
        attrs = [round(self.duration, 2), round(self.datarate, 2),
                 round(self.resd_lat, 2), round(self.max_latency, 2)]
        attrs = [self.ingress, self.egress, *attrs, self.service]
        return 'Route: ({}-{}); Duration: {}; Rate: {}; Resd. Lat.: {}; Lat.: {}; Service: {}'.format(*attrs)


class ServiceTraffic:
    def __init__(self, rng: BitGenerator, service: int, horizon: float, process: Dict,
                 datarates: Dict, latencies: Dict, endpoints: np.ndarray,
                 rates: np.ndarray, spaths: Dict):
        self.rng = rng
        self.MAX_SEED = 2**30 - 1
        self.service = service
        self.horizon = horizon
        self.process = process
        self.datarates = datarates
        self.latencies = latencies
        self.endpoints = endpoints
        self.spaths = spaths

        # create time function for inhomogeneous Poisson process
        T = np.linspace(0.0, horizon - 1, horizon)
        rates = np.ascontiguousarray(rates)
        self.rate_function = TimeFunction((T, rates))

    def sample_arrival(self, horizon):
        poi_seed = self.rng.integers(0, self.MAX_SEED)
        poi_seed = int(poi_seed)
        in_poisson = SimuInhomogeneousPoisson(
            [self.rate_function], end_time=horizon, verbose=False, seed=poi_seed)
        in_poisson.track_intensity()
        in_poisson.simulate()
        arrivals = in_poisson.timestamps[0]
        return arrivals

    def sample_duration(self, size):
        mduration = self.process['mduration']
        duration = self.rng.exponential(scale=mduration, size=size)
        return duration

    def sample_datarates(self, size):
        mean = self.datarates['loc']
        scale = self.datarates['scale']
        a, b = self.datarates['a'], self.datarates['b']
        a, b = (a - mean) / scale, (b - mean) / scale
        datarates = stats.truncnorm.rvs(a, b, mean, scale, size=size, random_state=self.rng)
        return datarates

    def sample_latencies(self, propagation: np.ndarray):
        mean = self.latencies['loc']
        scale = self.latencies['scale']
        a, b = self.latencies['a'], self.latencies['b']
        a, b = (a - mean) / scale, (b - mean) / scale
        lat = stats.truncnorm.rvs(a, b, mean, scale, size=propagation.size, random_state=self.rng)

        # scale maximum end-to-end latencies (given by shortest path propagation delay) with sampled factor
        lat = lat * propagation
        return lat

    def sample_endpoints(self, arrivals):
        ingresses, egresses = [], []
        for arrival in arrivals:
            # get endpoint probability matrix for respective timestep
            timestep = int(np.floor(arrival))
            prob = self.endpoints[timestep]

            # sample ingress / egress from probability matrix
            flatten = prob.ravel()
            index = np.arange(flatten.size)
            ingress, egress = np.unravel_index(
                self.rng.choice(index, p=flatten), prob.shape)
            ingresses.append(ingress)
            egresses.append(egress)

        return ingresses, egresses

    def sample(self):
        # sample parameters for each service from distribution functions
        arrival = self.sample_arrival(self.horizon)
        duration = self.sample_duration(len(arrival))
        ingresses, egresses = self.sample_endpoints(arrival)

        # use arrival time to index the endpoint probability matrix and traffic matrix
        rates = self.sample_datarates(size=len(arrival))
        propagation = np.asarray([self.spaths[ingr][egr] for ingr, egr in zip(ingresses, egresses)])
        latencies = self.sample_latencies(propagation)

        # build request objects and append them to the traffic trace
        requests = []
        for arr, dr, rate, lat, ingr, egr in zip(arrival, duration, rates, latencies, ingresses, egresses):
            req = Request(arr, dr, rate, lat, (ingr, egr), self.service)
            requests.append(req)

        return requests


class Traffic:
    def __init__(self, processes):
        self.processes = processes

    def sample(self):
        # generate requests for each type of service from respective processes
        requests = [process.sample() for process in self.processes]
        requests = [req for srequests in requests for req in srequests]

        # sort to-be-simulated service requests according to their arrival time
        requests = sorted(requests, key=cmp_to_key(
            lambda r1, r2: r1.arrival - r2.arrival))

        return requests

    def __iter__(self):
        trace = self.sample()
        return iter(trace)


class TrafficStub:
    def __init__(self, trace):
        self.trace = trace

    def sample(self):
        return self.trace

    def __iter__(self):
        return iter(self.trace)
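# A minimal usage sketch, assuming the Request and TrafficStub classes above
# are in scope: a pre-built list of requests is replayed as a fixed trace
# instead of re-sampling one via Traffic/ServiceTraffic. All numeric values
# below are illustrative only.
if __name__ == '__main__':
    requests = [
        Request(arrival=0.4, duration=5.0, datarate=1.2, max_latency=20.0, endpoints=(0, 3), service=0),
        Request(arrival=1.7, duration=2.5, datarate=0.8, max_latency=15.0, endpoints=(2, 1), service=1),
    ]
    trace = TrafficStub(requests)
    for req in trace:
        print(req.arrival, req.ingress, req.egress, req.service)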
[ "= alsaaudio_capture self.nonblock = alsaaudio_nonblock self.inp = None def __enter__(self): \"\"\"Set the acquisition", "of the loop \"\"\" self.capture = alsaaudio_capture self.nonblock = alsaaudio_nonblock self.inp = None", "for blocking mode. Then we could have left out the sleep call in", "\"\"\"Class to use micro in a `with` bloc.\"\"\" def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE, alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK): \"\"\"Open", "self.inp = alsaaudio.PCM(self.capture, self.nonblock) return self.inp def __exit__(self, capture, nonblock, inpt): \"\"\"Close the", "we could have left out the sleep call in the bottom of the", "in a `with` bloc.\"\"\" def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE, alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK): \"\"\"Open the device in nonblocking", "last argument could just as well have been zero for blocking mode. Then", "alsaaudio # pylint: disable=R0903, E1101 class Micro(): \"\"\"Class to use micro in a", "\"\"\"Set the acquisition and return it.\"\"\" self.inp = alsaaudio.PCM(self.capture, self.nonblock) return self.inp def", "alsaaudio_capture=alsaaudio.PCM_CAPTURE, alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK): \"\"\"Open the device in nonblocking capture mode. The last argument could", "= alsaaudio_nonblock self.inp = None def __enter__(self): \"\"\"Set the acquisition and return it.\"\"\"", "in nonblocking capture mode. The last argument could just as well have been", "sleep call in the bottom of the loop \"\"\" self.capture = alsaaudio_capture self.nonblock", "blocking mode. Then we could have left out the sleep call in the", "have left out the sleep call in the bottom of the loop \"\"\"", "in the bottom of the loop \"\"\" self.capture = alsaaudio_capture self.nonblock = alsaaudio_nonblock", "capture mode. The last argument could just as well have been zero for", "alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK): \"\"\"Open the device in nonblocking capture mode. The last argument could just", "# pylint: disable=R0903, E1101 class Micro(): \"\"\"Class to use micro in a `with`", "bottom of the loop \"\"\" self.capture = alsaaudio_capture self.nonblock = alsaaudio_nonblock self.inp =", "well have been zero for blocking mode. Then we could have left out", "a `with` bloc.\"\"\" def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE, alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK): \"\"\"Open the device in nonblocking capture", "device in nonblocking capture mode. The last argument could just as well have", "E1101 class Micro(): \"\"\"Class to use micro in a `with` bloc.\"\"\" def __init__(self,", "use micro in a `with` bloc.\"\"\" def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE, alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK): \"\"\"Open the device", "alsaaudio_capture self.nonblock = alsaaudio_nonblock self.inp = None def __enter__(self): \"\"\"Set the acquisition and", "been zero for blocking mode. Then we could have left out the sleep", "pylint: disable=R0903, E1101 class Micro(): \"\"\"Class to use micro in a `with` bloc.\"\"\"", "the loop \"\"\" self.capture = alsaaudio_capture self.nonblock = alsaaudio_nonblock self.inp = None def", "mode. The last argument could just as well have been zero for blocking", "__init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE, alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK): \"\"\"Open the device in nonblocking capture mode. The last argument", "nonblocking capture mode. 
The last argument could just as well have been zero", "the bottom of the loop \"\"\" self.capture = alsaaudio_capture self.nonblock = alsaaudio_nonblock self.inp", "def __enter__(self): \"\"\"Set the acquisition and return it.\"\"\" self.inp = alsaaudio.PCM(self.capture, self.nonblock) return", "None def __enter__(self): \"\"\"Set the acquisition and return it.\"\"\" self.inp = alsaaudio.PCM(self.capture, self.nonblock)", "could just as well have been zero for blocking mode. Then we could", "self.nonblock = alsaaudio_nonblock self.inp = None def __enter__(self): \"\"\"Set the acquisition and return", "\"\"\"Microphone module.\"\"\" import alsaaudio # pylint: disable=R0903, E1101 class Micro(): \"\"\"Class to use", "def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE, alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK): \"\"\"Open the device in nonblocking capture mode. The last", "= None def __enter__(self): \"\"\"Set the acquisition and return it.\"\"\" self.inp = alsaaudio.PCM(self.capture,", "have been zero for blocking mode. Then we could have left out the", "out the sleep call in the bottom of the loop \"\"\" self.capture =", "self.capture = alsaaudio_capture self.nonblock = alsaaudio_nonblock self.inp = None def __enter__(self): \"\"\"Set the", "= alsaaudio.PCM(self.capture, self.nonblock) return self.inp def __exit__(self, capture, nonblock, inpt): \"\"\"Close the acquisition.\"\"\"", "import alsaaudio # pylint: disable=R0903, E1101 class Micro(): \"\"\"Class to use micro in", "it.\"\"\" self.inp = alsaaudio.PCM(self.capture, self.nonblock) return self.inp def __exit__(self, capture, nonblock, inpt): \"\"\"Close", "disable=R0903, E1101 class Micro(): \"\"\"Class to use micro in a `with` bloc.\"\"\" def", "self.inp = None def __enter__(self): \"\"\"Set the acquisition and return it.\"\"\" self.inp =", "loop \"\"\" self.capture = alsaaudio_capture self.nonblock = alsaaudio_nonblock self.inp = None def __enter__(self):", "module.\"\"\" import alsaaudio # pylint: disable=R0903, E1101 class Micro(): \"\"\"Class to use micro", "could have left out the sleep call in the bottom of the loop", "the acquisition and return it.\"\"\" self.inp = alsaaudio.PCM(self.capture, self.nonblock) return self.inp def __exit__(self,", "and return it.\"\"\" self.inp = alsaaudio.PCM(self.capture, self.nonblock) return self.inp def __exit__(self, capture, nonblock,", "The last argument could just as well have been zero for blocking mode.", "Then we could have left out the sleep call in the bottom of", "Micro(): \"\"\"Class to use micro in a `with` bloc.\"\"\" def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE, alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK):", "the sleep call in the bottom of the loop \"\"\" self.capture = alsaaudio_capture", "bloc.\"\"\" def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE, alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK): \"\"\"Open the device in nonblocking capture mode. The", "just as well have been zero for blocking mode. 
Then we could have", "`with` bloc.\"\"\" def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE, alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK): \"\"\"Open the device in nonblocking capture mode.", "class Micro(): \"\"\"Class to use micro in a `with` bloc.\"\"\" def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE,", "left out the sleep call in the bottom of the loop \"\"\" self.capture", "acquisition and return it.\"\"\" self.inp = alsaaudio.PCM(self.capture, self.nonblock) return self.inp def __exit__(self, capture,", "argument could just as well have been zero for blocking mode. Then we", "mode. Then we could have left out the sleep call in the bottom", "alsaaudio_nonblock self.inp = None def __enter__(self): \"\"\"Set the acquisition and return it.\"\"\" self.inp", "\"\"\" self.capture = alsaaudio_capture self.nonblock = alsaaudio_nonblock self.inp = None def __enter__(self): \"\"\"Set", "to use micro in a `with` bloc.\"\"\" def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE, alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK): \"\"\"Open the", "micro in a `with` bloc.\"\"\" def __init__(self, alsaaudio_capture=alsaaudio.PCM_CAPTURE, alsaaudio_nonblock=alsaaudio.PCM_NONBLOCK): \"\"\"Open the device in", "zero for blocking mode. Then we could have left out the sleep call", "as well have been zero for blocking mode. Then we could have left", "__enter__(self): \"\"\"Set the acquisition and return it.\"\"\" self.inp = alsaaudio.PCM(self.capture, self.nonblock) return self.inp", "alsaaudio.PCM(self.capture, self.nonblock) return self.inp def __exit__(self, capture, nonblock, inpt): \"\"\"Close the acquisition.\"\"\" self.inp.close()", "\"\"\"Open the device in nonblocking capture mode. The last argument could just as", "the device in nonblocking capture mode. The last argument could just as well", "return it.\"\"\" self.inp = alsaaudio.PCM(self.capture, self.nonblock) return self.inp def __exit__(self, capture, nonblock, inpt):", "call in the bottom of the loop \"\"\" self.capture = alsaaudio_capture self.nonblock =" ]
[ "√(s(s-a)*(s-b)*(s-c)) \"\"\" a = float(input('Enter first side: ')) b = float(input('Enter second side:", "side: ')) c = float(input('Enter third side: ')) s = (a+b+c)/2 area =", "= (a+b+c)/2 area = √(s(s-a)*(s-b)*(s-c)) \"\"\" a = float(input('Enter first side: ')) b", "\"\"\" To find the area of a triangle, you must use this method:", "(a+b+c)/2 area = √(s(s-a)*(s-b)*(s-c)) \"\"\" a = float(input('Enter first side: ')) b =", "= (a+b+c)/2 area = (s*(s-a)*(s-b)*(s-c)) ** 0.5 print(\"The area of the triangle is", "b = float(input('Enter second side: ')) c = float(input('Enter third side: ')) s", "To find the area of a triangle, you must use this method: s", "this method: s = (a+b+c)/2 area = √(s(s-a)*(s-b)*(s-c)) \"\"\" a = float(input('Enter first", "the area of a triangle, you must use this method: s = (a+b+c)/2", "float(input('Enter first side: ')) b = float(input('Enter second side: ')) c = float(input('Enter", "c = float(input('Enter third side: ')) s = (a+b+c)/2 area = (s*(s-a)*(s-b)*(s-c)) **", "area of a triangle, you must use this method: s = (a+b+c)/2 area", "triangle, you must use this method: s = (a+b+c)/2 area = √(s(s-a)*(s-b)*(s-c)) \"\"\"", "third side: ')) s = (a+b+c)/2 area = (s*(s-a)*(s-b)*(s-c)) ** 0.5 print(\"The area", "must use this method: s = (a+b+c)/2 area = √(s(s-a)*(s-b)*(s-c)) \"\"\" a =", "= √(s(s-a)*(s-b)*(s-c)) \"\"\" a = float(input('Enter first side: ')) b = float(input('Enter second", "= float(input('Enter second side: ')) c = float(input('Enter third side: ')) s =", "second side: ')) c = float(input('Enter third side: ')) s = (a+b+c)/2 area", "')) c = float(input('Enter third side: ')) s = (a+b+c)/2 area = (s*(s-a)*(s-b)*(s-c))", "a triangle, you must use this method: s = (a+b+c)/2 area = √(s(s-a)*(s-b)*(s-c))", "\"\"\" a = float(input('Enter first side: ')) b = float(input('Enter second side: '))", "')) b = float(input('Enter second side: ')) c = float(input('Enter third side: '))", "<reponame>PythonCodes1/Python-Progression \"\"\" To find the area of a triangle, you must use this", "= float(input('Enter first side: ')) b = float(input('Enter second side: ')) c =", "of a triangle, you must use this method: s = (a+b+c)/2 area =", "(a+b+c)/2 area = (s*(s-a)*(s-b)*(s-c)) ** 0.5 print(\"The area of the triangle is %0.2f\"", "side: ')) s = (a+b+c)/2 area = (s*(s-a)*(s-b)*(s-c)) ** 0.5 print(\"The area of", "s = (a+b+c)/2 area = (s*(s-a)*(s-b)*(s-c)) ** 0.5 print(\"The area of the triangle", "s = (a+b+c)/2 area = √(s(s-a)*(s-b)*(s-c)) \"\"\" a = float(input('Enter first side: '))", "side: ')) b = float(input('Enter second side: ')) c = float(input('Enter third side:", "float(input('Enter second side: ')) c = float(input('Enter third side: ')) s = (a+b+c)/2", "method: s = (a+b+c)/2 area = √(s(s-a)*(s-b)*(s-c)) \"\"\" a = float(input('Enter first side:", "area = √(s(s-a)*(s-b)*(s-c)) \"\"\" a = float(input('Enter first side: ')) b = float(input('Enter", "a = float(input('Enter first side: ')) b = float(input('Enter second side: ')) c", "first side: ')) b = float(input('Enter second side: ')) c = float(input('Enter third", "')) s = (a+b+c)/2 area = (s*(s-a)*(s-b)*(s-c)) ** 0.5 print(\"The area of the", "float(input('Enter third side: ')) s = (a+b+c)/2 area = (s*(s-a)*(s-b)*(s-c)) ** 0.5 print(\"The", "find the area of a triangle, you must use this method: s =", "use this method: s = (a+b+c)/2 area = √(s(s-a)*(s-b)*(s-c)) \"\"\" a = float(input('Enter", "area = (s*(s-a)*(s-b)*(s-c)) ** 0.5 print(\"The area of the triangle is %0.2f\" %area)", "= 
float(input('Enter third side: ')) s = (a+b+c)/2 area = (s*(s-a)*(s-b)*(s-c)) ** 0.5", "you must use this method: s = (a+b+c)/2 area = √(s(s-a)*(s-b)*(s-c)) \"\"\" a" ]
[ "# Generated by Django 2.1.2 on 2018-12-19 23:32 import datetime from django.db import", "23:32 import datetime from django.db import migrations, models from django.utils.timezone import utc class", "Migration(migrations.Migration): dependencies = [ ('payments', '0002_auto_20181219_2206'), ] operations = [ migrations.AlterField( model_name='subscriptionpayment', name='payment_read_time',", "import migrations, models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('payments',", "('payments', '0002_auto_20181219_2206'), ] operations = [ migrations.AlterField( model_name='subscriptionpayment', name='payment_read_time', field=models.DateTimeField(default=datetime.datetime(2018, 12, 19, 23,", "= [ ('payments', '0002_auto_20181219_2206'), ] operations = [ migrations.AlterField( model_name='subscriptionpayment', name='payment_read_time', field=models.DateTimeField(default=datetime.datetime(2018, 12,", "'0002_auto_20181219_2206'), ] operations = [ migrations.AlterField( model_name='subscriptionpayment', name='payment_read_time', field=models.DateTimeField(default=datetime.datetime(2018, 12, 19, 23, 32,", "import datetime from django.db import migrations, models from django.utils.timezone import utc class Migration(migrations.Migration):", "Generated by Django 2.1.2 on 2018-12-19 23:32 import datetime from django.db import migrations,", "operations = [ migrations.AlterField( model_name='subscriptionpayment', name='payment_read_time', field=models.DateTimeField(default=datetime.datetime(2018, 12, 19, 23, 32, 52, 579324,", "django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('payments', '0002_auto_20181219_2206'), ] operations =", "from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('payments', '0002_auto_20181219_2206'), ] operations", "utc class Migration(migrations.Migration): dependencies = [ ('payments', '0002_auto_20181219_2206'), ] operations = [ migrations.AlterField(", "] operations = [ migrations.AlterField( model_name='subscriptionpayment', name='payment_read_time', field=models.DateTimeField(default=datetime.datetime(2018, 12, 19, 23, 32, 52,", "datetime from django.db import migrations, models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies", "migrations.AlterField( model_name='subscriptionpayment', name='payment_read_time', field=models.DateTimeField(default=datetime.datetime(2018, 12, 19, 23, 32, 52, 579324, tzinfo=utc)), ), ]", "django.db import migrations, models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [", "models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('payments', '0002_auto_20181219_2206'), ]", "= [ migrations.AlterField( model_name='subscriptionpayment', name='payment_read_time', field=models.DateTimeField(default=datetime.datetime(2018, 12, 19, 23, 32, 52, 579324, tzinfo=utc)),", "dependencies = [ ('payments', '0002_auto_20181219_2206'), ] operations = [ migrations.AlterField( model_name='subscriptionpayment', name='payment_read_time', field=models.DateTimeField(default=datetime.datetime(2018,", "<reponame>zanielyene/krabacus3<filename>app/payments/migrations/0003_auto_20181219_2332.py # Generated by Django 2.1.2 on 2018-12-19 23:32 import datetime from django.db", "class Migration(migrations.Migration): dependencies = [ ('payments', '0002_auto_20181219_2206'), ] operations = [ migrations.AlterField( 
model_name='subscriptionpayment',", "from django.db import migrations, models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies =", "[ ('payments', '0002_auto_20181219_2206'), ] operations = [ migrations.AlterField( model_name='subscriptionpayment', name='payment_read_time', field=models.DateTimeField(default=datetime.datetime(2018, 12, 19,", "on 2018-12-19 23:32 import datetime from django.db import migrations, models from django.utils.timezone import", "migrations, models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('payments', '0002_auto_20181219_2206'),", "2018-12-19 23:32 import datetime from django.db import migrations, models from django.utils.timezone import utc", "2.1.2 on 2018-12-19 23:32 import datetime from django.db import migrations, models from django.utils.timezone", "by Django 2.1.2 on 2018-12-19 23:32 import datetime from django.db import migrations, models", "import utc class Migration(migrations.Migration): dependencies = [ ('payments', '0002_auto_20181219_2206'), ] operations = [", "[ migrations.AlterField( model_name='subscriptionpayment', name='payment_read_time', field=models.DateTimeField(default=datetime.datetime(2018, 12, 19, 23, 32, 52, 579324, tzinfo=utc)), ),", "Django 2.1.2 on 2018-12-19 23:32 import datetime from django.db import migrations, models from" ]
[ "return PReLULayer() elif type == 'elu': logger.info(type) return ELULayer() elif type == 'leakyrelu':", "'prelu': logger.info(type) return PReLULayer() elif type == 'elu': logger.info(type) return ELULayer() elif type", "import ELULayer from lab1.part1.util.logger_util import logger class LayerFactory(object): @staticmethod def produce_layer(type): if type", "type == 'prelu': logger.info(type) return PReLULayer() elif type == 'elu': logger.info(type) return ELULayer()", "from lab1.part1.core.layers.elu import ELULayer from lab1.part1.util.logger_util import logger class LayerFactory(object): @staticmethod def produce_layer(type):", "LayerFactory(object): @staticmethod def produce_layer(type): if type == 'dense': logger.info(type) return DenseLayer() elif type", "from lab1.part1.core.layers.softmax import SoftmaxLayer from lab1.part1.core.layers.preLU import PReLULayer from lab1.part1.core.layers.leakyRelu import LeakyReLULayer from", "return SigmoidLayer() elif type == 'softmax': logger.info(type) return SoftmaxLayer() elif type == 'prelu':", "import LeakyReLULayer from lab1.part1.core.layers.elu import ELULayer from lab1.part1.util.logger_util import logger class LayerFactory(object): @staticmethod", "type == 'elu': logger.info(type) return ELULayer() elif type == 'leakyrelu': logger.info(type) return LeakyReLULayer()", "ELULayer from lab1.part1.util.logger_util import logger class LayerFactory(object): @staticmethod def produce_layer(type): if type ==", "import logger class LayerFactory(object): @staticmethod def produce_layer(type): if type == 'dense': logger.info(type) return", "'elu': logger.info(type) return ELULayer() elif type == 'leakyrelu': logger.info(type) return LeakyReLULayer() if __name__", "lab1.part1.core.layers.dense import DenseLayer from lab1.part1.core.layers.relu import ReluLayer from lab1.part1.core.layers.sigmoid import SigmoidLayer from lab1.part1.core.layers.softmax", "lab1.part1.core.layers.preLU import PReLULayer from lab1.part1.core.layers.leakyRelu import LeakyReLULayer from lab1.part1.core.layers.elu import ELULayer from lab1.part1.util.logger_util", "logger.info(type) return PReLULayer() elif type == 'elu': logger.info(type) return ELULayer() elif type ==", "'sigmoid': logger.info(type) return SigmoidLayer() elif type == 'softmax': logger.info(type) return SoftmaxLayer() elif type", "type == 'softmax': logger.info(type) return SoftmaxLayer() elif type == 'prelu': logger.info(type) return PReLULayer()", "logger class LayerFactory(object): @staticmethod def produce_layer(type): if type == 'dense': logger.info(type) return DenseLayer()", "logger.info(type) return SoftmaxLayer() elif type == 'prelu': logger.info(type) return PReLULayer() elif type ==", "lab1.part1.core.layers.elu import ELULayer from lab1.part1.util.logger_util import logger class LayerFactory(object): @staticmethod def produce_layer(type): if", "lab1.part1.util.logger_util import logger class LayerFactory(object): @staticmethod def produce_layer(type): if type == 'dense': logger.info(type)", "elif type == 'relu': logger.info(type) return ReluLayer() elif type == 'sigmoid': logger.info(type) return", "logger.info(type) return DenseLayer() elif type == 'relu': logger.info(type) return ReluLayer() elif type ==", "<reponame>Currycurrycurry/FDSS_PRML<filename>lab1/part1/core/layerFactory.py from lab1.part1.core.layers.dense import DenseLayer from lab1.part1.core.layers.relu import ReluLayer from lab1.part1.core.layers.sigmoid import SigmoidLayer", "lab1.part1.core.layers.sigmoid import 
SigmoidLayer from lab1.part1.core.layers.softmax import SoftmaxLayer from lab1.part1.core.layers.preLU import PReLULayer from lab1.part1.core.layers.leakyRelu", "DenseLayer() elif type == 'relu': logger.info(type) return ReluLayer() elif type == 'sigmoid': logger.info(type)", "class LayerFactory(object): @staticmethod def produce_layer(type): if type == 'dense': logger.info(type) return DenseLayer() elif", "logger.info(type) return ReluLayer() elif type == 'sigmoid': logger.info(type) return SigmoidLayer() elif type ==", "return ELULayer() elif type == 'leakyrelu': logger.info(type) return LeakyReLULayer() if __name__ == '__main__':", "elif type == 'softmax': logger.info(type) return SoftmaxLayer() elif type == 'prelu': logger.info(type) return", "ReluLayer() elif type == 'sigmoid': logger.info(type) return SigmoidLayer() elif type == 'softmax': logger.info(type)", "if type == 'dense': logger.info(type) return DenseLayer() elif type == 'relu': logger.info(type) return", "import SigmoidLayer from lab1.part1.core.layers.softmax import SoftmaxLayer from lab1.part1.core.layers.preLU import PReLULayer from lab1.part1.core.layers.leakyRelu import", "elif type == 'elu': logger.info(type) return ELULayer() elif type == 'leakyrelu': logger.info(type) return", "from lab1.part1.core.layers.leakyRelu import LeakyReLULayer from lab1.part1.core.layers.elu import ELULayer from lab1.part1.util.logger_util import logger class", "== 'sigmoid': logger.info(type) return SigmoidLayer() elif type == 'softmax': logger.info(type) return SoftmaxLayer() elif", "import DenseLayer from lab1.part1.core.layers.relu import ReluLayer from lab1.part1.core.layers.sigmoid import SigmoidLayer from lab1.part1.core.layers.softmax import", "DenseLayer from lab1.part1.core.layers.relu import ReluLayer from lab1.part1.core.layers.sigmoid import SigmoidLayer from lab1.part1.core.layers.softmax import SoftmaxLayer", "lab1.part1.core.layers.softmax import SoftmaxLayer from lab1.part1.core.layers.preLU import PReLULayer from lab1.part1.core.layers.leakyRelu import LeakyReLULayer from lab1.part1.core.layers.elu", "elif type == 'prelu': logger.info(type) return PReLULayer() elif type == 'elu': logger.info(type) return", "type == 'dense': logger.info(type) return DenseLayer() elif type == 'relu': logger.info(type) return ReluLayer()", "return ReluLayer() elif type == 'sigmoid': logger.info(type) return SigmoidLayer() elif type == 'softmax':", "SoftmaxLayer from lab1.part1.core.layers.preLU import PReLULayer from lab1.part1.core.layers.leakyRelu import LeakyReLULayer from lab1.part1.core.layers.elu import ELULayer", "PReLULayer from lab1.part1.core.layers.leakyRelu import LeakyReLULayer from lab1.part1.core.layers.elu import ELULayer from lab1.part1.util.logger_util import logger", "from lab1.part1.core.layers.sigmoid import SigmoidLayer from lab1.part1.core.layers.softmax import SoftmaxLayer from lab1.part1.core.layers.preLU import PReLULayer from", "from lab1.part1.util.logger_util import logger class LayerFactory(object): @staticmethod def produce_layer(type): if type == 'dense':", "SigmoidLayer from lab1.part1.core.layers.softmax import SoftmaxLayer from lab1.part1.core.layers.preLU import PReLULayer from lab1.part1.core.layers.leakyRelu import LeakyReLULayer", "produce_layer(type): if type == 'dense': logger.info(type) return DenseLayer() elif type == 'relu': logger.info(type)", "def produce_layer(type): if type == 'dense': logger.info(type) return DenseLayer() elif type == 'relu':", "from lab1.part1.core.layers.dense import 
DenseLayer from lab1.part1.core.layers.relu import ReluLayer from lab1.part1.core.layers.sigmoid import SigmoidLayer from", "ELULayer() elif type == 'leakyrelu': logger.info(type) return LeakyReLULayer() if __name__ == '__main__': print(LayerFactory.produce_layer('dense'))", "from lab1.part1.core.layers.preLU import PReLULayer from lab1.part1.core.layers.leakyRelu import LeakyReLULayer from lab1.part1.core.layers.elu import ELULayer from", "type == 'sigmoid': logger.info(type) return SigmoidLayer() elif type == 'softmax': logger.info(type) return SoftmaxLayer()", "type == 'relu': logger.info(type) return ReluLayer() elif type == 'sigmoid': logger.info(type) return SigmoidLayer()", "== 'relu': logger.info(type) return ReluLayer() elif type == 'sigmoid': logger.info(type) return SigmoidLayer() elif", "== 'prelu': logger.info(type) return PReLULayer() elif type == 'elu': logger.info(type) return ELULayer() elif", "'relu': logger.info(type) return ReluLayer() elif type == 'sigmoid': logger.info(type) return SigmoidLayer() elif type", "LeakyReLULayer from lab1.part1.core.layers.elu import ELULayer from lab1.part1.util.logger_util import logger class LayerFactory(object): @staticmethod def", "logger.info(type) return ELULayer() elif type == 'leakyrelu': logger.info(type) return LeakyReLULayer() if __name__ ==", "== 'softmax': logger.info(type) return SoftmaxLayer() elif type == 'prelu': logger.info(type) return PReLULayer() elif", "SigmoidLayer() elif type == 'softmax': logger.info(type) return SoftmaxLayer() elif type == 'prelu': logger.info(type)", "== 'dense': logger.info(type) return DenseLayer() elif type == 'relu': logger.info(type) return ReluLayer() elif", "import PReLULayer from lab1.part1.core.layers.leakyRelu import LeakyReLULayer from lab1.part1.core.layers.elu import ELULayer from lab1.part1.util.logger_util import", "lab1.part1.core.layers.relu import ReluLayer from lab1.part1.core.layers.sigmoid import SigmoidLayer from lab1.part1.core.layers.softmax import SoftmaxLayer from lab1.part1.core.layers.preLU", "ReluLayer from lab1.part1.core.layers.sigmoid import SigmoidLayer from lab1.part1.core.layers.softmax import SoftmaxLayer from lab1.part1.core.layers.preLU import PReLULayer", "from lab1.part1.core.layers.relu import ReluLayer from lab1.part1.core.layers.sigmoid import SigmoidLayer from lab1.part1.core.layers.softmax import SoftmaxLayer from", "return DenseLayer() elif type == 'relu': logger.info(type) return ReluLayer() elif type == 'sigmoid':", "== 'elu': logger.info(type) return ELULayer() elif type == 'leakyrelu': logger.info(type) return LeakyReLULayer() if", "'softmax': logger.info(type) return SoftmaxLayer() elif type == 'prelu': logger.info(type) return PReLULayer() elif type", "'dense': logger.info(type) return DenseLayer() elif type == 'relu': logger.info(type) return ReluLayer() elif type", "return SoftmaxLayer() elif type == 'prelu': logger.info(type) return PReLULayer() elif type == 'elu':", "lab1.part1.core.layers.leakyRelu import LeakyReLULayer from lab1.part1.core.layers.elu import ELULayer from lab1.part1.util.logger_util import logger class LayerFactory(object):", "@staticmethod def produce_layer(type): if type == 'dense': logger.info(type) return DenseLayer() elif type ==", "PReLULayer() elif type == 'elu': logger.info(type) return ELULayer() elif type == 'leakyrelu': logger.info(type)", "SoftmaxLayer() elif type == 'prelu': logger.info(type) return PReLULayer() elif type == 'elu': logger.info(type)", "import ReluLayer from lab1.part1.core.layers.sigmoid 
import SigmoidLayer from lab1.part1.core.layers.softmax import SoftmaxLayer from lab1.part1.core.layers.preLU import", "logger.info(type) return SigmoidLayer() elif type == 'softmax': logger.info(type) return SoftmaxLayer() elif type ==", "import SoftmaxLayer from lab1.part1.core.layers.preLU import PReLULayer from lab1.part1.core.layers.leakyRelu import LeakyReLULayer from lab1.part1.core.layers.elu import", "elif type == 'sigmoid': logger.info(type) return SigmoidLayer() elif type == 'softmax': logger.info(type) return" ]
[ "else: self.xlim = xlim if ylim is None: self.ylim = ([self.full_df.total_bounds[1], self.full_df.total_bounds[3]]) else:", "img, ext = cx.bounds2raster( self.xlim[0], self.ylim[0], self.xlim[1], self.ylim[1], save_path, ll=True, # source=cx.providers.CartoDB.Positron, #", "bg_img=None): # Init attributes self.full_df = full_df self.color_col = color_col self.cmap = cmap", "self.ax.set_axis_off() self.ax.set_position([0., 0., 1., 1.]) def download_bg(self, save_path): print(f\"Downloading map's background image to", "= color_col self.cmap = cmap self.bg_img = bg_img if xlim is None: self.xlim", "self.ax.set_ylim(self.ylim) #df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap, vmin=self.vmin, vmax=self.vmax) df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap) if self.bg_img: cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(),", "full_df self.color_col = color_col self.cmap = cmap self.bg_img = bg_img if xlim is", "#df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap, vmin=self.vmin, vmax=self.vmax) df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap) if self.bg_img: cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(), source=self.bg_img,", "DrawerMap(DrawerElem): def __init__(self, full_df, color_col, cmap='YlGnBu', xlim=None, ylim=None, bg_img=None): # Init attributes self.full_df", "def draw(self, df): self.ax.clear() self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) #df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap, vmin=self.vmin, vmax=self.vmax) df.plot(ax=self.ax, column=self.color_col,", "30}) #leg.set_bbox_to_anchor((1.15, 0.5)) self.ax.set_axis_off() self.ax.set_position([0., 0., 1., 1.]) def download_bg(self, save_path): print(f\"Downloading map's", "attributes self.full_df = full_df self.color_col = color_col self.cmap = cmap self.bg_img = bg_img", "self.full_df = full_df self.color_col = color_col self.cmap = cmap self.bg_img = bg_img if", "print(f\"Downloading map's background image to {save_path}\") img, ext = cx.bounds2raster( self.xlim[0], self.ylim[0], self.xlim[1],", "Init attributes self.full_df = full_df self.color_col = color_col self.cmap = cmap self.bg_img =", "cx import matplotlib.pyplot as plt from EixampleEnergy.drawers.drawer_elem import DrawerElem class DrawerMap(DrawerElem): def __init__(self,", "EixampleEnergy.drawers.drawer_elem import DrawerElem class DrawerMap(DrawerElem): def __init__(self, full_df, color_col, cmap='YlGnBu', xlim=None, ylim=None, bg_img=None):", "self.vmax = self.full_df[self.color_col].max() def draw(self, df): self.ax.clear() self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) #df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap, vmin=self.vmin,", "xlim=None, ylim=None, bg_img=None): # Init attributes self.full_df = full_df self.color_col = color_col self.cmap", "plt from EixampleEnergy.drawers.drawer_elem import DrawerElem class DrawerMap(DrawerElem): def __init__(self, full_df, color_col, cmap='YlGnBu', xlim=None,", "# Init attributes self.full_df = full_df self.color_col = color_col self.cmap = cmap self.bg_img", "self.ylim = ([self.full_df.total_bounds[1], self.full_df.total_bounds[3]]) else: self.ylim = ylim # Min / max self.vmin", "import DrawerElem class DrawerMap(DrawerElem): def __init__(self, full_df, color_col, cmap='YlGnBu', xlim=None, ylim=None, bg_img=None): #", "background image to {save_path}\") img, ext = cx.bounds2raster( self.xlim[0], self.ylim[0], self.xlim[1], self.ylim[1], save_path,", "= cmap self.bg_img = bg_img if xlim is None: self.xlim = 
([self.full_df.total_bounds[0], self.full_df.total_bounds[2]])", "= ([self.full_df.total_bounds[1], self.full_df.total_bounds[3]]) else: self.ylim = ylim # Min / max self.vmin =", "cmap=self.cmap, vmin=self.vmin, vmax=self.vmax) df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap) if self.bg_img: cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(), source=self.bg_img, cmap=plt.get_cmap('gray'), vmin=0,", "to {save_path}\") img, ext = cx.bounds2raster( self.xlim[0], self.ylim[0], self.xlim[1], self.ylim[1], save_path, ll=True, #", "ylim # Min / max self.vmin = self.full_df[self.color_col].min() self.vmax = self.full_df[self.color_col].max() def draw(self,", "class DrawerMap(DrawerElem): def __init__(self, full_df, color_col, cmap='YlGnBu', xlim=None, ylim=None, bg_img=None): # Init attributes", "<gh_stars>0 import contextily as cx import matplotlib.pyplot as plt from EixampleEnergy.drawers.drawer_elem import DrawerElem", "= bg_img if xlim is None: self.xlim = ([self.full_df.total_bounds[0], self.full_df.total_bounds[2]]) else: self.xlim =", "cmap=plt.get_cmap('gray'), vmin=0, vmax=255) #leg = self.ax.get_legend() #self.ax.legend(bbox_to_anchor=(1.0, .5), prop={'size': 30}) #leg.set_bbox_to_anchor((1.15, 0.5)) self.ax.set_axis_off()", ".5), prop={'size': 30}) #leg.set_bbox_to_anchor((1.15, 0.5)) self.ax.set_axis_off() self.ax.set_position([0., 0., 1., 1.]) def download_bg(self, save_path):", "self.ax.get_legend() #self.ax.legend(bbox_to_anchor=(1.0, .5), prop={'size': 30}) #leg.set_bbox_to_anchor((1.15, 0.5)) self.ax.set_axis_off() self.ax.set_position([0., 0., 1., 1.]) def", "def download_bg(self, save_path): print(f\"Downloading map's background image to {save_path}\") img, ext = cx.bounds2raster(", "0., 1., 1.]) def download_bg(self, save_path): print(f\"Downloading map's background image to {save_path}\") img,", "= ylim # Min / max self.vmin = self.full_df[self.color_col].min() self.vmax = self.full_df[self.color_col].max() def", "download_bg(self, save_path): print(f\"Downloading map's background image to {save_path}\") img, ext = cx.bounds2raster( self.xlim[0],", "xlim if ylim is None: self.ylim = ([self.full_df.total_bounds[1], self.full_df.total_bounds[3]]) else: self.ylim = ylim", "self.color_col = color_col self.cmap = cmap self.bg_img = bg_img if xlim is None:", "as cx import matplotlib.pyplot as plt from EixampleEnergy.drawers.drawer_elem import DrawerElem class DrawerMap(DrawerElem): def", "self.bg_img: cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(), source=self.bg_img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255) #leg = self.ax.get_legend() #self.ax.legend(bbox_to_anchor=(1.0, .5), prop={'size':", "None: self.ylim = ([self.full_df.total_bounds[1], self.full_df.total_bounds[3]]) else: self.ylim = ylim # Min / max", "= self.full_df[self.color_col].max() def draw(self, df): self.ax.clear() self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) #df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap, vmin=self.vmin, vmax=self.vmax)", "source=self.bg_img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255) #leg = self.ax.get_legend() #self.ax.legend(bbox_to_anchor=(1.0, .5), prop={'size': 30}) #leg.set_bbox_to_anchor((1.15, 0.5))", "contextily as cx import matplotlib.pyplot as plt from EixampleEnergy.drawers.drawer_elem import DrawerElem class DrawerMap(DrawerElem):", "/ max self.vmin = self.full_df[self.color_col].min() self.vmax = self.full_df[self.color_col].max() def draw(self, df): self.ax.clear() self.ax.set_xlim(self.xlim)", "self.xlim = 
([self.full_df.total_bounds[0], self.full_df.total_bounds[2]]) else: self.xlim = xlim if ylim is None: self.ylim", "self.bg_img = bg_img if xlim is None: self.xlim = ([self.full_df.total_bounds[0], self.full_df.total_bounds[2]]) else: self.xlim", "is None: self.ylim = ([self.full_df.total_bounds[1], self.full_df.total_bounds[3]]) else: self.ylim = ylim # Min /", "if self.bg_img: cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(), source=self.bg_img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255) #leg = self.ax.get_legend() #self.ax.legend(bbox_to_anchor=(1.0, .5),", "ylim is None: self.ylim = ([self.full_df.total_bounds[1], self.full_df.total_bounds[3]]) else: self.ylim = ylim # Min", "xlim is None: self.xlim = ([self.full_df.total_bounds[0], self.full_df.total_bounds[2]]) else: self.xlim = xlim if ylim", "crs=self.full_df.crs.to_string(), source=self.bg_img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255) #leg = self.ax.get_legend() #self.ax.legend(bbox_to_anchor=(1.0, .5), prop={'size': 30}) #leg.set_bbox_to_anchor((1.15,", "color_col, cmap='YlGnBu', xlim=None, ylim=None, bg_img=None): # Init attributes self.full_df = full_df self.color_col =", "self.ax.clear() self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) #df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap, vmin=self.vmin, vmax=self.vmax) df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap) if self.bg_img:", "= self.ax.get_legend() #self.ax.legend(bbox_to_anchor=(1.0, .5), prop={'size': 30}) #leg.set_bbox_to_anchor((1.15, 0.5)) self.ax.set_axis_off() self.ax.set_position([0., 0., 1., 1.])", "DrawerElem class DrawerMap(DrawerElem): def __init__(self, full_df, color_col, cmap='YlGnBu', xlim=None, ylim=None, bg_img=None): # Init", "color_col self.cmap = cmap self.bg_img = bg_img if xlim is None: self.xlim =", "else: self.ylim = ylim # Min / max self.vmin = self.full_df[self.color_col].min() self.vmax =", "= ([self.full_df.total_bounds[0], self.full_df.total_bounds[2]]) else: self.xlim = xlim if ylim is None: self.ylim =", "map's background image to {save_path}\") img, ext = cx.bounds2raster( self.xlim[0], self.ylim[0], self.xlim[1], self.ylim[1],", "#leg = self.ax.get_legend() #self.ax.legend(bbox_to_anchor=(1.0, .5), prop={'size': 30}) #leg.set_bbox_to_anchor((1.15, 0.5)) self.ax.set_axis_off() self.ax.set_position([0., 0., 1.,", "df): self.ax.clear() self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) #df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap, vmin=self.vmin, vmax=self.vmax) df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap) if", "= full_df self.color_col = color_col self.cmap = cmap self.bg_img = bg_img if xlim", "if xlim is None: self.xlim = ([self.full_df.total_bounds[0], self.full_df.total_bounds[2]]) else: self.xlim = xlim if", "([self.full_df.total_bounds[1], self.full_df.total_bounds[3]]) else: self.ylim = ylim # Min / max self.vmin = self.full_df[self.color_col].min()", "image to {save_path}\") img, ext = cx.bounds2raster( self.xlim[0], self.ylim[0], self.xlim[1], self.ylim[1], save_path, ll=True,", "ext = cx.bounds2raster( self.xlim[0], self.ylim[0], self.xlim[1], self.ylim[1], save_path, ll=True, # source=cx.providers.CartoDB.Positron, # source='https://a.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png'", "= xlim if ylim is None: self.ylim = ([self.full_df.total_bounds[1], self.full_df.total_bounds[3]]) else: self.ylim =", "cmap self.bg_img = bg_img if xlim is None: self.xlim = ([self.full_df.total_bounds[0], self.full_df.total_bounds[2]]) else:", "self.ax.set_position([0., 0., 1., 
1.]) def download_bg(self, save_path): print(f\"Downloading map's background image to {save_path}\")", "0.5)) self.ax.set_axis_off() self.ax.set_position([0., 0., 1., 1.]) def download_bg(self, save_path): print(f\"Downloading map's background image", "self.full_df.total_bounds[2]]) else: self.xlim = xlim if ylim is None: self.ylim = ([self.full_df.total_bounds[1], self.full_df.total_bounds[3]])", "#self.ax.legend(bbox_to_anchor=(1.0, .5), prop={'size': 30}) #leg.set_bbox_to_anchor((1.15, 0.5)) self.ax.set_axis_off() self.ax.set_position([0., 0., 1., 1.]) def download_bg(self,", "ylim=None, bg_img=None): # Init attributes self.full_df = full_df self.color_col = color_col self.cmap =", "column=self.color_col, cmap=self.cmap, vmin=self.vmin, vmax=self.vmax) df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap) if self.bg_img: cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(), source=self.bg_img, cmap=plt.get_cmap('gray'),", "#leg.set_bbox_to_anchor((1.15, 0.5)) self.ax.set_axis_off() self.ax.set_position([0., 0., 1., 1.]) def download_bg(self, save_path): print(f\"Downloading map's background", "save_path): print(f\"Downloading map's background image to {save_path}\") img, ext = cx.bounds2raster( self.xlim[0], self.ylim[0],", "self.vmin = self.full_df[self.color_col].min() self.vmax = self.full_df[self.color_col].max() def draw(self, df): self.ax.clear() self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) #df.plot(ax=self.ax,", "matplotlib.pyplot as plt from EixampleEnergy.drawers.drawer_elem import DrawerElem class DrawerMap(DrawerElem): def __init__(self, full_df, color_col,", "vmin=self.vmin, vmax=self.vmax) df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap) if self.bg_img: cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(), source=self.bg_img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255)", "prop={'size': 30}) #leg.set_bbox_to_anchor((1.15, 0.5)) self.ax.set_axis_off() self.ax.set_position([0., 0., 1., 1.]) def download_bg(self, save_path): print(f\"Downloading", "bg_img if xlim is None: self.xlim = ([self.full_df.total_bounds[0], self.full_df.total_bounds[2]]) else: self.xlim = xlim", "{save_path}\") img, ext = cx.bounds2raster( self.xlim[0], self.ylim[0], self.xlim[1], self.ylim[1], save_path, ll=True, # source=cx.providers.CartoDB.Positron,", "import contextily as cx import matplotlib.pyplot as plt from EixampleEnergy.drawers.drawer_elem import DrawerElem class", "as plt from EixampleEnergy.drawers.drawer_elem import DrawerElem class DrawerMap(DrawerElem): def __init__(self, full_df, color_col, cmap='YlGnBu',", "from EixampleEnergy.drawers.drawer_elem import DrawerElem class DrawerMap(DrawerElem): def __init__(self, full_df, color_col, cmap='YlGnBu', xlim=None, ylim=None,", "is None: self.xlim = ([self.full_df.total_bounds[0], self.full_df.total_bounds[2]]) else: self.xlim = xlim if ylim is", "self.full_df.total_bounds[3]]) else: self.ylim = ylim # Min / max self.vmin = self.full_df[self.color_col].min() self.vmax", "self.cmap = cmap self.bg_img = bg_img if xlim is None: self.xlim = ([self.full_df.total_bounds[0],", "column=self.color_col, cmap=self.cmap) if self.bg_img: cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(), source=self.bg_img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255) #leg = self.ax.get_legend()", "self.full_df[self.color_col].min() self.vmax = self.full_df[self.color_col].max() def draw(self, df): self.ax.clear() self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) #df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap,", 
"1.]) def download_bg(self, save_path): print(f\"Downloading map's background image to {save_path}\") img, ext =", "= cx.bounds2raster( self.xlim[0], self.ylim[0], self.xlim[1], self.ylim[1], save_path, ll=True, # source=cx.providers.CartoDB.Positron, # source='https://a.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png' source='http://www.google.cn/maps/vt?lyrs=s@189&gl=cn&x={x}&y={y}&z={z}'", "def __init__(self, full_df, color_col, cmap='YlGnBu', xlim=None, ylim=None, bg_img=None): # Init attributes self.full_df =", "self.ylim = ylim # Min / max self.vmin = self.full_df[self.color_col].min() self.vmax = self.full_df[self.color_col].max()", "= self.full_df[self.color_col].min() self.vmax = self.full_df[self.color_col].max() def draw(self, df): self.ax.clear() self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) #df.plot(ax=self.ax, column=self.color_col,", "self.xlim = xlim if ylim is None: self.ylim = ([self.full_df.total_bounds[1], self.full_df.total_bounds[3]]) else: self.ylim", "# Min / max self.vmin = self.full_df[self.color_col].min() self.vmax = self.full_df[self.color_col].max() def draw(self, df):", "1., 1.]) def download_bg(self, save_path): print(f\"Downloading map's background image to {save_path}\") img, ext", "cx.bounds2raster( self.xlim[0], self.ylim[0], self.xlim[1], self.ylim[1], save_path, ll=True, # source=cx.providers.CartoDB.Positron, # source='https://a.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png' source='http://www.google.cn/maps/vt?lyrs=s@189&gl=cn&x={x}&y={y}&z={z}' )", "df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap) if self.bg_img: cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(), source=self.bg_img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255) #leg =", "None: self.xlim = ([self.full_df.total_bounds[0], self.full_df.total_bounds[2]]) else: self.xlim = xlim if ylim is None:", "import matplotlib.pyplot as plt from EixampleEnergy.drawers.drawer_elem import DrawerElem class DrawerMap(DrawerElem): def __init__(self, full_df,", "cmap='YlGnBu', xlim=None, ylim=None, bg_img=None): # Init attributes self.full_df = full_df self.color_col = color_col", "vmax=255) #leg = self.ax.get_legend() #self.ax.legend(bbox_to_anchor=(1.0, .5), prop={'size': 30}) #leg.set_bbox_to_anchor((1.15, 0.5)) self.ax.set_axis_off() self.ax.set_position([0., 0.,", "cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(), source=self.bg_img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255) #leg = self.ax.get_legend() #self.ax.legend(bbox_to_anchor=(1.0, .5), prop={'size': 30})", "if ylim is None: self.ylim = ([self.full_df.total_bounds[1], self.full_df.total_bounds[3]]) else: self.ylim = ylim #", "vmax=self.vmax) df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap) if self.bg_img: cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(), source=self.bg_img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255) #leg", "__init__(self, full_df, color_col, cmap='YlGnBu', xlim=None, ylim=None, bg_img=None): # Init attributes self.full_df = full_df", "full_df, color_col, cmap='YlGnBu', xlim=None, ylim=None, bg_img=None): # Init attributes self.full_df = full_df self.color_col", "Min / max self.vmin = self.full_df[self.color_col].min() self.vmax = self.full_df[self.color_col].max() def draw(self, df): self.ax.clear()", "max self.vmin = self.full_df[self.color_col].min() self.vmax = self.full_df[self.color_col].max() def draw(self, df): self.ax.clear() self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim)", "draw(self, df): self.ax.clear() self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) 
#df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap, vmin=self.vmin, vmax=self.vmax) df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap)", "self.full_df[self.color_col].max() def draw(self, df): self.ax.clear() self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) #df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap, vmin=self.vmin, vmax=self.vmax) df.plot(ax=self.ax,", "self.ax.set_xlim(self.xlim) self.ax.set_ylim(self.ylim) #df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap, vmin=self.vmin, vmax=self.vmax) df.plot(ax=self.ax, column=self.color_col, cmap=self.cmap) if self.bg_img: cx.add_basemap(self.ax,", "vmin=0, vmax=255) #leg = self.ax.get_legend() #self.ax.legend(bbox_to_anchor=(1.0, .5), prop={'size': 30}) #leg.set_bbox_to_anchor((1.15, 0.5)) self.ax.set_axis_off() self.ax.set_position([0.,", "([self.full_df.total_bounds[0], self.full_df.total_bounds[2]]) else: self.xlim = xlim if ylim is None: self.ylim = ([self.full_df.total_bounds[1],", "cmap=self.cmap) if self.bg_img: cx.add_basemap(self.ax, crs=self.full_df.crs.to_string(), source=self.bg_img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255) #leg = self.ax.get_legend() #self.ax.legend(bbox_to_anchor=(1.0," ]
[ "flattened_img = img.reshape((len(img), -1)) loglikelihoods = jax.tree_map(lambda w: jnp.sum(hmms[w].loglikelihood(word, flattened_img)), vocab) loglikelihoods =", "# --- # + [markdown] id=\"M_qo7DmLJKLP\" # #Class-Conditional Bernoulli Mixture Model for EMNIST", "Word from jax import vmap import jax.numpy as jnp import jax from jax.random", "28, 28)) fig, axes = plt.subplots(ncols=4, nrows=10, figsize=(4, 10)) fig.subplots_adjust(hspace = .2, wspace=.001)", "fake_test_data(test_words, dataset, targets, n_char + 1, \"all\") # + id=\"1dFCdVNgPYtJ\" def plot_log_likelihood(hmms, test_words,", "= 30 n_char = 52 mixing_coeffs = jnp.array(np.full((n_char, n_mix), 1./n_mix)) p_min, p_max =", "# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 666} id=\"DSOiuNeAM8gl\" outputId=\"dce9416a-b646-423d-b4bf-c78728db1cab\" plot_components_dist(cbm_gd, n_mix) # + [markdown]", "jax.tree_multimap(lambda word, key: hmms[word].n_sample(n_misspelled, key), vocab, keys) # + id=\"7VXVsobcg_KO\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"3e915a79-7f5c-4131-d6ee-97f11c83d86f\"", "* len(vocab), -1)), n_char + 1, \"all\") get_decoded_samples(decoded_words) # + [markdown] id=\"xrRy8MG0afR8\" #", "from conditional_bernoulli_mix_lib import ClassConditionalBMM from conditional_bernoulli_mix_utils import fake_test_data, encode, decode, get_decoded_samples, get_emnist_images_per_class from", "n_pixels = 28 * 28 probs = jnp.array(np.random.uniform(p_min, p_max, (n_char, n_mix, n_pixels))) class_priors", "fig.tight_layout() plt.show() # + id=\"EbZn9vrfhei4\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 728} outputId=\"114217bf-cadb-4331-82ef-b4844c038342\" plot_samples(samples) # +", "plt.show() # + id=\"EbZn9vrfhei4\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 728} outputId=\"114217bf-cadb-4331-82ef-b4844c038342\" plot_samples(samples) # + [markdown]", "figsize=(20, 10)) for i, (ax, img, word) in enumerate(zip(axes.flat, test_images, test_words)): flattened_img =", "[markdown] id=\"37mNMNrpInfh\" # ## EM Algorithm # + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 336} id=\"FJeBzIKYfsUk\"", "28 * 28 probs = jnp.array(np.random.uniform(p_min, p_max, (n_char, n_mix, n_pixels))) class_priors = jnp.array(np.full((n_char,),", "[dev_array for dev_array in split(rng_key, len(vocab))] # + id=\"x3GpZ8jbf11N\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"5a348b69-bdf4-4f80-f059-1062ba2fbb88\" hmms", "id=\"FO31plUVNDSO\" # ### EM # + id=\"ZM43qs6FfvlP\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 666} outputId=\"81a095f1-1099-4809-90a8-272dbed11662\" plot_components_dist(cbm_em,", "'bird', 'bond', 'bone', 'bank', 'byte', 'pond', 'mind', 'song', 'band'] rng_key = PRNGKey(0) keys", "'mind', 'song', 'band'] rng_key = PRNGKey(0) keys = [dev_array for dev_array in split(rng_key,", "vmap import jax.numpy as jnp import jax from jax.random import PRNGKey, split import", "1./n_char)) cbm_gd = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char) cbm_em = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char)", "k in range(n_mix): for cls in range(cbm.num_of_classes): plt.subplot(n_mix ,cbm.num_of_classes, cbm.num_of_classes*k + cls +1)", "keys = [dev_array for dev_array in split(rng_key, len(vocab))] # + id=\"x3GpZ8jbf11N\" colab={\"base_uri\": \"https://localhost:8080/\"}", "# ## Sampling # + id=\"wgI6sFWKN4ax\" p1, p2, p3 = 
0.4, 0.1, 2e-3", "ax in enumerate(axes.flatten()): ax.imshow(samples[i], cmap=\"gray\") ax.set_axis_off() fig.tight_layout() plt.show() # + id=\"EbZn9vrfhei4\" colab={\"base_uri\": \"https://localhost:8080/\",", "Mixture Model for EMNIST # + [markdown] id=\"TU1pCzcIJHTm\" # ## Setup # #", "EMNIST # + [markdown] id=\"TU1pCzcIJHTm\" # ## Setup # # + id=\"400WanLyGA2C\" #", "# jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5'", "outputId=\"3e915a79-7f5c-4131-d6ee-97f11c83d86f\" decoded_words = vmap(decode, in_axes = (0, None, None))(jnp.array(samples)[:, :, :, -1].reshape((n_misspelled *", "\"https://localhost:8080/\", \"height\": 728} outputId=\"114217bf-cadb-4331-82ef-b4844c038342\" plot_samples(samples) # + [markdown] id=\"eNDmwV7EPyrR\" # ## Calculation of", "from jax.random import PRNGKey, split import numpy as np from matplotlib import pyplot", "# ### GD # + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 666} id=\"DSOiuNeAM8gl\" outputId=\"dce9416a-b646-423d-b4bf-c78728db1cab\" plot_components_dist(cbm_gd, n_mix)", "= 0.4, 0.1, 2e-3 n_misspelled = 1 # number of misspelled words created", "[markdown] id=\"J8KLkCWpNAeF\" # ### GD # + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 666} id=\"DSOiuNeAM8gl\" outputId=\"dce9416a-b646-423d-b4bf-c78728db1cab\"", "= cbm_em.fit_em(dataset, targets, 8) plt.plot(losses, color=\"k\", linewidth=3) plt.xlabel(\"Iteration\") plt.ylabel(\"Negative Log Likelihood\") plt.show() #", "np.array(samples)[:, :, :, :-1].reshape((-1, 28, 28)) fig, axes = plt.subplots(ncols=4, nrows=10, figsize=(4, 10))", "30 n_char = 52 mixing_coeffs = jnp.array(np.full((n_char, n_mix), 1./n_mix)) p_min, p_max = 0.4,", "img, word) in enumerate(zip(axes.flat, test_images, test_words)): flattened_img = img.reshape((len(img), -1)) loglikelihoods = jax.tree_map(lambda", "jax import vmap import jax.numpy as jnp import jax from jax.random import PRNGKey,", "targets, batch_size, num_epochs = num_epochs) plt.plot(losses, color=\"k\", linewidth=3) plt.xlabel(\"Iteration\") plt.ylabel(\"Negative Log Likelihood\") plt.show()", "\"https://localhost:8080/\"} outputId=\"5a348b69-bdf4-4f80-f059-1062ba2fbb88\" hmms = {word: Word(word, p1, p2, p3, n_char, \"all\", mixing_coeffs=cbm_em.model.mixture_distribution.probs, initial_probs=cbm_em.model.components_distribution.distribution.probs,", "# ## Plot of the Probabilities of Components Distribution # + id=\"KkyAHDW4JgyM\" def", "# + [markdown] id=\"eNDmwV7EPyrR\" # ## Calculation of Log Likelihoods for Test Data", "linewidth=3) plt.xlabel(\"Iteration\") plt.ylabel(\"Negative Log Likelihood\") plt.show() # + [markdown] id=\"NjCQpoH1Iuuf\" # ## Plot", "class_priors=class_priors, n_char=n_char) cbm_em = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char) # + [markdown] id=\"Qa95Fua5Kc3i\" #", "8) plt.plot(losses, color=\"k\", linewidth=3) plt.xlabel(\"Iteration\") plt.ylabel(\"Negative Log Likelihood\") plt.show() # + [markdown] id=\"NjCQpoH1Iuuf\"", "get_decoded_samples(decoded_words) # + [markdown] id=\"xrRy8MG0afR8\" # ### Figure # + id=\"O0-HaN5rQAvP\" def plot_samples(samples):", "plt.axis('off') plt.tight_layout() plt.show() # + [markdown] id=\"J8KLkCWpNAeF\" # ### GD # + colab={\"base_uri\":", "Figure # + id=\"O0-HaN5rQAvP\" def plot_samples(samples): samples = np.array(samples)[:, :, :, :-1].reshape((-1, 28,", "linewidth=3) plt.xlabel(\"Iteration\") plt.ylabel(\"Negative Log Likelihood\") plt.show() # + [markdown] 
id=\"37mNMNrpInfh\" # ## EM", "for dev_array in split(rng_key, len(vocab))] # + id=\"x3GpZ8jbf11N\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"5a348b69-bdf4-4f80-f059-1062ba2fbb88\" hmms =", "= 52 mixing_coeffs = jnp.array(np.full((n_char, n_mix), 1./n_mix)) p_min, p_max = 0.4, 0.6 n_pixels", "'-OnD', 'b--D', '---D', '--Nd', 'B-nD', '-O--', 'b--d', '--n-'] test_images = fake_test_data(test_words, dataset, targets,", "class_priors = jnp.array(np.full((n_char,), 1./n_char)) cbm_gd = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char) cbm_em = ClassConditionalBMM(mixing_coeffs=mixing_coeffs,", "jupytext_version: 1.11.3 # kernelspec: # display_name: Python 3 # name: python3 # ---", "axes = plt.subplots(4, 3, figsize=(20, 10)) for i, (ax, img, word) in enumerate(zip(axes.flat,", "jax.tree_map(lambda w: jnp.sum(hmms[w].loglikelihood(word, flattened_img)), vocab) loglikelihoods = jnp.array(loglikelihoods) ax.bar(vocab, jnp.exp(jax.nn.log_softmax(loglikelihoods)), color=\"black\") ax.set_title(f'{word}') plt.tight_layout()", "+ id=\"O0-HaN5rQAvP\" def plot_samples(samples): samples = np.array(samples)[:, :, :, :-1].reshape((-1, 28, 28)) fig,", "jnp.array(loglikelihoods) ax.bar(vocab, jnp.exp(jax.nn.log_softmax(loglikelihoods)), color=\"black\") ax.set_title(f'{word}') plt.tight_layout() plt.show() # + id=\"qv-Df8GEhfC4\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\":", "'-O--', 'b--d', '--n-'] test_images = fake_test_data(test_words, dataset, targets, n_char + 1, \"all\") #", "plt.show() # + [markdown] id=\"NjCQpoH1Iuuf\" # ## Plot of the Probabilities of Components", "--depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # + id=\"k1rLl6dHH7Wh\"", "Python 3 # name: python3 # --- # + [markdown] id=\"M_qo7DmLJKLP\" # #Class-Conditional", "3 # name: python3 # --- # + [markdown] id=\"M_qo7DmLJKLP\" # #Class-Conditional Bernoulli", "plot_log_likelihood(hmms, test_words, test_images, vocab): fig, axes = plt.subplots(4, 3, figsize=(20, 10)) for i,", "# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name:", "for cls in range(cbm.num_of_classes): plt.subplot(n_mix ,cbm.num_of_classes, cbm.num_of_classes*k + cls +1) plt.imshow(1 - cbm.model.components_distribution.distribution.probs[cls][k,:].reshape((28,28)),", "from jax import vmap import jax.numpy as jnp import jax from jax.random import", "Likelihoods for Test Data # + id=\"525MUl5HPe1K\" # noisy words test_words = ['bo--',", "batch_size, num_epochs = num_epochs) plt.plot(losses, color=\"k\", linewidth=3) plt.xlabel(\"Iteration\") plt.ylabel(\"Negative Log Likelihood\") plt.show() #", "\"all\", mixing_coeffs=cbm_em.model.mixture_distribution.probs, initial_probs=cbm_em.model.components_distribution.distribution.probs, n_mix=n_mix) for word in vocab} samples = jax.tree_multimap(lambda word, key:", "jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' #", "# + id=\"KkyAHDW4JgyM\" def plot_components_dist(cbm, n_mix): fig = plt.figure(figsize=(45, 20)) for k in", "fig = plt.figure(figsize=(45, 20)) for k in range(n_mix): for cls in range(cbm.num_of_classes): plt.subplot(n_mix", "= ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char) # + [markdown] id=\"Qa95Fua5Kc3i\" # ## Full Batch", "'bone', 'bank', 'byte', 'pond', 'mind', 'song', 'band'] rng_key = PRNGKey(0) keys = [dev_array", "PRNGKey, split import numpy as 
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.11.3
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="M_qo7DmLJKLP"
# #Class-Conditional Bernoulli Mixture Model for EMNIST

# + [markdown] id="TU1pCzcIJHTm"
# ## Setup
#

# + id="400WanLyGA2C"
# !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
# %cd -q /pyprobml/scripts

# + id="k1rLl6dHH7Wh"
# !pip install -q superimport
# !pip install -q distrax

# + id="cLpBn5KQeB46"
from conditional_bernoulli_mix_lib import ClassConditionalBMM
from conditional_bernoulli_mix_utils import fake_test_data, encode, decode, get_decoded_samples, get_emnist_images_per_class
from noisy_spelling_hmm import Word

from jax import vmap
import jax.numpy as jnp
import jax
from jax.random import PRNGKey, split

import numpy as np
from matplotlib import pyplot as plt

# + colab={"base_uri": "https://localhost:8080/"} id="ey9k06RweuKc" outputId="38131e5a-82fb-49db-c4d3-f4364a643152"
select_n = 25
dataset, targets = get_emnist_images_per_class(select_n)
dataset, targets = jnp.array(dataset), jnp.array(targets)

# + [markdown] id="KwNq7HYYLPO9"
# ## Initialization of Class Conditional BMMs

# + colab={"base_uri": "https://localhost:8080/"} id="UABtUDPjffFt" outputId="d873a708-542c-44e6-8c72-2c5908c7bbad"
n_mix = 30
n_char = 52

mixing_coeffs = jnp.array(np.full((n_char, n_mix), 1./n_mix))

p_min, p_max = 0.4, 0.6
n_pixels = 28 * 28
probs = jnp.array(np.random.uniform(p_min, p_max, (n_char, n_mix, n_pixels)))

class_priors = jnp.array(np.full((n_char,), 1./n_char))

cbm_gd = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char)
cbm_em = ClassConditionalBMM(mixing_coeffs=mixing_coeffs, probs=probs, class_priors=class_priors, n_char=n_char)
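
# + [markdown]
# To make the shapes above concrete: each of the `n_char = 52` character classes (upper- and
# lower-case letters) gets its own Bernoulli mixture over the flattened $28 \times 28$ image,
# treated as binary pixels $x \in \{0, 1\}^{784}$. `mixing_coeffs` holds the per-class mixture
# weights $\pi_{ck}$, `probs` the per-pixel Bernoulli means $\mu_{ckd}$, and `class_priors` a
# uniform prior $p(y = c)$, so the class-conditional density being fit is the standard
# class-conditional Bernoulli mixture
#
# $$p(x \mid y = c) = \sum_{k=1}^{K} \pi_{ck} \prod_{d=1}^{784} \mu_{ckd}^{x_d}\,(1 - \mu_{ckd})^{1 - x_d}, \qquad K = n_{\text{mix}} = 30.$$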

# + [markdown] id="Qa95Fua5Kc3i"
# ## Full Batch Gradient Descent

# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="PDzuEjs9Kewi" outputId="c81916c0-c6b7-45bd-d308-eab878afe281"
num_epochs, batch_size = 100, len(dataset)

losses = cbm_gd.fit_sgd(dataset.reshape((-1, n_pixels)), targets, batch_size, num_epochs=num_epochs)

plt.plot(losses, color="k", linewidth=3)
plt.xlabel("Iteration")
plt.ylabel("Negative Log Likelihood")
plt.show()
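
# + [markdown]
# Note that `batch_size = len(dataset)`, so every update in `fit_sgd` above is computed on the
# full training set; the run is therefore full-batch gradient descent on the negative log
# likelihood rather than stochastic mini-batch SGD, which is what the section title refers to.
# If `fit_sgd` also accepts smaller batches (an assumption based on its `batch_size` argument,
# not something demonstrated in this notebook), a mini-batch run would look like:
#
# ```python
# losses_minibatch = cbm_gd.fit_sgd(dataset.reshape((-1, n_pixels)), targets, 128, num_epochs=num_epochs)
# ```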

# + [markdown] id="37mNMNrpInfh"
# ## EM Algorithm

# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="FJeBzIKYfsUk" outputId="9d8db485-a251-4b1a-a6e5-93833c83dce6"
losses = cbm_em.fit_em(dataset, targets, 8)

plt.plot(losses, color="k", linewidth=3)
plt.xlabel("Iteration")
plt.ylabel("Negative Log Likelihood")
plt.show()

# + [markdown] id="NjCQpoH1Iuuf"
# ## Plot of the Probabilities of Components Distribution

# + id="KkyAHDW4JgyM"
def plot_components_dist(cbm, n_mix):
    fig = plt.figure(figsize=(45, 20))
    for k in range(n_mix):
        for cls in range(cbm.num_of_classes):
            plt.subplot(n_mix, cbm.num_of_classes, cbm.num_of_classes * k + cls + 1)
            plt.imshow(1 - cbm.model.components_distribution.distribution.probs[cls][k, :].reshape((28, 28)), cmap="gray")
            plt.axis('off')
    plt.tight_layout()
    plt.show()


# + [markdown] id="J8KLkCWpNAeF"
# ### GD

# + colab={"base_uri": "https://localhost:8080/", "height": 666} id="DSOiuNeAM8gl" outputId="dce9416a-b646-423d-b4bf-c78728db1cab"
plot_components_dist(cbm_gd, n_mix)

# + [markdown] id="FO31plUVNDSO"
# ### EM

# + id="ZM43qs6FfvlP" colab={"base_uri": "https://localhost:8080/", "height": 666} outputId="81a095f1-1099-4809-90a8-272dbed11662"
plot_components_dist(cbm_em, n_mix)

# + [markdown] id="IqRdcklzOeAY"
# ## Sampling

# + id="wgI6sFWKN4ax"
p1, p2, p3 = 0.4, 0.1, 2e-3

n_misspelled = 1  # number of misspelled words created for each class

vocab = ['book', 'bird', 'bond', 'bone', 'bank', 'byte', 'pond', 'mind', 'song', 'band']

rng_key = PRNGKey(0)
keys = [dev_array for dev_array in split(rng_key, len(vocab))]

# + id="x3GpZ8jbf11N" colab={"base_uri": "https://localhost:8080/"} outputId="5a348b69-bdf4-4f80-f059-1062ba2fbb88"
hmms = {word: Word(word, p1, p2, p3, n_char, "all",
                   mixing_coeffs=cbm_em.model.mixture_distribution.probs,
                   initial_probs=cbm_em.model.components_distribution.distribution.probs,
                   n_mix=n_mix)
        for word in vocab}

samples = jax.tree_multimap(lambda word, key: hmms[word].n_sample(n_misspelled, key), vocab, keys)

# + id="7VXVsobcg_KO" colab={"base_uri": "https://localhost:8080/"} outputId="3e915a79-7f5c-4131-d6ee-97f11c83d86f"
decoded_words = vmap(decode, in_axes=(0, None, None))(
    jnp.array(samples)[:, :, :, -1].reshape((n_misspelled * len(vocab), -1)), n_char + 1, "all")
get_decoded_samples(decoded_words)

# + [markdown] id="xrRy8MG0afR8"
# ### Figure

# + id="O0-HaN5rQAvP"
def plot_samples(samples):
    samples = np.array(samples)[:, :, :, :-1].reshape((-1, 28, 28))
    fig, axes = plt.subplots(ncols=4, nrows=10, figsize=(4, 10))
    fig.subplots_adjust(hspace=.2, wspace=.001)

    for i, ax in enumerate(axes.flatten()):
        ax.imshow(samples[i], cmap="gray")
        ax.set_axis_off()

    fig.tight_layout()
    plt.show()


# + id="EbZn9vrfhei4" colab={"base_uri": "https://localhost:8080/", "height": 728} outputId="114217bf-cadb-4331-82ef-b4844c038342"
plot_samples(samples)
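
# + [markdown]
# A note on the sample layout assumed by the two cells above (this reading follows from the
# slicing, not from the `Word` class itself): each sampled letter appears to be stored as 784
# pixel values followed by a single sampled character index. `plot_samples` drops the last
# entry with `[..., :-1]` and reshapes the pixels to $28 \times 28$, while `decode` reads only
# the last entry of each letter and maps it back to a character over an alphabet of size
# `n_char + 1`. With `n_misspelled = 1` and ten four-letter vocabulary words, this gives the
# $10 \times 4 = 40$ images laid out in the figure.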

# + [markdown] id="eNDmwV7EPyrR"
# ## Calculation of Log Likelihoods for Test Data

# + id="525MUl5HPe1K"
# noisy words
test_words = ['bo--', '-On-', 'b-N-', 'B---', '-OnD', 'b--D', '---D', '--Nd', 'B-nD', '-O--', 'b--d', '--n-']

test_images = fake_test_data(test_words, dataset, targets, n_char + 1, "all")

# + id="1dFCdVNgPYtJ"
def plot_log_likelihood(hmms, test_words, test_images, vocab):
    fig, axes = plt.subplots(4, 3, figsize=(20, 10))

    for i, (ax, img, word) in enumerate(zip(axes.flat, test_images, test_words)):
        flattened_img = img.reshape((len(img), -1))
        loglikelihoods = jax.tree_map(lambda w: jnp.sum(hmms[w].loglikelihood(word, flattened_img)), vocab)
        loglikelihoods = jnp.array(loglikelihoods)

        ax.bar(vocab, jnp.exp(jax.nn.log_softmax(loglikelihoods)), color="black")
        ax.set_title(f'{word}')

    plt.tight_layout()
    plt.show()


# + id="qv-Df8GEhfC4" colab={"base_uri": "https://localhost:8080/", "height": 784} outputId="9be6abf3-0ecc-4ef5-e301-380c5eac38ff"
plot_log_likelihood(hmms, test_words, test_images, vocab)
[ "unittest from ddf import DDFManager, DDF_HOME class BaseTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.dm_spark =", "from {}'.format(table_name), False) @classmethod def loadMtCars(cls, dm): table_name = 'mtcars_pyddf_unittest' if table_name not", "class BaseTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.dm_spark = DDFManager('spark') cls.airlines = cls.loadAirlines(cls.dm_spark) cls.mtcars =", "in [x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set hive.metastore.warehouse.dir=/tmp', False) dm.sql('drop table if", "string, FlightNum int, TailNum string, ActualElapsedTime int, CRSElapsedTime int, AirTime int, ArrDelay int,", "int, TaxiOut int, Cancelled int, CancellationCode string, Diverted string, CarrierDelay int, WeatherDelay int,", "\" \"into table {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select * from {}'.format(table_name), False) @classmethod", "AirTime int, ArrDelay int, DepDelay int, Origin string, Dest string, Distance int, TaxiIn", "False) return dm.sql2ddf('select * from {}'.format(table_name), False) @classmethod def loadMtCars(cls, dm): table_name =", "int, TaxiIn int, TaxiOut int, Cancelled int, CancellationCode string, Diverted string, CarrierDelay int,", "int, LateAircraftDelay int ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' \"\"\".format(table_name), False)", "int, CRSArrTime int,UniqueCarrier string, FlightNum int, TailNum string, ActualElapsedTime int, CRSElapsedTime int, AirTime", "exists {}'.format(table_name), False) dm.sql(\"\"\"create table {} (Year int,Month int,DayofMonth int, DayOfWeek int,DepTime int,CRSDepTime", "string, Distance int, TaxiIn int, TaxiOut int, Cancelled int, CancellationCode string, Diverted string,", "table_name), False) return dm.sql2ddf('select * from {}'.format(table_name), False) @classmethod def loadMtCars(cls, dm): table_name", "False) dm.sql(\"CREATE TABLE {} (mpg double, cyl int, disp double, \" \"hp int,", "am int, gear int, carb int)\" \" ROW FORMAT DELIMITED FIELDS TERMINATED BY", "int, TailNum string, ActualElapsedTime int, CRSElapsedTime int, AirTime int, ArrDelay int, DepDelay int,", "if exists {}'.format(table_name), False) dm.sql(\"\"\"create table {} (Year int,Month int,DayofMonth int, DayOfWeek int,DepTime", "',' \"\"\".format(table_name), False) dm.sql(\"load data local inpath '{}/resources/test/airlineWithNA.csv' \" \"into table {}\".format(DDF_HOME, table_name),", "int,DepTime int,CRSDepTime int,ArrTime int, CRSArrTime int,UniqueCarrier string, FlightNum int, TailNum string, ActualElapsedTime int,", "dm): table_name = 'airlines_na_pyddf_unittest' if table_name not in [x.split('\\t')[0] for x in dm.sql('show", "False) dm.sql(\"load data local inpath '{}/resources/test/airlineWithNA.csv' \" \"into table {}\".format(DDF_HOME, table_name), False) return", "[x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set shark.test.data.path=resources', False) # session.sql('set hive.metastore.warehouse.dir=/tmp') dm.sql('drop", "carb int)\" \" ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '\".format(table_name), False) dm.sql(\"LOAD", "DDF_HOME class BaseTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.dm_spark = DDFManager('spark') cls.airlines = cls.loadAirlines(cls.dm_spark) cls.mtcars", "DayOfWeek int,DepTime int,CRSDepTime int,ArrTime int, CRSArrTime int,UniqueCarrier string, FlightNum int, TailNum string, ActualElapsedTime", "dm.sql(\"\"\"create table {} (Year int,Month int,DayofMonth int, DayOfWeek int,DepTime int,CRSDepTime int,ArrTime 
int, CRSArrTime", "TERMINATED BY ',' \"\"\".format(table_name), False) dm.sql(\"load data local inpath '{}/resources/test/airlineWithNA.csv' \" \"into table", "from ddf import DDFManager, DDF_HOME class BaseTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.dm_spark = DDFManager('spark')", "[x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set hive.metastore.warehouse.dir=/tmp', False) dm.sql('drop table if exists", "dm.sql('show tables')]: dm.sql('set shark.test.data.path=resources', False) # session.sql('set hive.metastore.warehouse.dir=/tmp') dm.sql('drop table if exists {}'.format(table_name),", "for x in dm.sql('show tables')]: dm.sql('set hive.metastore.warehouse.dir=/tmp', False) dm.sql('drop table if exists {}'.format(table_name),", "int,Month int,DayofMonth int, DayOfWeek int,DepTime int,CRSDepTime int,ArrTime int, CRSArrTime int,UniqueCarrier string, FlightNum int,", "FlightNum int, TailNum string, ActualElapsedTime int, CRSElapsedTime int, AirTime int, ArrDelay int, DepDelay", "from __future__ import unicode_literals import unittest from ddf import DDFManager, DDF_HOME class BaseTest(unittest.TestCase):", "= cls.loadAirlines(cls.dm_spark) cls.mtcars = cls.loadMtCars(cls.dm_spark) @classmethod def tearDownClass(cls): cls.dm_spark.shutdown() @classmethod def loadAirlines(cls, dm):", "string, Dest string, Distance int, TaxiIn int, TaxiOut int, Cancelled int, CancellationCode string,", "@classmethod def loadAirlines(cls, dm): table_name = 'airlines_na_pyddf_unittest' if table_name not in [x.split('\\t')[0] for", "table_name = 'airlines_na_pyddf_unittest' if table_name not in [x.split('\\t')[0] for x in dm.sql('show tables')]:", "shark.test.data.path=resources', False) # session.sql('set hive.metastore.warehouse.dir=/tmp') dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"CREATE TABLE", "def tearDownClass(cls): cls.dm_spark.shutdown() @classmethod def loadAirlines(cls, dm): table_name = 'airlines_na_pyddf_unittest' if table_name not", "return dm.sql2ddf('select * from {}'.format(table_name), False) @classmethod def loadMtCars(cls, dm): table_name = 'mtcars_pyddf_unittest'", "(mpg double, cyl int, disp double, \" \"hp int, drat double, wt double,", "= 'airlines_na_pyddf_unittest' if table_name not in [x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set", "'\".format(table_name), False) dm.sql(\"LOAD DATA LOCAL INPATH '{}/resources/test/mtcars' \" \"INTO TABLE {}\".format(DDF_HOME, table_name), False)", "BY ' '\".format(table_name), False) dm.sql(\"LOAD DATA LOCAL INPATH '{}/resources/test/mtcars' \" \"INTO TABLE {}\".format(DDF_HOME,", "int, CRSElapsedTime int, AirTime int, ArrDelay int, DepDelay int, Origin string, Dest string,", "drat double, wt double, \" \"qesc double, vs int, am int, gear int,", "(Year int,Month int,DayofMonth int, DayOfWeek int,DepTime int,CRSDepTime int,ArrTime int, CRSArrTime int,UniqueCarrier string, FlightNum", "wt double, \" \"qesc double, vs int, am int, gear int, carb int)\"", "cls.loadMtCars(cls.dm_spark) @classmethod def tearDownClass(cls): cls.dm_spark.shutdown() @classmethod def loadAirlines(cls, dm): table_name = 'airlines_na_pyddf_unittest' if", "def loadAirlines(cls, dm): table_name = 'airlines_na_pyddf_unittest' if table_name not in [x.split('\\t')[0] for x", "{}'.format(table_name), False) dm.sql(\"\"\"create table {} (Year int,Month int,DayofMonth int, DayOfWeek int,DepTime int,CRSDepTime int,ArrTime", "int ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' \"\"\".format(table_name), False) 
dm.sql(\"load data", "@classmethod def setUpClass(cls): cls.dm_spark = DDFManager('spark') cls.airlines = cls.loadAirlines(cls.dm_spark) cls.mtcars = cls.loadMtCars(cls.dm_spark) @classmethod", "int, Origin string, Dest string, Distance int, TaxiIn int, TaxiOut int, Cancelled int,", "if table_name not in [x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set hive.metastore.warehouse.dir=/tmp', False)", "int, disp double, \" \"hp int, drat double, wt double, \" \"qesc double,", "LOCAL INPATH '{}/resources/test/mtcars' \" \"INTO TABLE {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select * from", "table if exists {}'.format(table_name), False) dm.sql(\"\"\"create table {} (Year int,Month int,DayofMonth int, DayOfWeek", "data local inpath '{}/resources/test/airlineWithNA.csv' \" \"into table {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select *", "int,DayofMonth int, DayOfWeek int,DepTime int,CRSDepTime int,ArrTime int, CRSArrTime int,UniqueCarrier string, FlightNum int, TailNum", "int,CRSDepTime int,ArrTime int, CRSArrTime int,UniqueCarrier string, FlightNum int, TailNum string, ActualElapsedTime int, CRSElapsedTime", "dm): table_name = 'mtcars_pyddf_unittest' if table_name not in [x.split('\\t')[0] for x in dm.sql('show", "TaxiOut int, Cancelled int, CancellationCode string, Diverted string, CarrierDelay int, WeatherDelay int, NASDelay", "CRSArrTime int,UniqueCarrier string, FlightNum int, TailNum string, ActualElapsedTime int, CRSElapsedTime int, AirTime int,", "DELIMITED FIELDS TERMINATED BY ',' \"\"\".format(table_name), False) dm.sql(\"load data local inpath '{}/resources/test/airlineWithNA.csv' \"", "FIELDS TERMINATED BY ' '\".format(table_name), False) dm.sql(\"LOAD DATA LOCAL INPATH '{}/resources/test/mtcars' \" \"INTO", "cls.dm_spark = DDFManager('spark') cls.airlines = cls.loadAirlines(cls.dm_spark) cls.mtcars = cls.loadMtCars(cls.dm_spark) @classmethod def tearDownClass(cls): cls.dm_spark.shutdown()", "int,UniqueCarrier string, FlightNum int, TailNum string, ActualElapsedTime int, CRSElapsedTime int, AirTime int, ArrDelay", "int, CancellationCode string, Diverted string, CarrierDelay int, WeatherDelay int, NASDelay int, SecurityDelay int,", "double, \" \"qesc double, vs int, am int, gear int, carb int)\" \"", "if exists {}'.format(table_name), False) dm.sql(\"CREATE TABLE {} (mpg double, cyl int, disp double,", "import DDFManager, DDF_HOME class BaseTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.dm_spark = DDFManager('spark') cls.airlines =", "SecurityDelay int, LateAircraftDelay int ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' \"\"\".format(table_name),", "cls.airlines = cls.loadAirlines(cls.dm_spark) cls.mtcars = cls.loadMtCars(cls.dm_spark) @classmethod def tearDownClass(cls): cls.dm_spark.shutdown() @classmethod def loadAirlines(cls,", "Distance int, TaxiIn int, TaxiOut int, Cancelled int, CancellationCode string, Diverted string, CarrierDelay", "BY ',' \"\"\".format(table_name), False) dm.sql(\"load data local inpath '{}/resources/test/airlineWithNA.csv' \" \"into table {}\".format(DDF_HOME,", "ActualElapsedTime int, CRSElapsedTime int, AirTime int, ArrDelay int, DepDelay int, Origin string, Dest", "'mtcars_pyddf_unittest' if table_name not in [x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set shark.test.data.path=resources',", "def setUpClass(cls): cls.dm_spark = DDFManager('spark') cls.airlines = cls.loadAirlines(cls.dm_spark) cls.mtcars = cls.loadMtCars(cls.dm_spark) @classmethod def", "False) 
dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"\"\"create table {} (Year int,Month int,DayofMonth", "int, SecurityDelay int, LateAircraftDelay int ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ','", "{}'.format(table_name), False) dm.sql(\"CREATE TABLE {} (mpg double, cyl int, disp double, \" \"hp", "int, Cancelled int, CancellationCode string, Diverted string, CarrierDelay int, WeatherDelay int, NASDelay int,", "dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"CREATE TABLE {} (mpg double, cyl int,", "int, DepDelay int, Origin string, Dest string, Distance int, TaxiIn int, TaxiOut int,", "int, AirTime int, ArrDelay int, DepDelay int, Origin string, Dest string, Distance int,", "def loadMtCars(cls, dm): table_name = 'mtcars_pyddf_unittest' if table_name not in [x.split('\\t')[0] for x", "TERMINATED BY ' '\".format(table_name), False) dm.sql(\"LOAD DATA LOCAL INPATH '{}/resources/test/mtcars' \" \"INTO TABLE", "= cls.loadMtCars(cls.dm_spark) @classmethod def tearDownClass(cls): cls.dm_spark.shutdown() @classmethod def loadAirlines(cls, dm): table_name = 'airlines_na_pyddf_unittest'", "cyl int, disp double, \" \"hp int, drat double, wt double, \" \"qesc", "x in dm.sql('show tables')]: dm.sql('set hive.metastore.warehouse.dir=/tmp', False) dm.sql('drop table if exists {}'.format(table_name), False)", "int, DayOfWeek int,DepTime int,CRSDepTime int,ArrTime int, CRSArrTime int,UniqueCarrier string, FlightNum int, TailNum string,", "TaxiIn int, TaxiOut int, Cancelled int, CancellationCode string, Diverted string, CarrierDelay int, WeatherDelay", "'{}/resources/test/airlineWithNA.csv' \" \"into table {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select * from {}'.format(table_name), False)", "dm.sql('set shark.test.data.path=resources', False) # session.sql('set hive.metastore.warehouse.dir=/tmp') dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"CREATE", "table if exists {}'.format(table_name), False) dm.sql(\"CREATE TABLE {} (mpg double, cyl int, disp", "in [x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set shark.test.data.path=resources', False) # session.sql('set hive.metastore.warehouse.dir=/tmp')", "double, wt double, \" \"qesc double, vs int, am int, gear int, carb", "LateAircraftDelay int ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' \"\"\".format(table_name), False) dm.sql(\"load", "\"qesc double, vs int, am int, gear int, carb int)\" \" ROW FORMAT", "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' \"\"\".format(table_name), False) dm.sql(\"load data local inpath", "WeatherDelay int, NASDelay int, SecurityDelay int, LateAircraftDelay int ) ROW FORMAT DELIMITED FIELDS", "# session.sql('set hive.metastore.warehouse.dir=/tmp') dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"CREATE TABLE {} (mpg", "int, am int, gear int, carb int)\" \" ROW FORMAT DELIMITED FIELDS TERMINATED", "\"into table {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select * from {}'.format(table_name), False) @classmethod def", "Cancelled int, CancellationCode string, Diverted string, CarrierDelay int, WeatherDelay int, NASDelay int, SecurityDelay", "@classmethod def loadMtCars(cls, dm): table_name = 'mtcars_pyddf_unittest' if table_name not in [x.split('\\t')[0] for", "int,ArrTime int, CRSArrTime int,UniqueCarrier string, FlightNum int, TailNum string, ActualElapsedTime int, CRSElapsedTime int,", "TABLE {} (mpg double, cyl int, disp double, \" \"hp int, drat double,", "' '\".format(table_name), False) 
dm.sql(\"LOAD DATA LOCAL INPATH '{}/resources/test/mtcars' \" \"INTO TABLE {}\".format(DDF_HOME, table_name),", "in dm.sql('show tables')]: dm.sql('set hive.metastore.warehouse.dir=/tmp', False) dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"\"\"create", "dm.sql(\"LOAD DATA LOCAL INPATH '{}/resources/test/mtcars' \" \"INTO TABLE {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select", "hive.metastore.warehouse.dir=/tmp', False) dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"\"\"create table {} (Year int,Month", "dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"\"\"create table {} (Year int,Month int,DayofMonth int,", "dm.sql(\"CREATE TABLE {} (mpg double, cyl int, disp double, \" \"hp int, drat", "string, Diverted string, CarrierDelay int, WeatherDelay int, NASDelay int, SecurityDelay int, LateAircraftDelay int", "CancellationCode string, Diverted string, CarrierDelay int, WeatherDelay int, NASDelay int, SecurityDelay int, LateAircraftDelay", "TailNum string, ActualElapsedTime int, CRSElapsedTime int, AirTime int, ArrDelay int, DepDelay int, Origin", "setUpClass(cls): cls.dm_spark = DDFManager('spark') cls.airlines = cls.loadAirlines(cls.dm_spark) cls.mtcars = cls.loadMtCars(cls.dm_spark) @classmethod def tearDownClass(cls):", "ddf import DDFManager, DDF_HOME class BaseTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.dm_spark = DDFManager('spark') cls.airlines", "Diverted string, CarrierDelay int, WeatherDelay int, NASDelay int, SecurityDelay int, LateAircraftDelay int )", "dm.sql(\"load data local inpath '{}/resources/test/airlineWithNA.csv' \" \"into table {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select", "FORMAT DELIMITED FIELDS TERMINATED BY ' '\".format(table_name), False) dm.sql(\"LOAD DATA LOCAL INPATH '{}/resources/test/mtcars'", "tables')]: dm.sql('set hive.metastore.warehouse.dir=/tmp', False) dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"\"\"create table {}", "table_name not in [x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set shark.test.data.path=resources', False) #", "INPATH '{}/resources/test/mtcars' \" \"INTO TABLE {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select * from {}'.format(table_name),", "dm.sql('show tables')]: dm.sql('set hive.metastore.warehouse.dir=/tmp', False) dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"\"\"create table", "FORMAT DELIMITED FIELDS TERMINATED BY ',' \"\"\".format(table_name), False) dm.sql(\"load data local inpath '{}/resources/test/airlineWithNA.csv'", "loadMtCars(cls, dm): table_name = 'mtcars_pyddf_unittest' if table_name not in [x.split('\\t')[0] for x in", "False) dm.sql(\"LOAD DATA LOCAL INPATH '{}/resources/test/mtcars' \" \"INTO TABLE {}\".format(DDF_HOME, table_name), False) return", "ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '\".format(table_name), False) dm.sql(\"LOAD DATA LOCAL INPATH", "hive.metastore.warehouse.dir=/tmp') dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"CREATE TABLE {} (mpg double, cyl", "string, CarrierDelay int, WeatherDelay int, NASDelay int, SecurityDelay int, LateAircraftDelay int ) ROW", "session.sql('set hive.metastore.warehouse.dir=/tmp') dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"CREATE TABLE {} (mpg double,", "string, ActualElapsedTime int, CRSElapsedTime int, AirTime int, ArrDelay int, DepDelay int, Origin string,", "dm.sql2ddf('select * from {}'.format(table_name), 
False) @classmethod def loadMtCars(cls, dm): table_name = 'mtcars_pyddf_unittest' if", "unicode_literals import unittest from ddf import DDFManager, DDF_HOME class BaseTest(unittest.TestCase): @classmethod def setUpClass(cls):", "__future__ import unicode_literals import unittest from ddf import DDFManager, DDF_HOME class BaseTest(unittest.TestCase): @classmethod", "inpath '{}/resources/test/airlineWithNA.csv' \" \"into table {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select * from {}'.format(table_name),", "NASDelay int, SecurityDelay int, LateAircraftDelay int ) ROW FORMAT DELIMITED FIELDS TERMINATED BY", "False) dm.sql(\"\"\"create table {} (Year int,Month int,DayofMonth int, DayOfWeek int,DepTime int,CRSDepTime int,ArrTime int,", "DELIMITED FIELDS TERMINATED BY ' '\".format(table_name), False) dm.sql(\"LOAD DATA LOCAL INPATH '{}/resources/test/mtcars' \"", "double, \" \"hp int, drat double, wt double, \" \"qesc double, vs int,", "dm.sql('set hive.metastore.warehouse.dir=/tmp', False) dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"\"\"create table {} (Year", "tables')]: dm.sql('set shark.test.data.path=resources', False) # session.sql('set hive.metastore.warehouse.dir=/tmp') dm.sql('drop table if exists {}'.format(table_name), False)", "\" \"hp int, drat double, wt double, \" \"qesc double, vs int, am", "table_name not in [x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set hive.metastore.warehouse.dir=/tmp', False) dm.sql('drop", "int, drat double, wt double, \" \"qesc double, vs int, am int, gear", "@classmethod def tearDownClass(cls): cls.dm_spark.shutdown() @classmethod def loadAirlines(cls, dm): table_name = 'airlines_na_pyddf_unittest' if table_name", "ArrDelay int, DepDelay int, Origin string, Dest string, Distance int, TaxiIn int, TaxiOut", "loadAirlines(cls, dm): table_name = 'airlines_na_pyddf_unittest' if table_name not in [x.split('\\t')[0] for x in", "int)\" \" ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '\".format(table_name), False) dm.sql(\"LOAD DATA", "x in dm.sql('show tables')]: dm.sql('set shark.test.data.path=resources', False) # session.sql('set hive.metastore.warehouse.dir=/tmp') dm.sql('drop table if", "CarrierDelay int, WeatherDelay int, NASDelay int, SecurityDelay int, LateAircraftDelay int ) ROW FORMAT", "table {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select * from {}'.format(table_name), False) @classmethod def loadMtCars(cls,", "exists {}'.format(table_name), False) dm.sql(\"CREATE TABLE {} (mpg double, cyl int, disp double, \"", "DDFManager('spark') cls.airlines = cls.loadAirlines(cls.dm_spark) cls.mtcars = cls.loadMtCars(cls.dm_spark) @classmethod def tearDownClass(cls): cls.dm_spark.shutdown() @classmethod def", ") ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' \"\"\".format(table_name), False) dm.sql(\"load data local", "cls.dm_spark.shutdown() @classmethod def loadAirlines(cls, dm): table_name = 'airlines_na_pyddf_unittest' if table_name not in [x.split('\\t')[0]", "double, vs int, am int, gear int, carb int)\" \" ROW FORMAT DELIMITED", "cls.loadAirlines(cls.dm_spark) cls.mtcars = cls.loadMtCars(cls.dm_spark) @classmethod def tearDownClass(cls): cls.dm_spark.shutdown() @classmethod def loadAirlines(cls, dm): table_name", "int, ArrDelay int, DepDelay int, Origin string, Dest string, Distance int, TaxiIn int,", "= 'mtcars_pyddf_unittest' if table_name not in [x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set", "BaseTest(unittest.TestCase): @classmethod def setUpClass(cls): 
cls.dm_spark = DDFManager('spark') cls.airlines = cls.loadAirlines(cls.dm_spark) cls.mtcars = cls.loadMtCars(cls.dm_spark)", "'{}/resources/test/mtcars' \" \"INTO TABLE {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select * from {}'.format(table_name), False)", "{} (Year int,Month int,DayofMonth int, DayOfWeek int,DepTime int,CRSDepTime int,ArrTime int, CRSArrTime int,UniqueCarrier string,", "disp double, \" \"hp int, drat double, wt double, \" \"qesc double, vs", "import unittest from ddf import DDFManager, DDF_HOME class BaseTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.dm_spark", "not in [x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set hive.metastore.warehouse.dir=/tmp', False) dm.sql('drop table", "gear int, carb int)\" \" ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '\".format(table_name),", "Origin string, Dest string, Distance int, TaxiIn int, TaxiOut int, Cancelled int, CancellationCode", "table {} (Year int,Month int,DayofMonth int, DayOfWeek int,DepTime int,CRSDepTime int,ArrTime int, CRSArrTime int,UniqueCarrier", "CRSElapsedTime int, AirTime int, ArrDelay int, DepDelay int, Origin string, Dest string, Distance", "Dest string, Distance int, TaxiIn int, TaxiOut int, Cancelled int, CancellationCode string, Diverted", "{}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select * from {}'.format(table_name), False) @classmethod def loadMtCars(cls, dm):", "cls.mtcars = cls.loadMtCars(cls.dm_spark) @classmethod def tearDownClass(cls): cls.dm_spark.shutdown() @classmethod def loadAirlines(cls, dm): table_name =", "DepDelay int, Origin string, Dest string, Distance int, TaxiIn int, TaxiOut int, Cancelled", "tearDownClass(cls): cls.dm_spark.shutdown() @classmethod def loadAirlines(cls, dm): table_name = 'airlines_na_pyddf_unittest' if table_name not in", "FIELDS TERMINATED BY ',' \"\"\".format(table_name), False) dm.sql(\"load data local inpath '{}/resources/test/airlineWithNA.csv' \" \"into", "= DDFManager('spark') cls.airlines = cls.loadAirlines(cls.dm_spark) cls.mtcars = cls.loadMtCars(cls.dm_spark) @classmethod def tearDownClass(cls): cls.dm_spark.shutdown() @classmethod", "local inpath '{}/resources/test/airlineWithNA.csv' \" \"into table {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select * from", "False) # session.sql('set hive.metastore.warehouse.dir=/tmp') dm.sql('drop table if exists {}'.format(table_name), False) dm.sql(\"CREATE TABLE {}", "'airlines_na_pyddf_unittest' if table_name not in [x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set hive.metastore.warehouse.dir=/tmp',", "import unicode_literals import unittest from ddf import DDFManager, DDF_HOME class BaseTest(unittest.TestCase): @classmethod def", "if table_name not in [x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set shark.test.data.path=resources', False)", "int, carb int)\" \" ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '\".format(table_name), False)", "vs int, am int, gear int, carb int)\" \" ROW FORMAT DELIMITED FIELDS", "in dm.sql('show tables')]: dm.sql('set shark.test.data.path=resources', False) # session.sql('set hive.metastore.warehouse.dir=/tmp') dm.sql('drop table if exists", "int, gear int, carb int)\" \" ROW FORMAT DELIMITED FIELDS TERMINATED BY '", "\"\"\".format(table_name), False) dm.sql(\"load data local inpath '{}/resources/test/airlineWithNA.csv' \" \"into table {}\".format(DDF_HOME, table_name), False)", "int, WeatherDelay int, NASDelay int, SecurityDelay int, LateAircraftDelay int ) ROW FORMAT 
DELIMITED", "False) @classmethod def loadMtCars(cls, dm): table_name = 'mtcars_pyddf_unittest' if table_name not in [x.split('\\t')[0]", "DDFManager, DDF_HOME class BaseTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.dm_spark = DDFManager('spark') cls.airlines = cls.loadAirlines(cls.dm_spark)", "for x in dm.sql('show tables')]: dm.sql('set shark.test.data.path=resources', False) # session.sql('set hive.metastore.warehouse.dir=/tmp') dm.sql('drop table", "\" \"qesc double, vs int, am int, gear int, carb int)\" \" ROW", "{} (mpg double, cyl int, disp double, \" \"hp int, drat double, wt", "* from {}'.format(table_name), False) @classmethod def loadMtCars(cls, dm): table_name = 'mtcars_pyddf_unittest' if table_name", "double, cyl int, disp double, \" \"hp int, drat double, wt double, \"", "\"hp int, drat double, wt double, \" \"qesc double, vs int, am int,", "DATA LOCAL INPATH '{}/resources/test/mtcars' \" \"INTO TABLE {}\".format(DDF_HOME, table_name), False) return dm.sql2ddf('select *", "int, NASDelay int, SecurityDelay int, LateAircraftDelay int ) ROW FORMAT DELIMITED FIELDS TERMINATED", "{}'.format(table_name), False) @classmethod def loadMtCars(cls, dm): table_name = 'mtcars_pyddf_unittest' if table_name not in", "\" ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '\".format(table_name), False) dm.sql(\"LOAD DATA LOCAL", "table_name = 'mtcars_pyddf_unittest' if table_name not in [x.split('\\t')[0] for x in dm.sql('show tables')]:", "not in [x.split('\\t')[0] for x in dm.sql('show tables')]: dm.sql('set shark.test.data.path=resources', False) # session.sql('set" ]
[]
[]
[ "logging log = logging.getLogger(__name__) @view_config(route_name=\"grupos_creacion\", renderer='json', permission='creacion') def grupos_creacion(peticion): # Validando datos recibidos", "conflicto viene a decir que no existe log.warning(e) return exception.HTTPNotFound(e) except DatosException as", "e: log.warning(e) return exception.HTTPBadRequest(e) # Realizamos la operacion Borrado de Grupos mediante la", "a que no se hayan enviado datos json correctamente formateados log.warning(e) return exception.HTTPBadRequest(e)", "httpexceptions as exception from ..juliette.modelGroup import Grupo from ..juliette.excepciones import DatosException, ConflictoException from", "ConflictoException from ..schemas.grupos import EsquemaGrupo import logging log = logging.getLogger(__name__) @view_config(route_name=\"grupos_creacion\", renderer='json', permission='creacion')", "exception.HTTPNotFound() except Exception as e: log.error(e) return exception.HTTPInternalServerError() return {'mensaje': contenido} @view_config(route_name='grupos_borrado', renderer='json',", "log.error(e) return exception.HTTPInternalServerError() return {'mensaje': contenido} @view_config(route_name='grupos_borrado', renderer='json', permission='borrado') def grupos_borrado(peticion): # Validando", "= peticion.matchdict['grupo'] except KeyError as e: log.warning(e) return exception.HTTPBadRequest(e) except TypeError as e:", "grupos_listado(peticion): try: grupo = Grupo() contenido = grupo.obtener() except Exception as e: log.error(e)", "e: log.warning(e) return exception.HTTPBadRequest(e) # Realizamos la operacion Creacion de Usuarios mediante la", "operacion Creacion de Usuarios mediante la librería try: grupo = Grupo() cn_grupo =", "se hayan enviado datos json correctamente formateados log.warning(e) return exception.HTTPBadRequest(e) except DatosException as", "exception.HTTPBadRequest(e) except TypeError as e: # Se refiere a que no se hayan", "de Usuarios mediante la librería try: grupo = Grupo() contenido = grupo.obtener(uid) except", "# Realizamos la operacion Borrado de Grupos mediante la librería try: grupo =", "ValueError as e: log.warning(e) return exception.HTTPBadRequest(e) except TypeError as e: # Se refiere", "log.warning(e) return exception.HTTPConflict(e) # La siguiente parece ser LA FORMA de responder en", "DatosException as e: log.warning(e) return exception.HTTPBadRequest(e) # Realizamos la operacion Borrado de Grupos", "= 201 peticion.response.headerlist.extend( ( ('Location', \"grupos/%s\" % str(cn_grupo)), ) ) return {'mensaje': cn_grupo}", "{'mensaje': contenido} @view_config(route_name='grupos_borrado', renderer='json', permission='borrado') def grupos_borrado(peticion): # Validando datos recibidos try: v", "= Grupo() contenido = grupo.borrar(cn_grupo) except ConflictoException as e: # En este caso,", "hayan enviado datos json correctamente formateados log.warning(e) return exception.HTTPBadRequest(e) except DatosException as e:", "log.warning(e) return exception.HTTPBadRequest(e) except ValueError as e: log.warning(e) return exception.HTTPBadRequest(e) except TypeError as", "no existe log.warning(e) return exception.HTTPNotFound(e) except DatosException as e: log.warning(e) return exception.HTTPBadRequest(e) except", "grupos_creacion(peticion): # Validando datos recibidos try: v = EsquemaGrupo('cn') print(peticion.json_body) contenido = v.validacion(peticion.json_body['corpus'])", "= grupo.crear(cn_grupo, contenido) except ConflictoException as e: # Si el grupo ya existe,", "('Location', 
\"grupos/%s\" % str(cn_grupo)), ) ) return {'mensaje': cn_grupo} @view_config(route_name='grupos_listado', renderer='json', permission='listar') def", "contenido = grupo.borrar(cn_grupo) except ConflictoException as e: # En este caso, conflicto viene", "try: grupo = Grupo() contenido = grupo.borrar(cn_grupo) except ConflictoException as e: # En", "return exception.HTTPBadRequest(e) # Realizamos la operacion Borrado de Grupos mediante la librería try:", "EsquemaGrupo('cn') print(peticion.json_body) contenido = v.validacion(peticion.json_body['corpus']) except KeyError as e: log.warning(e) return exception.HTTPBadRequest(e) except", "return contenido @view_config(route_name='grupos_listado_options', renderer='json') def grupos_listado_options(peticion): pass @view_config(route_name='grupos_detalle', renderer='json') def grupos_detalle (peticion): try:", "# Si el grupo ya existe, devolvemos un 409 Conflict log.warning(e) return exception.HTTPConflict(e)", "un 409 Conflict log.warning(e) return exception.HTTPConflict(e) # La siguiente parece ser LA FORMA", "= contenido['cn'] contenido = grupo.crear(cn_grupo, contenido) except ConflictoException as e: # Si el", "Realizamos la operacion Borrado de Grupos mediante la librería try: grupo = Grupo()", "as e: return exception.HTTPNotFound() except Exception as e: log.error(e) return exception.HTTPInternalServerError() return {'mensaje':", "en este caso # TODO: Sin embargo, mi response en este caso esta", "que no se hayan enviado datos json correctamente formateados log.warning(e) return exception.HTTPBadRequest(e) except", "try: v = EsquemaGrupo() cn_grupo = peticion.matchdict['grupo'] except KeyError as e: log.warning(e) return", "parece ser LA FORMA de responder en este caso # TODO: Sin embargo,", "try: v = EsquemaGrupo('cn') print(peticion.json_body) contenido = v.validacion(peticion.json_body['corpus']) except KeyError as e: log.warning(e)", "..juliette.modelGroup import Grupo from ..juliette.excepciones import DatosException, ConflictoException from ..schemas.grupos import EsquemaGrupo import", "contenido) except ConflictoException as e: # Si el grupo ya existe, devolvemos un", "grupo ya existe, devolvemos un 409 Conflict log.warning(e) return exception.HTTPConflict(e) # La siguiente", "EsquemaGrupo import logging log = logging.getLogger(__name__) @view_config(route_name=\"grupos_creacion\", renderer='json', permission='creacion') def grupos_creacion(peticion): # Validando", "# Se refiere a que no se hayan enviado datos json correctamente formateados", "as e: # Si el grupo ya existe, devolvemos un 409 Conflict log.warning(e)", "exception.HTTPConflict(e) # La siguiente parece ser LA FORMA de responder en este caso", "renderer='json', permission='creacion') def grupos_creacion(peticion): # Validando datos recibidos try: v = EsquemaGrupo('cn') print(peticion.json_body)", "except DatosException as e: log.warning(e) return exception.HTTPBadRequest(e) except Exception as e: log.error(e) return", "# Validando datos recibidos try: v = EsquemaGrupo('cn') print(peticion.json_body) contenido = v.validacion(peticion.json_body['corpus']) except", "Si el grupo ya existe, devolvemos un 409 Conflict log.warning(e) return exception.HTTPConflict(e) #", "try: uid = peticion.matchdict['grupo'] except KeyError as e: return exception.HTTPBadRequest() # Realizamos la", "log.warning(e) return exception.HTTPBadRequest(e) # Realizamos la operacion Borrado de Grupos mediante la librería", "renderer='json', permission='borrado') def grupos_borrado(peticion): # Validando 
datos recibidos try: v = EsquemaGrupo() cn_grupo", "except DatosException as e: log.warning(e) return exception.HTTPBadRequest(e) # Realizamos la operacion Borrado de", "vacío peticion.response.status_code = 201 peticion.response.headerlist.extend( ( ('Location', \"grupos/%s\" % str(cn_grupo)), ) ) return", "..schemas.grupos import EsquemaGrupo import logging log = logging.getLogger(__name__) @view_config(route_name=\"grupos_creacion\", renderer='json', permission='creacion') def grupos_creacion(peticion):", "grupo = Grupo() contenido = grupo.borrar(cn_grupo) except ConflictoException as e: # En este", "return exception.HTTPBadRequest(e) except ValueError as e: log.warning(e) return exception.HTTPBadRequest(e) except TypeError as e:", "LA FORMA de responder en este caso # TODO: Sin embargo, mi response", "..juliette.excepciones import DatosException, ConflictoException from ..schemas.grupos import EsquemaGrupo import logging log = logging.getLogger(__name__)", "datos recibidos try: v = EsquemaGrupo('cn') print(peticion.json_body) contenido = v.validacion(peticion.json_body['corpus']) except KeyError as", "se llama con un Request creado vacío peticion.response.status_code = 201 peticion.response.headerlist.extend( ( ('Location',", "# Realizamos la operacion Creacion de Usuarios mediante la librería try: grupo =", "que no existe log.warning(e) return exception.HTTPNotFound(e) except DatosException as e: log.warning(e) return exception.HTTPBadRequest(e)", "Grupo() contenido = grupo.borrar(cn_grupo) except ConflictoException as e: # En este caso, conflicto", "devolvemos un 409 Conflict log.warning(e) return exception.HTTPConflict(e) # La siguiente parece ser LA", "return exception.HTTPBadRequest(e) # Realizamos la operacion Creacion de Usuarios mediante la librería try:", "return exception.HTTPConflict(e) # La siguiente parece ser LA FORMA de responder en este", "response en este caso esta vació cuando se llama con un Request creado", "e: # En este caso, conflicto viene a decir que no existe log.warning(e)", "ConflictoException as e: # En este caso, conflicto viene a decir que no", "renderer='json') def grupos_listado_options(peticion): pass @view_config(route_name='grupos_detalle', renderer='json') def grupos_detalle (peticion): try: uid = peticion.matchdict['grupo']", "la librería try: grupo = Grupo() contenido = grupo.obtener(uid) except DatosException as e:", "permission='borrado') def grupos_borrado(peticion): # Validando datos recibidos try: v = EsquemaGrupo() cn_grupo =", "renderer='json', permission='listar') def grupos_listado(peticion): try: grupo = Grupo() contenido = grupo.obtener() except Exception", "@view_config(route_name='grupos_detalle', renderer='json') def grupos_detalle (peticion): try: uid = peticion.matchdict['grupo'] except KeyError as e:", "exception.HTTPInternalServerError() return {'mensaje': contenido} @view_config(route_name='grupos_borrado', renderer='json', permission='borrado') def grupos_borrado(peticion): # Validando datos recibidos", "exception.HTTPNotFound(e) except DatosException as e: log.warning(e) return exception.HTTPBadRequest(e) except Exception as e: log.error(e)", "pyramid.view import view_config from pyramid import httpexceptions as exception from ..juliette.modelGroup import Grupo", "enviado datos json correctamente formateados log.warning(e) return exception.HTTPBadRequest(e) except DatosException as e: log.warning(e)", "= v.validacion(peticion.json_body['corpus']) except KeyError as e: log.warning(e) return exception.HTTPBadRequest(e) except 
ValueError as e:", "except TypeError as e: # Se refiere a que no se hayan enviado", "except ValueError as e: log.warning(e) return exception.HTTPBadRequest(e) except TypeError as e: # Se", "refiere a que no se hayan enviado datos json correctamente formateados log.warning(e) return", "v = EsquemaGrupo('cn') print(peticion.json_body) contenido = v.validacion(peticion.json_body['corpus']) except KeyError as e: log.warning(e) return", "= EsquemaGrupo('cn') print(peticion.json_body) contenido = v.validacion(peticion.json_body['corpus']) except KeyError as e: log.warning(e) return exception.HTTPBadRequest(e)", "json correctamente formateados log.warning(e) return exception.HTTPBadRequest(e) except DatosException as e: log.warning(e) return exception.HTTPBadRequest(e)", "def grupos_listado_options(peticion): pass @view_config(route_name='grupos_detalle', renderer='json') def grupos_detalle (peticion): try: uid = peticion.matchdict['grupo'] except", "try: grupo = Grupo() cn_grupo = contenido['cn'] contenido = grupo.crear(cn_grupo, contenido) except ConflictoException", "grupo.obtener(uid) except DatosException as e: return exception.HTTPNotFound() except Exception as e: log.error(e) return", "renderer='json') def grupos_detalle (peticion): try: uid = peticion.matchdict['grupo'] except KeyError as e: return", "librería try: grupo = Grupo() contenido = grupo.borrar(cn_grupo) except ConflictoException as e: #", "grupo.crear(cn_grupo, contenido) except ConflictoException as e: # Si el grupo ya existe, devolvemos", "@view_config(route_name=\"grupos_creacion\", renderer='json', permission='creacion') def grupos_creacion(peticion): # Validando datos recibidos try: v = EsquemaGrupo('cn')", "Request creado vacío peticion.response.status_code = 201 peticion.response.headerlist.extend( ( ('Location', \"grupos/%s\" % str(cn_grupo)), )", "e: log.warning(e) return exception.HTTPBadRequest(e) except TypeError as e: # Se refiere a que", "contenido = grupo.obtener() except Exception as e: log.error(e) return exception.HTTPInternalServerError() print(contenido) return contenido", "pass @view_config(route_name='grupos_detalle', renderer='json') def grupos_detalle (peticion): try: uid = peticion.matchdict['grupo'] except KeyError as", "log.warning(e) return exception.HTTPBadRequest(e) # Realizamos la operacion Creacion de Usuarios mediante la librería", "La siguiente parece ser LA FORMA de responder en este caso # TODO:", "utf-8 from pyramid.view import view_config from pyramid import httpexceptions as exception from ..juliette.modelGroup", "decir que no existe log.warning(e) return exception.HTTPNotFound(e) except DatosException as e: log.warning(e) return", "la librería try: grupo = Grupo() contenido = grupo.borrar(cn_grupo) except ConflictoException as e:", "@view_config(route_name='grupos_listado_options', renderer='json') def grupos_listado_options(peticion): pass @view_config(route_name='grupos_detalle', renderer='json') def grupos_detalle (peticion): try: uid =", "as e: log.warning(e) return exception.HTTPBadRequest(e) except Exception as e: log.error(e) return exception.HTTPInternalServerError(e) return", "operacion Borrado de Grupos mediante la librería try: grupo = Grupo() contenido =", "KeyError as e: return exception.HTTPBadRequest() # Realizamos la operación Detalle de Usuarios mediante", "= logging.getLogger(__name__) @view_config(route_name=\"grupos_creacion\", renderer='json', permission='creacion') def grupos_creacion(peticion): # Validando datos recibidos try: v", "la operacion Borrado de Grupos 
mediante la librería try: grupo = Grupo() contenido", "Validando datos recibidos try: v = EsquemaGrupo('cn') print(peticion.json_body) contenido = v.validacion(peticion.json_body['corpus']) except KeyError", "return exception.HTTPBadRequest(e) except TypeError as e: # Se refiere a que no se", "de Grupos mediante la librería try: grupo = Grupo() contenido = grupo.borrar(cn_grupo) except", "return exception.HTTPBadRequest(e) except DatosException as e: log.warning(e) return exception.HTTPBadRequest(e) # Realizamos la operacion", "try: grupo = Grupo() contenido = grupo.obtener() except Exception as e: log.error(e) return", "e: return exception.HTTPBadRequest() # Realizamos la operación Detalle de Usuarios mediante la librería", "Grupos mediante la librería try: grupo = Grupo() contenido = grupo.borrar(cn_grupo) except ConflictoException", "# Validando datos recibidos try: v = EsquemaGrupo() cn_grupo = peticion.matchdict['grupo'] except KeyError", "exception.HTTPBadRequest(e) except DatosException as e: log.warning(e) return exception.HTTPBadRequest(e) # Realizamos la operacion Creacion", "creado vacío peticion.response.status_code = 201 peticion.response.headerlist.extend( ( ('Location', \"grupos/%s\" % str(cn_grupo)), ) )", "log.warning(e) return exception.HTTPBadRequest(e) except TypeError as e: # Se refiere a que no", "print(contenido) return contenido @view_config(route_name='grupos_listado_options', renderer='json') def grupos_listado_options(peticion): pass @view_config(route_name='grupos_detalle', renderer='json') def grupos_detalle (peticion):", "En este caso, conflicto viene a decir que no existe log.warning(e) return exception.HTTPNotFound(e)", "except Exception as e: log.error(e) return exception.HTTPInternalServerError() print(contenido) return contenido @view_config(route_name='grupos_listado_options', renderer='json') def", "de Usuarios mediante la librería try: grupo = Grupo() cn_grupo = contenido['cn'] contenido", "contenido = grupo.crear(cn_grupo, contenido) except ConflictoException as e: # Si el grupo ya", "( ('Location', \"grupos/%s\" % str(cn_grupo)), ) ) return {'mensaje': cn_grupo} @view_config(route_name='grupos_listado', renderer='json', permission='listar')", "v = EsquemaGrupo() cn_grupo = peticion.matchdict['grupo'] except KeyError as e: log.warning(e) return exception.HTTPBadRequest(e)", "formateados log.warning(e) return exception.HTTPBadRequest(e) except DatosException as e: log.warning(e) return exception.HTTPBadRequest(e) # Realizamos", "existe log.warning(e) return exception.HTTPNotFound(e) except DatosException as e: log.warning(e) return exception.HTTPBadRequest(e) except Exception", "Creacion de Usuarios mediante la librería try: grupo = Grupo() cn_grupo = contenido['cn']", "= peticion.matchdict['grupo'] except KeyError as e: return exception.HTTPBadRequest() # Realizamos la operación Detalle", "uid = peticion.matchdict['grupo'] except KeyError as e: return exception.HTTPBadRequest() # Realizamos la operación", "a decir que no existe log.warning(e) return exception.HTTPNotFound(e) except DatosException as e: log.warning(e)", "caso # TODO: Sin embargo, mi response en este caso esta vació cuando", "exception.HTTPBadRequest() # Realizamos la operación Detalle de Usuarios mediante la librería try: grupo", "return {'mensaje': contenido} @view_config(route_name='grupos_borrado', renderer='json', permission='borrado') def grupos_borrado(peticion): # Validando datos recibidos try:", "grupo = Grupo() contenido = grupo.obtener() except Exception as e: log.error(e) 
return exception.HTTPInternalServerError()", "Grupo from ..juliette.excepciones import DatosException, ConflictoException from ..schemas.grupos import EsquemaGrupo import logging log", "cn_grupo = contenido['cn'] contenido = grupo.crear(cn_grupo, contenido) except ConflictoException as e: # Si", "TODO: Sin embargo, mi response en este caso esta vació cuando se llama", "grupo = Grupo() contenido = grupo.obtener(uid) except DatosException as e: return exception.HTTPNotFound() except", "recibidos try: v = EsquemaGrupo('cn') print(peticion.json_body) contenido = v.validacion(peticion.json_body['corpus']) except KeyError as e:", "try: grupo = Grupo() contenido = grupo.obtener(uid) except DatosException as e: return exception.HTTPNotFound()", "pyramid import httpexceptions as exception from ..juliette.modelGroup import Grupo from ..juliette.excepciones import DatosException,", "as e: log.error(e) return exception.HTTPInternalServerError() return {'mensaje': contenido} @view_config(route_name='grupos_borrado', renderer='json', permission='borrado') def grupos_borrado(peticion):", "log.warning(e) return exception.HTTPNotFound(e) except DatosException as e: log.warning(e) return exception.HTTPBadRequest(e) except Exception as", ") return {'mensaje': cn_grupo} @view_config(route_name='grupos_listado', renderer='json', permission='listar') def grupos_listado(peticion): try: grupo = Grupo()", "except DatosException as e: return exception.HTTPNotFound() except Exception as e: log.error(e) return exception.HTTPInternalServerError()", "datos recibidos try: v = EsquemaGrupo() cn_grupo = peticion.matchdict['grupo'] except KeyError as e:", "DatosException as e: log.warning(e) return exception.HTTPBadRequest(e) except Exception as e: log.error(e) return exception.HTTPInternalServerError(e)", "exception from ..juliette.modelGroup import Grupo from ..juliette.excepciones import DatosException, ConflictoException from ..schemas.grupos import", "str(cn_grupo)), ) ) return {'mensaje': cn_grupo} @view_config(route_name='grupos_listado', renderer='json', permission='listar') def grupos_listado(peticion): try: grupo", "= Grupo() contenido = grupo.obtener() except Exception as e: log.error(e) return exception.HTTPInternalServerError() print(contenido)", "409 Conflict log.warning(e) return exception.HTTPConflict(e) # La siguiente parece ser LA FORMA de", "except KeyError as e: log.warning(e) return exception.HTTPBadRequest(e) except TypeError as e: # Se", "Validando datos recibidos try: v = EsquemaGrupo() cn_grupo = peticion.matchdict['grupo'] except KeyError as", "Detalle de Usuarios mediante la librería try: grupo = Grupo() contenido = grupo.obtener(uid)", ") ) return {'mensaje': cn_grupo} @view_config(route_name='grupos_listado', renderer='json', permission='listar') def grupos_listado(peticion): try: grupo =", "% str(cn_grupo)), ) ) return {'mensaje': cn_grupo} @view_config(route_name='grupos_listado', renderer='json', permission='listar') def grupos_listado(peticion): try:", "coding: utf-8 from pyramid.view import view_config from pyramid import httpexceptions as exception from", "import logging log = logging.getLogger(__name__) @view_config(route_name=\"grupos_creacion\", renderer='json', permission='creacion') def grupos_creacion(peticion): # Validando datos", "Realizamos la operación Detalle de Usuarios mediante la librería try: grupo = Grupo()", "Exception as e: log.error(e) return exception.HTTPInternalServerError() return {'mensaje': contenido} @view_config(route_name='grupos_borrado', renderer='json', 
# justine/views/grupos.py
# coding: utf-8

from pyramid.view import view_config
from pyramid import httpexceptions as exception

from ..juliette.modelGroup import Grupo
from ..juliette.excepciones import DatosException, ConflictoException
from ..schemas.grupos import EsquemaGrupo

import logging
log = logging.getLogger(__name__)


@view_config(route_name="grupos_creacion", renderer='json', permission='creacion')
def grupos_creacion(peticion):
    # Validate the received data
    try:
        v = EsquemaGrupo('cn')
        print(peticion.json_body)
        contenido = v.validacion(peticion.json_body['corpus'])
    except KeyError as e:
        log.warning(e)
        return exception.HTTPBadRequest(e)
    except ValueError as e:
        log.warning(e)
        return exception.HTTPBadRequest(e)
    except TypeError as e:
        # Raised when the request body is not correctly formatted JSON
        log.warning(e)
        return exception.HTTPBadRequest(e)
    except DatosException as e:
        log.warning(e)
        return exception.HTTPBadRequest(e)

    # Perform the group-creation operation through the library
    try:
        grupo = Grupo()
        cn_grupo = contenido['cn']
        contenido = grupo.crear(cn_grupo, contenido)
    except ConflictoException as e:
        # If the group already exists, answer with 409 Conflict
        log.warning(e)
        return exception.HTTPConflict(e)

    # The following appears to be THE way to respond in this case.
    # TODO: however, the response is empty when called with an empty Request.
    peticion.response.status_code = 201
    peticion.response.headerlist.extend(
        (
            ('Location', "grupos/%s" % str(cn_grupo)),
        )
    )

    return {'mensaje': cn_grupo}


@view_config(route_name='grupos_listado', renderer='json', permission='listar')
def grupos_listado(peticion):
    try:
        grupo = Grupo()
        contenido = grupo.obtener()
    except Exception as e:
        log.error(e)
        return exception.HTTPInternalServerError()

    print(contenido)
    return contenido


@view_config(route_name='grupos_listado_options', renderer='json')
def grupos_listado_options(peticion):
    pass


@view_config(route_name='grupos_detalle', renderer='json')
def grupos_detalle(peticion):
    try:
        uid = peticion.matchdict['grupo']
    except KeyError as e:
        return exception.HTTPBadRequest()

    # Perform the group-detail operation through the library
    try:
        grupo = Grupo()
        contenido = grupo.obtener(uid)
    except DatosException as e:
        return exception.HTTPNotFound()
    except Exception as e:
        log.error(e)
        return exception.HTTPInternalServerError()

    return {'mensaje': contenido}


@view_config(route_name='grupos_borrado', renderer='json', permission='borrado')
def grupos_borrado(peticion):
    # Validate the received data
    try:
        v = EsquemaGrupo()
        cn_grupo = peticion.matchdict['grupo']
    except KeyError as e:
        log.warning(e)
        return exception.HTTPBadRequest(e)
    except TypeError as e:
        # Raised when the request body is not correctly formatted JSON
        log.warning(e)
        return exception.HTTPBadRequest(e)
    except DatosException as e:
        log.warning(e)
        return exception.HTTPBadRequest(e)

    # Perform the group-deletion operation through the library
    try:
        grupo = Grupo()
        contenido = grupo.borrar(cn_grupo)
    except ConflictoException as e:
        # Here a conflict means the group does not exist
        log.warning(e)
        return exception.HTTPNotFound(e)
    except DatosException as e:
        log.warning(e)
        return exception.HTTPBadRequest(e)
    except Exception as e:
        log.error(e)
        return exception.HTTPInternalServerError(e)

    return {'mensaje': contenido}
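The view callables above only declare route names and permissions through @view_config; they assume the application registers those routes and an authorization policy elsewhere. A minimal sketch of that wiring with Pyramid's Configurator follows; the URL patterns, request-method predicates, and the scanned package path are illustrative assumptions, not taken from the original project.

# Hypothetical route wiring for the views above; URL patterns, request-method
# predicates and the scanned package name are assumptions for illustration only.
from pyramid.config import Configurator


def main(global_config, **settings):
    config = Configurator(settings=settings)
    config.add_route('grupos_creacion', '/grupos', request_method='POST')
    config.add_route('grupos_listado', '/grupos', request_method='GET')
    config.add_route('grupos_listado_options', '/grupos', request_method='OPTIONS')
    config.add_route('grupos_detalle', '/grupos/{grupo}', request_method='GET')
    config.add_route('grupos_borrado', '/grupos/{grupo}', request_method='DELETE')
    config.scan('justine.views')  # picks up the @view_config-decorated callables
    return config.make_wsgi_app()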
[ "argparse.ArgumentTypeError( \"%r not a floating-point literal\" % (x,)) if x < 0.0 or", "is responsible of detecting similar pages from content. The higher the ratio, the", "a huge pdf to get a readable pdf\") parser.add_argument('input_file', type=parse_file, help='pdf file to", "raise argparse.ArgumentTypeError(\"%r not in range [0.0, 1.0]\" % (x,)) return x def diff(content1:", "...', i / nb_pages) current_page = pad_input.getPage(i) current_content = current_page.extractText() prev_content = prev_page.extractText()", "fail_with_message( 'Please install required dependencies before using this package.\\n\\t> pip3 install -r requirements.txt", "content_ratio and (has_deleted_item(diff_content) or len(prev_content) > len(current_content))) if has_content(prev_content) and (title_has_changed or content_has_changed):", "pages from title. The higher the ratio, the more sensitive the sanitizer will", "return x def diff(content1: str, content2: str): return difflib.SequenceMatcher(None, content1, content2) def has_deleted_item(diff):", "name') parser.add_argument('--title-ratio', type=parse_ratio, help='float between [0, 1] which is responsible of detecting similar", "0.5)', default=.5, dest='title_ratio') parser.add_argument('--content-ratio', type=parse_ratio, help='float between [0, 1] which is responsible of", "float, content_ratio: float): prev_page = pad_input.getPage(0) nb_pages = pad_input.getNumPages() for i in range(1,", "1.0: raise argparse.ArgumentTypeError(\"%r not in range [0.0, 1.0]\" % (x,)) return x def", "diff_content = diff(get_content(prev_content), get_content(current_content)) title_has_changed = diff_title.ratio() < title_ratio content_has_changed = (diff_content.ratio() <", "def parse_ratio(x): try: x = float(x) except ValueError: raise argparse.ArgumentTypeError( \"%r not a", "pip3 install -r requirements.txt --user') def parse_file(path: str): if not pathlib.Path(path).exists(): raise argparse.ArgumentTypeError('invalid", "import argparse from .utils import fail_with_message, progress_with_message, success_with_message try: import PyPDF2 except ImportError:", "diff_title.ratio() < title_ratio content_has_changed = (diff_content.ratio() < content_ratio and (has_deleted_item(diff_content) or len(prev_content) >", "parser.add_argument('output_file', type=str, help='output sanitized pdf file name') parser.add_argument('--title-ratio', type=parse_ratio, help='float between [0, 1]", "The higher the ratio, the more sensitive the sanitizer will be to any", "content.replace(get_title(content), '').strip() def has_content(content): return len(get_content(content)) != 0 def sanitize(pad_input: PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter,", "str, content2: str): return difflib.SequenceMatcher(None, content1, content2) def has_deleted_item(diff): for operation, *_ in", "has_content(content): return len(get_content(content)) != 0 def sanitize(pad_input: PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter, title_ratio: float, content_ratio:", "ImportError: fail_with_message( 'Please install required dependencies before using this package.\\n\\t> pip3 install -r", "if x < 0.0 or x > 1.0: raise argparse.ArgumentTypeError(\"%r not in range", "has_deleted_item(diff): for operation, *_ in diff.get_opcodes(): if operation == 'delete' or operation ==", "type=parse_file, help='pdf file to be sanitized') parser.add_argument('output_file', type=str, help='output sanitized pdf file name')", "similar pages from title. 
The higher the ratio, the more sensitive the sanitizer", "floating-point literal\" % (x,)) if x < 0.0 or x > 1.0: raise", "return difflib.SequenceMatcher(None, content1, content2) def has_deleted_item(diff): for operation, *_ in diff.get_opcodes(): if operation", "= diff(get_title(prev_content), get_title(current_content)) diff_content = diff(get_content(prev_content), get_content(current_content)) title_has_changed = diff_title.ratio() < title_ratio content_has_changed", "get_content(content): return content.replace(get_title(content), '').strip() def has_content(content): return len(get_content(content)) != 0 def sanitize(pad_input: PyPDF2.PdfFileReader,", "page from a huge pdf to get a readable pdf\") parser.add_argument('input_file', type=parse_file, help='pdf", "huge pdf to get a readable pdf\") parser.add_argument('input_file', type=parse_file, help='pdf file to be", "help='pdf file to be sanitized') parser.add_argument('output_file', type=str, help='output sanitized pdf file name') parser.add_argument('--title-ratio',", "str): return difflib.SequenceMatcher(None, content1, content2) def has_deleted_item(diff): for operation, *_ in diff.get_opcodes(): if", "will be to any changes. (default: 0.8)', default=.8, dest='content_ratio') def main(): args =", "True return False def get_title(content): return content.split('\\n')[0] def get_content(content): return content.replace(get_title(content), '').strip() def", "(x,)) return x def diff(content1: str, content2: str): return difflib.SequenceMatcher(None, content1, content2) def", "diff(get_content(prev_content), get_content(current_content)) title_has_changed = diff_title.ratio() < title_ratio content_has_changed = (diff_content.ratio() < content_ratio and", "pathlib import argparse from .utils import fail_with_message, progress_with_message, success_with_message try: import PyPDF2 except", "--user') def parse_file(path: str): if not pathlib.Path(path).exists(): raise argparse.ArgumentTypeError('invalid file path') return path", "install -r requirements.txt --user') def parse_file(path: str): if not pathlib.Path(path).exists(): raise argparse.ArgumentTypeError('invalid file", "a floating-point literal\" % (x,)) if x < 0.0 or x > 1.0:", "< content_ratio and (has_deleted_item(diff_content) or len(prev_content) > len(current_content))) if has_content(prev_content) and (title_has_changed or", "description=\"Quickly remove useless page from a huge pdf to get a readable pdf\")", "def has_deleted_item(diff): for operation, *_ in diff.get_opcodes(): if operation == 'delete' or operation", "len(get_content(content)) != 0 def sanitize(pad_input: PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter, title_ratio: float, content_ratio: float): prev_page", "(has_deleted_item(diff_content) or len(prev_content) > len(current_content))) if has_content(prev_content) and (title_has_changed or content_has_changed): pdf_output.addPage(prev_page) prev_page", "from a huge pdf to get a readable pdf\") parser.add_argument('input_file', type=parse_file, help='pdf file", "type=parse_ratio, help='float between [0, 1] which is responsible of detecting similar pages from", "argparse.ArgumentTypeError('invalid file path') return path def parse_ratio(x): try: x = float(x) except ValueError:", "path def parse_ratio(x): try: x = float(x) except ValueError: raise argparse.ArgumentTypeError( \"%r not", "or x > 1.0: raise argparse.ArgumentTypeError(\"%r not in range [0.0, 1.0]\" % (x,))", "return len(get_content(content)) != 0 def sanitize(pad_input: PyPDF2.PdfFileReader, 
pdf_output: PyPDF2.PdfFileWriter, title_ratio: float, content_ratio: float):", "float): prev_page = pad_input.getPage(0) nb_pages = pad_input.getNumPages() for i in range(1, nb_pages): progress_with_message('Sanitizing", "diff_title = diff(get_title(prev_content), get_title(current_content)) diff_content = diff(get_content(prev_content), get_content(current_content)) title_has_changed = diff_title.ratio() < title_ratio", "the more sensitive the sanitizer will be to any changes. (default: 0.5)', default=.5,", "changes. (default: 0.5)', default=.5, dest='title_ratio') parser.add_argument('--content-ratio', type=parse_ratio, help='float between [0, 1] which is", "< title_ratio content_has_changed = (diff_content.ratio() < content_ratio and (has_deleted_item(diff_content) or len(prev_content) > len(current_content)))", "dest='title_ratio') parser.add_argument('--content-ratio', type=parse_ratio, help='float between [0, 1] which is responsible of detecting similar", "type=str, help='output sanitized pdf file name') parser.add_argument('--title-ratio', type=parse_ratio, help='float between [0, 1] which", "or len(prev_content) > len(current_content))) if has_content(prev_content) and (title_has_changed or content_has_changed): pdf_output.addPage(prev_page) prev_page =", ".utils import fail_with_message, progress_with_message, success_with_message try: import PyPDF2 except ImportError: fail_with_message( 'Please install", "'delete' or operation == 'replace': return True return False def get_title(content): return content.split('\\n')[0]", "return content.split('\\n')[0] def get_content(content): return content.replace(get_title(content), '').strip() def has_content(content): return len(get_content(content)) != 0", "pdf ...', i / nb_pages) current_page = pad_input.getPage(i) current_content = current_page.extractText() prev_content =", "more sensitive the sanitizer will be to any changes. (default: 0.8)', default=.8, dest='content_ratio')", "not pathlib.Path(path).exists(): raise argparse.ArgumentTypeError('invalid file path') return path def parse_ratio(x): try: x =", "def main(): args = parser.parse_args() pdf_input = PyPDF2.PdfFileReader(args.input_file) pdf_output = PyPDF2.PdfFileWriter() sanitize(pdf_input, pdf_output,", "as f: pdf_output.write(f) success_with_message(f'Your file has been sanitized at {args.output_file}') if __name__ ==", "help='float between [0, 1] which is responsible of detecting similar pages from title.", "PyPDF2.PdfFileWriter, title_ratio: float, content_ratio: float): prev_page = pad_input.getPage(0) nb_pages = pad_input.getNumPages() for i", "be to any changes. 
(default: 0.5)', default=.5, dest='title_ratio') parser.add_argument('--content-ratio', type=parse_ratio, help='float between [0,", "return False def get_title(content): return content.split('\\n')[0] def get_content(content): return content.replace(get_title(content), '').strip() def has_content(content):", "def has_content(content): return len(get_content(content)) != 0 def sanitize(pad_input: PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter, title_ratio: float,", "= PyPDF2.PdfFileWriter() sanitize(pdf_input, pdf_output, args.title_ratio, args.content_ratio) with open(args.output_file, 'wb') as f: pdf_output.write(f) success_with_message(f'Your", "def parse_file(path: str): if not pathlib.Path(path).exists(): raise argparse.ArgumentTypeError('invalid file path') return path def", "for operation, *_ in diff.get_opcodes(): if operation == 'delete' or operation == 'replace':", "def diff(content1: str, content2: str): return difflib.SequenceMatcher(None, content1, content2) def has_deleted_item(diff): for operation,", "pdf file name') parser.add_argument('--title-ratio', type=parse_ratio, help='float between [0, 1] which is responsible of", "nb_pages = pad_input.getNumPages() for i in range(1, nb_pages): progress_with_message('Sanitizing pdf ...', i /", "0 def sanitize(pad_input: PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter, title_ratio: float, content_ratio: float): prev_page = pad_input.getPage(0)", "between [0, 1] which is responsible of detecting similar pages from title. The", "pdf_output: PyPDF2.PdfFileWriter, title_ratio: float, content_ratio: float): prev_page = pad_input.getPage(0) nb_pages = pad_input.getNumPages() for", "prev_page = pad_input.getPage(0) nb_pages = pad_input.getNumPages() for i in range(1, nb_pages): progress_with_message('Sanitizing pdf", "remove useless page from a huge pdf to get a readable pdf\") parser.add_argument('input_file',", "pages from content. The higher the ratio, the more sensitive the sanitizer will", "dependencies before using this package.\\n\\t> pip3 install -r requirements.txt --user') def parse_file(path: str):", "pdf to get a readable pdf\") parser.add_argument('input_file', type=parse_file, help='pdf file to be sanitized')", "detecting similar pages from content. The higher the ratio, the more sensitive the", "current_content = current_page.extractText() prev_content = prev_page.extractText() diff_title = diff(get_title(prev_content), get_title(current_content)) diff_content = diff(get_content(prev_content),", "operation == 'replace': return True return False def get_title(content): return content.split('\\n')[0] def get_content(content):", "difflib import pathlib import argparse from .utils import fail_with_message, progress_with_message, success_with_message try: import", "open(args.output_file, 'wb') as f: pdf_output.write(f) success_with_message(f'Your file has been sanitized at {args.output_file}') if", "> 1.0: raise argparse.ArgumentTypeError(\"%r not in range [0.0, 1.0]\" % (x,)) return x", "== 'delete' or operation == 'replace': return True return False def get_title(content): return", "prev_page = current_page pdf_output.addPage(prev_page) parser = argparse.ArgumentParser( description=\"Quickly remove useless page from a", "range(1, nb_pages): progress_with_message('Sanitizing pdf ...', i / nb_pages) current_page = pad_input.getPage(i) current_content =", "1] which is responsible of detecting similar pages from title. The higher the", "more sensitive the sanitizer will be to any changes. 
(default: 0.5)', default=.5, dest='title_ratio')", "args = parser.parse_args() pdf_input = PyPDF2.PdfFileReader(args.input_file) pdf_output = PyPDF2.PdfFileWriter() sanitize(pdf_input, pdf_output, args.title_ratio, args.content_ratio)", "in range(1, nb_pages): progress_with_message('Sanitizing pdf ...', i / nb_pages) current_page = pad_input.getPage(i) current_content", "PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter, title_ratio: float, content_ratio: float): prev_page = pad_input.getPage(0) nb_pages = pad_input.getNumPages()", "get a readable pdf\") parser.add_argument('input_file', type=parse_file, help='pdf file to be sanitized') parser.add_argument('output_file', type=str,", "title_ratio: float, content_ratio: float): prev_page = pad_input.getPage(0) nb_pages = pad_input.getNumPages() for i in", "= (diff_content.ratio() < content_ratio and (has_deleted_item(diff_content) or len(prev_content) > len(current_content))) if has_content(prev_content) and", "a readable pdf\") parser.add_argument('input_file', type=parse_file, help='pdf file to be sanitized') parser.add_argument('output_file', type=str, help='output", "before using this package.\\n\\t> pip3 install -r requirements.txt --user') def parse_file(path: str): if", "from content. The higher the ratio, the more sensitive the sanitizer will be", "content.split('\\n')[0] def get_content(content): return content.replace(get_title(content), '').strip() def has_content(content): return len(get_content(content)) != 0 def", "to be sanitized') parser.add_argument('output_file', type=str, help='output sanitized pdf file name') parser.add_argument('--title-ratio', type=parse_ratio, help='float", "pad_input.getPage(i) current_content = current_page.extractText() prev_content = prev_page.extractText() diff_title = diff(get_title(prev_content), get_title(current_content)) diff_content =", "sanitized') parser.add_argument('output_file', type=str, help='output sanitized pdf file name') parser.add_argument('--title-ratio', type=parse_ratio, help='float between [0,", "x < 0.0 or x > 1.0: raise argparse.ArgumentTypeError(\"%r not in range [0.0,", "or operation == 'replace': return True return False def get_title(content): return content.split('\\n')[0] def", "in diff.get_opcodes(): if operation == 'delete' or operation == 'replace': return True return", "import pathlib import argparse from .utils import fail_with_message, progress_with_message, success_with_message try: import PyPDF2", "(x,)) if x < 0.0 or x > 1.0: raise argparse.ArgumentTypeError(\"%r not in", "raise argparse.ArgumentTypeError('invalid file path') return path def parse_ratio(x): try: x = float(x) except", "the more sensitive the sanitizer will be to any changes. 
(default: 0.8)', default=.8,", "pdf_input = PyPDF2.PdfFileReader(args.input_file) pdf_output = PyPDF2.PdfFileWriter() sanitize(pdf_input, pdf_output, args.title_ratio, args.content_ratio) with open(args.output_file, 'wb')", "= diff(get_content(prev_content), get_content(current_content)) title_has_changed = diff_title.ratio() < title_ratio content_has_changed = (diff_content.ratio() < content_ratio", "pad_input.getNumPages() for i in range(1, nb_pages): progress_with_message('Sanitizing pdf ...', i / nb_pages) current_page", "f: pdf_output.write(f) success_with_message(f'Your file has been sanitized at {args.output_file}') if __name__ == '__main__':", "def sanitize(pad_input: PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter, title_ratio: float, content_ratio: float): prev_page = pad_input.getPage(0) nb_pages", "is responsible of detecting similar pages from title. The higher the ratio, the", "title. The higher the ratio, the more sensitive the sanitizer will be to", "of detecting similar pages from content. The higher the ratio, the more sensitive", "any changes. (default: 0.8)', default=.8, dest='content_ratio') def main(): args = parser.parse_args() pdf_input =", "i in range(1, nb_pages): progress_with_message('Sanitizing pdf ...', i / nb_pages) current_page = pad_input.getPage(i)", "try: import PyPDF2 except ImportError: fail_with_message( 'Please install required dependencies before using this", "if not pathlib.Path(path).exists(): raise argparse.ArgumentTypeError('invalid file path') return path def parse_ratio(x): try: x", "get_content(current_content)) title_has_changed = diff_title.ratio() < title_ratio content_has_changed = (diff_content.ratio() < content_ratio and (has_deleted_item(diff_content)", "in range [0.0, 1.0]\" % (x,)) return x def diff(content1: str, content2: str):", "(default: 0.5)', default=.5, dest='title_ratio') parser.add_argument('--content-ratio', type=parse_ratio, help='float between [0, 1] which is responsible", "content_ratio: float): prev_page = pad_input.getPage(0) nb_pages = pad_input.getNumPages() for i in range(1, nb_pages):", "float(x) except ValueError: raise argparse.ArgumentTypeError( \"%r not a floating-point literal\" % (x,)) if", "argparse.ArgumentParser( description=\"Quickly remove useless page from a huge pdf to get a readable", "= diff_title.ratio() < title_ratio content_has_changed = (diff_content.ratio() < content_ratio and (has_deleted_item(diff_content) or len(prev_content)", "args.title_ratio, args.content_ratio) with open(args.output_file, 'wb') as f: pdf_output.write(f) success_with_message(f'Your file has been sanitized", "return True return False def get_title(content): return content.split('\\n')[0] def get_content(content): return content.replace(get_title(content), '').strip()", "x > 1.0: raise argparse.ArgumentTypeError(\"%r not in range [0.0, 1.0]\" % (x,)) return", "len(current_content))) if has_content(prev_content) and (title_has_changed or content_has_changed): pdf_output.addPage(prev_page) prev_page = current_page pdf_output.addPage(prev_page) parser", "fail_with_message, progress_with_message, success_with_message try: import PyPDF2 except ImportError: fail_with_message( 'Please install required dependencies", "has_content(prev_content) and (title_has_changed or content_has_changed): pdf_output.addPage(prev_page) prev_page = current_page pdf_output.addPage(prev_page) parser = argparse.ArgumentParser(", "PyPDF2.PdfFileReader(args.input_file) pdf_output = PyPDF2.PdfFileWriter() sanitize(pdf_input, pdf_output, 
args.title_ratio, args.content_ratio) with open(args.output_file, 'wb') as f:", "-r requirements.txt --user') def parse_file(path: str): if not pathlib.Path(path).exists(): raise argparse.ArgumentTypeError('invalid file path')", "not in range [0.0, 1.0]\" % (x,)) return x def diff(content1: str, content2:", "sensitive the sanitizer will be to any changes. (default: 0.5)', default=.5, dest='title_ratio') parser.add_argument('--content-ratio',", "x def diff(content1: str, content2: str): return difflib.SequenceMatcher(None, content1, content2) def has_deleted_item(diff): for", "operation == 'delete' or operation == 'replace': return True return False def get_title(content):", "parser = argparse.ArgumentParser( description=\"Quickly remove useless page from a huge pdf to get", "higher the ratio, the more sensitive the sanitizer will be to any changes.", "from .utils import fail_with_message, progress_with_message, success_with_message try: import PyPDF2 except ImportError: fail_with_message( 'Please", "parse_file(path: str): if not pathlib.Path(path).exists(): raise argparse.ArgumentTypeError('invalid file path') return path def parse_ratio(x):", "(default: 0.8)', default=.8, dest='content_ratio') def main(): args = parser.parse_args() pdf_input = PyPDF2.PdfFileReader(args.input_file) pdf_output", "= parser.parse_args() pdf_input = PyPDF2.PdfFileReader(args.input_file) pdf_output = PyPDF2.PdfFileWriter() sanitize(pdf_input, pdf_output, args.title_ratio, args.content_ratio) with", "= argparse.ArgumentParser( description=\"Quickly remove useless page from a huge pdf to get a", "the sanitizer will be to any changes. (default: 0.5)', default=.5, dest='title_ratio') parser.add_argument('--content-ratio', type=parse_ratio,", "package.\\n\\t> pip3 install -r requirements.txt --user') def parse_file(path: str): if not pathlib.Path(path).exists(): raise", "will be to any changes. (default: 0.5)', default=.5, dest='title_ratio') parser.add_argument('--content-ratio', type=parse_ratio, help='float between", "content. The higher the ratio, the more sensitive the sanitizer will be to", "the sanitizer will be to any changes. (default: 0.8)', default=.8, dest='content_ratio') def main():", "> len(current_content))) if has_content(prev_content) and (title_has_changed or content_has_changed): pdf_output.addPage(prev_page) prev_page = current_page pdf_output.addPage(prev_page)", "any changes. 
(default: 0.5)', default=.5, dest='title_ratio') parser.add_argument('--content-ratio', type=parse_ratio, help='float between [0, 1] which", "'').strip() def has_content(content): return len(get_content(content)) != 0 def sanitize(pad_input: PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter, title_ratio:", "\"%r not a floating-point literal\" % (x,)) if x < 0.0 or x", "title_has_changed = diff_title.ratio() < title_ratio content_has_changed = (diff_content.ratio() < content_ratio and (has_deleted_item(diff_content) or", "using this package.\\n\\t> pip3 install -r requirements.txt --user') def parse_file(path: str): if not", "/ nb_pages) current_page = pad_input.getPage(i) current_content = current_page.extractText() prev_content = prev_page.extractText() diff_title =", "str): if not pathlib.Path(path).exists(): raise argparse.ArgumentTypeError('invalid file path') return path def parse_ratio(x): try:", "def get_title(content): return content.split('\\n')[0] def get_content(content): return content.replace(get_title(content), '').strip() def has_content(content): return len(get_content(content))", "install required dependencies before using this package.\\n\\t> pip3 install -r requirements.txt --user') def", "diff(get_title(prev_content), get_title(current_content)) diff_content = diff(get_content(prev_content), get_content(current_content)) title_has_changed = diff_title.ratio() < title_ratio content_has_changed =", "readable pdf\") parser.add_argument('input_file', type=parse_file, help='pdf file to be sanitized') parser.add_argument('output_file', type=str, help='output sanitized", "0.8)', default=.8, dest='content_ratio') def main(): args = parser.parse_args() pdf_input = PyPDF2.PdfFileReader(args.input_file) pdf_output =", "changes. (default: 0.8)', default=.8, dest='content_ratio') def main(): args = parser.parse_args() pdf_input = PyPDF2.PdfFileReader(args.input_file)", "current_page pdf_output.addPage(prev_page) parser = argparse.ArgumentParser( description=\"Quickly remove useless page from a huge pdf", "or content_has_changed): pdf_output.addPage(prev_page) prev_page = current_page pdf_output.addPage(prev_page) parser = argparse.ArgumentParser( description=\"Quickly remove useless", "file name') parser.add_argument('--title-ratio', type=parse_ratio, help='float between [0, 1] which is responsible of detecting", "== 'replace': return True return False def get_title(content): return content.split('\\n')[0] def get_content(content): return", "[0.0, 1.0]\" % (x,)) return x def diff(content1: str, content2: str): return difflib.SequenceMatcher(None,", "= PyPDF2.PdfFileReader(args.input_file) pdf_output = PyPDF2.PdfFileWriter() sanitize(pdf_input, pdf_output, args.title_ratio, args.content_ratio) with open(args.output_file, 'wb') as", "'replace': return True return False def get_title(content): return content.split('\\n')[0] def get_content(content): return content.replace(get_title(content),", "range [0.0, 1.0]\" % (x,)) return x def diff(content1: str, content2: str): return", "(diff_content.ratio() < content_ratio and (has_deleted_item(diff_content) or len(prev_content) > len(current_content))) if has_content(prev_content) and (title_has_changed", "ValueError: raise argparse.ArgumentTypeError( \"%r not a floating-point literal\" % (x,)) if x <", "< 0.0 or x > 1.0: raise argparse.ArgumentTypeError(\"%r not in range [0.0, 1.0]\"", "nb_pages) current_page = pad_input.getPage(i) current_content = current_page.extractText() prev_content = prev_page.extractText() diff_title = 
diff(get_title(prev_content),", "except ValueError: raise argparse.ArgumentTypeError( \"%r not a floating-point literal\" % (x,)) if x", "[0, 1] which is responsible of detecting similar pages from content. The higher", "0.0 or x > 1.0: raise argparse.ArgumentTypeError(\"%r not in range [0.0, 1.0]\" %", "return content.replace(get_title(content), '').strip() def has_content(content): return len(get_content(content)) != 0 def sanitize(pad_input: PyPDF2.PdfFileReader, pdf_output:", "= float(x) except ValueError: raise argparse.ArgumentTypeError( \"%r not a floating-point literal\" % (x,))", "help='output sanitized pdf file name') parser.add_argument('--title-ratio', type=parse_ratio, help='float between [0, 1] which is", "prev_content = prev_page.extractText() diff_title = diff(get_title(prev_content), get_title(current_content)) diff_content = diff(get_content(prev_content), get_content(current_content)) title_has_changed =", "parse_ratio(x): try: x = float(x) except ValueError: raise argparse.ArgumentTypeError( \"%r not a floating-point", "[0, 1] which is responsible of detecting similar pages from title. The higher", "= current_page pdf_output.addPage(prev_page) parser = argparse.ArgumentParser( description=\"Quickly remove useless page from a huge", "diff(content1: str, content2: str): return difflib.SequenceMatcher(None, content1, content2) def has_deleted_item(diff): for operation, *_", "= prev_page.extractText() diff_title = diff(get_title(prev_content), get_title(current_content)) diff_content = diff(get_content(prev_content), get_content(current_content)) title_has_changed = diff_title.ratio()", "help='float between [0, 1] which is responsible of detecting similar pages from content.", "sanitizer will be to any changes. (default: 0.8)', default=.8, dest='content_ratio') def main(): args", "import difflib import pathlib import argparse from .utils import fail_with_message, progress_with_message, success_with_message try:", "content_has_changed = (diff_content.ratio() < content_ratio and (has_deleted_item(diff_content) or len(prev_content) > len(current_content))) if has_content(prev_content)", "parser.add_argument('--content-ratio', type=parse_ratio, help='float between [0, 1] which is responsible of detecting similar pages", "sanitizer will be to any changes. (default: 0.5)', default=.5, dest='title_ratio') parser.add_argument('--content-ratio', type=parse_ratio, help='float", "file path') return path def parse_ratio(x): try: x = float(x) except ValueError: raise", "pdf_output.write(f) success_with_message(f'Your file has been sanitized at {args.output_file}') if __name__ == '__main__': main()", "file to be sanitized') parser.add_argument('output_file', type=str, help='output sanitized pdf file name') parser.add_argument('--title-ratio', type=parse_ratio,", "= pad_input.getPage(i) current_content = current_page.extractText() prev_content = prev_page.extractText() diff_title = diff(get_title(prev_content), get_title(current_content)) diff_content", "1] which is responsible of detecting similar pages from content. 
The higher the", "args.content_ratio) with open(args.output_file, 'wb') as f: pdf_output.write(f) success_with_message(f'Your file has been sanitized at", "x = float(x) except ValueError: raise argparse.ArgumentTypeError( \"%r not a floating-point literal\" %", "requirements.txt --user') def parse_file(path: str): if not pathlib.Path(path).exists(): raise argparse.ArgumentTypeError('invalid file path') return", "be sanitized') parser.add_argument('output_file', type=str, help='output sanitized pdf file name') parser.add_argument('--title-ratio', type=parse_ratio, help='float between", "detecting similar pages from title. The higher the ratio, the more sensitive the", "progress_with_message, success_with_message try: import PyPDF2 except ImportError: fail_with_message( 'Please install required dependencies before", "False def get_title(content): return content.split('\\n')[0] def get_content(content): return content.replace(get_title(content), '').strip() def has_content(content): return", "the ratio, the more sensitive the sanitizer will be to any changes. (default:", "content2) def has_deleted_item(diff): for operation, *_ in diff.get_opcodes(): if operation == 'delete' or", "= pad_input.getPage(0) nb_pages = pad_input.getNumPages() for i in range(1, nb_pages): progress_with_message('Sanitizing pdf ...',", "content_has_changed): pdf_output.addPage(prev_page) prev_page = current_page pdf_output.addPage(prev_page) parser = argparse.ArgumentParser( description=\"Quickly remove useless page", "with open(args.output_file, 'wb') as f: pdf_output.write(f) success_with_message(f'Your file has been sanitized at {args.output_file}')", "get_title(current_content)) diff_content = diff(get_content(prev_content), get_content(current_content)) title_has_changed = diff_title.ratio() < title_ratio content_has_changed = (diff_content.ratio()", "pathlib.Path(path).exists(): raise argparse.ArgumentTypeError('invalid file path') return path def parse_ratio(x): try: x = float(x)", "prev_page.extractText() diff_title = diff(get_title(prev_content), get_title(current_content)) diff_content = diff(get_content(prev_content), get_content(current_content)) title_has_changed = diff_title.ratio() <", "dest='content_ratio') def main(): args = parser.parse_args() pdf_input = PyPDF2.PdfFileReader(args.input_file) pdf_output = PyPDF2.PdfFileWriter() sanitize(pdf_input,", "pdf_output.addPage(prev_page) prev_page = current_page pdf_output.addPage(prev_page) parser = argparse.ArgumentParser( description=\"Quickly remove useless page from", "which is responsible of detecting similar pages from title. The higher the ratio,", "def get_content(content): return content.replace(get_title(content), '').strip() def has_content(content): return len(get_content(content)) != 0 def sanitize(pad_input:", "for i in range(1, nb_pages): progress_with_message('Sanitizing pdf ...', i / nb_pages) current_page =", "parser.add_argument('input_file', type=parse_file, help='pdf file to be sanitized') parser.add_argument('output_file', type=str, help='output sanitized pdf file", "import fail_with_message, progress_with_message, success_with_message try: import PyPDF2 except ImportError: fail_with_message( 'Please install required", "between [0, 1] which is responsible of detecting similar pages from content. 
The", "default=.5, dest='title_ratio') parser.add_argument('--content-ratio', type=parse_ratio, help='float between [0, 1] which is responsible of detecting", "pdf_output = PyPDF2.PdfFileWriter() sanitize(pdf_input, pdf_output, args.title_ratio, args.content_ratio) with open(args.output_file, 'wb') as f: pdf_output.write(f)", "sensitive the sanitizer will be to any changes. (default: 0.8)', default=.8, dest='content_ratio') def", "parser.add_argument('--title-ratio', type=parse_ratio, help='float between [0, 1] which is responsible of detecting similar pages", "pdf_output, args.title_ratio, args.content_ratio) with open(args.output_file, 'wb') as f: pdf_output.write(f) success_with_message(f'Your file has been", "1.0]\" % (x,)) return x def diff(content1: str, content2: str): return difflib.SequenceMatcher(None, content1,", "literal\" % (x,)) if x < 0.0 or x > 1.0: raise argparse.ArgumentTypeError(\"%r", "title_ratio content_has_changed = (diff_content.ratio() < content_ratio and (has_deleted_item(diff_content) or len(prev_content) > len(current_content))) if", "to any changes. (default: 0.8)', default=.8, dest='content_ratio') def main(): args = parser.parse_args() pdf_input", "try: x = float(x) except ValueError: raise argparse.ArgumentTypeError( \"%r not a floating-point literal\"", "from title. The higher the ratio, the more sensitive the sanitizer will be", "to get a readable pdf\") parser.add_argument('input_file', type=parse_file, help='pdf file to be sanitized') parser.add_argument('output_file',", "and (title_has_changed or content_has_changed): pdf_output.addPage(prev_page) prev_page = current_page pdf_output.addPage(prev_page) parser = argparse.ArgumentParser( description=\"Quickly", "ratio, the more sensitive the sanitizer will be to any changes. (default: 0.5)',", "current_page.extractText() prev_content = prev_page.extractText() diff_title = diff(get_title(prev_content), get_title(current_content)) diff_content = diff(get_content(prev_content), get_content(current_content)) title_has_changed", "similar pages from content. The higher the ratio, the more sensitive the sanitizer", "= current_page.extractText() prev_content = prev_page.extractText() diff_title = diff(get_title(prev_content), get_title(current_content)) diff_content = diff(get_content(prev_content), get_content(current_content))", "argparse.ArgumentTypeError(\"%r not in range [0.0, 1.0]\" % (x,)) return x def diff(content1: str,", "which is responsible of detecting similar pages from content. The higher the ratio,", "PyPDF2.PdfFileWriter() sanitize(pdf_input, pdf_output, args.title_ratio, args.content_ratio) with open(args.output_file, 'wb') as f: pdf_output.write(f) success_with_message(f'Your file", "'wb') as f: pdf_output.write(f) success_with_message(f'Your file has been sanitized at {args.output_file}') if __name__", "this package.\\n\\t> pip3 install -r requirements.txt --user') def parse_file(path: str): if not pathlib.Path(path).exists():", "useless page from a huge pdf to get a readable pdf\") parser.add_argument('input_file', type=parse_file,", "sanitized pdf file name') parser.add_argument('--title-ratio', type=parse_ratio, help='float between [0, 1] which is responsible", "to any changes. 
(default: 0.5)', default=.5, dest='title_ratio') parser.add_argument('--content-ratio', type=parse_ratio, help='float between [0, 1]", "!= 0 def sanitize(pad_input: PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter, title_ratio: float, content_ratio: float): prev_page =", "nb_pages): progress_with_message('Sanitizing pdf ...', i / nb_pages) current_page = pad_input.getPage(i) current_content = current_page.extractText()", "difflib.SequenceMatcher(None, content1, content2) def has_deleted_item(diff): for operation, *_ in diff.get_opcodes(): if operation ==", "and (has_deleted_item(diff_content) or len(prev_content) > len(current_content))) if has_content(prev_content) and (title_has_changed or content_has_changed): pdf_output.addPage(prev_page)", "len(prev_content) > len(current_content))) if has_content(prev_content) and (title_has_changed or content_has_changed): pdf_output.addPage(prev_page) prev_page = current_page", "parser.parse_args() pdf_input = PyPDF2.PdfFileReader(args.input_file) pdf_output = PyPDF2.PdfFileWriter() sanitize(pdf_input, pdf_output, args.title_ratio, args.content_ratio) with open(args.output_file,", "success_with_message try: import PyPDF2 except ImportError: fail_with_message( 'Please install required dependencies before using", "= pad_input.getNumPages() for i in range(1, nb_pages): progress_with_message('Sanitizing pdf ...', i / nb_pages)", "if has_content(prev_content) and (title_has_changed or content_has_changed): pdf_output.addPage(prev_page) prev_page = current_page pdf_output.addPage(prev_page) parser =", "import PyPDF2 except ImportError: fail_with_message( 'Please install required dependencies before using this package.\\n\\t>", "operation, *_ in diff.get_opcodes(): if operation == 'delete' or operation == 'replace': return", "pad_input.getPage(0) nb_pages = pad_input.getNumPages() for i in range(1, nb_pages): progress_with_message('Sanitizing pdf ...', i", "except ImportError: fail_with_message( 'Please install required dependencies before using this package.\\n\\t> pip3 install", "PyPDF2 except ImportError: fail_with_message( 'Please install required dependencies before using this package.\\n\\t> pip3", "diff.get_opcodes(): if operation == 'delete' or operation == 'replace': return True return False", "% (x,)) return x def diff(content1: str, content2: str): return difflib.SequenceMatcher(None, content1, content2)", "argparse from .utils import fail_with_message, progress_with_message, success_with_message try: import PyPDF2 except ImportError: fail_with_message(", "content1, content2) def has_deleted_item(diff): for operation, *_ in diff.get_opcodes(): if operation == 'delete'", "(title_has_changed or content_has_changed): pdf_output.addPage(prev_page) prev_page = current_page pdf_output.addPage(prev_page) parser = argparse.ArgumentParser( description=\"Quickly remove", "if operation == 'delete' or operation == 'replace': return True return False def", "not a floating-point literal\" % (x,)) if x < 0.0 or x >", "pdf\") parser.add_argument('input_file', type=parse_file, help='pdf file to be sanitized') parser.add_argument('output_file', type=str, help='output sanitized pdf", "responsible of detecting similar pages from content. The higher the ratio, the more", "path') return path def parse_ratio(x): try: x = float(x) except ValueError: raise argparse.ArgumentTypeError(", "ratio, the more sensitive the sanitizer will be to any changes. 
(default: 0.8)',", "*_ in diff.get_opcodes(): if operation == 'delete' or operation == 'replace': return True", "be to any changes. (default: 0.8)', default=.8, dest='content_ratio') def main(): args = parser.parse_args()", "default=.8, dest='content_ratio') def main(): args = parser.parse_args() pdf_input = PyPDF2.PdfFileReader(args.input_file) pdf_output = PyPDF2.PdfFileWriter()", "'Please install required dependencies before using this package.\\n\\t> pip3 install -r requirements.txt --user')", "progress_with_message('Sanitizing pdf ...', i / nb_pages) current_page = pad_input.getPage(i) current_content = current_page.extractText() prev_content", "of detecting similar pages from title. The higher the ratio, the more sensitive", "return path def parse_ratio(x): try: x = float(x) except ValueError: raise argparse.ArgumentTypeError( \"%r", "i / nb_pages) current_page = pad_input.getPage(i) current_content = current_page.extractText() prev_content = prev_page.extractText() diff_title", "get_title(content): return content.split('\\n')[0] def get_content(content): return content.replace(get_title(content), '').strip() def has_content(content): return len(get_content(content)) !=", "current_page = pad_input.getPage(i) current_content = current_page.extractText() prev_content = prev_page.extractText() diff_title = diff(get_title(prev_content), get_title(current_content))", "main(): args = parser.parse_args() pdf_input = PyPDF2.PdfFileReader(args.input_file) pdf_output = PyPDF2.PdfFileWriter() sanitize(pdf_input, pdf_output, args.title_ratio,", "sanitize(pad_input: PyPDF2.PdfFileReader, pdf_output: PyPDF2.PdfFileWriter, title_ratio: float, content_ratio: float): prev_page = pad_input.getPage(0) nb_pages =", "required dependencies before using this package.\\n\\t> pip3 install -r requirements.txt --user') def parse_file(path:", "% (x,)) if x < 0.0 or x > 1.0: raise argparse.ArgumentTypeError(\"%r not", "raise argparse.ArgumentTypeError( \"%r not a floating-point literal\" % (x,)) if x < 0.0", "sanitize(pdf_input, pdf_output, args.title_ratio, args.content_ratio) with open(args.output_file, 'wb') as f: pdf_output.write(f) success_with_message(f'Your file has", "content2: str): return difflib.SequenceMatcher(None, content1, content2) def has_deleted_item(diff): for operation, *_ in diff.get_opcodes():", "pdf_output.addPage(prev_page) parser = argparse.ArgumentParser( description=\"Quickly remove useless page from a huge pdf to", "responsible of detecting similar pages from title. The higher the ratio, the more" ]
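The page-dropping heuristic in sanitize() rests on two difflib.SequenceMatcher calls: ratio(), whose score is compared against the --title-ratio / --content-ratio thresholds, and get_opcodes(), whose 'delete' and 'replace' tags are what has_deleted_item() looks for. The standalone snippet below illustrates just those two standard-library calls; the example strings are invented for the illustration.

# Standalone illustration of the difflib calls used by diff() and has_deleted_item();
# the example strings are made up.
import difflib

prev_text = "Agenda\n- intro\n- results\n- next steps"
curr_text = "Agenda\n- intro\n- results"

matcher = difflib.SequenceMatcher(None, prev_text, curr_text)
print(matcher.ratio())  # similarity in [0, 1]; sanitize() compares this against its thresholds

for tag, i1, i2, j1, j2 in matcher.get_opcodes():
    # 'delete' or 'replace' tags mean text vanished between the two pages,
    # which is exactly what has_deleted_item() checks for.
    print(tag, repr(prev_text[i1:i2]), repr(curr_text[j1:j2]))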
[ "of image number to plate, well img_to_pw = {} # populate the lookup", "well = (int(rec['Row']) - 1) * 24 + int(rec['Col']) img_to_pw[img_num] = (rec['Plate'],well) #", "parser.add_option(\"-s\", \"--suffix\", dest=\"suffix\", help=\"specify the suffix for data files\") parser.add_option(\"-d\", \"--dataframe\", dest=\"dataframe\", help=\"read", "a csv file describing the data set here\") parser.add_option(\"-o\", \"--filename\", dest=\"filename\", help=\"specify the", "into an EArray. ds = h5file.create_earray(where=well_group, name=str(well), atom=atom, shape=(0,my_data.shape[1]), filters=zlib_filters) ds.append(objs) h5file.flush() os.chdir(cur_dir)", "data set here\") parser.add_option(\"-o\", \"--filename\", dest=\"filename\", help=\"specify the .h5 filename that will contain", "hdf5 file filename = options.filename h5file = openFile(filename, mode = \"w\", title =", "# Read all the files, process 'em. zlib_filters = Filters(complib='zlib', complevel=5) for i,f", "Plates go 1 .. 14. Rows go 1 ... 16, Cols 1 ...", "images belonging to this well have yet been dumped into an EArray. ds", "plate, well = img_to_pw[img_num] except KeyError as e: print \"image number not found", "for this replicate') # Create a group for each plate for plate in", "10 == 0: print \"processing %s, %d files done of %d total\" %", "= (int(rec['Row']) - 1) * 24 + int(rec['Col']) img_to_pw[img_num] = (rec['Plate'],well) # get", "the layout of the experimental data df = pandas.read_csv(options.dataframe) all_plates = set(df['Plate']) #", "the lookup table of image number to (plate, well) for index, rec in", "1 ... 16, Cols 1 ... 24. \"\"\" import os from optparse import", "\"w\", title = \"Data File\") # Load the dataframe describing the layout of", "an hdf5 file that is organized by plate.well Plates go 1 .. 14.", "well map: \" + str(img_num) continue objs = my_data[my_data[:,0] == img_num] well_group =", "# Open and prepare an hdf5 file filename = options.filename h5file = openFile(filename,", "= (rec['Plate'],well) # get the root root = h5file.root # Go and read", "#! /usr/bin/env python \"\"\" Vacuum up all the object.CSV files from the given", "plate for plate in all_plates: desc = \"plate number \" + str(plate) h5file.createGroup(\"/plates/\",str(plate),desc)", "no data from images belonging to this well have yet been dumped into", "not found in image to well map: \" + str(img_num) continue objs =", "mode = \"w\", title = \"Data File\") # Load the dataframe describing the", "for img_num in xrange(min_img,max_img+1): try: plate, well = img_to_pw[img_num] except KeyError as e:", "for img_num in xrange(rec['Low'],rec['High'] + 1): well = (int(rec['Row']) - 1) * 24", "plate in all_plates: desc = \"plate number \" + str(plate) h5file.createGroup(\"/plates/\",str(plate),desc) # build", "options are present, else print help msg parser = OptionParser() parser.add_option(\"-i\", \"--input\", dest=\"indir\",", "in all_plates: desc = \"plate number \" + str(plate) h5file.createGroup(\"/plates/\",str(plate),desc) # build a", "= options.indir suffix = options.suffix cur_dir = os.getcwd() try: files = os.listdir(input_dir) os.chdir(input_dir)", "(plate, well) for index, rec in df.iterrows(): for img_num in xrange(rec['Low'],rec['High'] + 1):", "np.genfromtxt(f, delimiter=',', autostrip = True) atom = Atom.from_dtype(my_data.dtype) # slice this data file", "that options are present, else print help msg parser = OptionParser() parser.add_option(\"-i\", \"--input\",", "Read all the files, process 'em. 
zlib_filters = Filters(complib='zlib', complevel=5) for i,f in", "\"/plates/\" + str(plate) well_node = \"/plates/\" + str(plate) + \"/\" + str(well) if", "total\" % (f,i,len(files)) if f.endswith(suffix): my_data = np.genfromtxt(f, delimiter=',', autostrip = True) atom", "root root = h5file.root # Go and read the files, input_dir = options.indir", "= parser.parse_args() # Open and prepare an hdf5 file filename = options.filename h5file", "else: # no data from images belonging to this well have yet been", "%s, %d files done of %d total\" % (f,i,len(files)) if f.endswith(suffix): my_data =", "== img_num] well_group = \"/plates/\" + str(plate) well_node = \"/plates/\" + str(plate) +", "pandas from tables.file import File, openFile from tables import Filters from tables import", "str(plate) well_node = \"/plates/\" + str(plate) + \"/\" + str(well) if h5file.__contains__(well_node): #", "+ int(rec['Col']) img_to_pw[img_num] = (rec['Plate'],well) # get the root root = h5file.root #", "python \"\"\" Vacuum up all the object.CSV files from the given input directory,", "data from images belonging to this well have yet been dumped into an", "input_dir # Read all the files, process 'em. zlib_filters = Filters(complib='zlib', complevel=5) for", "'em. zlib_filters = Filters(complib='zlib', complevel=5) for i,f in enumerate(files): if i % 10", "process 'em. zlib_filters = Filters(complib='zlib', complevel=5) for i,f in enumerate(files): if i %", "parser = OptionParser() parser.add_option(\"-i\", \"--input\", dest=\"indir\", help=\"read input from here\") parser.add_option(\"-s\", \"--suffix\", dest=\"suffix\",", "import Atom import numpy as np # Check that options are present, else", "\"\"\" import os from optparse import OptionParser import pandas from tables.file import File,", "= openFile(filename, mode = \"w\", title = \"Data File\") # Load the dataframe", "input from here\") parser.add_option(\"-s\", \"--suffix\", dest=\"suffix\", help=\"specify the suffix for data files\") parser.add_option(\"-d\",", "from tables import Atom import numpy as np # Check that options are", "the files, process 'em. zlib_filters = Filters(complib='zlib', complevel=5) for i,f in enumerate(files): if", "for index, rec in df.iterrows(): for img_num in xrange(rec['Low'],rec['High'] + 1): well =", "dataframe describing the layout of the experimental data df = pandas.read_csv(options.dataframe) all_plates =", "Go and read the files, input_dir = options.indir suffix = options.suffix cur_dir =", "set here\") parser.add_option(\"-o\", \"--filename\", dest=\"filename\", help=\"specify the .h5 filename that will contain all", "complevel=5) for i,f in enumerate(files): if i % 10 == 0: print \"processing", "autostrip = True) atom = Atom.from_dtype(my_data.dtype) # slice this data file by grouped", "well exists in an EArray already, append this data to it. 
ds =", "xrange(rec['Low'],rec['High'] + 1): well = (int(rec['Row']) - 1) * 24 + int(rec['Col']) img_to_pw[img_num]", "# build a lookup of image number to plate, well img_to_pw = {}", "done of %d total\" % (f,i,len(files)) if f.endswith(suffix): my_data = np.genfromtxt(f, delimiter=',', autostrip", "in enumerate(files): if i % 10 == 0: print \"processing %s, %d files", "by grouped image numbers min_img, max_img = int(min(my_data[:,0])), int(max(my_data[:,0])) for img_num in xrange(min_img,max_img+1):", "as e: print \"image number not found in image to well map: \"", "= {} # populate the lookup table of image number to (plate, well)", "parser.add_option(\"-i\", \"--input\", dest=\"indir\", help=\"read input from here\") parser.add_option(\"-s\", \"--suffix\", dest=\"suffix\", help=\"specify the suffix", "data file by grouped image numbers min_img, max_img = int(min(my_data[:,0])), int(max(my_data[:,0])) for img_num", "if f.endswith(suffix): my_data = np.genfromtxt(f, delimiter=',', autostrip = True) atom = Atom.from_dtype(my_data.dtype) #", "already, append this data to it. ds = h5file.get_node(where=well_node) ds.append(objs) else: # no", "the experimental data df = pandas.read_csv(options.dataframe) all_plates = set(df['Plate']) # Create a new", "File\") # Load the dataframe describing the layout of the experimental data df", "# Create a group for each plate for plate in all_plates: desc =", "from optparse import OptionParser import pandas from tables.file import File, openFile from tables", "Create a group for each plate for plate in all_plates: desc = \"plate", "plate, well img_to_pw = {} # populate the lookup table of image number", "24 + int(rec['Col']) img_to_pw[img_num] = (rec['Plate'],well) # get the root root = h5file.root", "well_group = \"/plates/\" + str(plate) well_node = \"/plates/\" + str(plate) + \"/\" +", "cur_dir = os.getcwd() try: files = os.listdir(input_dir) os.chdir(input_dir) except: print \"Could not read", "1): well = (int(rec['Row']) - 1) * 24 + int(rec['Col']) img_to_pw[img_num] = (rec['Plate'],well)", "df.iterrows(): for img_num in xrange(rec['Low'],rec['High'] + 1): well = (int(rec['Row']) - 1) *", "import numpy as np # Check that options are present, else print help", "\" + str(img_num) continue objs = my_data[my_data[:,0] == img_num] well_group = \"/plates/\" +", "tables import Atom import numpy as np # Check that options are present,", "files from the given input directory, and pack them into an hdf5 file", "this data to it. ds = h5file.get_node(where=well_node) ds.append(objs) else: # no data from", "index, rec in df.iterrows(): for img_num in xrange(rec['Low'],rec['High'] + 1): well = (int(rec['Row'])", "\"/plates/\" + str(plate) + \"/\" + str(well) if h5file.__contains__(well_node): # some data for", "contain all the data\") (options, args) = parser.parse_args() # Open and prepare an", "Create a new group under \"/\" (root) plates_group = h5file.createGroup(\"/\", 'plates', 'the plates", "dest=\"dataframe\", help=\"read a csv file describing the data set here\") parser.add_option(\"-o\", \"--filename\", dest=\"filename\",", "hdf5 file that is organized by plate.well Plates go 1 .. 14. Rows", "well have yet been dumped into an EArray. ds = h5file.create_earray(where=well_group, name=str(well), atom=atom,", "try: files = os.listdir(input_dir) os.chdir(input_dir) except: print \"Could not read files from \"", "str(plate) + \"/\" + str(well) if h5file.__contains__(well_node): # some data for this well", "in an EArray already, append this data to it. 
ds = h5file.get_node(where=well_node) ds.append(objs)", "files\") parser.add_option(\"-d\", \"--dataframe\", dest=\"dataframe\", help=\"read a csv file describing the data set here\")", "+ str(img_num) continue objs = my_data[my_data[:,0] == img_num] well_group = \"/plates/\" + str(plate)", "plate.well Plates go 1 .. 14. Rows go 1 ... 16, Cols 1", "in image to well map: \" + str(img_num) continue objs = my_data[my_data[:,0] ==", "data for this well exists in an EArray already, append this data to", "number \" + str(plate) h5file.createGroup(\"/plates/\",str(plate),desc) # build a lookup of image number to", "the given input directory, and pack them into an hdf5 file that is", "from images belonging to this well have yet been dumped into an EArray.", "file describing the data set here\") parser.add_option(\"-o\", \"--filename\", dest=\"filename\", help=\"specify the .h5 filename", "desc = \"plate number \" + str(plate) h5file.createGroup(\"/plates/\",str(plate),desc) # build a lookup of", "input_dir = options.indir suffix = options.suffix cur_dir = os.getcwd() try: files = os.listdir(input_dir)", "\"Could not read files from \" + input_dir # Read all the files,", "parser.add_option(\"-o\", \"--filename\", dest=\"filename\", help=\"specify the .h5 filename that will contain all the data\")", "# Go and read the files, input_dir = options.indir suffix = options.suffix cur_dir", "= OptionParser() parser.add_option(\"-i\", \"--input\", dest=\"indir\", help=\"read input from here\") parser.add_option(\"-s\", \"--suffix\", dest=\"suffix\", help=\"specify", "\" + input_dir # Read all the files, process 'em. zlib_filters = Filters(complib='zlib',", "ds = h5file.get_node(where=well_node) ds.append(objs) else: # no data from images belonging to this", "os.chdir(input_dir) except: print \"Could not read files from \" + input_dir # Read", "min_img, max_img = int(min(my_data[:,0])), int(max(my_data[:,0])) for img_num in xrange(min_img,max_img+1): try: plate, well =", "(int(rec['Row']) - 1) * 24 + int(rec['Col']) img_to_pw[img_num] = (rec['Plate'],well) # get the", "\"processing %s, %d files done of %d total\" % (f,i,len(files)) if f.endswith(suffix): my_data", "\"--suffix\", dest=\"suffix\", help=\"specify the suffix for data files\") parser.add_option(\"-d\", \"--dataframe\", dest=\"dataframe\", help=\"read a", "KeyError as e: print \"image number not found in image to well map:", "import OptionParser import pandas from tables.file import File, openFile from tables import Filters", "1 ... 24. \"\"\" import os from optparse import OptionParser import pandas from", "print help msg parser = OptionParser() parser.add_option(\"-i\", \"--input\", dest=\"indir\", help=\"read input from here\")", "well img_to_pw = {} # populate the lookup table of image number to", "go 1 ... 16, Cols 1 ... 24. \"\"\" import os from optparse", "% (f,i,len(files)) if f.endswith(suffix): my_data = np.genfromtxt(f, delimiter=',', autostrip = True) atom =", "an EArray. 
ds = h5file.create_earray(where=well_group, name=str(well), atom=atom, shape=(0,my_data.shape[1]), filters=zlib_filters) ds.append(objs) h5file.flush() os.chdir(cur_dir) print", "table of image number to (plate, well) for index, rec in df.iterrows(): for", "Atom import numpy as np # Check that options are present, else print", "grouped image numbers min_img, max_img = int(min(my_data[:,0])), int(max(my_data[:,0])) for img_num in xrange(min_img,max_img+1): try:", "help=\"read input from here\") parser.add_option(\"-s\", \"--suffix\", dest=\"suffix\", help=\"specify the suffix for data files\")", "will contain all the data\") (options, args) = parser.parse_args() # Open and prepare", "dest=\"filename\", help=\"specify the .h5 filename that will contain all the data\") (options, args)", "slice this data file by grouped image numbers min_img, max_img = int(min(my_data[:,0])), int(max(my_data[:,0]))", "optparse import OptionParser import pandas from tables.file import File, openFile from tables import", "plates_group = h5file.createGroup(\"/\", 'plates', 'the plates for this replicate') # Create a group", "# Create a new group under \"/\" (root) plates_group = h5file.createGroup(\"/\", 'plates', 'the", "in xrange(rec['Low'],rec['High'] + 1): well = (int(rec['Row']) - 1) * 24 + int(rec['Col'])", "image to well map: \" + str(img_num) continue objs = my_data[my_data[:,0] == img_num]", "help=\"specify the .h5 filename that will contain all the data\") (options, args) =", "\"--input\", dest=\"indir\", help=\"read input from here\") parser.add_option(\"-s\", \"--suffix\", dest=\"suffix\", help=\"specify the suffix for", "dest=\"suffix\", help=\"specify the suffix for data files\") parser.add_option(\"-d\", \"--dataframe\", dest=\"dataframe\", help=\"read a csv", "= options.suffix cur_dir = os.getcwd() try: files = os.listdir(input_dir) os.chdir(input_dir) except: print \"Could", "pack them into an hdf5 file that is organized by plate.well Plates go", "the object.CSV files from the given input directory, and pack them into an", "in xrange(min_img,max_img+1): try: plate, well = img_to_pw[img_num] except KeyError as e: print \"image", "import pandas from tables.file import File, openFile from tables import Filters from tables", "openFile(filename, mode = \"w\", title = \"Data File\") # Load the dataframe describing", "parser.add_option(\"-d\", \"--dataframe\", dest=\"dataframe\", help=\"read a csv file describing the data set here\") parser.add_option(\"-o\",", "this well have yet been dumped into an EArray. ds = h5file.create_earray(where=well_group, name=str(well),", "in df.iterrows(): for img_num in xrange(rec['Low'],rec['High'] + 1): well = (int(rec['Row']) - 1)", "+ str(plate) h5file.createGroup(\"/plates/\",str(plate),desc) # build a lookup of image number to plate, well", "files = os.listdir(input_dir) os.chdir(input_dir) except: print \"Could not read files from \" +", "image number to (plate, well) for index, rec in df.iterrows(): for img_num in", "= \"w\", title = \"Data File\") # Load the dataframe describing the layout", "EArray already, append this data to it. 
ds = h5file.get_node(where=well_node) ds.append(objs) else: #", "of the experimental data df = pandas.read_csv(options.dataframe) all_plates = set(df['Plate']) # Create a", "title = \"Data File\") # Load the dataframe describing the layout of the", "and prepare an hdf5 file filename = options.filename h5file = openFile(filename, mode =", "%d total\" % (f,i,len(files)) if f.endswith(suffix): my_data = np.genfromtxt(f, delimiter=',', autostrip = True)", "Filters from tables import Atom import numpy as np # Check that options", "# get the root root = h5file.root # Go and read the files,", "int(rec['Col']) img_to_pw[img_num] = (rec['Plate'],well) # get the root root = h5file.root # Go", "the data\") (options, args) = parser.parse_args() # Open and prepare an hdf5 file", "# Load the dataframe describing the layout of the experimental data df =", "\"\"\" Vacuum up all the object.CSV files from the given input directory, and", "if i % 10 == 0: print \"processing %s, %d files done of", "from here\") parser.add_option(\"-s\", \"--suffix\", dest=\"suffix\", help=\"specify the suffix for data files\") parser.add_option(\"-d\", \"--dataframe\",", "well_node = \"/plates/\" + str(plate) + \"/\" + str(well) if h5file.__contains__(well_node): # some", "that is organized by plate.well Plates go 1 .. 14. Rows go 1", "print \"image number not found in image to well map: \" + str(img_num)", "describing the layout of the experimental data df = pandas.read_csv(options.dataframe) all_plates = set(df['Plate'])", "ds.append(objs) else: # no data from images belonging to this well have yet", "e: print \"image number not found in image to well map: \" +", "organized by plate.well Plates go 1 .. 14. Rows go 1 ... 16,", "= set(df['Plate']) # Create a new group under \"/\" (root) plates_group = h5file.createGroup(\"/\",", "my_data[my_data[:,0] == img_num] well_group = \"/plates/\" + str(plate) well_node = \"/plates/\" + str(plate)", "here\") parser.add_option(\"-s\", \"--suffix\", dest=\"suffix\", help=\"specify the suffix for data files\") parser.add_option(\"-d\", \"--dataframe\", dest=\"dataframe\",", "my_data = np.genfromtxt(f, delimiter=',', autostrip = True) atom = Atom.from_dtype(my_data.dtype) # slice this", "= np.genfromtxt(f, delimiter=',', autostrip = True) atom = Atom.from_dtype(my_data.dtype) # slice this data", "number to (plate, well) for index, rec in df.iterrows(): for img_num in xrange(rec['Low'],rec['High']", "img_num in xrange(min_img,max_img+1): try: plate, well = img_to_pw[img_num] except KeyError as e: print", "(root) plates_group = h5file.createGroup(\"/\", 'plates', 'the plates for this replicate') # Create a", "\"--filename\", dest=\"filename\", help=\"specify the .h5 filename that will contain all the data\") (options,", "= Filters(complib='zlib', complevel=5) for i,f in enumerate(files): if i % 10 == 0:", "data\") (options, args) = parser.parse_args() # Open and prepare an hdf5 file filename", "from the given input directory, and pack them into an hdf5 file that", "(options, args) = parser.parse_args() # Open and prepare an hdf5 file filename =", "for plate in all_plates: desc = \"plate number \" + str(plate) h5file.createGroup(\"/plates/\",str(plate),desc) #", "f.endswith(suffix): my_data = np.genfromtxt(f, delimiter=',', autostrip = True) atom = Atom.from_dtype(my_data.dtype) # slice", "= h5file.get_node(where=well_node) ds.append(objs) else: # no data from images belonging to this well", "from tables.file import File, openFile from tables import Filters from tables import Atom", "under 
\"/\" (root) plates_group = h5file.createGroup(\"/\", 'plates', 'the plates for this replicate') #", "Check that options are present, else print help msg parser = OptionParser() parser.add_option(\"-i\",", "objs = my_data[my_data[:,0] == img_num] well_group = \"/plates/\" + str(plate) well_node = \"/plates/\"", "this replicate') # Create a group for each plate for plate in all_plates:", "not read files from \" + input_dir # Read all the files, process", "\"image number not found in image to well map: \" + str(img_num) continue", "img_num in xrange(rec['Low'],rec['High'] + 1): well = (int(rec['Row']) - 1) * 24 +", "the suffix for data files\") parser.add_option(\"-d\", \"--dataframe\", dest=\"dataframe\", help=\"read a csv file describing", "of %d total\" % (f,i,len(files)) if f.endswith(suffix): my_data = np.genfromtxt(f, delimiter=',', autostrip =", "# no data from images belonging to this well have yet been dumped", "h5file = openFile(filename, mode = \"w\", title = \"Data File\") # Load the", "populate the lookup table of image number to (plate, well) for index, rec", "append this data to it. ds = h5file.get_node(where=well_node) ds.append(objs) else: # no data", "... 24. \"\"\" import os from optparse import OptionParser import pandas from tables.file", "this data file by grouped image numbers min_img, max_img = int(min(my_data[:,0])), int(max(my_data[:,0])) for", "been dumped into an EArray. ds = h5file.create_earray(where=well_group, name=str(well), atom=atom, shape=(0,my_data.shape[1]), filters=zlib_filters) ds.append(objs)", "lookup table of image number to (plate, well) for index, rec in df.iterrows():", "import Filters from tables import Atom import numpy as np # Check that", "\"plate number \" + str(plate) h5file.createGroup(\"/plates/\",str(plate),desc) # build a lookup of image number", "args) = parser.parse_args() # Open and prepare an hdf5 file filename = options.filename", "read the files, input_dir = options.indir suffix = options.suffix cur_dir = os.getcwd() try:", "number to plate, well img_to_pw = {} # populate the lookup table of", "map: \" + str(img_num) continue objs = my_data[my_data[:,0] == img_num] well_group = \"/plates/\"", "number not found in image to well map: \" + str(img_num) continue objs", "get the root root = h5file.root # Go and read the files, input_dir", "img_to_pw[img_num] = (rec['Plate'],well) # get the root root = h5file.root # Go and", "= img_to_pw[img_num] except KeyError as e: print \"image number not found in image", "img_num] well_group = \"/plates/\" + str(plate) well_node = \"/plates/\" + str(plate) + \"/\"", "= h5file.createGroup(\"/\", 'plates', 'the plates for this replicate') # Create a group for", "tables import Filters from tables import Atom import numpy as np # Check", "EArray. 
ds = h5file.create_earray(where=well_group, name=str(well), atom=atom, shape=(0,my_data.shape[1]), filters=zlib_filters) ds.append(objs) h5file.flush() os.chdir(cur_dir) print \"done!\"", "here\") parser.add_option(\"-o\", \"--filename\", dest=\"filename\", help=\"specify the .h5 filename that will contain all the", "data df = pandas.read_csv(options.dataframe) all_plates = set(df['Plate']) # Create a new group under", ".h5 filename that will contain all the data\") (options, args) = parser.parse_args() #", "a new group under \"/\" (root) plates_group = h5file.createGroup(\"/\", 'plates', 'the plates for", "zlib_filters = Filters(complib='zlib', complevel=5) for i,f in enumerate(files): if i % 10 ==", "%d files done of %d total\" % (f,i,len(files)) if f.endswith(suffix): my_data = np.genfromtxt(f,", "os.getcwd() try: files = os.listdir(input_dir) os.chdir(input_dir) except: print \"Could not read files from", "0: print \"processing %s, %d files done of %d total\" % (f,i,len(files)) if", "Rows go 1 ... 16, Cols 1 ... 24. \"\"\" import os from", "{} # populate the lookup table of image number to (plate, well) for", "% 10 == 0: print \"processing %s, %d files done of %d total\"", "present, else print help msg parser = OptionParser() parser.add_option(\"-i\", \"--input\", dest=\"indir\", help=\"read input", "Open and prepare an hdf5 file filename = options.filename h5file = openFile(filename, mode", "new group under \"/\" (root) plates_group = h5file.createGroup(\"/\", 'plates', 'the plates for this", "\"/\" + str(well) if h5file.__contains__(well_node): # some data for this well exists in", "and read the files, input_dir = options.indir suffix = options.suffix cur_dir = os.getcwd()", "True) atom = Atom.from_dtype(my_data.dtype) # slice this data file by grouped image numbers", "xrange(min_img,max_img+1): try: plate, well = img_to_pw[img_num] except KeyError as e: print \"image number", "all the data\") (options, args) = parser.parse_args() # Open and prepare an hdf5", "object.CSV files from the given input directory, and pack them into an hdf5", "go 1 .. 14. Rows go 1 ... 16, Cols 1 ... 24.", "well = img_to_pw[img_num] except KeyError as e: print \"image number not found in", "+ str(plate) well_node = \"/plates/\" + str(plate) + \"/\" + str(well) if h5file.__contains__(well_node):", "= \"/plates/\" + str(plate) well_node = \"/plates/\" + str(plate) + \"/\" + str(well)", "\" + str(plate) h5file.createGroup(\"/plates/\",str(plate),desc) # build a lookup of image number to plate,", "14. Rows go 1 ... 16, Cols 1 ... 24. 
\"\"\" import os", "image number to plate, well img_to_pw = {} # populate the lookup table", "h5file.get_node(where=well_node) ds.append(objs) else: # no data from images belonging to this well have", "build a lookup of image number to plate, well img_to_pw = {} #", "str(plate) h5file.createGroup(\"/plates/\",str(plate),desc) # build a lookup of image number to plate, well img_to_pw", "img_to_pw = {} # populate the lookup table of image number to (plate,", "OptionParser() parser.add_option(\"-i\", \"--input\", dest=\"indir\", help=\"read input from here\") parser.add_option(\"-s\", \"--suffix\", dest=\"suffix\", help=\"specify the", "help=\"specify the suffix for data files\") parser.add_option(\"-d\", \"--dataframe\", dest=\"dataframe\", help=\"read a csv file", "set(df['Plate']) # Create a new group under \"/\" (root) plates_group = h5file.createGroup(\"/\", 'plates',", "import os from optparse import OptionParser import pandas from tables.file import File, openFile", "csv file describing the data set here\") parser.add_option(\"-o\", \"--filename\", dest=\"filename\", help=\"specify the .h5", "files done of %d total\" % (f,i,len(files)) if f.endswith(suffix): my_data = np.genfromtxt(f, delimiter=',',", "found in image to well map: \" + str(img_num) continue objs = my_data[my_data[:,0]", "h5file.createGroup(\"/\", 'plates', 'the plates for this replicate') # Create a group for each", "files from \" + input_dir # Read all the files, process 'em. zlib_filters", "np # Check that options are present, else print help msg parser =", "1 .. 14. Rows go 1 ... 16, Cols 1 ... 24. \"\"\"", "belonging to this well have yet been dumped into an EArray. ds =", "\"Data File\") # Load the dataframe describing the layout of the experimental data", "numpy as np # Check that options are present, else print help msg", "each plate for plate in all_plates: desc = \"plate number \" + str(plate)", "file that is organized by plate.well Plates go 1 .. 14. Rows go", "filename = options.filename h5file = openFile(filename, mode = \"w\", title = \"Data File\")", "== 0: print \"processing %s, %d files done of %d total\" % (f,i,len(files))", "an hdf5 file filename = options.filename h5file = openFile(filename, mode = \"w\", title", "# populate the lookup table of image number to (plate, well) for index,", "from \" + input_dir # Read all the files, process 'em. zlib_filters =", "an EArray already, append this data to it. ds = h5file.get_node(where=well_node) ds.append(objs) else:", "rec in df.iterrows(): for img_num in xrange(rec['Low'],rec['High'] + 1): well = (int(rec['Row']) -", "file filename = options.filename h5file = openFile(filename, mode = \"w\", title = \"Data", "data files\") parser.add_option(\"-d\", \"--dataframe\", dest=\"dataframe\", help=\"read a csv file describing the data set", "import File, openFile from tables import Filters from tables import Atom import numpy", "'plates', 'the plates for this replicate') # Create a group for each plate", "suffix for data files\") parser.add_option(\"-d\", \"--dataframe\", dest=\"dataframe\", help=\"read a csv file describing the", "to it. ds = h5file.get_node(where=well_node) ds.append(objs) else: # no data from images belonging", "(rec['Plate'],well) # get the root root = h5file.root # Go and read the", "to this well have yet been dumped into an EArray. 
ds = h5file.create_earray(where=well_group,", "openFile from tables import Filters from tables import Atom import numpy as np", "a group for each plate for plate in all_plates: desc = \"plate number", "them into an hdf5 file that is organized by plate.well Plates go 1", "except: print \"Could not read files from \" + input_dir # Read all", "# slice this data file by grouped image numbers min_img, max_img = int(min(my_data[:,0])),", "Atom.from_dtype(my_data.dtype) # slice this data file by grouped image numbers min_img, max_img =", "image numbers min_img, max_img = int(min(my_data[:,0])), int(max(my_data[:,0])) for img_num in xrange(min_img,max_img+1): try: plate,", "os from optparse import OptionParser import pandas from tables.file import File, openFile from", "as np # Check that options are present, else print help msg parser", "read files from \" + input_dir # Read all the files, process 'em.", "and pack them into an hdf5 file that is organized by plate.well Plates", "dest=\"indir\", help=\"read input from here\") parser.add_option(\"-s\", \"--suffix\", dest=\"suffix\", help=\"specify the suffix for data", "filename that will contain all the data\") (options, args) = parser.parse_args() # Open", "all the files, process 'em. zlib_filters = Filters(complib='zlib', complevel=5) for i,f in enumerate(files):", "h5file.root # Go and read the files, input_dir = options.indir suffix = options.suffix", "all the object.CSV files from the given input directory, and pack them into", "this well exists in an EArray already, append this data to it. ds", "up all the object.CSV files from the given input directory, and pack them", "the root root = h5file.root # Go and read the files, input_dir =", "= pandas.read_csv(options.dataframe) all_plates = set(df['Plate']) # Create a new group under \"/\" (root)", "24. \"\"\" import os from optparse import OptionParser import pandas from tables.file import", "that will contain all the data\") (options, args) = parser.parse_args() # Open and", "h5file.createGroup(\"/plates/\",str(plate),desc) # build a lookup of image number to plate, well img_to_pw =", "h5file.__contains__(well_node): # some data for this well exists in an EArray already, append", "plates for this replicate') # Create a group for each plate for plate", "continue objs = my_data[my_data[:,0] == img_num] well_group = \"/plates/\" + str(plate) well_node =", "options.indir suffix = options.suffix cur_dir = os.getcwd() try: files = os.listdir(input_dir) os.chdir(input_dir) except:", "files, process 'em. 
zlib_filters = Filters(complib='zlib', complevel=5) for i,f in enumerate(files): if i", "files, input_dir = options.indir suffix = options.suffix cur_dir = os.getcwd() try: files =", "File, openFile from tables import Filters from tables import Atom import numpy as", "= Atom.from_dtype(my_data.dtype) # slice this data file by grouped image numbers min_img, max_img", "i % 10 == 0: print \"processing %s, %d files done of %d", "OptionParser import pandas from tables.file import File, openFile from tables import Filters from", "for data files\") parser.add_option(\"-d\", \"--dataframe\", dest=\"dataframe\", help=\"read a csv file describing the data", "+ str(well) if h5file.__contains__(well_node): # some data for this well exists in an", "+ 1): well = (int(rec['Row']) - 1) * 24 + int(rec['Col']) img_to_pw[img_num] =", "are present, else print help msg parser = OptionParser() parser.add_option(\"-i\", \"--input\", dest=\"indir\", help=\"read", "= True) atom = Atom.from_dtype(my_data.dtype) # slice this data file by grouped image", "\"--dataframe\", dest=\"dataframe\", help=\"read a csv file describing the data set here\") parser.add_option(\"-o\", \"--filename\",", "= os.getcwd() try: files = os.listdir(input_dir) os.chdir(input_dir) except: print \"Could not read files", "of image number to (plate, well) for index, rec in df.iterrows(): for img_num", "pandas.read_csv(options.dataframe) all_plates = set(df['Plate']) # Create a new group under \"/\" (root) plates_group", "to well map: \" + str(img_num) continue objs = my_data[my_data[:,0] == img_num] well_group", "the .h5 filename that will contain all the data\") (options, args) = parser.parse_args()", "df = pandas.read_csv(options.dataframe) all_plates = set(df['Plate']) # Create a new group under \"/\"", "/usr/bin/env python \"\"\" Vacuum up all the object.CSV files from the given input", "data to it. ds = h5file.get_node(where=well_node) ds.append(objs) else: # no data from images", "given input directory, and pack them into an hdf5 file that is organized", "* 24 + int(rec['Col']) img_to_pw[img_num] = (rec['Plate'],well) # get the root root =", "print \"processing %s, %d files done of %d total\" % (f,i,len(files)) if f.endswith(suffix):", "int(max(my_data[:,0])) for img_num in xrange(min_img,max_img+1): try: plate, well = img_to_pw[img_num] except KeyError as", "1) * 24 + int(rec['Col']) img_to_pw[img_num] = (rec['Plate'],well) # get the root root", "describing the data set here\") parser.add_option(\"-o\", \"--filename\", dest=\"filename\", help=\"specify the .h5 filename that", "have yet been dumped into an EArray. 
ds = h5file.create_earray(where=well_group, name=str(well), atom=atom, shape=(0,my_data.shape[1]),", "numbers min_img, max_img = int(min(my_data[:,0])), int(max(my_data[:,0])) for img_num in xrange(min_img,max_img+1): try: plate, well", "atom = Atom.from_dtype(my_data.dtype) # slice this data file by grouped image numbers min_img,", "to (plate, well) for index, rec in df.iterrows(): for img_num in xrange(rec['Low'],rec['High'] +", "tables.file import File, openFile from tables import Filters from tables import Atom import", "some data for this well exists in an EArray already, append this data", "group under \"/\" (root) plates_group = h5file.createGroup(\"/\", 'plates', 'the plates for this replicate')", "print \"Could not read files from \" + input_dir # Read all the", "\"/\" (root) plates_group = h5file.createGroup(\"/\", 'plates', 'the plates for this replicate') # Create", "experimental data df = pandas.read_csv(options.dataframe) all_plates = set(df['Plate']) # Create a new group", "layout of the experimental data df = pandas.read_csv(options.dataframe) all_plates = set(df['Plate']) # Create", "str(well) if h5file.__contains__(well_node): # some data for this well exists in an EArray", "prepare an hdf5 file filename = options.filename h5file = openFile(filename, mode = \"w\",", "is organized by plate.well Plates go 1 .. 14. Rows go 1 ...", "str(img_num) continue objs = my_data[my_data[:,0] == img_num] well_group = \"/plates/\" + str(plate) well_node", "input directory, and pack them into an hdf5 file that is organized by", "exists in an EArray already, append this data to it. ds = h5file.get_node(where=well_node)", "... 16, Cols 1 ... 24. \"\"\" import os from optparse import OptionParser", "from tables import Filters from tables import Atom import numpy as np #", "os.listdir(input_dir) os.chdir(input_dir) except: print \"Could not read files from \" + input_dir #", "Cols 1 ... 24. \"\"\" import os from optparse import OptionParser import pandas", "help=\"read a csv file describing the data set here\") parser.add_option(\"-o\", \"--filename\", dest=\"filename\", help=\"specify", "- 1) * 24 + int(rec['Col']) img_to_pw[img_num] = (rec['Plate'],well) # get the root", "else print help msg parser = OptionParser() parser.add_option(\"-i\", \"--input\", dest=\"indir\", help=\"read input from", "by plate.well Plates go 1 .. 14. Rows go 1 ... 
16, Cols", "= os.listdir(input_dir) os.chdir(input_dir) except: print \"Could not read files from \" + input_dir", "Vacuum up all the object.CSV files from the given input directory, and pack", "'the plates for this replicate') # Create a group for each plate for", "if h5file.__contains__(well_node): # some data for this well exists in an EArray already,", "all_plates: desc = \"plate number \" + str(plate) h5file.createGroup(\"/plates/\",str(plate),desc) # build a lookup", "enumerate(files): if i % 10 == 0: print \"processing %s, %d files done", "all_plates = set(df['Plate']) # Create a new group under \"/\" (root) plates_group =", "the dataframe describing the layout of the experimental data df = pandas.read_csv(options.dataframe) all_plates", "Filters(complib='zlib', complevel=5) for i,f in enumerate(files): if i % 10 == 0: print", "img_to_pw[img_num] except KeyError as e: print \"image number not found in image to", "directory, and pack them into an hdf5 file that is organized by plate.well", "replicate') # Create a group for each plate for plate in all_plates: desc", "# Check that options are present, else print help msg parser = OptionParser()", "dumped into an EArray. ds = h5file.create_earray(where=well_group, name=str(well), atom=atom, shape=(0,my_data.shape[1]), filters=zlib_filters) ds.append(objs) h5file.flush()", "ds = h5file.create_earray(where=well_group, name=str(well), atom=atom, shape=(0,my_data.shape[1]), filters=zlib_filters) ds.append(objs) h5file.flush() os.chdir(cur_dir) print \"done!\" h5file.close()", "to plate, well img_to_pw = {} # populate the lookup table of image", "+ str(plate) + \"/\" + str(well) if h5file.__contains__(well_node): # some data for this", "the data set here\") parser.add_option(\"-o\", \"--filename\", dest=\"filename\", help=\"specify the .h5 filename that will", "# some data for this well exists in an EArray already, append this", "root = h5file.root # Go and read the files, input_dir = options.indir suffix", "+ input_dir # Read all the files, process 'em. zlib_filters = Filters(complib='zlib', complevel=5)", "Load the dataframe describing the layout of the experimental data df = pandas.read_csv(options.dataframe)", "msg parser = OptionParser() parser.add_option(\"-i\", \"--input\", dest=\"indir\", help=\"read input from here\") parser.add_option(\"-s\", \"--suffix\",", "into an hdf5 file that is organized by plate.well Plates go 1 ..", "help msg parser = OptionParser() parser.add_option(\"-i\", \"--input\", dest=\"indir\", help=\"read input from here\") parser.add_option(\"-s\",", "= options.filename h5file = openFile(filename, mode = \"w\", title = \"Data File\") #", "(f,i,len(files)) if f.endswith(suffix): my_data = np.genfromtxt(f, delimiter=',', autostrip = True) atom = Atom.from_dtype(my_data.dtype)", "options.suffix cur_dir = os.getcwd() try: files = os.listdir(input_dir) os.chdir(input_dir) except: print \"Could not", "it. ds = h5file.get_node(where=well_node) ds.append(objs) else: # no data from images belonging to", "yet been dumped into an EArray. 
ds = h5file.create_earray(where=well_group, name=str(well), atom=atom, shape=(0,my_data.shape[1]), filters=zlib_filters)", "the files, input_dir = options.indir suffix = options.suffix cur_dir = os.getcwd() try: files", "for this well exists in an EArray already, append this data to it.", "= int(min(my_data[:,0])), int(max(my_data[:,0])) for img_num in xrange(min_img,max_img+1): try: plate, well = img_to_pw[img_num] except", "i,f in enumerate(files): if i % 10 == 0: print \"processing %s, %d", "= h5file.root # Go and read the files, input_dir = options.indir suffix =", "options.filename h5file = openFile(filename, mode = \"w\", title = \"Data File\") # Load", "+ \"/\" + str(well) if h5file.__contains__(well_node): # some data for this well exists", "try: plate, well = img_to_pw[img_num] except KeyError as e: print \"image number not", "group for each plate for plate in all_plates: desc = \"plate number \"", "= \"plate number \" + str(plate) h5file.createGroup(\"/plates/\",str(plate),desc) # build a lookup of image", "max_img = int(min(my_data[:,0])), int(max(my_data[:,0])) for img_num in xrange(min_img,max_img+1): try: plate, well = img_to_pw[img_num]", "except KeyError as e: print \"image number not found in image to well", ".. 14. Rows go 1 ... 16, Cols 1 ... 24. \"\"\" import", "for each plate for plate in all_plates: desc = \"plate number \" +", "= \"/plates/\" + str(plate) + \"/\" + str(well) if h5file.__contains__(well_node): # some data", "suffix = options.suffix cur_dir = os.getcwd() try: files = os.listdir(input_dir) os.chdir(input_dir) except: print", "a lookup of image number to plate, well img_to_pw = {} # populate", "delimiter=',', autostrip = True) atom = Atom.from_dtype(my_data.dtype) # slice this data file by", "well) for index, rec in df.iterrows(): for img_num in xrange(rec['Low'],rec['High'] + 1): well", "16, Cols 1 ... 24. \"\"\" import os from optparse import OptionParser import", "= my_data[my_data[:,0] == img_num] well_group = \"/plates/\" + str(plate) well_node = \"/plates/\" +", "parser.parse_args() # Open and prepare an hdf5 file filename = options.filename h5file =", "lookup of image number to plate, well img_to_pw = {} # populate the", "file by grouped image numbers min_img, max_img = int(min(my_data[:,0])), int(max(my_data[:,0])) for img_num in", "int(min(my_data[:,0])), int(max(my_data[:,0])) for img_num in xrange(min_img,max_img+1): try: plate, well = img_to_pw[img_num] except KeyError", "= \"Data File\") # Load the dataframe describing the layout of the experimental", "for i,f in enumerate(files): if i % 10 == 0: print \"processing %s," ]
[ "method in smoothing should accept (im,**args) def median(im, **kwargs): from scipy import ndimage", "as np xic_q = np.percentile(xic, q) xic[xic > xic_q] = xic_q return xic", "ndimage.filters.median_filter(im,**kwargs) return im def hot_spot_removal(xic, q=99.): import numpy as np xic_q = np.percentile(xic,", "hot_spot_removal(xic, q=99.): import numpy as np xic_q = np.percentile(xic, q) xic[xic > xic_q]", "ndimage im = ndimage.filters.median_filter(im,**kwargs) return im def hot_spot_removal(xic, q=99.): import numpy as np", "should accept (im,**args) def median(im, **kwargs): from scipy import ndimage im = ndimage.filters.median_filter(im,**kwargs)", "import ndimage im = ndimage.filters.median_filter(im,**kwargs) return im def hot_spot_removal(xic, q=99.): import numpy as", "q=99.): import numpy as np xic_q = np.percentile(xic, q) xic[xic > xic_q] =", "import numpy as np xic_q = np.percentile(xic, q) xic[xic > xic_q] = xic_q", "every method in smoothing should accept (im,**args) def median(im, **kwargs): from scipy import", "def median(im, **kwargs): from scipy import ndimage im = ndimage.filters.median_filter(im,**kwargs) return im def", "'palmer' # every method in smoothing should accept (im,**args) def median(im, **kwargs): from", "im def hot_spot_removal(xic, q=99.): import numpy as np xic_q = np.percentile(xic, q) xic[xic", "def hot_spot_removal(xic, q=99.): import numpy as np xic_q = np.percentile(xic, q) xic[xic >", "smoothing should accept (im,**args) def median(im, **kwargs): from scipy import ndimage im =", "# every method in smoothing should accept (im,**args) def median(im, **kwargs): from scipy", "**kwargs): from scipy import ndimage im = ndimage.filters.median_filter(im,**kwargs) return im def hot_spot_removal(xic, q=99.):", "in smoothing should accept (im,**args) def median(im, **kwargs): from scipy import ndimage im", "return im def hot_spot_removal(xic, q=99.): import numpy as np xic_q = np.percentile(xic, q)", "numpy as np xic_q = np.percentile(xic, q) xic[xic > xic_q] = xic_q return", "from scipy import ndimage im = ndimage.filters.median_filter(im,**kwargs) return im def hot_spot_removal(xic, q=99.): import", "= 'palmer' # every method in smoothing should accept (im,**args) def median(im, **kwargs):", "accept (im,**args) def median(im, **kwargs): from scipy import ndimage im = ndimage.filters.median_filter(im,**kwargs) return", "im = ndimage.filters.median_filter(im,**kwargs) return im def hot_spot_removal(xic, q=99.): import numpy as np xic_q", "median(im, **kwargs): from scipy import ndimage im = ndimage.filters.median_filter(im,**kwargs) return im def hot_spot_removal(xic,", "__author__ = 'palmer' # every method in smoothing should accept (im,**args) def median(im,", "(im,**args) def median(im, **kwargs): from scipy import ndimage im = ndimage.filters.median_filter(im,**kwargs) return im", "scipy import ndimage im = ndimage.filters.median_filter(im,**kwargs) return im def hot_spot_removal(xic, q=99.): import numpy", "= ndimage.filters.median_filter(im,**kwargs) return im def hot_spot_removal(xic, q=99.): import numpy as np xic_q =" ]
[ "aldryn_apphooks_config.utils import get_app_instance def apphooks_config(request): namespace, config = get_app_instance(request) return { 'namespace': namespace,", "-*- from __future__ import unicode_literals from aldryn_apphooks_config.utils import get_app_instance def apphooks_config(request): namespace, config", "import get_app_instance def apphooks_config(request): namespace, config = get_app_instance(request) return { 'namespace': namespace, 'config':", "unicode_literals from aldryn_apphooks_config.utils import get_app_instance def apphooks_config(request): namespace, config = get_app_instance(request) return {", "# -*- coding: utf-8 -*- from __future__ import unicode_literals from aldryn_apphooks_config.utils import get_app_instance", "from aldryn_apphooks_config.utils import get_app_instance def apphooks_config(request): namespace, config = get_app_instance(request) return { 'namespace':", "__future__ import unicode_literals from aldryn_apphooks_config.utils import get_app_instance def apphooks_config(request): namespace, config = get_app_instance(request)", "get_app_instance def apphooks_config(request): namespace, config = get_app_instance(request) return { 'namespace': namespace, 'config': config,", "utf-8 -*- from __future__ import unicode_literals from aldryn_apphooks_config.utils import get_app_instance def apphooks_config(request): namespace,", "def apphooks_config(request): namespace, config = get_app_instance(request) return { 'namespace': namespace, 'config': config, }", "from __future__ import unicode_literals from aldryn_apphooks_config.utils import get_app_instance def apphooks_config(request): namespace, config =", "-*- coding: utf-8 -*- from __future__ import unicode_literals from aldryn_apphooks_config.utils import get_app_instance def", "coding: utf-8 -*- from __future__ import unicode_literals from aldryn_apphooks_config.utils import get_app_instance def apphooks_config(request):", "import unicode_literals from aldryn_apphooks_config.utils import get_app_instance def apphooks_config(request): namespace, config = get_app_instance(request) return" ]
[ "\"c\": Decimal('1200000.0000021'), } output = json.dumps(test_input, cls=MultiJSONEncoder) assert output == '{\"a\": 3.9, \"b\":", "age): self.name = name self.age = age class TestMultiJSONEncoder: def test_dump_uuid(self): test_input =", "__init__(self, name, age): self.name = name self.age = age class TestMultiJSONEncoder: def test_dump_uuid(self):", "test_dump_uuid(self): test_input = { \"id\": uuid.uuid4(), } output = json.dumps(test_input, cls=MultiJSONEncoder) result =", "= json.loads(output) assert \"date\" in result assert result[\"date\"] == \"2017-07-01\" def test_dump_datetime(self): test_input", "uuid import datetime import pytest from decimal import Decimal from multijson import MultiJSONEncoder", "assert \"date\" in result assert result[\"date\"] == \"2017-07-01\" def test_dump_datetime(self): test_input = {", "assert result[\"id\"] == str(test_input[\"id\"]) def test_dump_date(self): test_input = { \"date\": datetime.date(2017, 7, 1),", "class TestMultiJSONEncoder: def test_dump_uuid(self): test_input = { \"id\": uuid.uuid4(), } output = json.dumps(test_input,", "test_input = { \"date\": datetime.date(2017, 7, 1), } output = json.dumps(test_input, cls=MultiJSONEncoder) result", "json.loads(output) assert \"time\" in result assert result[\"time\"] == \"2017-07-01T23:11:11Z\" def test_dump_decimal(self): test_input =", "} output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"time\" in result assert", "assert output == '{\"a\": 3.9, \"b\": 0.0003, \"c\": 1200000.0000021}' def test_dump_custom_object(self): test_input =", "== \"2017-07-01\" def test_dump_datetime(self): test_input = { \"time\": datetime.datetime(2017, 7, 1, 23, 11,", "} output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"id\" in result assert", "output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"id\" in result assert result[\"id\"]", "class Custom: def __init__(self, name, age): self.name = name self.age = age class", "= name self.age = age class TestMultiJSONEncoder: def test_dump_uuid(self): test_input = { \"id\":", "json.dumps(test_input, cls=MultiJSONEncoder) assert output == '{\"a\": 3.9, \"b\": 0.0003, \"c\": 1200000.0000021}' def test_dump_custom_object(self):", "\"time\" in result assert result[\"time\"] == \"2017-07-01T23:11:11Z\" def test_dump_decimal(self): test_input = { \"a\":", "from multijson import MultiJSONEncoder class Custom: def __init__(self, name, age): self.name = name", "test_dump_date(self): test_input = { \"date\": datetime.date(2017, 7, 1), } output = json.dumps(test_input, cls=MultiJSONEncoder)", "import Decimal from multijson import MultiJSONEncoder class Custom: def __init__(self, name, age): self.name", "output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"time\" in result assert result[\"time\"]", "== str(test_input[\"id\"]) def test_dump_date(self): test_input = { \"date\": datetime.date(2017, 7, 1), } output", "json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"date\" in result assert result[\"date\"] == \"2017-07-01\"", "3.9, \"b\": 0.0003, \"c\": 1200000.0000021}' def test_dump_custom_object(self): test_input = { \"custom\": Custom(\"Rincewind\", 120),", "0.0003, \"c\": 1200000.0000021}' def test_dump_custom_object(self): test_input = { \"custom\": Custom(\"Rincewind\", 120), } with", "<gh_stars>1-10 import json import uuid import datetime import pytest from decimal import Decimal", "11), } 
output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"time\" in result", "Decimal('1200000.0000021'), } output = json.dumps(test_input, cls=MultiJSONEncoder) assert output == '{\"a\": 3.9, \"b\": 0.0003,", "pytest from decimal import Decimal from multijson import MultiJSONEncoder class Custom: def __init__(self,", "import json import uuid import datetime import pytest from decimal import Decimal from", "import MultiJSONEncoder class Custom: def __init__(self, name, age): self.name = name self.age =", "7, 1, 23, 11, 11), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output)", "import pytest from decimal import Decimal from multijson import MultiJSONEncoder class Custom: def", "test_dump_decimal(self): test_input = { \"a\": Decimal('3.9'), \"b\": Decimal('0.0003'), \"c\": Decimal('1200000.0000021'), } output =", "== '{\"a\": 3.9, \"b\": 0.0003, \"c\": 1200000.0000021}' def test_dump_custom_object(self): test_input = { \"custom\":", "= { \"id\": uuid.uuid4(), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert", "output = json.dumps(test_input, cls=MultiJSONEncoder) assert output == '{\"a\": 3.9, \"b\": 0.0003, \"c\": 1200000.0000021}'", "cls=MultiJSONEncoder) assert output == '{\"a\": 3.9, \"b\": 0.0003, \"c\": 1200000.0000021}' def test_dump_custom_object(self): test_input", "} output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"date\" in result assert", "{ \"a\": Decimal('3.9'), \"b\": Decimal('0.0003'), \"c\": Decimal('1200000.0000021'), } output = json.dumps(test_input, cls=MultiJSONEncoder) assert", "result assert result[\"id\"] == str(test_input[\"id\"]) def test_dump_date(self): test_input = { \"date\": datetime.date(2017, 7,", "= json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"id\" in result assert result[\"id\"] ==", "Decimal from multijson import MultiJSONEncoder class Custom: def __init__(self, name, age): self.name =", "test_dump_datetime(self): test_input = { \"time\": datetime.datetime(2017, 7, 1, 23, 11, 11), } output", "from decimal import Decimal from multijson import MultiJSONEncoder class Custom: def __init__(self, name,", "in result assert result[\"time\"] == \"2017-07-01T23:11:11Z\" def test_dump_decimal(self): test_input = { \"a\": Decimal('3.9'),", "def test_dump_custom_object(self): test_input = { \"custom\": Custom(\"Rincewind\", 120), } with pytest.raises(TypeError): json.dumps(test_input, cls=MultiJSONEncoder)", "datetime.date(2017, 7, 1), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"date\"", "{ \"time\": datetime.datetime(2017, 7, 1, 23, 11, 11), } output = json.dumps(test_input, cls=MultiJSONEncoder)", "uuid.uuid4(), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"id\" in result", "11, 11), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"time\" in", "assert \"time\" in result assert result[\"time\"] == \"2017-07-01T23:11:11Z\" def test_dump_decimal(self): test_input = {", "self.name = name self.age = age class TestMultiJSONEncoder: def test_dump_uuid(self): test_input = {", "name, age): self.name = name self.age = age class TestMultiJSONEncoder: def test_dump_uuid(self): test_input", "result[\"time\"] == \"2017-07-01T23:11:11Z\" def test_dump_decimal(self): test_input = { \"a\": Decimal('3.9'), \"b\": Decimal('0.0003'), \"c\":", "\"date\": 
datetime.date(2017, 7, 1), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert", "= json.dumps(test_input, cls=MultiJSONEncoder) assert output == '{\"a\": 3.9, \"b\": 0.0003, \"c\": 1200000.0000021}' def", "age class TestMultiJSONEncoder: def test_dump_uuid(self): test_input = { \"id\": uuid.uuid4(), } output =", "{ \"date\": datetime.date(2017, 7, 1), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output)", "test_input = { \"a\": Decimal('3.9'), \"b\": Decimal('0.0003'), \"c\": Decimal('1200000.0000021'), } output = json.dumps(test_input,", "name self.age = age class TestMultiJSONEncoder: def test_dump_uuid(self): test_input = { \"id\": uuid.uuid4(),", "7, 1), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"date\" in", "= json.loads(output) assert \"time\" in result assert result[\"time\"] == \"2017-07-01T23:11:11Z\" def test_dump_decimal(self): test_input", "output == '{\"a\": 3.9, \"b\": 0.0003, \"c\": 1200000.0000021}' def test_dump_custom_object(self): test_input = {", "= { \"a\": Decimal('3.9'), \"b\": Decimal('0.0003'), \"c\": Decimal('1200000.0000021'), } output = json.dumps(test_input, cls=MultiJSONEncoder)", "datetime import pytest from decimal import Decimal from multijson import MultiJSONEncoder class Custom:", "== \"2017-07-01T23:11:11Z\" def test_dump_decimal(self): test_input = { \"a\": Decimal('3.9'), \"b\": Decimal('0.0003'), \"c\": Decimal('1200000.0000021'),", "json.loads(output) assert \"date\" in result assert result[\"date\"] == \"2017-07-01\" def test_dump_datetime(self): test_input =", "{ \"id\": uuid.uuid4(), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"id\"", "output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"date\" in result assert result[\"date\"]", "= json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"date\" in result assert result[\"date\"] ==", "test_input = { \"time\": datetime.datetime(2017, 7, 1, 23, 11, 11), } output =", "1, 23, 11, 11), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert", "assert result[\"time\"] == \"2017-07-01T23:11:11Z\" def test_dump_decimal(self): test_input = { \"a\": Decimal('3.9'), \"b\": Decimal('0.0003'),", "\"time\": datetime.datetime(2017, 7, 1, 23, 11, 11), } output = json.dumps(test_input, cls=MultiJSONEncoder) result", "json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"time\" in result assert result[\"time\"] == \"2017-07-01T23:11:11Z\"", "def test_dump_uuid(self): test_input = { \"id\": uuid.uuid4(), } output = json.dumps(test_input, cls=MultiJSONEncoder) result", "result[\"date\"] == \"2017-07-01\" def test_dump_datetime(self): test_input = { \"time\": datetime.datetime(2017, 7, 1, 23,", "in result assert result[\"id\"] == str(test_input[\"id\"]) def test_dump_date(self): test_input = { \"date\": datetime.date(2017,", "\"2017-07-01\" def test_dump_datetime(self): test_input = { \"time\": datetime.datetime(2017, 7, 1, 23, 11, 11),", "Decimal('0.0003'), \"c\": Decimal('1200000.0000021'), } output = json.dumps(test_input, cls=MultiJSONEncoder) assert output == '{\"a\": 3.9,", "def test_dump_decimal(self): test_input = { \"a\": Decimal('3.9'), \"b\": Decimal('0.0003'), \"c\": Decimal('1200000.0000021'), } output", "= { \"date\": datetime.date(2017, 7, 1), } output = json.dumps(test_input, cls=MultiJSONEncoder) result =", 
"test_input = { \"id\": uuid.uuid4(), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output)", "in result assert result[\"date\"] == \"2017-07-01\" def test_dump_datetime(self): test_input = { \"time\": datetime.datetime(2017,", "import uuid import datetime import pytest from decimal import Decimal from multijson import", "cls=MultiJSONEncoder) result = json.loads(output) assert \"date\" in result assert result[\"date\"] == \"2017-07-01\" def", "json import uuid import datetime import pytest from decimal import Decimal from multijson", "def __init__(self, name, age): self.name = name self.age = age class TestMultiJSONEncoder: def", "Custom: def __init__(self, name, age): self.name = name self.age = age class TestMultiJSONEncoder:", "\"id\": uuid.uuid4(), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"id\" in", "Decimal('3.9'), \"b\": Decimal('0.0003'), \"c\": Decimal('1200000.0000021'), } output = json.dumps(test_input, cls=MultiJSONEncoder) assert output ==", "\"b\": 0.0003, \"c\": 1200000.0000021}' def test_dump_custom_object(self): test_input = { \"custom\": Custom(\"Rincewind\", 120), }", "result assert result[\"date\"] == \"2017-07-01\" def test_dump_datetime(self): test_input = { \"time\": datetime.datetime(2017, 7,", "result = json.loads(output) assert \"date\" in result assert result[\"date\"] == \"2017-07-01\" def test_dump_datetime(self):", "} output = json.dumps(test_input, cls=MultiJSONEncoder) assert output == '{\"a\": 3.9, \"b\": 0.0003, \"c\":", "\"b\": Decimal('0.0003'), \"c\": Decimal('1200000.0000021'), } output = json.dumps(test_input, cls=MultiJSONEncoder) assert output == '{\"a\":", "= json.loads(output) assert \"id\" in result assert result[\"id\"] == str(test_input[\"id\"]) def test_dump_date(self): test_input", "cls=MultiJSONEncoder) result = json.loads(output) assert \"time\" in result assert result[\"time\"] == \"2017-07-01T23:11:11Z\" def", "decimal import Decimal from multijson import MultiJSONEncoder class Custom: def __init__(self, name, age):", "\"2017-07-01T23:11:11Z\" def test_dump_decimal(self): test_input = { \"a\": Decimal('3.9'), \"b\": Decimal('0.0003'), \"c\": Decimal('1200000.0000021'), }", "result = json.loads(output) assert \"time\" in result assert result[\"time\"] == \"2017-07-01T23:11:11Z\" def test_dump_decimal(self):", "result[\"id\"] == str(test_input[\"id\"]) def test_dump_date(self): test_input = { \"date\": datetime.date(2017, 7, 1), }", "MultiJSONEncoder class Custom: def __init__(self, name, age): self.name = name self.age = age", "assert result[\"date\"] == \"2017-07-01\" def test_dump_datetime(self): test_input = { \"time\": datetime.datetime(2017, 7, 1,", "1200000.0000021}' def test_dump_custom_object(self): test_input = { \"custom\": Custom(\"Rincewind\", 120), } with pytest.raises(TypeError): json.dumps(test_input,", "'{\"a\": 3.9, \"b\": 0.0003, \"c\": 1200000.0000021}' def test_dump_custom_object(self): test_input = { \"custom\": Custom(\"Rincewind\",", "= age class TestMultiJSONEncoder: def test_dump_uuid(self): test_input = { \"id\": uuid.uuid4(), } output", "result assert result[\"time\"] == \"2017-07-01T23:11:11Z\" def test_dump_decimal(self): test_input = { \"a\": Decimal('3.9'), \"b\":", "23, 11, 11), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"time\"", "multijson import MultiJSONEncoder class Custom: def __init__(self, name, age): self.name = name self.age", "json.loads(output) assert \"id\" in result 
assert result[\"id\"] == str(test_input[\"id\"]) def test_dump_date(self): test_input =", "= { \"time\": datetime.datetime(2017, 7, 1, 23, 11, 11), } output = json.dumps(test_input,", "datetime.datetime(2017, 7, 1, 23, 11, 11), } output = json.dumps(test_input, cls=MultiJSONEncoder) result =", "\"date\" in result assert result[\"date\"] == \"2017-07-01\" def test_dump_datetime(self): test_input = { \"time\":", "str(test_input[\"id\"]) def test_dump_date(self): test_input = { \"date\": datetime.date(2017, 7, 1), } output =", "cls=MultiJSONEncoder) result = json.loads(output) assert \"id\" in result assert result[\"id\"] == str(test_input[\"id\"]) def", "= json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"time\" in result assert result[\"time\"] ==", "result = json.loads(output) assert \"id\" in result assert result[\"id\"] == str(test_input[\"id\"]) def test_dump_date(self):", "def test_dump_date(self): test_input = { \"date\": datetime.date(2017, 7, 1), } output = json.dumps(test_input,", "\"c\": 1200000.0000021}' def test_dump_custom_object(self): test_input = { \"custom\": Custom(\"Rincewind\", 120), } with pytest.raises(TypeError):", "1), } output = json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"date\" in result", "\"id\" in result assert result[\"id\"] == str(test_input[\"id\"]) def test_dump_date(self): test_input = { \"date\":", "\"a\": Decimal('3.9'), \"b\": Decimal('0.0003'), \"c\": Decimal('1200000.0000021'), } output = json.dumps(test_input, cls=MultiJSONEncoder) assert output", "TestMultiJSONEncoder: def test_dump_uuid(self): test_input = { \"id\": uuid.uuid4(), } output = json.dumps(test_input, cls=MultiJSONEncoder)", "self.age = age class TestMultiJSONEncoder: def test_dump_uuid(self): test_input = { \"id\": uuid.uuid4(), }", "json.dumps(test_input, cls=MultiJSONEncoder) result = json.loads(output) assert \"id\" in result assert result[\"id\"] == str(test_input[\"id\"])", "def test_dump_datetime(self): test_input = { \"time\": datetime.datetime(2017, 7, 1, 23, 11, 11), }", "import datetime import pytest from decimal import Decimal from multijson import MultiJSONEncoder class", "assert \"id\" in result assert result[\"id\"] == str(test_input[\"id\"]) def test_dump_date(self): test_input = {" ]
[ "GetTradingday(object): def __init__(self, _start, _end, _symbol='a'): self.start = _start self.end = _end self.symbol", "yesterday = tradingday_list[index-1] domian_instrument = fetcher.fetchDominant(symbol, day) data = fetcher.fetchDayData(yesterday, day, domian_instrument) pricelist.append(data['tradingday'][0])", "import (BIAS, CCI, EFF, EMA, KDJ, MA, RSI, SAR, BBands, StepDrawdownStop) from ParadoxTrading.Utils", "import pickle import shutil import numpy as np import pandas as pd from", "pandas as pd from ParadoxTrading.Chart import Wizard from ParadoxTrading.Engine import (MarketEvent, SettlementEvent, StrategyAbstract)", "'20150101' end = '20180428' get_tradingday = GetTradingday(start, end) tradingday_list = get_tradingday.gettradingday() symbollist =", "'192.168.4.103' def gettradingday(self): market_data = self.fetcherindex.fetchDayData( self.start, self.end, self.symbol) tradingday_list = market_data['tradingday'] return", "import multiprocessing as mp import os import pickle import shutil import numpy as", "from ParadoxTrading.Chart import Wizard from ParadoxTrading.Engine import (MarketEvent, SettlementEvent, StrategyAbstract) from ParadoxTrading.EngineExt.Futures import", "= self.fetcherindex.fetchDayData( self.start, self.end, self.symbol) tradingday_list = market_data['tradingday'] return tradingday_list fetcher = FetchInstrumentDayData()", "pd.DataFrame() for index in range(1, len(tradingday_list)): pricelist = [] day = tradingday_list[index] yesterday", "get_tradingday = GetTradingday(start, end) tradingday_list = get_tradingday.gettradingday() symbollist = ['oi', 'y'] for symbol", "SAR, BBands, StepDrawdownStop) from ParadoxTrading.Utils import DataStruct class GetTradingday(object): def __init__(self, _start, _end,", "= _end self.symbol = _symbol self.fetcherindex = FetchProductIndex() self.fetcherindex.psql_host = '192.168.4.103' self.fetcherindex.psql_user =", "return tradingday_list fetcher = FetchInstrumentDayData() fetcher.psql_host = '192.168.4.103' fetcher.psql_user = 'ubuntu' fetcher.mongo_host =", "np import pandas as pd from ParadoxTrading.Chart import Wizard from ParadoxTrading.Engine import (MarketEvent,", "self.fetcherindex.psql_user = 'ubuntu' self.fetcherindex.mongo_host = '192.168.4.103' def gettradingday(self): market_data = self.fetcherindex.fetchDayData( self.start, self.end,", "(BacktestEngine, BacktestMarketSupply, CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, CTAEqualRiskGARCHPortfolio, InterDayBacktestExecution, InterDayPortfolio) from ParadoxTrading.EngineExt.Futures.Trend import CTAStatusType, CTAStrategy from", "= _start self.end = _end self.symbol = _symbol self.fetcherindex = FetchProductIndex() self.fetcherindex.psql_host =", "self.fetcherindex.psql_host = '192.168.4.103' self.fetcherindex.psql_user = 'ubuntu' self.fetcherindex.mongo_host = '192.168.4.103' def gettradingday(self): market_data =", "= 'ubuntu' fetcher.mongo_host = '192.168.4.103' start = '20150101' end = '20180428' get_tradingday =", "'192.168.4.103' start = '20150101' end = '20180428' get_tradingday = GetTradingday(start, end) tradingday_list =", "tradingday_list fetcher = FetchInstrumentDayData() fetcher.psql_host = '192.168.4.103' fetcher.psql_user = 'ubuntu' fetcher.mongo_host = '192.168.4.103'", "def __init__(self, _start, _end, _symbol='a'): self.start = _start self.end = _end self.symbol =", "pricelist.append(data['highprice'][0]) pricelist.append(data['lowprice'][0]) pricelist.append(data['closeprice'][0]) df = pd.DataFrame(pricelist).T df.columns = ['tradingday', 
'openprice', 'highprice', 'lowprice', 'closeprice']", "CTAEqualRiskGARCHPortfolio, InterDayBacktestExecution, InterDayPortfolio) from ParadoxTrading.EngineExt.Futures.Trend import CTAStatusType, CTAStrategy from ParadoxTrading.Fetch.ChineseFutures import (FetchDominantIndex, FetchInstrumentDayData,", "day) data = fetcher.fetchDayData(yesterday, day, domian_instrument) pricelist.append(data['tradingday'][0]) pricelist.append(data['openprice'][0]) pricelist.append(data['highprice'][0]) pricelist.append(data['lowprice'][0]) pricelist.append(data['closeprice'][0]) df =", "= ['oi', 'y'] for symbol in symbollist: data_df = pd.DataFrame() for index in", "ParadoxTrading.Utils import DataStruct class GetTradingday(object): def __init__(self, _start, _end, _symbol='a'): self.start = _start", "get_tradingday.gettradingday() symbollist = ['oi', 'y'] for symbol in symbollist: data_df = pd.DataFrame() for", "_symbol self.fetcherindex = FetchProductIndex() self.fetcherindex.psql_host = '192.168.4.103' self.fetcherindex.psql_user = 'ubuntu' self.fetcherindex.mongo_host = '192.168.4.103'", "InterDayPortfolio) from ParadoxTrading.EngineExt.Futures.Trend import CTAStatusType, CTAStrategy from ParadoxTrading.Fetch.ChineseFutures import (FetchDominantIndex, FetchInstrumentDayData, FetchProductIndex, RegisterIndex)", "= pd.DataFrame() for index in range(1, len(tradingday_list)): pricelist = [] day = tradingday_list[index]", "= pd.DataFrame(pricelist).T df.columns = ['tradingday', 'openprice', 'highprice', 'lowprice', 'closeprice'] data_df = pd.concat([data_df, df])", "import pandas as pd from ParadoxTrading.Chart import Wizard from ParadoxTrading.Engine import (MarketEvent, SettlementEvent,", "RSI, SAR, BBands, StepDrawdownStop) from ParadoxTrading.Utils import DataStruct class GetTradingday(object): def __init__(self, _start,", "start = '20150101' end = '20180428' get_tradingday = GetTradingday(start, end) tradingday_list = get_tradingday.gettradingday()", "data_df = pd.DataFrame() for index in range(1, len(tradingday_list)): pricelist = [] day =", "CCI, EFF, EMA, KDJ, MA, RSI, SAR, BBands, StepDrawdownStop) from ParadoxTrading.Utils import DataStruct", "ParadoxTrading.Indicator import (BIAS, CCI, EFF, EMA, KDJ, MA, RSI, SAR, BBands, StepDrawdownStop) from", "import CTAStatusType, CTAStrategy from ParadoxTrading.Fetch.ChineseFutures import (FetchDominantIndex, FetchInstrumentDayData, FetchProductIndex, RegisterIndex) from ParadoxTrading.Indicator import", "import datetime import logging import math import multiprocessing as mp import os import", "for symbol in symbollist: data_df = pd.DataFrame() for index in range(1, len(tradingday_list)): pricelist", "fetcher.psql_host = '192.168.4.103' fetcher.psql_user = 'ubuntu' fetcher.mongo_host = '192.168.4.103' start = '20150101' end", "index in range(1, len(tradingday_list)): pricelist = [] day = tradingday_list[index] yesterday = tradingday_list[index-1]", "fetcher.fetchDayData(yesterday, day, domian_instrument) pricelist.append(data['tradingday'][0]) pricelist.append(data['openprice'][0]) pricelist.append(data['highprice'][0]) pricelist.append(data['lowprice'][0]) pricelist.append(data['closeprice'][0]) df = pd.DataFrame(pricelist).T df.columns =", "range(1, len(tradingday_list)): pricelist = [] day = tradingday_list[index] yesterday = tradingday_list[index-1] domian_instrument =", "CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, CTAEqualRiskGARCHPortfolio, InterDayBacktestExecution, InterDayPortfolio) from ParadoxTrading.EngineExt.Futures.Trend import 
CTAStatusType, CTAStrategy from ParadoxTrading.Fetch.ChineseFutures import", "_symbol='a'): self.start = _start self.end = _end self.symbol = _symbol self.fetcherindex = FetchProductIndex()", "self.start = _start self.end = _end self.symbol = _symbol self.fetcherindex = FetchProductIndex() self.fetcherindex.psql_host", "GetTradingday(start, end) tradingday_list = get_tradingday.gettradingday() symbollist = ['oi', 'y'] for symbol in symbollist:", "ParadoxTrading.Chart import Wizard from ParadoxTrading.Engine import (MarketEvent, SettlementEvent, StrategyAbstract) from ParadoxTrading.EngineExt.Futures import (BacktestEngine,", "domian_instrument) pricelist.append(data['tradingday'][0]) pricelist.append(data['openprice'][0]) pricelist.append(data['highprice'][0]) pricelist.append(data['lowprice'][0]) pricelist.append(data['closeprice'][0]) df = pd.DataFrame(pricelist).T df.columns = ['tradingday', 'openprice',", "= fetcher.fetchDayData(yesterday, day, domian_instrument) pricelist.append(data['tradingday'][0]) pricelist.append(data['openprice'][0]) pricelist.append(data['highprice'][0]) pricelist.append(data['lowprice'][0]) pricelist.append(data['closeprice'][0]) df = pd.DataFrame(pricelist).T df.columns", "= GetTradingday(start, end) tradingday_list = get_tradingday.gettradingday() symbollist = ['oi', 'y'] for symbol in", "numpy as np import pandas as pd from ParadoxTrading.Chart import Wizard from ParadoxTrading.Engine", "domian_instrument = fetcher.fetchDominant(symbol, day) data = fetcher.fetchDayData(yesterday, day, domian_instrument) pricelist.append(data['tradingday'][0]) pricelist.append(data['openprice'][0]) pricelist.append(data['highprice'][0]) pricelist.append(data['lowprice'][0])", "from ParadoxTrading.EngineExt.Futures.Trend import CTAStatusType, CTAStrategy from ParadoxTrading.Fetch.ChineseFutures import (FetchDominantIndex, FetchInstrumentDayData, FetchProductIndex, RegisterIndex) from", "from ParadoxTrading.Utils import DataStruct class GetTradingday(object): def __init__(self, _start, _end, _symbol='a'): self.start =", "ParadoxTrading.EngineExt.Futures import (BacktestEngine, BacktestMarketSupply, CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, CTAEqualRiskGARCHPortfolio, InterDayBacktestExecution, InterDayPortfolio) from ParadoxTrading.EngineExt.Futures.Trend import CTAStatusType,", "self.start, self.end, self.symbol) tradingday_list = market_data['tradingday'] return tradingday_list fetcher = FetchInstrumentDayData() fetcher.psql_host =", "self.symbol = _symbol self.fetcherindex = FetchProductIndex() self.fetcherindex.psql_host = '192.168.4.103' self.fetcherindex.psql_user = 'ubuntu' self.fetcherindex.mongo_host", "import math import multiprocessing as mp import os import pickle import shutil import", "SettlementEvent, StrategyAbstract) from ParadoxTrading.EngineExt.Futures import (BacktestEngine, BacktestMarketSupply, CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, CTAEqualRiskGARCHPortfolio, InterDayBacktestExecution, InterDayPortfolio) from", "= tradingday_list[index] yesterday = tradingday_list[index-1] domian_instrument = fetcher.fetchDominant(symbol, day) data = fetcher.fetchDayData(yesterday, day,", "__init__(self, _start, _end, _symbol='a'): self.start = _start self.end = _end self.symbol = _symbol", "pricelist.append(data['lowprice'][0]) pricelist.append(data['closeprice'][0]) df = pd.DataFrame(pricelist).T df.columns = ['tradingday', 'openprice', 'highprice', 'lowprice', 'closeprice'] data_df", "= 'ubuntu' self.fetcherindex.mongo_host = '192.168.4.103' def 
gettradingday(self): market_data = self.fetcherindex.fetchDayData( self.start, self.end, self.symbol)", "[] day = tradingday_list[index] yesterday = tradingday_list[index-1] domian_instrument = fetcher.fetchDominant(symbol, day) data =", "KDJ, MA, RSI, SAR, BBands, StepDrawdownStop) from ParadoxTrading.Utils import DataStruct class GetTradingday(object): def", "RegisterIndex) from ParadoxTrading.Indicator import (BIAS, CCI, EFF, EMA, KDJ, MA, RSI, SAR, BBands,", "day, domian_instrument) pricelist.append(data['tradingday'][0]) pricelist.append(data['openprice'][0]) pricelist.append(data['highprice'][0]) pricelist.append(data['lowprice'][0]) pricelist.append(data['closeprice'][0]) df = pd.DataFrame(pricelist).T df.columns = ['tradingday',", "len(tradingday_list)): pricelist = [] day = tradingday_list[index] yesterday = tradingday_list[index-1] domian_instrument = fetcher.fetchDominant(symbol,", "['oi', 'y'] for symbol in symbollist: data_df = pd.DataFrame() for index in range(1,", "in symbollist: data_df = pd.DataFrame() for index in range(1, len(tradingday_list)): pricelist = []", "import (FetchDominantIndex, FetchInstrumentDayData, FetchProductIndex, RegisterIndex) from ParadoxTrading.Indicator import (BIAS, CCI, EFF, EMA, KDJ,", "class GetTradingday(object): def __init__(self, _start, _end, _symbol='a'): self.start = _start self.end = _end", "as np import pandas as pd from ParadoxTrading.Chart import Wizard from ParadoxTrading.Engine import", "from ParadoxTrading.Engine import (MarketEvent, SettlementEvent, StrategyAbstract) from ParadoxTrading.EngineExt.Futures import (BacktestEngine, BacktestMarketSupply, CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio,", "market_data['tradingday'] return tradingday_list fetcher = FetchInstrumentDayData() fetcher.psql_host = '192.168.4.103' fetcher.psql_user = 'ubuntu' fetcher.mongo_host", "pricelist.append(data['tradingday'][0]) pricelist.append(data['openprice'][0]) pricelist.append(data['highprice'][0]) pricelist.append(data['lowprice'][0]) pricelist.append(data['closeprice'][0]) df = pd.DataFrame(pricelist).T df.columns = ['tradingday', 'openprice', 'highprice',", "df = pd.DataFrame(pricelist).T df.columns = ['tradingday', 'openprice', 'highprice', 'lowprice', 'closeprice'] data_df = pd.concat([data_df,", "import (MarketEvent, SettlementEvent, StrategyAbstract) from ParadoxTrading.EngineExt.Futures import (BacktestEngine, BacktestMarketSupply, CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, CTAEqualRiskGARCHPortfolio, InterDayBacktestExecution,", "import (BacktestEngine, BacktestMarketSupply, CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, CTAEqualRiskGARCHPortfolio, InterDayBacktestExecution, InterDayPortfolio) from ParadoxTrading.EngineExt.Futures.Trend import CTAStatusType, CTAStrategy", "MA, RSI, SAR, BBands, StepDrawdownStop) from ParadoxTrading.Utils import DataStruct class GetTradingday(object): def __init__(self,", "= get_tradingday.gettradingday() symbollist = ['oi', 'y'] for symbol in symbollist: data_df = pd.DataFrame()", "as pd from ParadoxTrading.Chart import Wizard from ParadoxTrading.Engine import (MarketEvent, SettlementEvent, StrategyAbstract) from", "datetime import logging import math import multiprocessing as mp import os import pickle", "ParadoxTrading.EngineExt.Futures.Trend import CTAStatusType, CTAStrategy from ParadoxTrading.Fetch.ChineseFutures import (FetchDominantIndex, FetchInstrumentDayData, FetchProductIndex, RegisterIndex) from ParadoxTrading.Indicator", "self.fetcherindex.mongo_host = '192.168.4.103' def 
gettradingday(self): market_data = self.fetcherindex.fetchDayData( self.start, self.end, self.symbol) tradingday_list =", "market_data = self.fetcherindex.fetchDayData( self.start, self.end, self.symbol) tradingday_list = market_data['tradingday'] return tradingday_list fetcher =", "_start self.end = _end self.symbol = _symbol self.fetcherindex = FetchProductIndex() self.fetcherindex.psql_host = '192.168.4.103'", "def gettradingday(self): market_data = self.fetcherindex.fetchDayData( self.start, self.end, self.symbol) tradingday_list = market_data['tradingday'] return tradingday_list", "<filename>QUANTAXIS_Trade/WindowsCTP/test.py<gh_stars>1-10 import datetime import logging import math import multiprocessing as mp import os", "'20180428' get_tradingday = GetTradingday(start, end) tradingday_list = get_tradingday.gettradingday() symbollist = ['oi', 'y'] for", "tradingday_list = market_data['tradingday'] return tradingday_list fetcher = FetchInstrumentDayData() fetcher.psql_host = '192.168.4.103' fetcher.psql_user =", "df.columns = ['tradingday', 'openprice', 'highprice', 'lowprice', 'closeprice'] data_df = pd.concat([data_df, df]) pd.DataFrame.to_csv(data_df, '.\\\\data\\\\{0}.csv'.format(symbol))", "= market_data['tradingday'] return tradingday_list fetcher = FetchInstrumentDayData() fetcher.psql_host = '192.168.4.103' fetcher.psql_user = 'ubuntu'", "tradingday_list[index] yesterday = tradingday_list[index-1] domian_instrument = fetcher.fetchDominant(symbol, day) data = fetcher.fetchDayData(yesterday, day, domian_instrument)", "symbol in symbollist: data_df = pd.DataFrame() for index in range(1, len(tradingday_list)): pricelist =", "ParadoxTrading.Engine import (MarketEvent, SettlementEvent, StrategyAbstract) from ParadoxTrading.EngineExt.Futures import (BacktestEngine, BacktestMarketSupply, CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, CTAEqualRiskGARCHPortfolio,", "fetcher.mongo_host = '192.168.4.103' start = '20150101' end = '20180428' get_tradingday = GetTradingday(start, end)", "day = tradingday_list[index] yesterday = tradingday_list[index-1] domian_instrument = fetcher.fetchDominant(symbol, day) data = fetcher.fetchDayData(yesterday,", "pd from ParadoxTrading.Chart import Wizard from ParadoxTrading.Engine import (MarketEvent, SettlementEvent, StrategyAbstract) from ParadoxTrading.EngineExt.Futures", "= _symbol self.fetcherindex = FetchProductIndex() self.fetcherindex.psql_host = '192.168.4.103' self.fetcherindex.psql_user = 'ubuntu' self.fetcherindex.mongo_host =", "= FetchProductIndex() self.fetcherindex.psql_host = '192.168.4.103' self.fetcherindex.psql_user = 'ubuntu' self.fetcherindex.mongo_host = '192.168.4.103' def gettradingday(self):", "from ParadoxTrading.Fetch.ChineseFutures import (FetchDominantIndex, FetchInstrumentDayData, FetchProductIndex, RegisterIndex) from ParadoxTrading.Indicator import (BIAS, CCI, EFF,", "mp import os import pickle import shutil import numpy as np import pandas", "= '192.168.4.103' def gettradingday(self): market_data = self.fetcherindex.fetchDayData( self.start, self.end, self.symbol) tradingday_list = market_data['tradingday']", "end = '20180428' get_tradingday = GetTradingday(start, end) tradingday_list = get_tradingday.gettradingday() symbollist = ['oi',", "pricelist.append(data['openprice'][0]) pricelist.append(data['highprice'][0]) pricelist.append(data['lowprice'][0]) pricelist.append(data['closeprice'][0]) df = pd.DataFrame(pricelist).T df.columns = ['tradingday', 'openprice', 'highprice', 'lowprice',", "self.fetcherindex = 
FetchProductIndex() self.fetcherindex.psql_host = '192.168.4.103' self.fetcherindex.psql_user = 'ubuntu' self.fetcherindex.mongo_host = '192.168.4.103' def", "self.symbol) tradingday_list = market_data['tradingday'] return tradingday_list fetcher = FetchInstrumentDayData() fetcher.psql_host = '192.168.4.103' fetcher.psql_user", "CTAStatusType, CTAStrategy from ParadoxTrading.Fetch.ChineseFutures import (FetchDominantIndex, FetchInstrumentDayData, FetchProductIndex, RegisterIndex) from ParadoxTrading.Indicator import (BIAS,", "tradingday_list = get_tradingday.gettradingday() symbollist = ['oi', 'y'] for symbol in symbollist: data_df =", "gettradingday(self): market_data = self.fetcherindex.fetchDayData( self.start, self.end, self.symbol) tradingday_list = market_data['tradingday'] return tradingday_list fetcher", "= '20180428' get_tradingday = GetTradingday(start, end) tradingday_list = get_tradingday.gettradingday() symbollist = ['oi', 'y']", "multiprocessing as mp import os import pickle import shutil import numpy as np", "for index in range(1, len(tradingday_list)): pricelist = [] day = tradingday_list[index] yesterday =", "math import multiprocessing as mp import os import pickle import shutil import numpy", "import logging import math import multiprocessing as mp import os import pickle import", "FetchProductIndex, RegisterIndex) from ParadoxTrading.Indicator import (BIAS, CCI, EFF, EMA, KDJ, MA, RSI, SAR,", "'ubuntu' fetcher.mongo_host = '192.168.4.103' start = '20150101' end = '20180428' get_tradingday = GetTradingday(start,", "EFF, EMA, KDJ, MA, RSI, SAR, BBands, StepDrawdownStop) from ParadoxTrading.Utils import DataStruct class", "pricelist.append(data['closeprice'][0]) df = pd.DataFrame(pricelist).T df.columns = ['tradingday', 'openprice', 'highprice', 'lowprice', 'closeprice'] data_df =", "in range(1, len(tradingday_list)): pricelist = [] day = tradingday_list[index] yesterday = tradingday_list[index-1] domian_instrument", "DataStruct class GetTradingday(object): def __init__(self, _start, _end, _symbol='a'): self.start = _start self.end =", "logging import math import multiprocessing as mp import os import pickle import shutil", "self.end = _end self.symbol = _symbol self.fetcherindex = FetchProductIndex() self.fetcherindex.psql_host = '192.168.4.103' self.fetcherindex.psql_user", "_end self.symbol = _symbol self.fetcherindex = FetchProductIndex() self.fetcherindex.psql_host = '192.168.4.103' self.fetcherindex.psql_user = 'ubuntu'", "= '192.168.4.103' self.fetcherindex.psql_user = 'ubuntu' self.fetcherindex.mongo_host = '192.168.4.103' def gettradingday(self): market_data = self.fetcherindex.fetchDayData(", "_start, _end, _symbol='a'): self.start = _start self.end = _end self.symbol = _symbol self.fetcherindex", "'ubuntu' self.fetcherindex.mongo_host = '192.168.4.103' def gettradingday(self): market_data = self.fetcherindex.fetchDayData( self.start, self.end, self.symbol) tradingday_list", "Wizard from ParadoxTrading.Engine import (MarketEvent, SettlementEvent, StrategyAbstract) from ParadoxTrading.EngineExt.Futures import (BacktestEngine, BacktestMarketSupply, CTAEqualFundPortfolio,", "import os import pickle import shutil import numpy as np import pandas as", "= FetchInstrumentDayData() fetcher.psql_host = '192.168.4.103' fetcher.psql_user = 'ubuntu' fetcher.mongo_host = '192.168.4.103' start =", "self.end, self.symbol) tradingday_list = market_data['tradingday'] return tradingday_list fetcher = FetchInstrumentDayData() fetcher.psql_host = '192.168.4.103'", "= '192.168.4.103' 
fetcher.psql_user = 'ubuntu' fetcher.mongo_host = '192.168.4.103' start = '20150101' end =", "BacktestMarketSupply, CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, CTAEqualRiskGARCHPortfolio, InterDayBacktestExecution, InterDayPortfolio) from ParadoxTrading.EngineExt.Futures.Trend import CTAStatusType, CTAStrategy from ParadoxTrading.Fetch.ChineseFutures", "(MarketEvent, SettlementEvent, StrategyAbstract) from ParadoxTrading.EngineExt.Futures import (BacktestEngine, BacktestMarketSupply, CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, CTAEqualRiskGARCHPortfolio, InterDayBacktestExecution, InterDayPortfolio)", "StepDrawdownStop) from ParadoxTrading.Utils import DataStruct class GetTradingday(object): def __init__(self, _start, _end, _symbol='a'): self.start", "EMA, KDJ, MA, RSI, SAR, BBands, StepDrawdownStop) from ParadoxTrading.Utils import DataStruct class GetTradingday(object):", "os import pickle import shutil import numpy as np import pandas as pd", "shutil import numpy as np import pandas as pd from ParadoxTrading.Chart import Wizard", "symbollist: data_df = pd.DataFrame() for index in range(1, len(tradingday_list)): pricelist = [] day", "FetchInstrumentDayData, FetchProductIndex, RegisterIndex) from ParadoxTrading.Indicator import (BIAS, CCI, EFF, EMA, KDJ, MA, RSI,", "end) tradingday_list = get_tradingday.gettradingday() symbollist = ['oi', 'y'] for symbol in symbollist: data_df", "= [] day = tradingday_list[index] yesterday = tradingday_list[index-1] domian_instrument = fetcher.fetchDominant(symbol, day) data", "= fetcher.fetchDominant(symbol, day) data = fetcher.fetchDayData(yesterday, day, domian_instrument) pricelist.append(data['tradingday'][0]) pricelist.append(data['openprice'][0]) pricelist.append(data['highprice'][0]) pricelist.append(data['lowprice'][0]) pricelist.append(data['closeprice'][0])", "CTAStrategy from ParadoxTrading.Fetch.ChineseFutures import (FetchDominantIndex, FetchInstrumentDayData, FetchProductIndex, RegisterIndex) from ParadoxTrading.Indicator import (BIAS, CCI,", "fetcher.fetchDominant(symbol, day) data = fetcher.fetchDayData(yesterday, day, domian_instrument) pricelist.append(data['tradingday'][0]) pricelist.append(data['openprice'][0]) pricelist.append(data['highprice'][0]) pricelist.append(data['lowprice'][0]) pricelist.append(data['closeprice'][0]) df", "pickle import shutil import numpy as np import pandas as pd from ParadoxTrading.Chart", "CTAEqualRiskATRPortfolio, CTAEqualRiskGARCHPortfolio, InterDayBacktestExecution, InterDayPortfolio) from ParadoxTrading.EngineExt.Futures.Trend import CTAStatusType, CTAStrategy from ParadoxTrading.Fetch.ChineseFutures import (FetchDominantIndex,", "fetcher = FetchInstrumentDayData() fetcher.psql_host = '192.168.4.103' fetcher.psql_user = 'ubuntu' fetcher.mongo_host = '192.168.4.103' start", "'192.168.4.103' fetcher.psql_user = 'ubuntu' fetcher.mongo_host = '192.168.4.103' start = '20150101' end = '20180428'", "pricelist = [] day = tradingday_list[index] yesterday = tradingday_list[index-1] domian_instrument = fetcher.fetchDominant(symbol, day)", "from ParadoxTrading.EngineExt.Futures import (BacktestEngine, BacktestMarketSupply, CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, CTAEqualRiskGARCHPortfolio, InterDayBacktestExecution, InterDayPortfolio) from ParadoxTrading.EngineExt.Futures.Trend import", "from ParadoxTrading.Indicator import (BIAS, CCI, EFF, EMA, KDJ, MA, RSI, SAR, BBands, StepDrawdownStop)", "StrategyAbstract) from ParadoxTrading.EngineExt.Futures import (BacktestEngine, BacktestMarketSupply, 
CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, CTAEqualRiskGARCHPortfolio, InterDayBacktestExecution, InterDayPortfolio) from ParadoxTrading.EngineExt.Futures.Trend", "_end, _symbol='a'): self.start = _start self.end = _end self.symbol = _symbol self.fetcherindex =", "= '192.168.4.103' start = '20150101' end = '20180428' get_tradingday = GetTradingday(start, end) tradingday_list", "= '20150101' end = '20180428' get_tradingday = GetTradingday(start, end) tradingday_list = get_tradingday.gettradingday() symbollist", "'192.168.4.103' self.fetcherindex.psql_user = 'ubuntu' self.fetcherindex.mongo_host = '192.168.4.103' def gettradingday(self): market_data = self.fetcherindex.fetchDayData( self.start,", "ParadoxTrading.Fetch.ChineseFutures import (FetchDominantIndex, FetchInstrumentDayData, FetchProductIndex, RegisterIndex) from ParadoxTrading.Indicator import (BIAS, CCI, EFF, EMA,", "self.fetcherindex.fetchDayData( self.start, self.end, self.symbol) tradingday_list = market_data['tradingday'] return tradingday_list fetcher = FetchInstrumentDayData() fetcher.psql_host", "FetchInstrumentDayData() fetcher.psql_host = '192.168.4.103' fetcher.psql_user = 'ubuntu' fetcher.mongo_host = '192.168.4.103' start = '20150101'", "pd.DataFrame(pricelist).T df.columns = ['tradingday', 'openprice', 'highprice', 'lowprice', 'closeprice'] data_df = pd.concat([data_df, df]) pd.DataFrame.to_csv(data_df,", "tradingday_list[index-1] domian_instrument = fetcher.fetchDominant(symbol, day) data = fetcher.fetchDayData(yesterday, day, domian_instrument) pricelist.append(data['tradingday'][0]) pricelist.append(data['openprice'][0]) pricelist.append(data['highprice'][0])", "(BIAS, CCI, EFF, EMA, KDJ, MA, RSI, SAR, BBands, StepDrawdownStop) from ParadoxTrading.Utils import", "symbollist = ['oi', 'y'] for symbol in symbollist: data_df = pd.DataFrame() for index", "fetcher.psql_user = 'ubuntu' fetcher.mongo_host = '192.168.4.103' start = '20150101' end = '20180428' get_tradingday", "import Wizard from ParadoxTrading.Engine import (MarketEvent, SettlementEvent, StrategyAbstract) from ParadoxTrading.EngineExt.Futures import (BacktestEngine, BacktestMarketSupply,", "import DataStruct class GetTradingday(object): def __init__(self, _start, _end, _symbol='a'): self.start = _start self.end", "= tradingday_list[index-1] domian_instrument = fetcher.fetchDominant(symbol, day) data = fetcher.fetchDayData(yesterday, day, domian_instrument) pricelist.append(data['tradingday'][0]) pricelist.append(data['openprice'][0])", "FetchProductIndex() self.fetcherindex.psql_host = '192.168.4.103' self.fetcherindex.psql_user = 'ubuntu' self.fetcherindex.mongo_host = '192.168.4.103' def gettradingday(self): market_data", "InterDayBacktestExecution, InterDayPortfolio) from ParadoxTrading.EngineExt.Futures.Trend import CTAStatusType, CTAStrategy from ParadoxTrading.Fetch.ChineseFutures import (FetchDominantIndex, FetchInstrumentDayData, FetchProductIndex,", "(FetchDominantIndex, FetchInstrumentDayData, FetchProductIndex, RegisterIndex) from ParadoxTrading.Indicator import (BIAS, CCI, EFF, EMA, KDJ, MA,", "data = fetcher.fetchDayData(yesterday, day, domian_instrument) pricelist.append(data['tradingday'][0]) pricelist.append(data['openprice'][0]) pricelist.append(data['highprice'][0]) pricelist.append(data['lowprice'][0]) pricelist.append(data['closeprice'][0]) df = pd.DataFrame(pricelist).T", "'y'] for symbol in symbollist: data_df = pd.DataFrame() for index in range(1, len(tradingday_list)):", "as mp import os import pickle import shutil import numpy 
as np import", "import shutil import numpy as np import pandas as pd from ParadoxTrading.Chart import", "BBands, StepDrawdownStop) from ParadoxTrading.Utils import DataStruct class GetTradingday(object): def __init__(self, _start, _end, _symbol='a'):", "import numpy as np import pandas as pd from ParadoxTrading.Chart import Wizard from" ]